author      Alexey Ushakov <alexey.ushakov@jetbrains.com>   2016-02-15 14:13:35 +0300
committer   Alexey Ushakov <alexey.ushakov@jetbrains.com>   2016-02-15 14:13:35 +0300
commit      7d0e3b05cb04908a0edf82e3f511f8232089ed94 (patch)
tree        b078996391da979517a32ccb3d02099159a6c983
parent      24a09c463b2d328b6fc90ee555023514efca507c (diff)
parent      1a26ca0e98f70c3aa490787dfeca7358fa949a43 (diff)
download    jdk8u_hotspot-jetbrains-master-mirror-osx.tar.gz

Merged changes from default (jetbrains-master-mirror-osx)
--HG--
branch : 8u40-verified-fixes
-rw-r--r--  .hgtags | 209
-rw-r--r--  THIRD_PARTY_README | 45
-rw-r--r--  agent/src/os/linux/LinuxDebuggerLocal.c | 7
-rw-r--r--  agent/src/os/linux/libproc.h | 7
-rw-r--r--  agent/src/os/linux/ps_proc.c | 27
-rw-r--r--  agent/src/os/solaris/proc/saproc.cpp | 14
-rw-r--r--  agent/src/share/classes/com/sun/java/swing/action/ActionManager.java | 7
-rw-r--r--  agent/src/share/classes/com/sun/java/swing/ui/CommonToolBar.java | 2
-rw-r--r--  agent/src/share/classes/com/sun/java/swing/ui/CommonUI.java | 35
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java | 12
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java | 47
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/gc_interface/G1YCType.java | 45
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCCause.java | 69
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCName.java | 50
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCWhen.java | 45
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/gc_interface/ReferenceType.java | 45
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/memory/AdaptiveFreeList.java | 77
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java | 38
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/memory/FreeList.java | 72
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java | 20
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java | 12
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java | 14
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/opto/CompilerPhaseType.java | 67
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/runtime/Flags.java | 48
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/runtime/JavaThread.java | 2
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java | 9
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/runtime/VMOps.java | 86
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java | 23
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/ui/action/HSDBActionManager.java | 8
-rw-r--r--  agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java | 14
-rw-r--r--  make/aix/makefiles/adlc.make | 8
-rw-r--r--  make/aix/makefiles/ppc64.make | 13
-rw-r--r--  make/aix/makefiles/xlc.make | 18
-rw-r--r--  make/bsd/makefiles/gcc.make | 2
-rw-r--r--  make/bsd/makefiles/sa.make | 4
-rw-r--r--  make/bsd/makefiles/saproc.make | 3
-rw-r--r--  make/defs.make | 25
-rw-r--r--  make/hotspot_version | 6
-rw-r--r--  make/linux/Makefile | 12
-rw-r--r--  make/linux/makefiles/arm.make | 31
-rw-r--r--  make/linux/makefiles/build_vm_def.sh | 16
-rw-r--r--  make/linux/makefiles/buildtree.make | 6
-rw-r--r--  make/linux/makefiles/defs.make | 56
-rw-r--r--  make/linux/makefiles/dtrace.make | 4
-rw-r--r--  make/linux/makefiles/gcc.make | 32
-rw-r--r--  make/linux/makefiles/ppc.make | 33
-rw-r--r--  make/linux/makefiles/saproc.make | 8
-rw-r--r--  make/linux/makefiles/vm.make | 15
-rw-r--r--  make/linux/platform_arm | 17
-rw-r--r--  make/linux/platform_ppc | 17
-rw-r--r--  make/solaris/makefiles/buildtree.make | 3
-rw-r--r--  make/solaris/makefiles/sa.make | 6
-rw-r--r--  make/solaris/makefiles/trace.make | 8
-rw-r--r--  make/solaris/makefiles/vm.make | 2
-rw-r--r--  make/windows/makefiles/sa.make | 18
-rw-r--r--  src/cpu/ppc/vm/frame_ppc.cpp | 7
-rw-r--r--  src/cpu/ppc/vm/globals_ppc.hpp | 2
-rw-r--r--  src/cpu/ppc/vm/interpreter_ppc.cpp | 15
-rw-r--r--  src/cpu/ppc/vm/macroAssembler_ppc.cpp | 8
-rw-r--r--  src/cpu/ppc/vm/ppc.ad | 11
-rw-r--r--  src/cpu/ppc/vm/sharedRuntime_ppc.cpp | 19
-rw-r--r--  src/cpu/sparc/vm/frame_sparc.cpp | 41
-rw-r--r--  src/cpu/sparc/vm/frame_sparc.hpp | 2
-rw-r--r--  src/cpu/sparc/vm/globals_sparc.hpp | 2
-rw-r--r--  src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp | 7
-rw-r--r--  src/cpu/sparc/vm/sparc.ad | 12
-rw-r--r--  src/cpu/sparc/vm/vm_version_sparc.cpp | 4
-rw-r--r--  src/cpu/sparc/vm/vm_version_sparc.hpp | 6
-rw-r--r--  src/cpu/x86/vm/assembler_x86.hpp | 6
-rw-r--r--  src/cpu/x86/vm/c1_FrameMap_x86.cpp | 7
-rw-r--r--  src/cpu/x86/vm/c1_MacroAssembler_x86.cpp | 3
-rw-r--r--  src/cpu/x86/vm/c1_Runtime1_x86.cpp | 12
-rw-r--r--  src/cpu/x86/vm/frame_x86.cpp | 37
-rw-r--r--  src/cpu/x86/vm/frame_x86.hpp | 15
-rw-r--r--  src/cpu/x86/vm/frame_x86.inline.hpp | 8
-rw-r--r--  src/cpu/x86/vm/globals_x86.hpp | 2
-rw-r--r--  src/cpu/x86/vm/macroAssembler_x86.cpp | 27
-rw-r--r--  src/cpu/x86/vm/methodHandles_x86.cpp | 2
-rw-r--r--  src/cpu/x86/vm/runtime_x86_32.cpp | 4
-rw-r--r--  src/cpu/x86/vm/sharedRuntime_x86_64.cpp | 12
-rw-r--r--  src/cpu/x86/vm/stubGenerator_x86_32.cpp | 3
-rw-r--r--  src/cpu/x86/vm/stubGenerator_x86_64.cpp | 6
-rw-r--r--  src/cpu/x86/vm/vm_version_x86.hpp | 8
-rw-r--r--  src/cpu/x86/vm/x86.ad | 15
-rw-r--r--  src/cpu/x86/vm/x86_32.ad | 137
-rw-r--r--  src/cpu/x86/vm/x86_64.ad | 478
-rw-r--r--  src/cpu/zero/vm/cppInterpreter_zero.cpp | 2
-rw-r--r--  src/cpu/zero/vm/frame_zero.cpp | 11
-rw-r--r--  src/cpu/zero/vm/frame_zero.inline.hpp | 2
-rw-r--r--  src/cpu/zero/vm/globals_zero.hpp | 2
-rw-r--r--  src/cpu/zero/vm/stack_zero.cpp | 4
-rw-r--r--  src/cpu/zero/vm/stack_zero.inline.hpp | 6
-rw-r--r--  src/os/aix/vm/os_aix.cpp | 12
-rw-r--r--  src/os/aix/vm/perfMemory_aix.cpp | 33
-rw-r--r--  src/os/bsd/dtrace/hotspot.d | 6
-rw-r--r--  src/os/bsd/vm/jsig.c | 34
-rw-r--r--  src/os/bsd/vm/os_bsd.cpp | 22
-rw-r--r--  src/os/bsd/vm/perfMemory_bsd.cpp | 27
-rw-r--r--  src/os/linux/vm/os_linux.cpp | 207
-rw-r--r--  src/os/linux/vm/perfMemory_linux.cpp | 27
-rw-r--r--  src/os/solaris/vm/jvm_solaris.h | 4
-rw-r--r--  src/os/solaris/vm/os_solaris.cpp | 198
-rw-r--r--  src/os/solaris/vm/os_solaris.hpp | 2
-rw-r--r--  src/os/solaris/vm/perfMemory_solaris.cpp | 27
-rw-r--r--  src/os/solaris/vm/thread_solaris.inline.hpp | 19
-rw-r--r--  src/os/windows/vm/os_windows.cpp | 32
-rw-r--r--  src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp | 12
-rw-r--r--  src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp | 12
-rw-r--r--  src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp | 35
-rw-r--r--  src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp | 16
-rw-r--r--  src/os_cpu/linux_zero/vm/os_linux_zero.cpp | 4
-rw-r--r--  src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp | 2
-rw-r--r--  src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.cpp | 27
-rw-r--r--  src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.hpp | 44
-rw-r--r--  src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp | 112
-rw-r--r--  src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp | 115
-rw-r--r--  src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.cpp | 164
-rw-r--r--  src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.hpp | 56
-rw-r--r--  src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java | 68
-rw-r--r--  src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/UncommonTrapEvent.java | 4
-rw-r--r--  src/share/vm/adlc/adlparse.cpp | 94
-rw-r--r--  src/share/vm/adlc/adlparse.hpp | 3
-rw-r--r--  src/share/vm/adlc/archDesc.cpp | 2
-rw-r--r--  src/share/vm/adlc/forms.hpp | 2
-rw-r--r--  src/share/vm/adlc/formsopt.cpp | 111
-rw-r--r--  src/share/vm/adlc/formsopt.hpp | 127
-rw-r--r--  src/share/vm/adlc/formssel.hpp | 2
-rw-r--r--  src/share/vm/adlc/output_c.cpp | 47
-rw-r--r--  src/share/vm/c1/c1_GraphBuilder.cpp | 2
-rw-r--r--  src/share/vm/c1/c1_LIR.cpp | 14
-rw-r--r--  src/share/vm/c1/c1_LIR.hpp | 32
-rw-r--r--  src/share/vm/c1/c1_LIRGenerator.cpp | 65
-rw-r--r--  src/share/vm/c1/c1_LIRGenerator.hpp | 9
-rw-r--r--  src/share/vm/c1/c1_LinearScan.cpp | 6
-rw-r--r--  src/share/vm/c1/c1_Runtime1.cpp | 4
-rw-r--r--  src/share/vm/c1/c1_ValueType.cpp | 14
-rw-r--r--  src/share/vm/ci/bcEscapeAnalyzer.cpp | 8
-rw-r--r--  src/share/vm/ci/ciField.cpp | 8
-rw-r--r--  src/share/vm/ci/ciMethod.cpp | 11
-rw-r--r--  src/share/vm/ci/ciMethod.hpp | 11
-rw-r--r--  src/share/vm/ci/ciTypeFlow.cpp | 1
-rw-r--r--  src/share/vm/classfile/classFileParser.cpp | 16
-rw-r--r--  src/share/vm/classfile/classFileParser.hpp | 1
-rw-r--r--  src/share/vm/classfile/classLoader.cpp | 14
-rw-r--r--  src/share/vm/classfile/classLoader.hpp | 2
-rw-r--r--  src/share/vm/classfile/classLoaderData.cpp | 32
-rw-r--r--  src/share/vm/classfile/classLoaderData.hpp | 2
-rw-r--r--  src/share/vm/classfile/defaultMethods.cpp | 12
-rw-r--r--  src/share/vm/classfile/javaClasses.cpp | 139
-rw-r--r--  src/share/vm/classfile/javaClasses.hpp | 18
-rw-r--r--  src/share/vm/classfile/sharedPathsMiscInfo.cpp | 4
-rw-r--r--  src/share/vm/classfile/sharedPathsMiscInfo.hpp | 4
-rw-r--r--  src/share/vm/classfile/stackMapTable.cpp | 1
-rw-r--r--  src/share/vm/classfile/systemDictionary.cpp | 50
-rw-r--r--  src/share/vm/classfile/verifier.cpp | 44
-rw-r--r--  src/share/vm/classfile/verifier.hpp | 5
-rw-r--r--  src/share/vm/classfile/vmSymbols.hpp | 17
-rw-r--r--  src/share/vm/code/codeCache.cpp | 39
-rw-r--r--  src/share/vm/code/codeCache.hpp | 1
-rw-r--r--  src/share/vm/code/compiledIC.cpp | 22
-rw-r--r--  src/share/vm/code/compiledIC.hpp | 3
-rw-r--r--  src/share/vm/code/dependencies.cpp | 8
-rw-r--r--  src/share/vm/code/nmethod.cpp | 32
-rw-r--r--  src/share/vm/code/nmethod.hpp | 3
-rw-r--r--  src/share/vm/code/vmreg.hpp | 24
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp | 29
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp | 6
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp | 4
-rw-r--r--  src/share/vm/gc_implementation/g1/concurrentMark.cpp | 207
-rw-r--r--  src/share/vm/gc_implementation/g1/concurrentMark.hpp | 33
-rw-r--r--  src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp | 146
-rw-r--r--  src/share/vm/gc_implementation/g1/g1AllocRegion.cpp | 10
-rw-r--r--  src/share/vm/gc_implementation/g1/g1Allocator.cpp | 51
-rw-r--r--  src/share/vm/gc_implementation/g1/g1Allocator.hpp | 81
-rw-r--r--  src/share/vm/gc_implementation/g1/g1BiasedArray.hpp | 2
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp | 761
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp | 204
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp | 61
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp | 56
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp | 30
-rw-r--r--  src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp | 554
-rw-r--r--  src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp | 267
-rw-r--r--  src/share/vm/gc_implementation/g1/g1HotCardCache.cpp | 99
-rw-r--r--  src/share/vm/gc_implementation/g1/g1HotCardCache.hpp | 47
-rw-r--r--  src/share/vm/gc_implementation/g1/g1InCSetState.hpp | 132
-rw-r--r--  src/share/vm/gc_implementation/g1/g1Log.hpp | 6
-rw-r--r--  src/share/vm/gc_implementation/g1/g1MarkSweep.cpp | 37
-rw-r--r--  src/share/vm/gc_implementation/g1/g1OopClosures.hpp | 8
-rw-r--r--  src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp | 6
-rw-r--r--  src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp | 165
-rw-r--r--  src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp | 57
-rw-r--r--  src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp | 164
-rw-r--r--  src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp | 36
-rw-r--r--  src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp | 17
-rw-r--r--  src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp | 64
-rw-r--r--  src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp | 21
-rw-r--r--  src/share/vm/gc_implementation/g1/g1RemSet.cpp | 66
-rw-r--r--  src/share/vm/gc_implementation/g1/g1RemSet.hpp | 8
-rw-r--r--  src/share/vm/gc_implementation/g1/g1RootProcessor.cpp | 339
-rw-r--r--  src/share/vm/gc_implementation/g1/g1RootProcessor.hpp | 121
-rw-r--r--  src/share/vm/gc_implementation/g1/g1StringDedup.cpp | 42
-rw-r--r--  src/share/vm/gc_implementation/g1/g1StringDedup.hpp | 3
-rw-r--r--  src/share/vm/gc_implementation/g1/g1_globals.hpp | 16
-rw-r--r--  src/share/vm/gc_implementation/g1/heapRegion.cpp | 178
-rw-r--r--  src/share/vm/gc_implementation/g1/heapRegion.hpp | 58
-rw-r--r--  src/share/vm/gc_implementation/g1/heapRegionManager.cpp | 20
-rw-r--r--  src/share/vm/gc_implementation/g1/heapRegionManager.hpp | 5
-rw-r--r--  src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp | 12
-rw-r--r--  src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp | 8
-rw-r--r--  src/share/vm/gc_implementation/g1/heapRegionSet.cpp | 1
-rw-r--r--  src/share/vm/gc_implementation/g1/satbQueue.cpp | 185
-rw-r--r--  src/share/vm/gc_implementation/g1/satbQueue.hpp | 72
-rw-r--r--  src/share/vm/gc_implementation/g1/vm_operations_g1.cpp | 33
-rw-r--r--  src/share/vm/gc_implementation/g1/vm_operations_g1.hpp | 25
-rw-r--r--  src/share/vm/gc_implementation/parNew/parNewGeneration.cpp | 8
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp | 7
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp | 2
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp | 8
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp | 1
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp | 3
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp | 7
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp | 1
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp | 18
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp | 15
-rw-r--r--  src/share/vm/gc_implementation/shared/ageTable.hpp | 5
-rw-r--r--  src/share/vm/gc_implementation/shared/gcTraceTime.cpp | 6
-rw-r--r--  src/share/vm/gc_implementation/shared/mutableSpace.cpp | 4
-rw-r--r--  src/share/vm/gc_implementation/shared/vmGCOperations.cpp | 26
-rw-r--r--  src/share/vm/gc_implementation/shared/vmGCOperations.hpp | 67
-rw-r--r--  src/share/vm/gc_interface/allocTracer.cpp | 12
-rw-r--r--  src/share/vm/gc_interface/allocTracer.hpp | 3
-rw-r--r--  src/share/vm/gc_interface/gcCause.cpp | 3
-rw-r--r--  src/share/vm/gc_interface/gcCause.hpp | 1
-rw-r--r--  src/share/vm/interpreter/abstractInterpreter.hpp | 21
-rw-r--r--  src/share/vm/interpreter/bytecodeInterpreter.cpp | 2
-rw-r--r--  src/share/vm/interpreter/bytecodes.hpp | 3
-rw-r--r--  src/share/vm/interpreter/interpreterRuntime.cpp | 6
-rw-r--r--  src/share/vm/interpreter/linkResolver.cpp | 73
-rw-r--r--  src/share/vm/interpreter/linkResolver.hpp | 8
-rw-r--r--  src/share/vm/interpreter/oopMapCache.cpp | 4
-rw-r--r--  src/share/vm/interpreter/oopMapCache.hpp | 11
-rw-r--r--  src/share/vm/interpreter/templateTable.hpp | 43
-rw-r--r--  src/share/vm/memory/allocation.inline.hpp | 7
-rw-r--r--  src/share/vm/memory/collectorPolicy.cpp | 6
-rw-r--r--  src/share/vm/memory/defNewGeneration.cpp | 2
-rw-r--r--  src/share/vm/memory/genCollectedHeap.cpp | 200
-rw-r--r--  src/share/vm/memory/genCollectedHeap.hpp | 22
-rw-r--r--  src/share/vm/memory/genMarkSweep.cpp | 6
-rw-r--r--  src/share/vm/memory/generation.hpp | 4
-rw-r--r--  src/share/vm/memory/guardedMemory.hpp | 8
-rw-r--r--  src/share/vm/memory/heap.cpp | 8
-rw-r--r--  src/share/vm/memory/referenceProcessor.cpp | 2
-rw-r--r--  src/share/vm/memory/sharedHeap.cpp | 215
-rw-r--r--  src/share/vm/memory/sharedHeap.hpp | 78
-rw-r--r--  src/share/vm/memory/tenuredGeneration.cpp | 6
-rw-r--r--  src/share/vm/memory/universe.cpp | 58
-rw-r--r--  src/share/vm/memory/universe.hpp | 8
-rw-r--r--  src/share/vm/oops/constMethod.hpp | 6
-rw-r--r--  src/share/vm/oops/cpCache.cpp | 69
-rw-r--r--  src/share/vm/oops/cpCache.hpp | 9
-rw-r--r--  src/share/vm/oops/instanceKlass.cpp | 144
-rw-r--r--  src/share/vm/oops/instanceKlass.hpp | 20
-rw-r--r--  src/share/vm/oops/klass.cpp | 3
-rw-r--r--  src/share/vm/oops/klassVtable.cpp | 164
-rw-r--r--  src/share/vm/oops/klassVtable.hpp | 8
-rw-r--r--  src/share/vm/oops/markOop.cpp | 33
-rw-r--r--  src/share/vm/oops/method.cpp | 4
-rw-r--r--  src/share/vm/oops/method.hpp | 43
-rw-r--r--  src/share/vm/opto/block.cpp | 2
-rw-r--r--  src/share/vm/opto/bytecodeInfo.cpp | 8
-rw-r--r--  src/share/vm/opto/c2_globals.hpp | 6
-rw-r--r--  src/share/vm/opto/c2compiler.cpp | 24
-rw-r--r--  src/share/vm/opto/callGenerator.cpp | 3
-rw-r--r--  src/share/vm/opto/callnode.cpp | 67
-rw-r--r--  src/share/vm/opto/callnode.hpp | 24
-rw-r--r--  src/share/vm/opto/cfgnode.cpp | 2
-rw-r--r--  src/share/vm/opto/chaitin.cpp | 7
-rw-r--r--  src/share/vm/opto/chaitin.hpp | 26
-rw-r--r--  src/share/vm/opto/classes.hpp | 1
-rw-r--r--  src/share/vm/opto/compile.cpp | 45
-rw-r--r--  src/share/vm/opto/compile.hpp | 6
-rw-r--r--  src/share/vm/opto/connode.cpp | 24
-rw-r--r--  src/share/vm/opto/connode.hpp | 25
-rw-r--r--  src/share/vm/opto/doCall.cpp | 11
-rw-r--r--  src/share/vm/opto/domgraph.cpp | 4
-rw-r--r--  src/share/vm/opto/escape.cpp | 12
-rw-r--r--  src/share/vm/opto/gcm.cpp | 30
-rw-r--r--  src/share/vm/opto/graphKit.cpp | 35
-rw-r--r--  src/share/vm/opto/graphKit.hpp | 22
-rw-r--r--  src/share/vm/opto/lcm.cpp | 33
-rw-r--r--  src/share/vm/opto/library_call.cpp | 88
-rw-r--r--  src/share/vm/opto/locknode.hpp | 24
-rw-r--r--  src/share/vm/opto/loopPredicate.cpp | 8
-rw-r--r--  src/share/vm/opto/loopTransform.cpp | 11
-rw-r--r--  src/share/vm/opto/loopnode.cpp | 137
-rw-r--r--  src/share/vm/opto/loopnode.hpp | 5
-rw-r--r--  src/share/vm/opto/machnode.hpp | 23
-rw-r--r--  src/share/vm/opto/macro.cpp | 75
-rw-r--r--  src/share/vm/opto/matcher.cpp | 36
-rw-r--r--  src/share/vm/opto/memnode.cpp | 40
-rw-r--r--  src/share/vm/opto/memnode.hpp | 129
-rw-r--r--  src/share/vm/opto/node.cpp | 37
-rw-r--r--  src/share/vm/opto/node.hpp | 16
-rw-r--r--  src/share/vm/opto/output.cpp | 2
-rw-r--r--  src/share/vm/opto/output.hpp | 24
-rw-r--r--  src/share/vm/opto/parse.hpp | 4
-rw-r--r--  src/share/vm/opto/parse2.cpp | 72
-rw-r--r--  src/share/vm/opto/parse3.cpp | 2
-rw-r--r--  src/share/vm/opto/phase.cpp | 9
-rw-r--r--  src/share/vm/opto/phase.hpp | 35
-rw-r--r--  src/share/vm/opto/phaseX.cpp | 116
-rw-r--r--  src/share/vm/opto/phaseX.hpp | 20
-rw-r--r--  src/share/vm/opto/postaloc.cpp | 103
-rw-r--r--  src/share/vm/opto/regmask.cpp | 24
-rw-r--r--  src/share/vm/opto/regmask.hpp | 24
-rw-r--r--  src/share/vm/opto/runtime.cpp | 24
-rw-r--r--  src/share/vm/opto/stringopts.cpp | 11
-rw-r--r--  src/share/vm/opto/superword.cpp | 107
-rw-r--r--  src/share/vm/opto/superword.hpp | 5
-rw-r--r--  src/share/vm/opto/type.cpp | 20
-rw-r--r--  src/share/vm/opto/vectornode.cpp | 5
-rw-r--r--  src/share/vm/opto/vectornode.hpp | 7
-rw-r--r--  src/share/vm/prims/forte.cpp | 112
-rw-r--r--  src/share/vm/prims/jni.cpp | 117
-rw-r--r--  src/share/vm/prims/jniCheck.cpp | 278
-rw-r--r--  src/share/vm/prims/jvm.cpp | 6
-rw-r--r--  src/share/vm/prims/jvmtiClassFileReconstituter.hpp | 4
-rw-r--r--  src/share/vm/prims/jvmtiExport.cpp | 9
-rw-r--r--  src/share/vm/prims/jvmtiExport.hpp | 1
-rw-r--r--  src/share/vm/prims/jvmtiImpl.cpp | 8
-rw-r--r--  src/share/vm/prims/jvmtiRedefineClasses.cpp | 76
-rw-r--r--  src/share/vm/prims/jvmtiTagMap.cpp | 8
-rw-r--r--  src/share/vm/prims/methodHandles.cpp | 45
-rw-r--r--  src/share/vm/prims/methodHandles.hpp | 6
-rw-r--r--  src/share/vm/prims/unsafe.cpp | 50
-rw-r--r--  src/share/vm/prims/whitebox.cpp | 54
-rw-r--r--  src/share/vm/runtime/arguments.cpp | 26
-rw-r--r--  src/share/vm/runtime/arguments.hpp | 7
-rw-r--r--  src/share/vm/runtime/arguments_ext.hpp | 10
-rw-r--r--  src/share/vm/runtime/basicLock.cpp | 5
-rw-r--r--  src/share/vm/runtime/deoptimization.cpp | 29
-rw-r--r--  src/share/vm/runtime/frame.hpp | 33
-rw-r--r--  src/share/vm/runtime/globals.hpp | 20
-rw-r--r--  src/share/vm/runtime/interfaceSupport.cpp | 24
-rw-r--r--  src/share/vm/runtime/jniHandles.cpp | 9
-rw-r--r--  src/share/vm/runtime/jniHandles.hpp | 10
-rw-r--r--  src/share/vm/runtime/mutexLocker.cpp | 19
-rw-r--r--  src/share/vm/runtime/mutexLocker.hpp | 7
-rw-r--r--  src/share/vm/runtime/objectMonitor.cpp | 8
-rw-r--r--  src/share/vm/runtime/os.cpp | 128
-rw-r--r--  src/share/vm/runtime/os.hpp | 31
-rw-r--r--  src/share/vm/runtime/reflection.cpp | 12
-rw-r--r--  src/share/vm/runtime/safepoint.cpp | 71
-rw-r--r--  src/share/vm/runtime/sharedRuntime.cpp | 2
-rw-r--r--  src/share/vm/runtime/stubRoutines.hpp | 25
-rw-r--r--  src/share/vm/runtime/sweeper.cpp | 12
-rw-r--r--  src/share/vm/runtime/synchronizer.cpp | 2
-rw-r--r--  src/share/vm/runtime/thread.cpp | 13
-rw-r--r--  src/share/vm/runtime/thread.hpp | 11
-rw-r--r--  src/share/vm/runtime/threadLocalStorage.cpp | 9
-rw-r--r--  src/share/vm/runtime/threadLocalStorage.hpp | 14
-rw-r--r--  src/share/vm/runtime/vframe.cpp | 30
-rw-r--r--  src/share/vm/runtime/vframe.hpp | 20
-rw-r--r--  src/share/vm/runtime/virtualspace.cpp | 38
-rw-r--r--  src/share/vm/runtime/virtualspace.hpp | 6
-rw-r--r--  src/share/vm/runtime/vmStructs.cpp | 82
-rw-r--r--  src/share/vm/runtime/vmStructs_trace.hpp | 35
-rw-r--r--  src/share/vm/runtime/vm_operations.cpp | 4
-rw-r--r--  src/share/vm/runtime/vm_version.cpp | 12
-rw-r--r--  src/share/vm/runtime/vm_version.hpp | 7
-rw-r--r--  src/share/vm/services/classLoadingService.cpp | 4
-rw-r--r--  src/share/vm/services/management.cpp | 11
-rw-r--r--  src/share/vm/trace/trace.xml | 8
-rw-r--r--  src/share/vm/utilities/accessFlags.hpp | 7
-rw-r--r--  src/share/vm/utilities/debug.cpp | 72
-rw-r--r--  src/share/vm/utilities/debug.hpp | 3
-rw-r--r--  src/share/vm/utilities/elfFile.cpp | 5
-rw-r--r--  src/share/vm/utilities/globalDefinitions_gcc.hpp | 12
-rw-r--r--  src/share/vm/utilities/globalDefinitions_sparcWorks.hpp | 13
-rw-r--r--  src/share/vm/utilities/globalDefinitions_xlc.hpp | 8
-rw-r--r--  src/share/vm/utilities/macros.hpp | 11
-rw-r--r--  src/share/vm/utilities/vmError.cpp | 58
-rw-r--r--  test/TEST.groups | 11
-rw-r--r--  test/compiler/codegen/IntRotateWithImmediate.java | 64
-rw-r--r--  test/compiler/codegen/LoadWithMask.java | 2
-rw-r--r--  test/compiler/codegen/LoadWithMask2.java | 2
-rw-r--r--  test/compiler/escapeAnalysis/TestEABadMergeMem.java | 86
-rw-r--r--  test/compiler/escapeAnalysis/TestEscapeThroughInvoke.java | 74
-rw-r--r--  test/compiler/inlining/DefaultMethodsDependencies.java | 63
-rw-r--r--  test/compiler/jsr292/MHInlineTest.java | 207
-rw-r--r--  test/compiler/jsr292/PollutedTrapCounts.java | 109
-rw-r--r--  test/compiler/loopopts/ConstFPVectorization.java | 63
-rw-r--r--  test/compiler/loopopts/CountedLoopProblem.java | 54
-rw-r--r--  test/compiler/loopopts/UseCountedLoopSafepoints.java | 67
-rw-r--r--  test/compiler/loopopts/superword/TestVectorizationWithInvariant.java | 144
-rw-r--r--  test/compiler/regalloc/C1ObjectSpillInLogicOp.java | 3
-rw-r--r--  test/compiler/rtm/locking/TestRTMAbortRatio.java | 9
-rw-r--r--  test/compiler/rtm/locking/TestRTMAfterNonRTMDeopt.java | 9
-rw-r--r--  test/compiler/rtm/locking/TestRTMDeoptOnLowAbortRatio.java | 10
-rw-r--r--  test/compiler/rtm/locking/TestRTMLockingThreshold.java | 10
-rw-r--r--  test/compiler/rtm/locking/TestRTMTotalCountIncrRate.java | 10
-rw-r--r--  test/compiler/rtm/locking/TestUseRTMAfterLockInflation.java | 5
-rw-r--r--  test/compiler/stable/TestStableBoolean.java | 28
-rw-r--r--  test/compiler/stable/TestStableByte.java | 28
-rw-r--r--  test/compiler/stable/TestStableChar.java | 28
-rw-r--r--  test/compiler/stable/TestStableDouble.java | 28
-rw-r--r--  test/compiler/stable/TestStableFloat.java | 28
-rw-r--r--  test/compiler/stable/TestStableInt.java | 28
-rw-r--r--  test/compiler/stable/TestStableLong.java | 28
-rw-r--r--  test/compiler/stable/TestStableObject.java | 28
-rw-r--r--  test/compiler/stable/TestStableShort.java | 28
-rw-r--r--  test/compiler/stringopts/TestOptimizeStringConcat.java | 89
-rw-r--r--  test/compiler/testlibrary/rtm/AbortProvoker.java | 81
-rw-r--r--  test/compiler/testlibrary/rtm/BusyLock.java | 8
-rw-r--r--  test/compiler/testlibrary/rtm/MemoryConflictProvoker.java | 10
-rw-r--r--  test/compiler/testlibrary/rtm/RTMTestBase.java | 4
-rw-r--r--  test/compiler/types/TestMeetExactConstantArrays.java | 70
-rw-r--r--  test/compiler/types/TestTypePropagationToCmpU.java | 59
-rw-r--r--  test/compiler/uncommontrap/UncommonTrapStackBang.java | 6
-rw-r--r--  test/compiler/unsafe/TestUnsafeLoadControl.java | 103
-rw-r--r--  test/gc/6581734/Test6581734.java | 3
-rw-r--r--  test/gc/TestSystemGC.java | 3
-rw-r--r--  test/gc/arguments/TestAlignmentToUseLargePages.java | 3
-rw-r--r--  test/gc/arguments/TestG1HeapRegionSize.java | 17
-rw-r--r--  test/gc/concurrentMarkSweep/DisableResizePLAB.java | 1
-rw-r--r--  test/gc/g1/Test2GbHeap.java | 62
-rw-r--r--  test/gc/g1/TestEagerReclaimHumongousRegionsClearMarkBits.java (renamed from test/gc/g1/TestEagerReclaimHumongousRegions2.java) | 4
-rw-r--r--  test/gc/g1/TestEagerReclaimHumongousRegionsWithRefs.java | 113
-rw-r--r--  test/gc/g1/TestG1TraceEagerReclaimHumongousObjects.java | 142
-rw-r--r--  test/gc/g1/TestGCLogMessages.java | 279
-rw-r--r--  test/gc/g1/TestGreyReclaimedHumongousObjects.java | 176
-rw-r--r--  test/gc/g1/TestHumongousShrinkHeap.java | 1
-rw-r--r--  test/gc/g1/TestLargePageUseForAuxMemory.java | 129
-rw-r--r--  test/gc/g1/TestRegionAlignment.java | 3
-rw-r--r--  test/gc/g1/TestShrinkAuxiliaryData.java | 174
-rw-r--r--  test/gc/g1/TestShrinkAuxiliaryData00.java | 10
-rw-r--r--  test/gc/g1/TestShrinkAuxiliaryData05.java | 10
-rw-r--r--  test/gc/g1/TestShrinkAuxiliaryData10.java | 8
-rw-r--r--  test/gc/g1/TestShrinkAuxiliaryData15.java | 8
-rw-r--r--  test/gc/g1/TestShrinkAuxiliaryData20.java | 8
-rw-r--r--  test/gc/g1/TestShrinkAuxiliaryData25.java | 8
-rw-r--r--  test/gc/g1/TestShrinkAuxiliaryData30.java | 8
-rw-r--r--  test/gc/g1/TestShrinkToOneRegion.java | 3
-rw-r--r--  test/gc/metaspace/G1AddMetaspaceDependency.java | 3
-rw-r--r--  test/gc/metaspace/TestMetaspacePerfCounters.java | 3
-rw-r--r--  test/gc/metaspace/TestPerfCountersAndMemoryPools.java | 3
-rw-r--r--  test/gc/whitebox/TestConcMarkCycleWB.java | 57
-rw-r--r--  test/runtime/6888954/vmerrors.sh | 5
-rw-r--r--  test/runtime/ErrorHandling/TestCrashOnOutOfMemoryError.java | 106
-rw-r--r--  test/runtime/ErrorHandling/TestExitOnOutOfMemoryError.java | 63
-rw-r--r--  test/runtime/ErrorHandling/TestOnError.java | 66
-rw-r--r--  test/runtime/ErrorHandling/TestOnOutOfMemoryError.java | 70
-rw-r--r--  test/runtime/InitialThreadOverflow/testme.sh | 2
-rw-r--r--  test/runtime/NMT/JcmdWithNMTDisabled.java | 26
-rw-r--r--  test/runtime/RedefineFinalizer/RedefineFinalizer.java | 64
-rw-r--r--  test/runtime/RedefineTests/RedefineRunningMethodsWithResolutionErrors.java | 143
-rw-r--r--  test/runtime/handlerInTry/HandlerInTry.jasm | 115
-rw-r--r--  test/runtime/handlerInTry/IsolatedHandlerInTry.jasm | 124
-rw-r--r--  test/runtime/handlerInTry/LoadHandlerInTry.java | 86
-rw-r--r--  test/runtime/invokedynamic/BootstrapMethodErrorTest.java | 115
-rw-r--r--  test/runtime/lambda-features/InvokespecialInterface.java | 2
-rw-r--r--  test/runtime/memory/LargePages/TestLargePageSizeInBytes.java | 61
-rw-r--r--  test/runtime/memory/ReadVMPageSize.java | 46
-rw-r--r--  test/runtime/stackMapCheck/BadMap.jasm | 152
-rw-r--r--  test/runtime/stackMapCheck/BadMapDstore.jasm | 79
-rw-r--r--  test/runtime/stackMapCheck/BadMapIstore.jasm | 79
-rw-r--r--  test/runtime/stackMapCheck/StackMapCheck.java | 63
-rw-r--r--  test/serviceability/jvmti/GetObjectSizeOverflow.java | 2
-rw-r--r--  test/serviceability/sa/jmap-hashcode/Test8028623.java | 9
-rw-r--r--  test/test_env.sh | 26
-rw-r--r--  test/testlibrary/RedefineClassHelper.java | 79
-rw-r--r--  test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java | 39
-rw-r--r--  test/testlibrary/com/oracle/java/testlibrary/Platform.java | 99
-rw-r--r--  test/testlibrary/com/oracle/java/testlibrary/Utils.java | 29
-rw-r--r--  test/testlibrary/ctw/Makefile | 2
-rw-r--r--  test/testlibrary/ctw/README | 46
-rw-r--r--  test/testlibrary/whitebox/Makefile | 2
-rw-r--r--  test/testlibrary/whitebox/sun/hotspot/WhiteBox.java | 18
-rw-r--r--  test/testlibrary_tests/RedefineClassTest.java | 54
-rw-r--r--  test/testlibrary_tests/TestMutuallyExclusivePlatformPredicates.java | 135
480 files changed, 12911 insertions, 6341 deletions
diff --git a/.hgtags b/.hgtags
index eddb94870..d91c6f603 100644
--- a/.hgtags
+++ b/.hgtags
@@ -547,6 +547,8 @@ e62c06b887310b5bd23be9b817a9a6f0daf0d0e1 jdk8u25-b15
28b50d07f6f8c5a567b6a25e95a423948114a004 jdk8u25-b17
639abc668bfe995dba811dd35411b9ea8a9041cd jdk8u25-b18
c3528699fb33fe3eb1d117504184ae7ab2507aa1 jdk8u25-b31
+631f0c7b49c091c6865d79d248d6551a270ac22f jdk8u25-b32
+4e1f52384f9ffa803838acad545cd63de48a7b35 jdk8u25-b33
5bb683bbe2c74876d585b5c3232fc3aab7b23e97 jdk8u31-b00
5bb686ae3b89f8aa1c74331b2d24e2a5ebd43448 jdk8u31-b01
087678da96603c9705b38b6cc4a6569ac7b4420a jdk8u31-b02
@@ -561,6 +563,10 @@ d7b6bdd51abe68b16411d5b292fb830a43c5bc09 jdk8u31-b10
9906d432d6dbd2cda242e3f3cfde7cf6c90245bf jdk8u31-b11
e13839545238d1ecf17f0489bb6fb765de46719a jdk8u31-b12
4206e725d584be942c25ff46ff23d8e299ca4a4c jdk8u31-b13
+b517d3a9aebf0fee64808f9a7c0ef8e0b82d5ed3 jdk8u31-b31
+15d8108258cb60a58bdd03b9ff8e77dd6727a804 jdk8u31-b32
+26b1dc6891c4fae03575a9090f7d04bd631d9164 jdk8u31-b33
+70e73f8f43fc71c0629253e22121c974bfd79b86 jdk8u31-b34
1b3abbeee961dee49780c0e4af5337feb918c555 jdk8u40-b10
f10fe402dfb1543723b4b117a7cba3ea3d4159f1 hs25.40-b15
99372b2fee0eb8b3452f47230e84aa6e97003184 jdk8u40-b11
@@ -590,3 +596,206 @@ fa4e797f61e6dda1a60e06944018213bff2a1b76 jdk8u40-b24
698dd28ecc785ffc43e3f12266b13e85382c26a8 jdk8u40-b25
f39b6944ad447269b81e06ca5da9edff9e9e67c8 jdk8u40-b26
6824e2475e0432e27f9cc51838bc34ea5fbf5113 jdk8u40-b27
+8220f68a195f6eeed2f5fb6e8a303726b512e899 jdk8u40-b31
+850a290eb1088a61178d1910c500e170ef4f4386 jdk8u40-b32
+1b3abbeee961dee49780c0e4af5337feb918c555 jdk8u40-b10
+f10fe402dfb1543723b4b117a7cba3ea3d4159f1 hs25.40-b15
+99372b2fee0eb8b3452f47230e84aa6e97003184 jdk8u40-b11
+8b9ec2da541a74ac698560b6a2bc45fccb789919 hs25.40-b16
+6b93bf9ea3ea57ed0fe53cfedb2f9ab912c324e5 jdk8u40-b12
+521e269ae1daa9df1cb0835b97aa76bdf340fcb2 hs25.40-b17
+86307d47790785398d0695acc361bccaefe25f94 jdk8u40-b13
+4d5dc0d0f8799fafa1135d51d85edd4edd566501 hs25.40-b18
+b8ca8ec1daea70f7c0d519e866f9f147ec247055 jdk8u40-b14
+eb16b24e2eba9bdf04a9b377bebc2db9f713ff5e jdk8u40-b15
+3a8a0fd171c5876023112941b1c7254262f9adfc hs25.40-b19
+aa2442f89230dc46147c721812f3b3bd4c612e83 hs25.40-b20
+5ea68fb91139081304357f9b937f32c5fdfeca6d jdk8u40-b16
+6bf89bfe8185747a57193efb6cec1f17ccc80414 hs25.40-b21
+fc1f9b67fd8c5d5cd94ecc03569d93e7ce7fb574 jdk8u40-b17
+bc5a90a4db47f1c497d7894434c42325f595cd02 hs25.40-b22
+31d3306aad29e39929418ed43f28212a5f5306a3 jdk8u40-b18
+f8fc5cbe082ce0fb0c6c1dcd39493a16ed916353 hs25.40-b23
+d9349fa8822336e0244da0a8448f3e6b2d62741d jdk8u40-b19
+c3933f52eeb33f70ee562464edddfe9f01d944fd jdk8u40-b20
+d2e9a6bec4f2eec8506eed16f7324992a85d8480 hs25.40-b24
+25ec4a67433744bbe3406e5069e7fd1876ebbf2f jdk8u40-b21
+0f0cb4eeab2d871274f4ffdcd6017d2fdfa89238 hs25.40-b25
+0ee548a1cda08c884eccd563e2d5fdb6ee769b5a jdk8u40-b22
+0e67683b700174eab71ea205d1cfa4f1cf4523ba jdk8u40-b23
+fa4e797f61e6dda1a60e06944018213bff2a1b76 jdk8u40-b24
+698dd28ecc785ffc43e3f12266b13e85382c26a8 jdk8u40-b25
+f39b6944ad447269b81e06ca5da9edff9e9e67c8 jdk8u40-b26
+6824e2475e0432e27f9cc51838bc34ea5fbf5113 jdk8u40-b27
+b95f13f05f553309cd74d6ccf8fcedb259c6716c jdk8u45-b00
+41c3c456e326185053f0654be838f4b0bfb38078 jdk8u45-b01
+626fd8c2eec63e2a2dff3839bfe12c0431bf00a4 jdk8u45-b02
+f41aa01b0a043611ee0abcb81a40f7d80085ec27 jdk8u45-b03
+2f586e3c4b6db807ac6036b485b2890ff82f7bfd jdk8u45-b04
+344ff6e45a1e2960ac4a583f63ebfb54cd52e6b4 jdk8u45-b05
+3afa9cc6e8d537ee456b8e12d1abb1da520b5ddc jdk8u45-b06
+5871f3dd9b4a2c4b44e7da2184f4430323e0c04b jdk8u45-b07
+35c7330b68e21d0dfaaedaaf74b794fd10606e9c jdk8u45-b08
+35d8318de0b6d4e68e2e0a04f6e20cafd113ca54 jdk8u45-b09
+a9f5786079202b74b3651e1097c0b2341b2178b9 jdk8u45-b10
+f4822d12204179e6a3e7aaf98991b6171670cbf2 jdk8u45-b11
+dc29108bcbcbfcd49eaa9135368306dc85db73a6 jdk8u45-b12
+efbf340fc7f56e49735111c23cef030413146409 jdk8u45-b13
+5321d26956b283b7cb73b04b91db41c7c9fe9158 jdk8u45-b14
+a5ba7c9a0b916ea088aaac5d40e17b4675c2b026 jdk8u45-b15
+894b92a02c533bcd1203c4beb5b6ec067b63466e jdk8u45-b31
+1428b6aa09c4e17202b801530c3c4993c7ce8e5b jdk8u45-b32
+9b2bf0d8a9a0fa7fe7486eb29454eae4b08f3d82 jdk8u45-b33
+dc3c47fc6218003b23338b978b3f13a6d7976b41 jdk8u45-b34
+3cb364e46590add7cb42ec8b6565a3c62adf824d jdk8u45-b35
+48b09bb741171b0069000ac1cf5407ef2357d3d1 jdk8u45-b36
+3c2ea5da6afd55a524e25dc56747940324befda2 jdk8u45-b37
+b22b01407a8140041545afe1f2d6335db4d94ba5 jdk8u51-b00
+c1de2652a48c1d4a0c96707acc73db3cd317df2a jdk8u51-b01
+8f03c2f5fc170da5fca2cf65734941efb619feca jdk8u51-b02
+cf295659243009ded76b6c14307c177a02f9fe82 jdk8u51-b03
+0b3f449553884d88f6c9d7ab067fa858f18cc3f1 jdk8u51-b04
+6ce994385353023e6b3f9c5ef331f390b324a355 jdk8u51-b05
+3816de51b5e7d6050584057fae5f2262dae53d7e jdk8u51-b06
+5c017acbaf015fb8ecca6f00870965f3deb4e1ac jdk8u51-b07
+631d4029d851b59613e6748e17447001a682276e jdk8u51-b08
+ce81c4487dd1e9f89d4570a8cd25e349f6bae00d jdk8u51-b09
+928e1994ad43272f808ca22b9cc1b08a7ce2824f jdk8u51-b10
+1a122beb9dc6881850ef1d1250f40a83709b8b72 jdk8u51-b11
+05c80f1060f0c0d5720de9eadd09162af1168eab jdk8u51-b12
+07e103f3f43886a3b47945e5295eb5accad505de jdk8u51-b13
+a4eea4bee2d4fdb05f1a8358d70ec6adb1135526 jdk8u51-b14
+9a70cba6a3c3e44486f9c199d03a16b2b09d0a13 jdk8u51-b15
+3639e38bd73f5efa8ce092f0a745bb0c90759575 jdk8u51-b16
+20bad8c6c7b406c3603b4e22b15cd990840a9d62 jdk8u51-b31
+e51afd2a1fc17de59ff3c79003210a40a28ac960 jdk8u51-b32
+b2427357cc98aeb2716ee2e89e733794afb4057b jdk8u51-b33
+d9349fa8822336e0244da0a8448f3e6b2d62741d jdk8u60-b00
+d9349fa8822336e0244da0a8448f3e6b2d62741d hs25.60-b00
+ebf89088c08ab0508b9002b48dd3d68a340259af hs25.60-b01
+5fa73007ceb92a13742fc4a24ec935a6494f8045 hs25.60-b02
+702cc6067686acaa45f7b455b7490edc056c2ae0 jdk8u60-b01
+1f6ba0d2923dadba87aac4ed779dd1ed0161ec2b hs25.60-b03
+38f6080523831ae9a6907c780f2042b82f3213ca jdk8u60-b02
+9d6eb2757167744a17ea71f8b860430d70941eda jdk8u60-b03
+0fb1ac49ae7764c5d7c6dfb9fe046d0e1a4eb5aa hs25.60-b04
+586a449cd30332dd53c0f74bf2ead6f3d4724bfc jdk8u60-b04
+74931e85352be8556eaa511ca0dd7c38fe272ec3 hs25.60-b05
+b13f1890afb8abc31ecb9c21fd2ba95aba3e33f8 jdk8u60-b05
+b17a8a22a0344e3c93e2e4677de20d35f99cf4f5 hs25.60-b06
+7b70923c8e04920b60278f90ad23a63c773cee7b jdk8u60-b06
+d51ef6da82b486e7b2b3c08eef9ca0a186935ded hs25.60-b07
+353e580ce6878d80c7b7cd27f8ad24609b12c58b jdk8u60-b07
+a72a4192a36d6d84766d6135fe6515346c742007 hs25.60-b08
+bf68e15dc8fe73eeb1eb3c656df51fdb1f707a97 jdk8u60-b08
+d937e6a0674841d670232ecf1611f52e1ae998e7 hs25.60-b09
+f1058b5c6294235d8ad032dcc72c8f8bc202cb5a jdk8u60-b09
+57a14c3927eba6372d909ae164fa90bb9b6a6ce4 hs25.60-b10
+8e4518dc2b38957072704ffe4cbf29f046dc9325 jdk8u60-b10
+64a32bc18e88eed6131ed036dc3e10e566ef339b hs25.60-b11
+d8f133adf05d310bd7e1d9adf32cbeb71ff33c37 jdk8u60-b11
+4390345de45c7768c04bfafabf006a401824c5b5 hs25.60-b12
+ccca7162738eee1be74890342c67d3b26540dcf6 jdk8u60-b12
+ced08ed4924fc6581626c7ce2d769fc18d7b23e0 jdk8u60-b13
+30e04eba9e298cc5094793e279306535239187cc hs25.60-b13
+1f0d760ccac1ff82a03a9b7d6bd5c697ef0a7c4a hs25.60-b14
+c9f8b7319d0a5ab07310cf53507642a8fd91589b jdk8u60-b14
+4187dc92e90b16b4097627b8af4f5e6e63f3b497 hs25.60-b15
+b99f1bf208f385277b03a985d35b6614b4095f3e jdk8u60-b15
+f5800068c61d0627c14e99836e9ce5cf0ef00075 hs25.60-b16
+ab2353694ea7fd4907c5c88b8334f8feaafca8c7 jdk8u60-b16
+5efc25c367164b6856554b0d625f3c422fdf9558 hs25.60-b17
+c26d09f1065cd26bd8b926efc5d3938b71e09eb5 jdk8u60-b17
+624f4cc05e7e95dd2103f343c54d7bdea6a81919 hs25.60-b18
+3fa5c654c143fe309e5ddda92adc5fb132365bcf jdk8u60-b18
+b852350a2bc6d5f43006e2be53fb74d148290708 hs25.60-b19
+bd9221771f6e34e63b3b340ffcf9906ccf882dae jdk8u60-b19
+e01a710549a962cee94728271248a7d89fb56c49 hs25.60-b20
+3b6c97747ccc61d189bca64b4afa3ffc13680810 jdk8u60-b20
+4b6687a4f2fe84211b8b3b5afb34b5186afbddf6 hs25.60-b21
+e0d75c284bd1c09fd7d9ef09627d8a99b88d468d jdk8u60-b21
+ff8fdeb2fb6d6f3348597339c53412f8f6202c3f hs25.60-b22
+878cb0df27c22c6b1e9f4add1eb3da3edc8ab51d jdk8u60-b22
+ad04e0ef0f85625b68ed18e949c75399b8d9b99b hs25.66-b01
+0e4094950cd312c8f95c7f37336606323fe049fe jdk8u60-b23
+d89ceecf1bad55e1aee2932b8895d60fc64c15db hs25.60-b23
+fb157d537278cda4150740e27bb57cd8694e15bf jdk8u60-b24
+6b4ea38c01bd9cc86d0aa8926f4855ff6ee365ee jdk8u60-b25
+6a6759372807f49aa7a66ddc36aa91d6648d2097 jdk8u60-b26
+10ad4b9d79f98fa3545c88a342a68a80d198b808 jdk8u60-b27
+1f646daf0d673b0925da1ed7809d6bd097388ece jdk8u60-b31
+9bf9a733246b5e89986233f42a8e00c798ca703f jdk8u60-b32
+0219ab69f00782e5c49687e2fa75138a7ffddea1 jdk8u52-b06
+9b6f44853eed8caba935915c7e710c546b205c8e jdk8u52-b07
+0219ab69f00782e5c49687e2fa75138a7ffddea1 jdk8u65-b00
+b6815d853c359be92e61f422ea5018af1f81d0c7 jdk8u65-b01
+488661b9cbdaeb24be14873f3bd39a945575692a jdk8u65-b02
+ea47136e6ea4253c0bf238fb61760f98a8d01ebc jdk8u65-b03
+2a03fd592fe60fd113c1c89e431ebaa6857c4998 jdk8u65-b04
+aa915217a00c4b8ce0e82d1b23fa1df8a9e4cc70 jdk8u65-b05
+3070e116da4cfebc2ceb0df8f40faeefd38a6d4a jdk8u65-b06
+008b42595f2babc98e1b23bc00f27e308f9a35b9 jdk8u65-b07
+d8519b30e607f87cad6c949c6b52c1dba0cde7a8 jdk8u65-b08
+c6d1a21e213d62f423ac9013bcc7dc0bc05e38b6 jdk8u65-b09
+0274b03fdf4a3e4d125547d68ed74b4f55d515ec jdk8u65-b10
+a106723ba50719c479614d1f599e951edb51506c jdk8u65-b11
+a5fbc6967cf5d170be56c9804b90ebb6bbd02832 jdk8u65-b12
+529bea83b67412ffb5a7eb0ed8f8772732bbf446 jdk8u65-b13
+ccc03258fbcfbcdf515ceb87bd3699c8f849dc0d jdk8u65-b14
+03ca585abe68e4dcce5bc162a0fa0593eb8fa2ee jdk8u65-b15
+b4137dc9b1a83409fbd2b80d2ffd5ae949cd7a6b jdk8u65-b16
+8e9c73f18f7e565f1d2100fab7cb8e9e785b1991 jdk8u65-b17
+878cb0df27c22c6b1e9f4add1eb3da3edc8ab51d jdk8u66-b00
+777a354cada52b831a32bfc5362ad7cedfde4450 jdk8u66-b01
+0366ad2644f58ec88af9cb2ea8c23a02559fb2d1 hs25.66-b02
+47110b037994f9006c22abcb12569fcafad84edb hs25.66-b03
+ae5624088d86abe8e7981dbb893c1b6da5140a1c jdk8u66-b02
+6594411c4eb4d00e439330a61744f077d0b96363 jdk8u66-b07
+aedefb75358f3cda5181bf594c2cda833056b25a jdk8u66-b08
+83621deea0fcb4a517b94c5546b4f8738588cb6c jdk8u66-b09
+5280a8174aea13bd242480419e6228857dac7b59 jdk8u66-b10
+8a23b6392c590b7bf5f6ad2c4746dc03981a7f60 jdk8u66-b11
+ab64d7ea4f48ea4bdbcc43d4a653be157d9c29e3 jdk8u66-b12
+4d699853544cf869d4edaf23b7cc9cfbb2900a2b jdk8u66-b13
+9f7f29ff487ae023ca1f697445004a532751b0d2 jdk8u66-b14
+5b67af3317bce1f940c5dc1535411a1002bed6c1 jdk8u66-b15
+4bbf0e9196f2786e0bc4f17664e2533808d944bf jdk8u66-b16
+a6f2a7ba281291f5dab79fa494f7cfaa6232c88b jdk8u66-b17
+9ae2a5adabba97d9ebf79e13b325f1ff368ce014 jdk8u66-b18
+527f62b70faee4a0c15f365def024b87012e6934 jdk8u66-b31
+9a158a0c243beb610dbaabd63d6218d3ce5825f1 jdk8u71-b00
+67df26e363fb7e722032fd286673642fc999957c jdk8u71-b01
+1a799d49de23d84f658ade1d3805a1924e7e1e84 jdk8u71-b02
+e06f49d82ef8128b3637937d383b6f0862650deb jdk8u71-b03
+7466029bf3cd3d5eea3055c4f790728263be4a2e jdk8u71-b04
+8a402d51763c083151d0cb434647bd6e1ba4353f jdk8u71-b05
+7dd34cca3538c9bef74a8a1976e14ca51e9857f9 jdk8u71-b06
+bf9d01d1dc6c58b52cb2d0fb07820c51b62c3827 jdk8u71-b07
+19d7052b56715803f40e39aaff00d34ca71f5bc3 jdk8u71-b08
+4fb1acaab7a3205277fa21b083694f1bbb9ae957 jdk8u71-b09
+db019338ef0b6c0ed98c4804c11e07fbee4c6b4c jdk8u71-b10
+9905af25dff7d5bbc73356f5bd14aa10f691a050 jdk8u71-b11
+8aeeda200ace7038e5420b3f27bdd2949abf1029 jdk8u71-b12
+9ba75ccbb03a597abf8d1d2ff5d1d12621725d48 jdk8u71-b13
+12a617fd56938aed03ed5fe48d91f2eafc305825 jdk8u71-b14
+21d9c51cf7df883e67caeccf5fabd0505874f5b6 jdk8u71-b15
+b8f426369187c32551f0a3d571d933908988c81c jdk8u72-b00
+c0205eddb31766ece562483595ec28a7506971e9 jdk8u72-b01
+15ef554f2f2e0a8d7c330191432fcd2126d19dab jdk8u72-b02
+bb98a4ba1556d0505461de98aa3dddf75278c09b jdk8u72-b03
+6c8ceb05ccf78f2f8f72f0870e3f6f3bd4895bb1 jdk8u72-b04
+a2969911663ab29c71a61aa3403e53243ad89923 jdk8u72-b05
+acf0d80cb84f14d787c34360abf2bc38b186999a jdk8u72-b06
+a382a72730f480078af5454afe782de9b6c210d3 jdk8u72-b07
+f26ad2273cd42cde957084ee953a9dc1a2598b5d jdk8u72-b08
+defd2b9a94584d372dea05272ad4f1d6a662c5d1 jdk8u72-b09
+78b0df74e3743be0b578c63e209a03685136d1fa jdk8u72-b10
+37a99b0f443e45fbd0b0a3ed844e6c97d661b754 jdk8u72-b11
+b8e7dd0e21173ad829b40361763d27cb6ac532e9 jdk8u72-b12
+a8e4754b89aecc388623394a20f6d43d4c58f083 jdk8u72-b13
+dc2fdd4e0b8105268b8231040f761f27ab4523f2 jdk8u72-b14
+d6670c5d49ba381405ec9f69a78ccc5b8b0c8473 jdk8u72-b15
+d7b01fb81aa8a5437cb03bc36afe15cf0e55fb89 jdk8u76-b00
+c1679cc87ba045219169cabb6b9b378c2b5cc578 jdk8u76-b01
+218483967e52b419d885d34af4488a81c5133804 jdk8u76-b02
+2a2720daacaa8d9a3ba9435cfaaf9751241d2062 jdk8u76-b03
diff --git a/THIRD_PARTY_README b/THIRD_PARTY_README
index c34ce6b31..2b20da2c3 100644
--- a/THIRD_PARTY_README
+++ b/THIRD_PARTY_README
@@ -1140,37 +1140,6 @@ included with JRE 8, JDK 8, and OpenJDK 8.
--------------------------------------------------------------------------------
-%% This notice is provided with respect to JSON, which may be included
-with JRE 8 & JDK 8.
-
---- begin of LICENSE ---
-
-Copyright (c) 2002 JSON.org
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-The Software shall be used for Good, not Evil.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
---- end of LICENSE ---
-
--------------------------------------------------------------------------------
-
%% This notice is provided with respect to Kerberos functionality, which
which may be included with JRE 8, JDK 8, and OpenJDK 8.
@@ -1250,7 +1219,7 @@ included with JDK 8 and OpenJDK 8 source distributions.
-------------------------------------------------------------------------------
-%% This notice is provided with respect to libpng 1.5.4, which may be
+%% This notice is provided with respect to libpng 1.6.16, which may be
included with JRE 8, JDK 8, and OpenJDK 8.
--- begin of LICENSE ---
@@ -1266,8 +1235,8 @@ this sentence.
This code is released under the libpng license.
-libpng versions 1.2.6, August 15, 2004, through 1.5.4, July 7, 2011, are
-Copyright (c) 2004, 2006-2011 Glenn Randers-Pehrson, and are
+libpng versions 1.2.6, August 15, 2004, through 1.6.16, December 22, 2014, are
+Copyright (c) 2004, 2006-2014 Glenn Randers-Pehrson, and are
distributed according to the same disclaimer and license as libpng-1.2.5
with the following individual added to the list of Contributing Authors
@@ -1364,13 +1333,13 @@ certification mark of the Open Source Initiative.
Glenn Randers-Pehrson
glennrp at users.sourceforge.net
-July 7, 2011
+December 22, 2014
--- end of LICENSE ---
-------------------------------------------------------------------------------
-%% This notice is provided with respect to libungif 4.1.3, which may be
+%% This notice is provided with respect to GIFLIB 5.1.1 & libungif 4.1.3, which may be
included with JRE 8, JDK 8, and OpenJDK 8.
--- begin of LICENSE ---
@@ -1399,13 +1368,13 @@ THE SOFTWARE.
-------------------------------------------------------------------------------
-%% This notice is provided with respect to Little CMS 2.5, which may be
+%% This notice is provided with respect to Little CMS 2.7, which may be
included with JRE 8, JDK 8, and OpenJDK 8.
--- begin of LICENSE ---
Little CMS
-Copyright (c) 1998-2011 Marti Maria Saguer
+Copyright (c) 1998-2015 Marti Maria Saguer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/agent/src/os/linux/LinuxDebuggerLocal.c b/agent/src/os/linux/LinuxDebuggerLocal.c
index 0d3960f7a..d250ce5d6 100644
--- a/agent/src/os/linux/LinuxDebuggerLocal.c
+++ b/agent/src/os/linux/LinuxDebuggerLocal.c
@@ -209,9 +209,12 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_at
verifyBitness(env, (char *) &buf);
CHECK_EXCEPTION;
+ char err_buf[200];
struct ps_prochandle* ph;
- if ( (ph = Pgrab(jpid)) == NULL) {
- THROW_NEW_DEBUGGER_EXCEPTION("Can't attach to the process");
+ if ( (ph = Pgrab(jpid, err_buf, sizeof(err_buf))) == NULL) {
+ char msg[230];
+ snprintf(msg, sizeof(msg), "Can't attach to the process: %s", err_buf);
+ THROW_NEW_DEBUGGER_EXCEPTION(msg);
}
(*env)->SetLongField(env, this_obj, p_ps_prochandle_ID, (jlong)(intptr_t)ph);
fillThreadsAndLoadObjects(env, this_obj, ph);
diff --git a/agent/src/os/linux/libproc.h b/agent/src/os/linux/libproc.h
index b07c090bf..03426c96f 100644
--- a/agent/src/os/linux/libproc.h
+++ b/agent/src/os/linux/libproc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
#include <stdint.h>
#include "proc_service.h"
-#if defined(arm) || defined(ppc)
+#ifdef ALT_SASRCDIR
#include "libproc_md.h"
#endif
@@ -69,6 +69,7 @@ combination of ptrace and /proc calls.
#if defined(sparc) || defined(sparcv9) || defined(ppc64)
+#include <asm/ptrace.h>
#define user_regs_struct pt_regs
#endif
@@ -82,7 +83,7 @@ typedef int bool;
struct ps_prochandle;
// attach to a process
-struct ps_prochandle* Pgrab(pid_t pid);
+struct ps_prochandle* Pgrab(pid_t pid, char* err_buf, size_t err_buf_len);
// attach to a core dump
struct ps_prochandle* Pgrab_core(const char* execfile, const char* corefile);
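The libproc.h hunk above changes the exposed attach API: Pgrab() now takes a caller-supplied error buffer, so a failed attach can report detail instead of the old fixed "Can't attach to the process" string. A minimal sketch of a caller under the new contract (attach_or_report() is an illustration, not part of this change; Pgrab and Prelease are the agent's own functions):

    #include <stdio.h>
    #include <sys/types.h>
    #include "libproc.h"

    // Hypothetical caller of the new contract: on failure, Pgrab() fills
    // err_buf with the reason (e.g. the strerror text of PTRACE_ATTACH).
    static int attach_or_report(pid_t pid) {
        char err_buf[200];                 // same size the JNI layer uses
        struct ps_prochandle* ph = Pgrab(pid, err_buf, sizeof(err_buf));
        if (ph == NULL) {
            fprintf(stderr, "attach to %d failed: %s\n", (int) pid, err_buf);
            return -1;
        }
        // ... inspect the target, then detach ...
        Prelease(ph);
        return 0;
    }
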
diff --git a/agent/src/os/linux/ps_proc.c b/agent/src/os/linux/ps_proc.c
index 61923a255..1516842e4 100644
--- a/agent/src/os/linux/ps_proc.c
+++ b/agent/src/os/linux/ps_proc.c
@@ -27,9 +27,11 @@
#include <string.h>
#include <signal.h>
#include <errno.h>
+#include <elf.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/ptrace.h>
+#include <sys/uio.h>
#include "libproc_impl.h"
#if defined(x86_64) && !defined(amd64)
@@ -138,6 +140,15 @@ static bool process_get_lwp_regs(struct ps_prochandle* ph, pid_t pid, struct use
return false;
}
return true;
+#elif defined(PTRACE_GETREGSET)
+ struct iovec iov;
+ iov.iov_base = user;
+ iov.iov_len = sizeof(*user);
+ if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, (void*) &iov) < 0) {
+ print_debug("ptrace(PTRACE_GETREGSET, ...) failed for lwp %d\n", pid);
+ return false;
+ }
+ return true;
#else
print_debug("ptrace(PTRACE_GETREGS, ...) not supported\n");
return false;
@@ -204,9 +215,12 @@ static bool ptrace_waitpid(pid_t pid) {
}
// attach to a process/thread specified by "pid"
-static bool ptrace_attach(pid_t pid) {
+static bool ptrace_attach(pid_t pid, char* err_buf, size_t err_buf_len) {
if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0) {
- print_debug("ptrace(PTRACE_ATTACH, ..) failed for %d\n", pid);
+ char buf[200];
+ char* msg = strerror_r(errno, buf, sizeof(buf));
+ snprintf(err_buf, err_buf_len, "ptrace(PTRACE_ATTACH, ..) failed for %d: %s", pid, msg);
+ print_debug("%s\n", err_buf);
return false;
} else {
return ptrace_waitpid(pid);
@@ -328,16 +342,17 @@ static ps_prochandle_ops process_ops = {
};
// attach to the process. One and only one exposed stuff
-struct ps_prochandle* Pgrab(pid_t pid) {
+struct ps_prochandle* Pgrab(pid_t pid, char* err_buf, size_t err_buf_len) {
struct ps_prochandle* ph = NULL;
thread_info* thr = NULL;
if ( (ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle))) == NULL) {
- print_debug("can't allocate memory for ps_prochandle\n");
+ snprintf(err_buf, err_buf_len, "can't allocate memory for ps_prochandle");
+ print_debug("%s\n", err_buf);
return NULL;
}
- if (ptrace_attach(pid) != true) {
+ if (ptrace_attach(pid, err_buf, err_buf_len) != true) {
free(ph);
return NULL;
}
@@ -360,7 +375,7 @@ struct ps_prochandle* Pgrab(pid_t pid) {
thr = ph->threads;
while (thr) {
// don't attach to the main thread again
- if (ph->pid != thr->lwp_id && ptrace_attach(thr->lwp_id) != true) {
+ if (ph->pid != thr->lwp_id && ptrace_attach(thr->lwp_id, err_buf, err_buf_len) != true) {
// even if one attach fails, we get return NULL
Prelease(ph);
return NULL;
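Besides the error plumbing, ps_proc.c gains a PTRACE_GETREGSET fallback for targets that lack the legacy PTRACE_GETREGS request: the registers are fetched through a struct iovec tagged with the NT_PRSTATUS regset. A standalone sketch of that call sequence, assuming a stopped Linux thread (read_gp_regs() is illustrative, not agent code):

    #include <elf.h>           // NT_PRSTATUS
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>       // struct iovec
    #include <sys/user.h>      // struct user_regs_struct

    // Fetch a stopped thread's general-purpose registers via the regset
    // interface; the kernel writes the filled byte count back into iov.iov_len.
    static int read_gp_regs(pid_t lwp, struct user_regs_struct* regs) {
        struct iovec iov;
        iov.iov_base = regs;
        iov.iov_len  = sizeof(*regs);
        if (ptrace(PTRACE_GETREGSET, lwp, NT_PRSTATUS, (void*) &iov) < 0) {
            perror("ptrace(PTRACE_GETREGSET)");
            return -1;
        }
        return 0;
    }
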
diff --git a/agent/src/os/solaris/proc/saproc.cpp b/agent/src/os/solaris/proc/saproc.cpp
index 5a910d3a3..3f0e6b258 100644
--- a/agent/src/os/solaris/proc/saproc.cpp
+++ b/agent/src/os/solaris/proc/saproc.cpp
@@ -314,7 +314,7 @@ static void * pathmap_dlopen(const char * name, int mode) {
handle = dlopen(name, mode);
}
if (_libsaproc_debug) {
- printf("libsaproc DEBUG: pathmap_dlopen %s return 0x%x\n", name, handle);
+ printf("libsaproc DEBUG: pathmap_dlopen %s return 0x%lx\n", name, (unsigned long) handle);
}
return handle;
}
@@ -661,30 +661,30 @@ init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name
// read FileMapHeader
size_t n = read(fd, pheader, sizeof(struct FileMapHeader));
if (n != sizeof(struct FileMapHeader)) {
- free(pheader);
- close(fd);
char errMsg[ERR_MSG_SIZE];
sprintf(errMsg, "unable to read shared archive file map header from %s", classes_jsa);
+ close(fd);
+ free(pheader);
THROW_NEW_DEBUGGER_EXCEPTION_(errMsg, 1);
}
// check file magic
if (pheader->_magic != 0xf00baba2) {
- free(pheader);
- close(fd);
char errMsg[ERR_MSG_SIZE];
sprintf(errMsg, "%s has bad shared archive magic 0x%x, expecting 0xf00baba2",
classes_jsa, pheader->_magic);
+ close(fd);
+ free(pheader);
THROW_NEW_DEBUGGER_EXCEPTION_(errMsg, 1);
}
// check version
if (pheader->_version != CURRENT_ARCHIVE_VERSION) {
- free(pheader);
- close(fd);
char errMsg[ERR_MSG_SIZE];
sprintf(errMsg, "%s has wrong shared archive version %d, expecting %d",
classes_jsa, pheader->_version, CURRENT_ARCHIVE_VERSION);
+ close(fd);
+ free(pheader);
THROW_NEW_DEBUGGER_EXCEPTION_(errMsg, 1);
}
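The reordering in saproc.cpp's three error paths is a use-after-free fix, not a cosmetic one: the old code called free(pheader) and close(fd) before formatting the message, so the sprintf calls in the magic and version branches read pheader->_magic and pheader->_version from freed memory. The fixed sequence formats while the header is live and releases just before throwing. The hazard reduced to a minimal C sketch (hypothetical names, not agent code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    struct file_map_header { unsigned int magic; };  // stand-in for FileMapHeader

    // Correct order: format the message while the header is still live,
    // then release the resources.
    static void report_bad_magic(struct file_map_header* h, int fd,
                                 char* msg, size_t len) {
        // WRONG (old order): free(h); close(fd); snprintf(..., h->magic);
        //   -- snprintf would read h->magic from freed memory.
        snprintf(msg, len, "bad shared archive magic 0x%x, expecting 0xf00baba2",
                 h->magic);                // use h first
        close(fd);
        free(h);                           // release last
    }
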
diff --git a/agent/src/share/classes/com/sun/java/swing/action/ActionManager.java b/agent/src/share/classes/com/sun/java/swing/action/ActionManager.java
index 751d4a631..7da5b6534 100644
--- a/agent/src/share/classes/com/sun/java/swing/action/ActionManager.java
+++ b/agent/src/share/classes/com/sun/java/swing/action/ActionManager.java
@@ -46,6 +46,11 @@ public abstract class ActionManager
return manager;
}
+ protected static void setInstance(ActionManager m)
+ {
+ manager = m;
+ }
+
protected abstract void addActions();
protected void addAction(String cmdname, Action action)
@@ -90,6 +95,6 @@ public abstract class ActionManager
private HashMap actions;
private static ActionUtilities utilities = new ActionUtilities();
- protected static ActionManager manager;
+ private static ActionManager manager;
}
diff --git a/agent/src/share/classes/com/sun/java/swing/ui/CommonToolBar.java b/agent/src/share/classes/com/sun/java/swing/ui/CommonToolBar.java
index b4eb95df3..d2d85c308 100644
--- a/agent/src/share/classes/com/sun/java/swing/ui/CommonToolBar.java
+++ b/agent/src/share/classes/com/sun/java/swing/ui/CommonToolBar.java
@@ -46,7 +46,7 @@ public abstract class CommonToolBar extends JToolBar
{
this.manager = manager;
statusBar = status;
- buttonSize = new Dimension(CommonUI.buttconPrefSize);
+ buttonSize = new Dimension(CommonUI.getButtconPrefSize());
buttonInsets = new Insets(0, 0, 0, 0);
addComponents();
}
diff --git a/agent/src/share/classes/com/sun/java/swing/ui/CommonUI.java b/agent/src/share/classes/com/sun/java/swing/ui/CommonUI.java
index e6adc5020..a1c8127f4 100644
--- a/agent/src/share/classes/com/sun/java/swing/ui/CommonUI.java
+++ b/agent/src/share/classes/com/sun/java/swing/ui/CommonUI.java
@@ -373,20 +373,25 @@ public class CommonUI
comp.setCursor(Cursor.getPredefinedCursor(0));
}
- public static final int BUTTON_WIDTH = 100;
- public static final int BUTTON_HEIGHT = 26;
- public static final int BUTTCON_WIDTH = 28;
- public static final int BUTTCON_HEIGHT = 28;
- public static final int SM_BUTTON_WIDTH = 72;
- public static final int SM_BUTTON_HEIGHT = 26;
- public static final int LABEL_WIDTH = 100;
- public static final int LABEL_HEIGHT = 20;
- public static final int TEXT_WIDTH = 150;
- public static final int TEXT_HEIGHT = 20;
- public static Dimension buttonPrefSize = new Dimension(100, 26);
- public static Dimension buttconPrefSize = new Dimension(28, 28);
- public static Dimension smbuttonPrefSize = new Dimension(72, 26);
- public static Dimension labelPrefSize = new Dimension(100, 20);
- public static Dimension textPrefSize = new Dimension(150, 20);
+ public static Dimension getButtconPrefSize()
+ {
+ return buttconPrefSize;
+ }
+
+ private static final int BUTTON_WIDTH = 100;
+ private static final int BUTTON_HEIGHT = 26;
+ private static final int BUTTCON_WIDTH = 28;
+ private static final int BUTTCON_HEIGHT = 28;
+ private static final int SM_BUTTON_WIDTH = 72;
+ private static final int SM_BUTTON_HEIGHT = 26;
+ private static final int LABEL_WIDTH = 100;
+ private static final int LABEL_HEIGHT = 20;
+ private static final int TEXT_WIDTH = 150;
+ private static final int TEXT_HEIGHT = 20;
+ private static final Dimension buttonPrefSize = new Dimension(100, 26);
+ private static final Dimension buttconPrefSize = new Dimension(28, 28);
+ private static final Dimension smbuttonPrefSize = new Dimension(72, 26);
+ private static final Dimension labelPrefSize = new Dimension(100, 20);
+ private static final Dimension textPrefSize = new Dimension(150, 20);
}
diff --git a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java
index e200826e4..9c70c9734 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java
@@ -1446,7 +1446,7 @@ public class CommandProcessor {
if (type.equals("threads")) {
Threads threads = VM.getVM().getThreads();
for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
- Address base = thread.getBaseOfStackPointer();
+ Address base = thread.getStackBase();
Address end = thread.getLastJavaSP();
if (end == null) continue;
if (end.lessThan(base)) {
@@ -1454,11 +1454,13 @@ public class CommandProcessor {
base = end;
end = tmp;
}
- out.println("Searching " + base + " " + end);
+ //out.println("Searching " + base + " " + end);
while (base != null && base.lessThan(end)) {
Address val = base.getAddressAt(0);
if (AddressOps.equal(val, value)) {
- out.println(base);
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ thread.printThreadIDOn(new PrintStream(bos));
+ out.println("found on the stack of thread " + bos.toString() + " at " + base);
}
base = base.addOffsetTo(stride);
}
@@ -1601,6 +1603,8 @@ public class CommandProcessor {
thread.printThreadIDOn(new PrintStream(bos));
if (all || bos.toString().equals(name)) {
out.println("Thread " + bos.toString() + " Address " + thread.getAddress());
+ thread.printInfoOn(out);
+ out.println(" ");
if (!all) return;
}
}
@@ -1618,6 +1622,8 @@ public class CommandProcessor {
for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
thread.printThreadIDOn(out);
out.println(" " + thread.getThreadName());
+ thread.printInfoOn(out);
+ out.println("\n...");
}
}
}
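The CommandProcessor change above switches the search to the thread's real stack base, normalizes the two bounds so the scan always runs low-to-high, walks the range in pointer-sized strides, and now reports which thread's stack held the match. The same scan pattern, reduced to a C sketch over raw pointers (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    // Scan a stack range [base, end) in pointer-sized strides for a value,
    // swapping the bounds first so the walk is always low-to-high.
    static void scan_stack(uintptr_t* base, uintptr_t* end, uintptr_t value,
                           const char* thread_id) {
        if (end < base) { uintptr_t* tmp = base; base = end; end = tmp; }
        for (uintptr_t* p = base; p < end; p++) {
            if (*p == value)
                printf("found on the stack of thread %s at %p\n",
                       thread_id, (void*) p);
        }
    }
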
diff --git a/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java b/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java
index 2b9a7bc72..1097fb4df 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java
@@ -51,6 +51,9 @@ public class HotSpotTypeDataBase extends BasicTypeDataBase {
private static final int C_INT32_SIZE = 4;
private static final int C_INT64_SIZE = 8;
private static int pointerSize = UNINITIALIZED_SIZE;
+ // Counter to ensure read loops terminate:
+ private static final int MAX_DUPLICATE_DEFINITIONS = 100;
+ private int duplicateDefCount = 0;
private static final boolean DEBUG;
static {
@@ -166,6 +169,10 @@ public class HotSpotTypeDataBase extends BasicTypeDataBase {
typeEntrySizeOffset = getLongValueFromProcess("gHotSpotVMTypeEntrySizeOffset");
typeEntryArrayStride = getLongValueFromProcess("gHotSpotVMTypeEntryArrayStride");
+ if (typeEntryArrayStride == 0L) {
+ throw new RuntimeException("zero stride: cannot read types.");
+ }
+
// Start iterating down it until we find an entry with no name
Address typeNameAddr = null;
do {
@@ -192,7 +199,11 @@ public class HotSpotTypeDataBase extends BasicTypeDataBase {
}
entryAddr = entryAddr.addOffsetTo(typeEntryArrayStride);
- } while (typeNameAddr != null);
+ } while (typeNameAddr != null && duplicateDefCount < MAX_DUPLICATE_DEFINITIONS);
+
+ if (duplicateDefCount >= MAX_DUPLICATE_DEFINITIONS) {
+ throw new RuntimeException("too many duplicate definitions");
+ }
}
private void initializePrimitiveTypes() {
@@ -395,6 +406,10 @@ public class HotSpotTypeDataBase extends BasicTypeDataBase {
structEntryAddressOffset = getLongValueFromProcess("gHotSpotVMStructEntryAddressOffset");
structEntryArrayStride = getLongValueFromProcess("gHotSpotVMStructEntryArrayStride");
+ if (structEntryArrayStride == 0L) {
+ throw new RuntimeException("zero stride: cannot read types.");
+ }
+
// Fetch the address of the VMStructEntry*
Address entryAddr = lookupInProcess("gHotSpotVMStructs");
// Dereference this once to get the pointer to the first VMStructEntry
@@ -472,6 +487,11 @@ public class HotSpotTypeDataBase extends BasicTypeDataBase {
intConstantEntryValueOffset = getLongValueFromProcess("gHotSpotVMIntConstantEntryValueOffset");
intConstantEntryArrayStride = getLongValueFromProcess("gHotSpotVMIntConstantEntryArrayStride");
+ if (intConstantEntryArrayStride == 0L) {
+ throw new RuntimeException("zero stride: cannot read types.");
+ }
+
+
// Fetch the address of the VMIntConstantEntry*
Address entryAddr = lookupInProcess("gHotSpotVMIntConstants");
// Dereference this once to get the pointer to the first VMIntConstantEntry
@@ -501,12 +521,17 @@ public class HotSpotTypeDataBase extends BasicTypeDataBase {
} else {
System.err.println("Warning: the int constant \"" + name + "\" (declared in the remote VM in VMStructs::localHotSpotVMIntConstants) " +
"had its value declared as " + value + " twice. Continuing.");
+ duplicateDefCount++;
}
}
}
entryAddr = entryAddr.addOffsetTo(intConstantEntryArrayStride);
- } while (nameAddr != null);
+ } while (nameAddr != null && duplicateDefCount < MAX_DUPLICATE_DEFINITIONS);
+
+ if (duplicateDefCount >= MAX_DUPLICATE_DEFINITIONS) {
+ throw new RuntimeException("too many duplicate definitions");
+ }
}
private void readVMLongConstants() {
@@ -519,6 +543,10 @@ public class HotSpotTypeDataBase extends BasicTypeDataBase {
longConstantEntryValueOffset = getLongValueFromProcess("gHotSpotVMLongConstantEntryValueOffset");
longConstantEntryArrayStride = getLongValueFromProcess("gHotSpotVMLongConstantEntryArrayStride");
+ if (longConstantEntryArrayStride == 0L) {
+ throw new RuntimeException("zero stride: cannot read types.");
+ }
+
// Fetch the address of the VMLongConstantEntry*
Address entryAddr = lookupInProcess("gHotSpotVMLongConstants");
// Dereference this once to get the pointer to the first VMLongConstantEntry
@@ -548,12 +576,17 @@ public class HotSpotTypeDataBase extends BasicTypeDataBase {
} else {
System.err.println("Warning: the long constant \"" + name + "\" (declared in the remote VM in VMStructs::localHotSpotVMLongConstants) " +
"had its value declared as " + value + " twice. Continuing.");
+ duplicateDefCount++;
}
}
}
entryAddr = entryAddr.addOffsetTo(longConstantEntryArrayStride);
- } while (nameAddr != null);
+ } while (nameAddr != null && duplicateDefCount < MAX_DUPLICATE_DEFINITIONS);
+
+ if (duplicateDefCount >= MAX_DUPLICATE_DEFINITIONS) {
+ throw new RuntimeException("too many duplicate definitions.");
+ }
}
private BasicType lookupOrFail(String typeName) {
@@ -740,9 +773,10 @@ public class HotSpotTypeDataBase extends BasicTypeDataBase {
}
if (!typeNameIsPointerType(typeName)) {
- System.err.println("Warning: the type \"" + typeName + "\" (declared in the remote VM in VMStructs::localHotSpotVMTypes) " +
- "had its size declared as " + size + " twice. Continuing.");
- }
+ System.err.println("Warning: the type \"" + typeName + "\" (declared in the remote VM in VMStructs::localHotSpotVMTypes) " +
+ "had its size declared as " + size + " twice. Continuing.");
+ duplicateDefCount++;
+ }
}
}
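Every reader in this file now shares the same guard: count duplicate definitions, stop the do/while once MAX_DUPLICATE_DEFINITIONS is reached, and fail loudly rather than spin forever on a corrupt entry table. A standalone sketch of that pattern (the entries array and duplicate tracking are illustrative, not the SA's actual readers):

    import java.util.HashSet;
    import java.util.Set;

    public class DuplicateGuardSketch {
        private static final int MAX_DUPLICATE_DEFINITIONS = 100;

        public static void main(String[] args) {
            // Tiny stand-in for the VM's entry table; null marks the end.
            String[] entries = {"intx", "uintx", "intx", null};
            Set<String> seen = new HashSet<>();
            int duplicateDefCount = 0;
            int i = 0;
            String name;
            do {
                name = entries[i++];
                if (name != null && !seen.add(name)) {
                    duplicateDefCount++; // duplicate definition: note it, keep scanning
                }
            } while (name != null && duplicateDefCount < MAX_DUPLICATE_DEFINITIONS);
            if (duplicateDefCount >= MAX_DUPLICATE_DEFINITIONS) {
                throw new RuntimeException("too many duplicate definitions");
            }
            System.out.println("duplicates seen: " + duplicateDefCount); // prints 1
        }
    }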
diff --git a/agent/src/share/classes/sun/jvm/hotspot/gc_interface/G1YCType.java b/agent/src/share/classes/sun/jvm/hotspot/gc_interface/G1YCType.java
new file mode 100644
index 000000000..dba3566f1
--- /dev/null
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_interface/G1YCType.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_interface;
+
+//These definitions should be kept in sync with the definitions in the HotSpot
+//code.
+
+public enum G1YCType {
+ Normal ("Normal"),
+ InitialMark ("Initial Mark"),
+ DuringMark ("During Mark"),
+ Mixed ("Mixed"),
+ G1YCTypeEndSentinel ("Unknown");
+
+ private final String value;
+
+ G1YCType(String val) {
+ this.value = val;
+ }
+ public String value() {
+ return value;
+ }
+}
diff --git a/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCCause.java b/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCCause.java
new file mode 100644
index 000000000..bc75c0fac
--- /dev/null
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCCause.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_interface;
+
+//These definitions should be kept in sync with the definitions in the HotSpot code.
+
+public enum GCCause {
+ _java_lang_system_gc ("System.gc()"),
+ _full_gc_alot ("FullGCAlot"),
+ _scavenge_alot ("ScavengeAlot"),
+ _allocation_profiler ("Allocation Profiler"),
+ _jvmti_force_gc ("JvmtiEnv ForceGarbageCollection"),
+ _gc_locker ("GCLocker Initiated GC"),
+ _heap_inspection ("Heap Inspection Initiated GC"),
+ _heap_dump ("Heap Dump Initiated GC"),
+
+ _no_gc ("No GC"),
+ _no_cause_specified ("Unknown GCCause"),
+ _allocation_failure ("Allocation Failure"),
+
+ _tenured_generation_full ("Tenured Generation Full"),
+ _metadata_GC_threshold ("Metadata GC Threshold"),
+
+ _cms_generation_full ("CMS Generation Full"),
+ _cms_initial_mark ("CMS Initial Mark"),
+ _cms_final_remark ("CMS Final Remark"),
+ _cms_concurrent_mark ("CMS Concurrent Mark"),
+
+ _old_generation_expanded_on_last_scavenge ("Old Generation Expanded On Last Scavenge"),
+ _old_generation_too_full_to_scavenge ("Old Generation Too Full To Scavenge"),
+ _adaptive_size_policy ("Ergonomics"),
+
+ _g1_inc_collection_pause ("G1 Evacuation Pause"),
+ _g1_humongous_allocation ("G1 Humongous Allocation"),
+
+ _last_ditch_collection ("Last ditch collection"),
+ _last_gc_cause ("ILLEGAL VALUE - last gc cause - ILLEGAL VALUE");
+
+ private final String value;
+
+ GCCause(String val) {
+ this.value = val;
+ }
+ public String value() {
+ return value;
+ }
+}
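The new gc_interface enums exist so the agent can render raw values read out of a target VM as readable strings. A usage sketch (indexing the enum by the VM-side integer's ordinal is an assumption about how callers map the value):

    import sun.jvm.hotspot.gc_interface.GCCause;

    public class GCCauseSketch {
        public static void main(String[] args) {
            int rawCause = 0; // stand-in for a cause code read from the target VM
            GCCause cause = GCCause.values()[rawCause];
            System.out.println(cause + " -> " + cause.value()); // _java_lang_system_gc -> System.gc()
        }
    }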
diff --git a/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCName.java b/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCName.java
new file mode 100644
index 000000000..bf0000870
--- /dev/null
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCName.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_interface;
+
+//These definitions should be kept in sync with the definitions in the HotSpot code.
+
+public enum GCName {
+ ParallelOld ("ParallelOld"),
+ SerialOld ("SerialOld"),
+ PSMarkSweep ("PSMarkSweep"),
+ ParallelScavenge ("ParallelScavenge"),
+ DefNew ("DefNew"),
+ ParNew ("ParNew"),
+ G1New ("G1New"),
+ ConcurrentMarkSweep ("ConcurrentMarkSweep"),
+ G1Old ("G1Old"),
+ GCNameEndSentinel ("GCNameEndSentinel");
+
+ private final String value;
+
+ GCName(String val) {
+ this.value = val;
+ }
+ public String value() {
+ return value;
+ }
+}
diff --git a/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCWhen.java b/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCWhen.java
new file mode 100644
index 000000000..5337b2b82
--- /dev/null
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCWhen.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_interface;
+
+//These definitions should be kept in sync with the definitions in the HotSpot code.
+
+public enum GCWhen {
+ BeforeGC ("Before GC"),
+ AfterGC ("After GC"),
+ GCWhenEndSentinel ("GCWhenEndSentinel");
+
+ private final String value;
+
+ GCWhen(String val) {
+ this.value = val;
+ }
+ public String value() {
+ return value;
+ }
+}
diff --git a/agent/src/share/classes/sun/jvm/hotspot/gc_interface/ReferenceType.java b/agent/src/share/classes/sun/jvm/hotspot/gc_interface/ReferenceType.java
new file mode 100644
index 000000000..23f0cdd0d
--- /dev/null
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_interface/ReferenceType.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_interface;
+
+//These definitions should be kept in sync with the definitions in the HotSpot code.
+
+public enum ReferenceType {
+ REF_NONE ("None reference"), // Regular class
+ REF_OTHER ("Other reference"), // Subclass of java/lang/ref/Reference, but not subclass of one of the classes below
+ REF_SOFT ("Soft reference"), // Subclass of java/lang/ref/SoftReference
+ REF_WEAK ("Weak reference"), // Subclass of java/lang/ref/WeakReference
+ REF_FINAL ("Final reference"), // Subclass of java/lang/ref/FinalReference
+ REF_PHANTOM ("Phantom reference"); // Subclass of java/lang/ref/PhantomReference
+
+ private final String value;
+
+ ReferenceType(String val) {
+ this.value = val;
+ }
+ public String value() {
+ return value;
+ }
+}
diff --git a/agent/src/share/classes/sun/jvm/hotspot/memory/AdaptiveFreeList.java b/agent/src/share/classes/sun/jvm/hotspot/memory/AdaptiveFreeList.java
new file mode 100644
index 000000000..19dca2610
--- /dev/null
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/AdaptiveFreeList.java
@@ -0,0 +1,77 @@
+/*
+ * @(#)AdaptiveFreeList.java
+ *
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.memory;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+public class AdaptiveFreeList extends VMObject {
+ static {
+ VM.registerVMInitializedObserver(new Observer() {
+ public void update(Observable o, Object data) {
+ initialize(VM.getVM().getTypeDataBase());
+ }
+ });
+ }
+
+ private static synchronized void initialize(TypeDataBase db) {
+ Type type = db.lookupType("AdaptiveFreeList<FreeChunk>");
+ sizeField = type.getCIntegerField("_size");
+ countField = type.getCIntegerField("_count");
+ headerSize = type.getSize();
+ }
+
+ // Fields
+ private static CIntegerField sizeField;
+ private static CIntegerField countField;
+ private static long headerSize;
+
+ //Constructor
+ public AdaptiveFreeList(Address address) {
+ super(address);
+ }
+
+ // Accessors
+ public long size() {
+ return sizeField.getValue(addr);
+ }
+
+ public long count() {
+ return countField.getValue(addr);
+ }
+
+ public static long sizeOf() {
+ return headerSize;
+ }
+}
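AdaptiveFreeList is a drop-in replacement for the FreeList wrapper deleted below, tracking the VM-side rename of FreeList&lt;FreeChunk&gt; to AdaptiveFreeList&lt;FreeChunk&gt;. A sketch of how a caller wraps one indexed-free-list slot and steps to the next, as the CompactibleFreeListSpace change does (only meaningful inside an attached SA session; the helper names are illustrative):

    import sun.jvm.hotspot.debugger.Address;
    import sun.jvm.hotspot.memory.AdaptiveFreeList;
    import sun.jvm.hotspot.runtime.VMObjectFactory;

    public class FreeListWalkSketch {
        // Wrap the AdaptiveFreeList<FreeChunk> header that starts at cur.
        static AdaptiveFreeList listAt(Address cur) {
            return (AdaptiveFreeList) VMObjectFactory.newObject(AdaptiveFreeList.class, cur);
        }

        // Advance by stride slots; each slot is one list header.
        static Address next(Address cur, int stride) {
            return cur.addOffsetTo(stride * AdaptiveFreeList.sizeOf());
        }
    }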
diff --git a/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java b/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java
index 23cb0bcb7..ed26af725 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,25 +24,29 @@
package sun.jvm.hotspot.memory;
-import java.io.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.debugger.Debugger;
+import sun.jvm.hotspot.oops.ObjectHeap;
+import sun.jvm.hotspot.oops.Oop;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+import sun.jvm.hotspot.utilities.Assert;
public class CompactibleFreeListSpace extends CompactibleSpace {
private static AddressField collectorField;
-
- // for free size, three fields
- // FreeBlockDictionary* _dictionary; // ptr to dictionary for large size blocks
- // FreeList _indexedFreeList[IndexSetSize]; // indexed array for small size blocks
- // LinearAllocBlock _smallLinearAllocBlock; // small linear alloc in TLAB
private static AddressField indexedFreeListField;
private static AddressField dictionaryField;
private static long smallLinearAllocBlockFieldOffset;
- private static long indexedFreeListSizeOf;
private int heapWordSize; // 4 for 32bit, 8 for 64 bits
private int IndexSetStart; // for small indexed list
@@ -109,11 +113,11 @@ public class CompactibleFreeListSpace extends CompactibleSpace {
// small chunks
long size = 0;
Address cur = addr.addOffsetTo( indexedFreeListField.getOffset() );
- cur = cur.addOffsetTo(IndexSetStart*FreeList.sizeOf());
+ cur = cur.addOffsetTo(IndexSetStart*AdaptiveFreeList.sizeOf());
for (int i=IndexSetStart; i<IndexSetSize; i += IndexSetStride) {
- FreeList freeList = (FreeList) VMObjectFactory.newObject(FreeList.class, cur);
+ AdaptiveFreeList freeList = (AdaptiveFreeList) VMObjectFactory.newObject(AdaptiveFreeList.class, cur);
size += i*freeList.count();
- cur= cur.addOffsetTo(IndexSetStride*FreeList.sizeOf());
+ cur= cur.addOffsetTo(IndexSetStride*AdaptiveFreeList.sizeOf());
}
// large block
diff --git a/agent/src/share/classes/sun/jvm/hotspot/memory/FreeList.java b/agent/src/share/classes/sun/jvm/hotspot/memory/FreeList.java
deleted file mode 100644
index 2d0024e1d..000000000
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/FreeList.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * @(#)FreeList.java
- *
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.memory;
-
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.runtime.*;
-
-public class FreeList extends VMObject {
- static {
- VM.registerVMInitializedObserver(new Observer() {
- public void update(Observable o, Object data) {
- initialize(VM.getVM().getTypeDataBase());
- }
- });
- }
-
- private static synchronized void initialize(TypeDataBase db) {
- Type type = db.lookupType("FreeList<FreeChunk>");
- sizeField = type.getCIntegerField("_size");
- countField = type.getCIntegerField("_count");
- headerSize = type.getSize();
- }
-
- // Fields
- private static CIntegerField sizeField;
- private static CIntegerField countField;
- private static long headerSize;
-
- //Constructor
- public FreeList(Address address) {
- super(address);
- }
-
- // Accessors
- public long size() {
- return sizeField.getValue(addr);
- }
-
- public long count() {
- return countField.getValue(addr);
- }
-
- public static long sizeOf() {
- return headerSize;
- }
-}
diff --git a/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java b/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java
index c23ce3a40..41cc12e23 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,6 +56,12 @@ public class Universe {
private static AddressField narrowKlassBaseField;
private static CIntegerField narrowKlassShiftField;
+ public enum NARROW_OOP_MODE {
+ UnscaledNarrowOop,
+ ZeroBasedNarrowOop,
+ HeapBasedNarrowOop
+ }
+
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
@@ -94,7 +100,17 @@ public class Universe {
public Universe() {
}
-
+ public static String narrowOopModeToString(NARROW_OOP_MODE mode) {
+ switch (mode) {
+ case UnscaledNarrowOop:
+ return "32-bits Oops";
+ case ZeroBasedNarrowOop:
+ return "zero based Compressed Oops";
+ case HeapBasedNarrowOop:
+ return "Compressed Oops with base";
+ }
+ return "";
+ }
public CollectedHeap heap() {
try {
return (CollectedHeap) heapConstructor.instantiateWrapperFor(collectedHeapField.getValue());
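A quick usage sketch for the new helper, printing the label the agent will use for each compressed-oops mode (assumes only that the SA classes are on the classpath):

    import sun.jvm.hotspot.memory.Universe;

    public class NarrowOopModeSketch {
        public static void main(String[] args) {
            for (Universe.NARROW_OOP_MODE m : Universe.NARROW_OOP_MODE.values()) {
                System.out.println(m + " -> " + Universe.narrowOopModeToString(m));
            }
        }
    }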
diff --git a/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java b/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java
index 9e1561bd3..fe1779d6d 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,6 +55,10 @@ public class Klass extends Metadata implements ClassConstants {
layoutHelper = new IntField(type.getJIntField("_layout_helper"), 0);
name = type.getAddressField("_name");
accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0);
+ try {
+ traceIDField = type.getField("_trace_id");
+ } catch(Exception e) {
+ }
subklass = new MetadataField(type.getAddressField("_subklass"), 0);
nextSibling = new MetadataField(type.getAddressField("_next_sibling"), 0);
@@ -86,6 +90,7 @@ public class Klass extends Metadata implements ClassConstants {
private static CIntField accessFlags;
private static MetadataField subklass;
private static MetadataField nextSibling;
+ private static sun.jvm.hotspot.types.Field traceIDField;
private Address getValue(AddressField field) {
return addr.getAddressAt(field.getOffset());
@@ -107,6 +112,11 @@ public class Klass extends Metadata implements ClassConstants {
public Klass getSubklassKlass() { return (Klass) subklass.getValue(this); }
public Klass getNextSiblingKlass() { return (Klass) nextSibling.getValue(this); }
+ public long traceID() {
+ if (traceIDField == null) return 0;
+ return traceIDField.getJLong(addr);
+ }
+
// computed access flags - takes care of inner classes etc.
// This is closer to actual source level than getAccessFlags() etc.
public long computeModifierFlags() {
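The try/catch makes _trace_id an optional field: the lookup is probed once, traceIDField stays null if it throws, and traceID() degrades to 0 (that the field only exists in some VM builds is an inference from this defensive lookup). The idiom on its own:

    import sun.jvm.hotspot.debugger.Address;
    import sun.jvm.hotspot.types.Field;
    import sun.jvm.hotspot.types.Type;

    public class OptionalFieldSketch {
        private static Field traceIDField;

        static void initialize(Type type) {
            try {
                traceIDField = type.getField("_trace_id"); // may not exist in this VM build
            } catch (Exception e) {
                // field absent: leave traceIDField null
            }
        }

        static long traceID(Address addr) {
            return (traceIDField == null) ? 0 : traceIDField.getJLong(addr);
        }
    }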
diff --git a/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java b/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java
index a42d8be68..aa5f2bc21 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,8 @@ public class OopUtilities implements /* imports */ JVMTIThreadState {
private static OopField threadNameField;
private static OopField threadGroupField;
private static LongField threadEETopField;
+ // tid field is new since 1.5
+ private static LongField threadTIDField;
// threadStatus field is new since 1.5
private static IntField threadStatusField;
// parkBlocker field is new since 1.6
@@ -220,6 +222,7 @@ public class OopUtilities implements /* imports */ JVMTIThreadState {
threadNameField = (OopField) k.findField("name", "[C");
threadGroupField = (OopField) k.findField("group", "Ljava/lang/ThreadGroup;");
threadEETopField = (LongField) k.findField("eetop", "J");
+ threadTIDField = (LongField) k.findField("tid", "J");
threadStatusField = (IntField) k.findField("threadStatus", "I");
threadParkBlockerField = (OopField) k.findField("parkBlocker",
"Ljava/lang/Object;");
@@ -268,6 +271,15 @@ public class OopUtilities implements /* imports */ JVMTIThreadState {
return VM.getVM().getThreads().createJavaThreadWrapper(addr);
}
+ public static long threadOopGetTID(Oop threadOop) {
+ initThreadFields();
+ if (threadTIDField != null) {
+ return threadTIDField.getValue(threadOop);
+ } else {
+ return 0;
+ }
+ }
+
/** returns value of java.lang.Thread.threadStatus field */
public static int threadOopGetThreadStatus(Oop threadOop) {
initThreadFields();
diff --git a/agent/src/share/classes/sun/jvm/hotspot/opto/CompilerPhaseType.java b/agent/src/share/classes/sun/jvm/hotspot/opto/CompilerPhaseType.java
new file mode 100644
index 000000000..2405c22f3
--- /dev/null
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/CompilerPhaseType.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.opto;
+
+//These definitions should be kept in sync with the definitions in the HotSpot code.
+
+public enum CompilerPhaseType {
+ PHASE_BEFORE_STRINGOPTS ("Before StringOpts"),
+ PHASE_AFTER_STRINGOPTS ("After StringOpts"),
+ PHASE_BEFORE_REMOVEUSELESS ("Before RemoveUseless"),
+ PHASE_AFTER_PARSING ("After Parsing"),
+ PHASE_ITER_GVN1 ("Iter GVN 1"),
+ PHASE_PHASEIDEAL_BEFORE_EA ("PhaseIdealLoop before EA"),
+ PHASE_ITER_GVN_AFTER_EA ("Iter GVN after EA"),
+ PHASE_ITER_GVN_AFTER_ELIMINATION ("Iter GVN after eliminating allocations and locks"),
+ PHASE_PHASEIDEALLOOP1 ("PhaseIdealLoop 1"),
+ PHASE_PHASEIDEALLOOP2 ("PhaseIdealLoop 2"),
+ PHASE_PHASEIDEALLOOP3 ("PhaseIdealLoop 3"),
+ PHASE_CPP1 ("PhaseCPP 1"),
+ PHASE_ITER_GVN2 ("Iter GVN 2"),
+ PHASE_PHASEIDEALLOOP_ITERATIONS ("PhaseIdealLoop iterations"),
+ PHASE_OPTIMIZE_FINISHED ("Optimize finished"),
+ PHASE_GLOBAL_CODE_MOTION ("Global code motion"),
+ PHASE_FINAL_CODE ("Final Code"),
+ PHASE_AFTER_EA ("After Escape Analysis"),
+ PHASE_BEFORE_CLOOPS ("Before CountedLoop"),
+ PHASE_AFTER_CLOOPS ("After CountedLoop"),
+ PHASE_BEFORE_BEAUTIFY_LOOPS ("Before beautify loops"),
+ PHASE_AFTER_BEAUTIFY_LOOPS ("After beautify loops"),
+ PHASE_BEFORE_MATCHING ("Before Matching"),
+ PHASE_INCREMENTAL_INLINE ("Incremental Inline"),
+ PHASE_INCREMENTAL_BOXING_INLINE ("Incremental Boxing Inline"),
+ PHASE_END ("End"),
+ PHASE_FAILURE ("Failure"),
+ PHASE_NUM_TYPES ("Number of Phase Types");
+
+ private final String value;
+
+ CompilerPhaseType(String val) {
+ this.value = val;
+ }
+ public String value() {
+ return value;
+ }
+}
diff --git a/agent/src/share/classes/sun/jvm/hotspot/runtime/Flags.java b/agent/src/share/classes/sun/jvm/hotspot/runtime/Flags.java
new file mode 100644
index 000000000..4a0abc224
--- /dev/null
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/Flags.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime;
+
+//These definitions should be kept in sync with the definitions in the HotSpot code.
+
+public enum Flags {
+ // value origin
+ DEFAULT ("Default"),
+ COMMAND_LINE ("Command line"),
+ ENVIRON_VAR ("Environment variable"),
+ CONFIG_FILE ("Config file"),
+ MANAGEMENT ("Management"),
+ ERGONOMIC ("Ergonomic"),
+ ATTACH_ON_DEMAND ("Attach on demand"),
+ INTERNAL ("Internal");
+
+ private final String value;
+
+ Flags(String val) {
+ this.value = val;
+ }
+ public String value() {
+ return value;
+ }
+}
diff --git a/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaThread.java b/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaThread.java
index 926c11c4b..fa5844097 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaThread.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaThread.java
@@ -415,7 +415,7 @@ public class JavaThread extends Thread {
} else {
tty.println("No Java frames present");
}
- tty.println("Base of Stack: " + getBaseOfStackPointer());
+ tty.println("Base of Stack: " + getStackBase());
tty.println("Last_Java_SP: " + getLastJavaSP());
tty.println("Last_Java_FP: " + getLastJavaFP());
tty.println("Last_Java_PC: " + getLastJavaPC());
diff --git a/agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java b/agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java
index 2948f7b95..51644d7e8 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,6 +41,8 @@ public class Thread extends VMObject {
private static AddressField currentPendingMonitorField;
private static AddressField currentWaitingMonitorField;
+ private static JLongField allocatedBytesField;
+
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
@@ -61,6 +63,7 @@ public class Thread extends VMObject {
activeHandlesField = type.getAddressField("_active_handles");
currentPendingMonitorField = type.getAddressField("_current_pending_monitor");
currentWaitingMonitorField = type.getAddressField("_current_waiting_monitor");
+ allocatedBytesField = type.getJLongField("_allocated_bytes");
}
public Thread(Address addr) {
@@ -104,6 +107,10 @@ public class Thread extends VMObject {
return new JNIHandleBlock(a);
}
+ public long allocatedBytes() {
+ return allocatedBytesField.getValue(addr);
+ }
+
public boolean isVMThread() { return false; }
public boolean isJavaThread() { return false; }
public boolean isCompilerThread() { return false; }
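With _allocated_bytes exposed, per-thread allocation can be aggregated the same way CommandProcessor iterates threads above. A sketch (an attached SA session is assumed):

    import sun.jvm.hotspot.runtime.JavaThread;
    import sun.jvm.hotspot.runtime.Threads;
    import sun.jvm.hotspot.runtime.VM;

    public class AllocatedBytesSketch {
        static long totalAllocatedBytes() {
            Threads threads = VM.getVM().getThreads();
            long total = 0;
            for (JavaThread t = threads.first(); t != null; t = t.next()) {
                total += t.allocatedBytes(); // reads Thread::_allocated_bytes
            }
            return total;
        }
    }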
diff --git a/agent/src/share/classes/sun/jvm/hotspot/runtime/VMOps.java b/agent/src/share/classes/sun/jvm/hotspot/runtime/VMOps.java
new file mode 100644
index 000000000..84f535285
--- /dev/null
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VMOps.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime;
+
+//These definitions should be kept in sync with the definitions in the HotSpot code.
+
+public enum VMOps {
+ Dummy,
+ ThreadStop,
+ ThreadDump,
+ PrintThreads,
+ FindDeadlocks,
+ ForceSafepoint,
+ ForceAsyncSafepoint,
+ Deoptimize,
+ DeoptimizeFrame,
+ DeoptimizeAll,
+ ZombieAll,
+ UnlinkSymbols,
+ Verify,
+ PrintJNI,
+ HeapDumper,
+ DeoptimizeTheWorld,
+ CollectForMetadataAllocation,
+ GC_HeapInspection,
+ GenCollectFull,
+ GenCollectFullConcurrent,
+ GenCollectForAllocation,
+ ParallelGCFailedAllocation,
+ ParallelGCSystemGC,
+ CGC_Operation,
+ CMS_Initial_Mark,
+ CMS_Final_Remark,
+ G1CollectFull,
+ G1CollectForAllocation,
+ G1IncCollectionPause,
+ EnableBiasedLocking,
+ RevokeBias,
+ BulkRevokeBias,
+ PopulateDumpSharedSpace,
+ JNIFunctionTableCopier,
+ RedefineClasses,
+ GetOwnedMonitorInfo,
+ GetObjectMonitorUsage,
+ GetCurrentContendedMonitor,
+ GetStackTrace,
+ GetMultipleStackTraces,
+ GetAllStackTraces,
+ GetThreadListStackTraces,
+ GetFrameCount,
+ GetFrameLocation,
+ ChangeBreakpoints,
+ GetOrSetLocal,
+ GetCurrentLocation,
+ EnterInterpOnlyMode,
+ ChangeSingleStep,
+ HeapWalkOperation,
+ HeapIterateOperation,
+ ReportJavaOutOfMemory,
+ JFRCheckpoint,
+ Exit,
+ LinuxDllLoad,
+ Terminating
+}
diff --git a/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java b/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java
index 69f8c7952..28f854ce3 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java
@@ -317,26 +317,17 @@ public class X86Frame extends Frame {
//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
private void adjustUnextendedSP() {
- // If we are returning to a compiled MethodHandle call site, the
- // saved_fp will in fact be a saved value of the unextended SP. The
- // simplest way to tell whether we are returning to such a call site
- // is as follows:
+ // On x86, sites calling method handle intrinsics and lambda forms are treated
+ // as any other call site. Therefore, no special action is needed when we are
+ // returning to any of these call sites.
CodeBlob cb = cb();
NMethod senderNm = (cb == null) ? null : cb.asNMethodOrNull();
if (senderNm != null) {
- // If the sender PC is a deoptimization point, get the original
- // PC. For MethodHandle call site the unextended_sp is stored in
- // saved_fp.
- if (senderNm.isDeoptMhEntry(getPC())) {
- // DEBUG_ONLY(verifyDeoptMhOriginalPc(senderNm, getFP()));
- raw_unextendedSP = getFP();
- }
- else if (senderNm.isDeoptEntry(getPC())) {
- // DEBUG_ONLY(verifyDeoptOriginalPc(senderNm, raw_unextendedSp));
- }
- else if (senderNm.isMethodHandleReturn(getPC())) {
- raw_unextendedSP = getFP();
+ // If the sender PC is a deoptimization point, get the original PC.
+ if (senderNm.isDeoptEntry(getPC()) ||
+ senderNm.isDeoptMhEntry(getPC())) {
+ // DEBUG_ONLY(verifyDeoptOriginalPc(senderNm, raw_unextendedSp));
}
}
}
diff --git a/agent/src/share/classes/sun/jvm/hotspot/ui/action/HSDBActionManager.java b/agent/src/share/classes/sun/jvm/hotspot/ui/action/HSDBActionManager.java
index f234231e3..1ebcb1b38 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/action/HSDBActionManager.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/ui/action/HSDBActionManager.java
@@ -32,10 +32,12 @@ import com.sun.java.swing.action.ActionManager;
public class HSDBActionManager extends ActionManager {
public static ActionManager getInstance() {
- if (manager == null) {
- manager = new HSDBActionManager();
+ ActionManager m = ActionManager.getInstance();
+ if (m == null) {
+ m = new HSDBActionManager();
+ ActionManager.setInstance(m);
}
- return manager;
+ return m;
}
protected void addActions() {
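The refactor stops HSDBActionManager from writing the base class's protected manager field directly and instead goes through ActionManager's accessors (the setInstance counterpart is presumably part of this commit's ActionManager change). The shape of the pattern, reduced to a standalone sketch:

    // The base class owns the singleton; subclasses create and publish
    // through its accessors instead of touching a protected field.
    abstract class BaseManager {
        private static BaseManager instance;
        static BaseManager getInstance()       { return instance; }
        static void setInstance(BaseManager m) { instance = m; }
    }

    class DerivedManager extends BaseManager {
        static BaseManager obtain() {
            BaseManager m = BaseManager.getInstance();
            if (m == null) {
                m = new DerivedManager(); // first use: create and publish
                BaseManager.setInstance(m);
            }
            return m;
        }
    }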
diff --git a/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java b/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java
index 60df0ffe9..319aecdaa 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -799,6 +799,18 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
writeObjectID(klass.getJavaMirror());
ClassData cd = (ClassData) classDataCache.get(klass);
+ if (cd == null) {
+ // The class is not present in the system dictionary; it is probably a lambda class.
+ // Add it to the cache here.
+ if (klass instanceof InstanceKlass) {
+ InstanceKlass ik = (InstanceKlass) klass;
+ List fields = getInstanceFields(ik);
+ int instSize = getSizeForFields(fields);
+ cd = new ClassData(instSize, fields);
+ classDataCache.put(ik, cd);
+ }
+ }
+
if (Assert.ASSERTS_ENABLED) {
Assert.that(cd != null, "can not get class data for " + klass.getName().asString() + klass.getAddress());
}
diff --git a/make/aix/makefiles/adlc.make b/make/aix/makefiles/adlc.make
index 4355d73b7..8407fd8f5 100644
--- a/make/aix/makefiles/adlc.make
+++ b/make/aix/makefiles/adlc.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -140,13 +140,7 @@ ADLCFLAGS += $(SYSDEFS)
# Note "+="; it is a hook so flags.make can add more flags, like -g or -DFOO.
ADLCFLAGS += -q -T
-# Normally, debugging is done directly on the ad_<arch>*.cpp files.
-# But -g will put #line directives in those files pointing back to <arch>.ad.
-# Some builds of gcc 3.2 have a bug that gets tickled by the extra #line directives
-# so skip it for 3.2 and ealier.
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
ADLCFLAGS += -g
-endif
ifdef LP64
ADLCFLAGS += -D_LP64
diff --git a/make/aix/makefiles/ppc64.make b/make/aix/makefiles/ppc64.make
index 079d07647..2634d8adc 100644
--- a/make/aix/makefiles/ppc64.make
+++ b/make/aix/makefiles/ppc64.make
@@ -1,6 +1,6 @@
#
-# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
-# Copyright 2012, 2013 SAP AG. All rights reserved.
+# Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2015 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,9 @@ CFLAGS += -qsuppress=1540-0198
# - 1540-1090 (I) The destructor of "..." might not be called.
# - 1500-010: (W) WARNING in ...: Infinite loop. Program may not stop.
# There are several infinite loops in the vm, suppress.
-CFLAGS += -qsuppress=1540-1090 -qsuppress=1500-010
+# - 1540-1639 (I) The behavior of long type bit fields has changed ...
+# ... long type bit fields now default to long, not int.
+CFLAGS += -qsuppress=1540-1090 -qsuppress=1500-010 -qsuppress=1540-1639
# Suppress
# - 540-1088 (W) The exception specification is being ignored.
@@ -69,9 +71,6 @@ OPT_CFLAGS += -qstrict
OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT)
OPT_CFLAGS/sharedRuntimeTrans.o = $(OPT_CFLAGS/NOOPT)
-# xlc 10.01 parameters for ipa compile.
-QIPA_COMPILE=$(if $(CXX_IS_V10),-qipa)
-
# Xlc 10.1 parameters for aggressive optimization:
# - qhot=level=1: Most aggressive loop optimizations.
# - qignerrno: Assume errno is not modified by system calls.
@@ -86,7 +85,7 @@ QV10_OPT_CONSERVATIVE=$(if $(CXX_IS_V10),-qhot=level=1 -qignerrno -qinline)
OPT_CFLAGS/synchronizer.o = $(OPT_CFLAGS) -qnoinline
# Set all the xlC V10.1 options here.
-OPT_CFLAGS += $(QIPA_COMPILE) $(QV10_OPT) $(QV10_OPT_AGGRESSIVE)
+OPT_CFLAGS += $(QV10_OPT) $(QV10_OPT_AGGRESSIVE)
export OBJECT_MODE=64
diff --git a/make/aix/makefiles/xlc.make b/make/aix/makefiles/xlc.make
index fa2ceaf25..cf8d085c3 100644
--- a/make/aix/makefiles/xlc.make
+++ b/make/aix/makefiles/xlc.make
@@ -1,6 +1,6 @@
#
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
-# Copyright (c) 2012, 2013 SAP. All rights reserved.
+# Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2015 SAP. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -34,13 +34,17 @@ HOSTCC = $(CC)
AS = $(CC) -c
-# get xlc version
-CXX_VERSION := $(shell $(CXX) -qversion 2>&1 | sed -n 's/.*Version: \([0-9.]*\)/\1/p')
+# get xlc version which comes as VV.RR.MMMM.LLLL where 'VV' is the version,
+# 'RR' is the release, 'MMMM' is the modification and 'LLLL' is the level.
+# We only use 'VV.RR.LLLL' to avoid integer overflows in bash when comparing
+# the version numbers (some shells only support 32-bit integer compares!).
+CXX_VERSION := $(shell $(CXX) -qversion 2>&1 | \
+ sed -n 's/.*Version: \([0-9]\{2\}\).\([0-9]\{2\}\).[0-9]\{4\}.\([0-9]\{4\}\)/\1\2\3/p')
# xlc 08.00.0000.0023 and higher supports -qtune=balanced
-CXX_SUPPORTS_BALANCED_TUNING=$(shell if [ $(subst .,,$(CXX_VERSION)) -ge 080000000023 ] ; then echo "true" ; fi)
+CXX_SUPPORTS_BALANCED_TUNING := $(shell if [ $(CXX_VERSION) -ge 08000023 ] ; then echo "true" ; fi)
# xlc 10.01 is used with aggressive optimizations to boost performance
-CXX_IS_V10=$(shell if [ $(subst .,,$(CXX_VERSION)) -ge 100100000000 ] ; then echo "true" ; fi)
+CXX_IS_V10 := $(shell if [ $(CXX_VERSION) -ge 10010000 ] ; then echo "true" ; fi)
# check for precompiled headers support
@@ -130,7 +134,7 @@ STATIC_STDCXX = -Wl,-lC_r
# MAPFLAG = -Xlinker --version-script=FILENAME
# Build shared library
-SHARED_FLAG = -q64 -b64 -bexpall -G -bnoentry -qmkshrobj -brtl -bnolibpath
+SHARED_FLAG = -q64 -b64 -bexpall -G -bnoentry -qmkshrobj -brtl -bnolibpath -bernotok
#------------------------------------------------------------------------
# Debug flags
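The reworked check packs VV.RR.MMMM.LLLL down to an eight-digit VV RR LLLL key because the old twelve-digit key (the full version string with the dots stripped) overflows a signed 32-bit integer in shells whose test builtin compares in 32 bits. The arithmetic, demonstrated:

    public class XlcVersionKeySketch {
        public static void main(String[] args) {
            long oldKey = 80000000023L; // "08.00.0000.0023" with dots removed
            long newKey = 8000023L;     // same version reduced to VV RR LLLL
            System.out.println("int max : " + Integer.MAX_VALUE);             // 2147483647
            System.out.println("old fits: " + (oldKey <= Integer.MAX_VALUE)); // false
            System.out.println("new fits: " + (newKey <= Integer.MAX_VALUE)); // true
        }
    }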
diff --git a/make/bsd/makefiles/gcc.make b/make/bsd/makefiles/gcc.make
index 840148e63..9571ad7ad 100644
--- a/make/bsd/makefiles/gcc.make
+++ b/make/bsd/makefiles/gcc.make
@@ -349,7 +349,7 @@ ifeq ($(OS_VENDOR), Darwin)
# The macro takes the version with no dots, ex: 1070
CFLAGS += -DMAC_OS_X_VERSION_MAX_ALLOWED=$(subst .,,$(MACOSX_VERSION_MIN)) \
-mmacosx-version-min=$(MACOSX_VERSION_MIN)
- LDFLAGS += -mmacosx-version-min=$(MACOSX_VERSION_MIN)
+ LFLAGS += -mmacosx-version-min=$(MACOSX_VERSION_MIN)
endif
diff --git a/make/bsd/makefiles/sa.make b/make/bsd/makefiles/sa.make
index 83b9b7954..4a4a87924 100644
--- a/make/bsd/makefiles/sa.make
+++ b/make/bsd/makefiles/sa.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,8 @@ AGENT_DIR = $(GAMMADIR)/agent
include $(GAMMADIR)/make/sa.files
+-include $(HS_ALT_MAKE)/bsd/makefiles/sa.make
+
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
diff --git a/make/bsd/makefiles/saproc.make b/make/bsd/makefiles/saproc.make
index 831ef3e04..77dc9f772 100644
--- a/make/bsd/makefiles/saproc.make
+++ b/make/bsd/makefiles/saproc.make
@@ -110,6 +110,9 @@ endif
ifneq ($(OS_VENDOR), Darwin)
SA_LFLAGS = $(MAPFLAG:FILENAME=$(SAMAPFILE))
+else
+# Bring in the minimum version argument or the build fails on OS X 10.10.
+SA_LFLAGS = $(LFLAGS)
endif
SA_LFLAGS += $(LDFLAGS_HASH_STYLE)
diff --git a/make/defs.make b/make/defs.make
index 51f0c40d3..b5a41239c 100644
--- a/make/defs.make
+++ b/make/defs.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -116,6 +116,18 @@ endif
# hotspot version definitions
include $(GAMMADIR)/make/hotspot_version
+# When the configure parameter --with-update-version is defined, the
+# HotSpot minor version should be set to it.
+ifneq ($(JDK_UPDATE_VERSION),)
+ HS_MINOR_VER=$(JDK_UPDATE_VERSION)
+endif
+
+# When the configure parameter --with-build-number is defined, the
+# HotSpot build number should be set to it.
+ifneq ($(JDK_BUILD_NUMBER),)
+ HS_BUILD_NUMBER=$(subst b,,$(JDK_BUILD_NUMBER))
+endif
+
# Java versions needed
ifeq ($(PREVIOUS_JDK_VERSION),)
PREVIOUS_JDK_VERSION=$(JDK_PREVIOUS_VERSION)
@@ -273,7 +285,7 @@ ifneq ($(OSNAME),windows)
# Use uname output for SRCARCH, but deal with platform differences. If ARCH
# is not explicitly listed below, it is treated as x86.
- SRCARCH = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc ppc64 zero,$(ARCH)))
+ SRCARCH ?= $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 ppc ppc64 zero,$(ARCH)))
ARCH/ = x86
ARCH/sparc = sparc
ARCH/sparc64= sparc
@@ -282,11 +294,10 @@ ifneq ($(OSNAME),windows)
ARCH/x86_64 = x86
ARCH/ppc64 = ppc
ARCH/ppc = ppc
- ARCH/arm = arm
ARCH/zero = zero
# BUILDARCH is usually the same as SRCARCH, except for sparcv9
- BUILDARCH = $(SRCARCH)
+ BUILDARCH ?= $(SRCARCH)
ifeq ($(BUILDARCH), x86)
ifdef LP64
BUILDARCH = amd64
@@ -306,18 +317,16 @@ ifneq ($(OSNAME),windows)
endif
# LIBARCH is 1:1 mapping from BUILDARCH
- LIBARCH = $(LIBARCH/$(BUILDARCH))
+ LIBARCH ?= $(LIBARCH/$(BUILDARCH))
LIBARCH/i486 = i386
LIBARCH/amd64 = amd64
LIBARCH/sparc = sparc
LIBARCH/sparcv9 = sparcv9
LIBARCH/ia64 = ia64
LIBARCH/ppc64 = ppc64
- LIBARCH/ppc = ppc
- LIBARCH/arm = arm
LIBARCH/zero = $(ZERO_LIBARCH)
- LP64_ARCH = sparcv9 amd64 ia64 ppc64 zero
+ LP64_ARCH += sparcv9 amd64 ia64 ppc64 zero
endif
# Required make macro settings for all platforms
diff --git a/make/hotspot_version b/make/hotspot_version
index ebfbc5dfb..fb3b47b21 100644
--- a/make/hotspot_version
+++ b/make/hotspot_version
@@ -31,11 +31,11 @@
#
# Don't put quotes (fail windows build).
-HOTSPOT_VM_COPYRIGHT=Copyright 2015
+HOTSPOT_VM_COPYRIGHT=Copyright 2016
HS_MAJOR_VER=25
-HS_MINOR_VER=40
-HS_BUILD_NUMBER=25
+HS_MINOR_VER=71
+HS_BUILD_NUMBER=01
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
diff --git a/make/linux/Makefile b/make/linux/Makefile
index df4509348..17c140979 100644
--- a/make/linux/Makefile
+++ b/make/linux/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -67,8 +67,12 @@ ifndef CC_INTERP
endif
endif
# C1 is not ported on ppc64, so we cannot build a tiered VM:
-ifeq ($(ARCH),ppc64)
- FORCE_TIERED=0
+# Notice: after 8046471 ARCH will be 'ppc' for top-level ppc64 builds but
+# 'ppc64' for HotSpot-only ppc64 builds. Need to detect both variants here!
+ifneq (,$(findstring $(ARCH), ppc ppc64))
+ ifeq ($(ARCH_DATA_MODEL), 64)
+ FORCE_TIERED=0
+ endif
endif
ifdef LP64
@@ -229,7 +233,7 @@ checks: check_os_version check_j2se_version
# Solaris 2.5.1, 2.6).
# Disable this check by setting DISABLE_HOTSPOT_OS_VERSION_CHECK=ok.
-SUPPORTED_OS_VERSION = 2.4% 2.5% 2.6% 3%
+SUPPORTED_OS_VERSION = 2.4% 2.5% 2.6% 3% 4%
OS_VERSION := $(shell uname -r)
EMPTY_IF_NOT_SUPPORTED = $(filter $(SUPPORTED_OS_VERSION),$(OS_VERSION))
diff --git a/make/linux/makefiles/arm.make b/make/linux/makefiles/arm.make
deleted file mode 100644
index ff8e3c519..000000000
--- a/make/linux/makefiles/arm.make
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-Obj_Files += linux_arm.o
-
-ifneq ($(EXT_LIBS_PATH),)
- LIBS += $(EXT_LIBS_PATH)/sflt_glibc.a
-endif
-
-CFLAGS += -DVM_LITTLE_ENDIAN
diff --git a/make/linux/makefiles/build_vm_def.sh b/make/linux/makefiles/build_vm_def.sh
deleted file mode 100644
index ea81ff6c2..000000000
--- a/make/linux/makefiles/build_vm_def.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh
-
-# If we're cross compiling use that path for nm
-if [ "$CROSS_COMPILE_ARCH" != "" ]; then
-NM=$ALT_COMPILER_PATH/nm
-else
-NM=nm
-fi
-
-$NM --defined-only $* \
- | awk '{
- if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 ";"
- if ($3 ~ /^UseSharedSpaces$/) print "\t" $3 ";"
- if ($3 ~ /^_ZN9Arguments17SharedArchivePathE$/) print "\t" $3 ";"
- }' \
- | sort -u
diff --git a/make/linux/makefiles/buildtree.make b/make/linux/makefiles/buildtree.make
index 150a126f7..b5977650e 100644
--- a/make/linux/makefiles/buildtree.make
+++ b/make/linux/makefiles/buildtree.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -70,6 +70,8 @@ else
PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH).suncc
else
PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH)
+ ALT_PLATFORM_FILE = $(HS_ALT_MAKE)/$(OS_FAMILY)/platform_$(BUILDARCH)
+ PLATFORM_FILE := $(if $(wildcard $(ALT_PLATFORM_FILE)),$(ALT_PLATFORM_FILE),$(PLATFORM_FILE))
endif
endif
@@ -202,7 +204,7 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
$(QUIETLY) ( \
$(BUILDTREE_COMMENT); \
echo; \
- echo "Platform_file = $(PLATFORM_FILE)" | sed 's|$(GAMMADIR)|$$(GAMMADIR)|'; \
+ echo "Platform_file = $(PLATFORM_FILE)" | sed -e 's|$(HS_ALT_MAKE)|$$(HS_ALT_MAKE)|' -e 's|$(GAMMADIR)|$$(GAMMADIR)|'; \
sed -n '/=/s/^ */Platform_/p' < $(PLATFORM_FILE); \
echo; \
echo "GAMMADIR = $(GAMMADIR)"; \
diff --git a/make/linux/makefiles/defs.make b/make/linux/makefiles/defs.make
index 8922fdd87..3af5878aa 100644
--- a/make/linux/makefiles/defs.make
+++ b/make/linux/makefiles/defs.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -69,7 +69,7 @@ ifeq ($(ARCH), ia64)
endif
# sparc
-ifeq ($(ARCH), sparc64)
+ifneq (,$(findstring $(ARCH), sparc))
ifeq ($(ARCH_DATA_MODEL), 64)
ARCH_DATA_MODEL = 64
MAKE_ARGS += LP64=1
@@ -83,55 +83,35 @@ ifeq ($(ARCH), sparc64)
HS_ARCH = sparc
endif
-# amd64/x86_64
-ifneq (,$(findstring $(ARCH), amd64 x86_64))
+# i686/i586 and amd64/x86_64
+ifneq (,$(findstring $(ARCH), amd64 x86_64 i686 i586))
ifeq ($(ARCH_DATA_MODEL), 64)
ARCH_DATA_MODEL = 64
MAKE_ARGS += LP64=1
PLATFORM = linux-amd64
VM_PLATFORM = linux_amd64
- HS_ARCH = x86
else
ARCH_DATA_MODEL = 32
PLATFORM = linux-i586
VM_PLATFORM = linux_i486
- HS_ARCH = x86
- # We have to reset ARCH to i686 since SRCARCH relies on it
- ARCH = i686
endif
-endif
-
-# i686/i586 ie 32-bit x86
-ifneq (,$(findstring $(ARCH), i686 i586))
- ARCH_DATA_MODEL = 32
- PLATFORM = linux-i586
- VM_PLATFORM = linux_i486
- HS_ARCH = x86
-endif
-
-# ARM
-ifeq ($(ARCH), arm)
- ARCH_DATA_MODEL = 32
- PLATFORM = linux-arm
- VM_PLATFORM = linux_arm
- HS_ARCH = arm
+ HS_ARCH = x86
endif
# PPC
-ifeq ($(ARCH), ppc)
- ARCH_DATA_MODEL = 32
- PLATFORM = linux-ppc
- VM_PLATFORM = linux_ppc
- HS_ARCH = ppc
-endif
-
-# PPC64
-ifeq ($(ARCH), ppc64)
- ARCH_DATA_MODEL = 64
- MAKE_ARGS += LP64=1
- PLATFORM = linux-ppc64
- VM_PLATFORM = linux_ppc64
- HS_ARCH = ppc
+# Notice: after 8046471 ARCH will be 'ppc' for top-level ppc64 builds but
+# 'ppc64' for HotSpot-only ppc64 builds. Need to detect both variants here!
+ifneq (,$(findstring $(ARCH), ppc ppc64))
+ ifeq ($(ARCH_DATA_MODEL), 64)
+ MAKE_ARGS += LP64=1
+ PLATFORM = linux-ppc64
+ VM_PLATFORM = linux_ppc64
+ else
+ ARCH_DATA_MODEL = 32
+ PLATFORM = linux-ppc
+ VM_PLATFORM = linux_ppc
+ endif
+ HS_ARCH = ppc
endif
# On 32 bit linux we build server and client, on 64 bit just server.
diff --git a/make/linux/makefiles/dtrace.make b/make/linux/makefiles/dtrace.make
index b50eab183..386e94b1f 100644
--- a/make/linux/makefiles/dtrace.make
+++ b/make/linux/makefiles/dtrace.make
@@ -31,8 +31,8 @@ ifndef OPENJDK
REASON = "This JDK does not support SDT probes"
else
-# We need a recent GCC for the default
-ifeq "$(shell expr \( $(CC_VER_MAJOR) \>= 4 \) \& \( $(CC_VER_MINOR) \>= 4 \) )" "0"
+# We need a recent GCC for the default (4.4 or later)
+ifeq "$(shell expr \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 4 \) \) \| \( $(CC_VER_MAJOR) \>= 5 \) )" "0"
REASON = "gcc version is too old"
else
diff --git a/make/linux/makefiles/gcc.make b/make/linux/makefiles/gcc.make
index 698010625..272afceae 100644
--- a/make/linux/makefiles/gcc.make
+++ b/make/linux/makefiles/gcc.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -176,11 +176,7 @@ ARCHFLAG/amd64 = -m64 $(STACK_ALIGNMENT_OPT)
ARCHFLAG/ia64 =
ARCHFLAG/sparc = -m32 -mcpu=v9
ARCHFLAG/sparcv9 = -m64 -mcpu=v9
-ARCHFLAG/arm = -fsigned-char
ARCHFLAG/zero = $(ZERO_ARCHFLAG)
-ifndef E500V2
-ARCHFLAG/ppc = -mcpu=powerpc
-endif
ARCHFLAG/ppc64 = -m64
CFLAGS += $(ARCHFLAG)
@@ -188,10 +184,6 @@ AOUT_FLAGS += $(ARCHFLAG)
LFLAGS += $(ARCHFLAG)
ASFLAGS += $(ARCHFLAG)
-ifdef E500V2
-CFLAGS += -DE500V2
-endif
-
# Use C++ Interpreter
ifdef CC_INTERP
CFLAGS += -DCC_INTERP
@@ -345,47 +337,41 @@ else
# Note: The Itanium gcc compiler crashes when using -gstabs.
DEBUG_CFLAGS/ia64 = -g
DEBUG_CFLAGS/amd64 = -g
- DEBUG_CFLAGS/arm = -g
- DEBUG_CFLAGS/ppc = -g
DEBUG_CFLAGS/ppc64 = -g
DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)
# Clang doesn't understand -gstabs
- DEBUG_CFLAGS += -g
+ DEBUG_CFLAGS/$(BUILDARCH) = -g
else
- DEBUG_CFLAGS += -gstabs
+ DEBUG_CFLAGS/$(BUILDARCH) = -gstabs
endif
endif
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
FASTDEBUG_CFLAGS/ia64 = -g
FASTDEBUG_CFLAGS/amd64 = -g
- FASTDEBUG_CFLAGS/arm = -g
- FASTDEBUG_CFLAGS/ppc = -g
FASTDEBUG_CFLAGS/ppc64 = -g
- FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
+ FASTDEBUG_CFLAGS += $(FASTDEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)
# Clang doesn't understand -gstabs
- FASTDEBUG_CFLAGS += -g
+ FASTDEBUG_CFLAGS/$(BUILDARCH) = -g
else
- FASTDEBUG_CFLAGS += -gstabs
+ FASTDEBUG_CFLAGS/$(BUILDARCH) = -gstabs
endif
endif
OPT_CFLAGS/ia64 = -g
OPT_CFLAGS/amd64 = -g
- OPT_CFLAGS/arm = -g
- OPT_CFLAGS/ppc = -g
OPT_CFLAGS/ppc64 = -g
OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)
# Clang doesn't understand -gstabs
- OPT_CFLAGS += -g
+ OPT_CFLAGS/$(BUILDARCH) = -g
else
- OPT_CFLAGS += -gstabs
+ OPT_CFLAGS/$(BUILDARCH) = -gstabs
endif
endif
endif
@@ -409,3 +395,5 @@ endif
ifndef USE_SUNCC
CFLAGS += -fno-omit-frame-pointer
endif
+
+-include $(HS_ALT_MAKE)/linux/makefiles/gcc.make
diff --git a/make/linux/makefiles/ppc.make b/make/linux/makefiles/ppc.make
deleted file mode 100644
index 3364bd743..000000000
--- a/make/linux/makefiles/ppc.make
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-# The copied fdlibm routines in sharedRuntimeTrig.o must not be optimized
-OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT)
-
-# Must also specify if CPU is big endian
-CFLAGS += -DVM_BIG_ENDIAN
-
-ifdef E500V2
-ASFLAGS += -Wa,-mspe -Wa,--defsym -Wa,E500V2=1
-endif
diff --git a/make/linux/makefiles/saproc.make b/make/linux/makefiles/saproc.make
index 7e8f7c430..6579c8e11 100644
--- a/make/linux/makefiles/saproc.make
+++ b/make/linux/makefiles/saproc.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -69,19 +69,21 @@ endif
endif
ifneq ($(ALT_SASRCDIR),)
-ALT_SAINCDIR=-I$(ALT_SASRCDIR)
+ALT_SAINCDIR=-I$(ALT_SASRCDIR) -DALT_SASRCDIR
else
ALT_SAINCDIR=
endif
SA_LFLAGS = $(MAPFLAG:FILENAME=$(SAMAPFILE)) $(LDFLAGS_HASH_STYLE)
+SAARCH ?= $(BUILDARCH)
+
$(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
exit 1; \
fi
@echo Making SA debugger back-end...
- $(QUIETLY) $(CC) -D$(BUILDARCH) -D_GNU_SOURCE \
+ $(QUIETLY) $(CC) -D$(SAARCH) -D_GNU_SOURCE \
-D_FILE_OFFSET_BITS=64 \
$(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
-I$(SASRCDIR) \
diff --git a/make/linux/makefiles/vm.make b/make/linux/makefiles/vm.make
index eebf68fd5..1a48df939 100644
--- a/make/linux/makefiles/vm.make
+++ b/make/linux/makefiles/vm.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -45,8 +45,9 @@ DEP_DIR = $(GENERATED)/dependencies
ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
include $(MAKEFILES_DIR)/zeroshark.make
else
- include $(MAKEFILES_DIR)/$(BUILDARCH).make
- -include $(HS_ALT_MAKE)/$(Platform_os_family)/makefiles/$(BUILDARCH).make
+ BUILDARCH_MAKE = $(MAKEFILES_DIR)/$(BUILDARCH).make
+ ALT_BUILDARCH_MAKE = $(HS_ALT_MAKE)/$(Platform_os_family)/makefiles/$(BUILDARCH).make
+ include $(if $(wildcard $(ALT_BUILDARCH_MAKE)),$(ALT_BUILDARCH_MAKE),$(BUILDARCH_MAKE))
endif
# set VPATH so make knows where to look for source files
@@ -245,8 +246,14 @@ mapfile_reorder : mapfile $(REORDERFILE)
rm -f $@
cat $^ > $@
+VMDEF_PAT = ^_ZTV
+VMDEF_PAT := ^gHotSpotVM|$(VMDEF_PAT)
+VMDEF_PAT := ^UseSharedSpaces$$|$(VMDEF_PAT)
+VMDEF_PAT := ^_ZN9Arguments17SharedArchivePathE$$|$(VMDEF_PAT)
+
vm.def: $(Res_Files) $(Obj_Files)
- sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
+ $(QUIETLY) $(NM) --defined-only $(Obj_Files) | sort -k3 -u | \
+ awk '$$3 ~ /$(VMDEF_PAT)/ { print "\t" $$3 ";" }' > $@
mapfile_ext:
rm -f $@
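Annotation: the new vm.def rule replaces the build_vm_def.sh invocation. nm lists the defined symbols of all object files, sort -k3 -u deduplicates them by name, and awk keeps only names matching the VMDEF_PAT alternation accumulated above, emitting one linker-script line per exported symbol. A small C++ sketch of the same filter (the symbol names below are illustrative only):

    #include <iostream>
    #include <regex>
    #include <string>
    #include <vector>

    int main() {
      // The same alternation the makefile builds up in VMDEF_PAT.
      std::regex vmdef_pat(
          "^_ZN9Arguments17SharedArchivePathE$|^UseSharedSpaces$|^gHotSpotVM|^_ZTV");
      std::vector<std::string> symbols = {
          "gHotSpotVMTypes", "_ZTV7Monitor", "UseSharedSpaces", "some_local_helper"};
      for (const auto& s : symbols)
        if (std::regex_search(s, vmdef_pat))
          std::cout << '\t' << s << ";\n";  // one vm.def entry per kept symbol
      return 0;
    }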
diff --git a/make/linux/platform_arm b/make/linux/platform_arm
deleted file mode 100644
index 13206d637..000000000
--- a/make/linux/platform_arm
+++ /dev/null
@@ -1,17 +0,0 @@
-os_family = linux
-
-arch = arm
-
-arch_model = arm
-
-os_arch = linux_arm
-
-os_arch_model = linux_arm
-
-lib_arch = arm
-
-compiler = gcc
-
-gnu_dis_arch = arm
-
-sysdefs = -DLINUX -D_GNU_SOURCE -DARM
diff --git a/make/linux/platform_ppc b/make/linux/platform_ppc
deleted file mode 100644
index 809456b52..000000000
--- a/make/linux/platform_ppc
+++ /dev/null
@@ -1,17 +0,0 @@
-os_family = linux
-
-arch = ppc
-
-arch_model = ppc_32
-
-os_arch = linux_ppc
-
-os_arch_model = linux_ppc_32
-
-lib_arch = ppc
-
-compiler = gcc
-
-gnu_dis_arch = ppc
-
-sysdefs = -DLINUX -D_GNU_SOURCE -DPPC32
diff --git a/make/solaris/makefiles/buildtree.make b/make/solaris/makefiles/buildtree.make
index 4b68a7c13..e5dbbc6bb 100644
--- a/make/solaris/makefiles/buildtree.make
+++ b/make/solaris/makefiles/buildtree.make
@@ -265,9 +265,12 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \
echo; \
+ [ -n "$(INCLUDE_TRACE)" ] && \
+ echo && echo "INCLUDE_TRACE = $(INCLUDE_TRACE)"; \
[ -n "$(SPEC)" ] && \
echo "include $(SPEC)"; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \
+ echo "include \$$(GAMMADIR)/make/excludeSrc.make"; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \
) > $@
diff --git a/make/solaris/makefiles/sa.make b/make/solaris/makefiles/sa.make
index 2c08b1c51..3d2a9afc5 100644
--- a/make/solaris/makefiles/sa.make
+++ b/make/solaris/makefiles/sa.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -29,8 +29,12 @@
# and generate JNI header file for native methods.
include $(GAMMADIR)/make/solaris/makefiles/rules.make
+include $(GAMMADIR)/make/defs.make
AGENT_DIR = $(GAMMADIR)/agent
include $(GAMMADIR)/make/sa.files
+
+-include $(HS_ALT_MAKE)/solaris/makefiles/sa.make
+
GENERATED = ../generated
# tools.jar is needed by the JDI - SA binding
diff --git a/make/solaris/makefiles/trace.make b/make/solaris/makefiles/trace.make
index 16c82cd78..18c88b925 100644
--- a/make/solaris/makefiles/trace.make
+++ b/make/solaris/makefiles/trace.make
@@ -56,8 +56,12 @@ TraceGeneratedNames = \
ifeq ($(HAS_ALT_SRC), true)
TraceGeneratedNames += \
traceRequestables.hpp \
- traceEventControl.hpp \
- traceProducer.cpp
+ traceEventControl.hpp
+
+ifneq ($(INCLUDE_TRACE), false)
+ TraceGeneratedNames += traceProducer.cpp
+endif
+
endif
TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
diff --git a/make/solaris/makefiles/vm.make b/make/solaris/makefiles/vm.make
index 005ba8446..11942b4ec 100644
--- a/make/solaris/makefiles/vm.make
+++ b/make/solaris/makefiles/vm.make
@@ -204,7 +204,7 @@ SHARK_SPECIFIC_FILES := shark
ZERO_SPECIFIC_FILES := zero
# Always exclude these.
-Src_Files_EXCLUDE := dtrace jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
+Src_Files_EXCLUDE += dtrace jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
# Exclude per type.
Src_Files_EXCLUDE/CORE := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
diff --git a/make/windows/makefiles/sa.make b/make/windows/makefiles/sa.make
index 792f028f9..d75e2aa20 100644
--- a/make/windows/makefiles/sa.make
+++ b/make/windows/makefiles/sa.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,22 @@ checkAndBuildSA::
GENERATED = ../generated
+HS_COMMON_SRC_REL = src
+
+!if "$(OPENJDK)" != "true"
+HS_ALT_SRC_REL=src/closed
+HS_ALT_SRC = $(WorkSpace)/$(HS_ALT_SRC_REL)
+!ifndef HS_ALT_MAKE
+HS_ALT_MAKE=$(WorkSpace)/make/closed
+!endif
+!endif
+
+HS_COMMON_SRC = $(WorkSpace)/$(HS_COMMON_SRC_REL)
+
+!ifdef HS_ALT_MAKE
+!include $(HS_ALT_MAKE)/windows/makefiles/sa.make
+!endif
+
# tools.jar is needed by the JDI - SA binding
SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
diff --git a/src/cpu/ppc/vm/frame_ppc.cpp b/src/cpu/ppc/vm/frame_ppc.cpp
index ff177e623..a4f7bedb0 100644
--- a/src/cpu/ppc/vm/frame_ppc.cpp
+++ b/src/cpu/ppc/vm/frame_ppc.cpp
@@ -318,3 +318,10 @@ intptr_t *frame::initial_deoptimization_info() {
// unused... but returns fp() to minimize changes introduced by 7087445
return fp();
}
+
+#ifndef PRODUCT
+// This is a generic constructor which is only used by pns() in debug.cpp.
+frame::frame(void* sp, void* fp, void* pc) : _sp((intptr_t*)sp), _unextended_sp((intptr_t*)sp) {
+ find_codeblob_and_set_pc_and_deopt_state((address)pc); // also sets _fp and adjusts _unextended_sp
+}
+#endif
diff --git a/src/cpu/ppc/vm/globals_ppc.hpp b/src/cpu/ppc/vm/globals_ppc.hpp
index 36bdf7325..386bae680 100644
--- a/src/cpu/ppc/vm/globals_ppc.hpp
+++ b/src/cpu/ppc/vm/globals_ppc.hpp
@@ -55,6 +55,8 @@ define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, UseMembar, false);
+define_pd_global(bool, PreserveFramePointer, false);
+
// GC Ergo Flags
define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // Default max size of CMS young gen, per GC worker thread.
diff --git a/src/cpu/ppc/vm/interpreter_ppc.cpp b/src/cpu/ppc/vm/interpreter_ppc.cpp
index 9b5aa93b3..00c80b65b 100644
--- a/src/cpu/ppc/vm/interpreter_ppc.cpp
+++ b/src/cpu/ppc/vm/interpreter_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -296,8 +296,16 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
__ bind(do_float);
__ lfs(floatSlot, 0, arg_java);
#if defined(LINUX)
+ // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
+ // in the least significant word of an argument slot.
+#if defined(VM_LITTLE_ENDIAN)
+ __ stfs(floatSlot, 0, arg_c);
+#else
__ stfs(floatSlot, 4, arg_c);
+#endif
#elif defined(AIX)
+  // Although AIX runs on big endian CPU, float is in the most significant
+ // word of an argument slot.
__ stfs(floatSlot, 0, arg_c);
#else
#error "unknown OS"
@@ -459,7 +467,8 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// This is not a leaf but we have a JavaFrameAnchor now and we will
// check (create) exceptions afterward so this is ok.
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError),
+ R16_thread);
// Pop the C frame and restore LR.
__ pop_frame();
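Annotation: the endianness comments above encode one ABI fact, which also drives FLOAT_WORD_OFFSET_IN_SLOT in sharedRuntime_ppc.cpp further down: a float passed in a 64-bit argument slot occupies the slot's least significant word on Linux/ELF, and that word is the lower-addressed one only on little-endian targets. A hedged, self-contained illustration:

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    int main() {
      float f = 1.0f;
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));

      uint64_t slot = bits;            // float in the least significant word
      uint32_t words[2];
      std::memcpy(words, &slot, sizeof(words));

      // 0 on little-endian (low word comes first in memory), 1 on big-endian.
      int offset = (words[0] == bits) ? 0 : 1;
      std::cout << "float word offset in slot: " << offset << "\n";
      return 0;
    }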
diff --git a/src/cpu/ppc/vm/macroAssembler_ppc.cpp b/src/cpu/ppc/vm/macroAssembler_ppc.cpp
index 95a70506c..a96e31035 100644
--- a/src/cpu/ppc/vm/macroAssembler_ppc.cpp
+++ b/src/cpu/ppc/vm/macroAssembler_ppc.cpp
@@ -592,13 +592,6 @@ void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool
"can't identify emitted call");
} else {
// variant 1:
-#if defined(ABI_ELFv2)
- nop();
- calculate_address_from_global_toc(R12, dest, true, true, false);
- mtctr(R12);
- nop();
- nop();
-#else
mr(R0, R11); // spill R11 -> R0.
// Load the destination address into CTR,
@@ -608,7 +601,6 @@ void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool
mtctr(R11);
mr(R11, R0); // spill R11 <- R0.
nop();
-#endif
// do the call/jump
if (link) {
diff --git a/src/cpu/ppc/vm/ppc.ad b/src/cpu/ppc/vm/ppc.ad
index ce0406de1..9fd4d7316 100644
--- a/src/cpu/ppc/vm/ppc.ad
+++ b/src/cpu/ppc/vm/ppc.ad
@@ -2264,9 +2264,8 @@ const bool Matcher::clone_shift_expressions = false;
// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
-// Off, as masks are generated in expand rules where required.
-// Constant shift counts are handled in Ideal phase.
-const bool Matcher::need_masked_shift_count = false;
+// PowerPC requires masked shift counts.
+const bool Matcher::need_masked_shift_count = true;
// This affects two different things:
// - how Decode nodes are matched
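Annotation on the need_masked_shift_count flip above: the JVM spec defines int shifts modulo 32 (long shifts modulo 64), so on a CPU that does not itself ignore the high shift-count bits, the matcher must guarantee an explicit mask. A minimal sketch of the required semantics:

    #include <cassert>
    #include <cstdint>

    // Java's ishl: the count is masked to 5 bits before shifting.
    static int32_t java_ishl(int32_t value, int32_t count) {
      return value << (count & 31);
    }

    int main() {
      // 33 & 31 == 1, so Java requires 1 << 33 == 2; hardware that honors
      // more count bits would need the mask inserted by the matcher.
      assert(java_ishl(1, 33) == 2);
      return 0;
    }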
@@ -4418,11 +4417,11 @@ operand immF() %{
interface(CONST_INTER);
%}
-// constant 'float +0.0'.
+// Float Immediate: +0.0f.
operand immF_0() %{
- predicate((n->getf() == 0) &&
- (fpclassify(n->getf()) == FP_ZERO) && (signbit(n->getf()) == 0));
+ predicate(jint_cast(n->getf()) == 0);
match(ConF);
+
op_cost(0);
format %{ %}
interface(CONST_INTER);
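Annotation: the new immF_0 predicate (and its immD0/immF0 counterparts in sparc.ad below) compares bit patterns instead of calling fpclassify/fpclass. Only +0.0f has all 32 bits clear; -0.0f carries the sign bit, and an ordinary floating-point compare cannot tell the two apart. A short demonstration, with float_bits standing in for jint_cast:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static uint32_t float_bits(float f) {
      uint32_t b;
      std::memcpy(&b, &f, sizeof(b));
      return b;
    }

    int main() {
      assert(float_bits(0.0f) == 0u);            // +0.0f: matches the operand
      assert(float_bits(-0.0f) == 0x80000000u);  // -0.0f: sign bit set, excluded
      assert(0.0f == -0.0f);                     // == alone cannot distinguish them
      return 0;
    }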
diff --git a/src/cpu/ppc/vm/sharedRuntime_ppc.cpp b/src/cpu/ppc/vm/sharedRuntime_ppc.cpp
index 15a5812e8..6685eaaba 100644
--- a/src/cpu/ppc/vm/sharedRuntime_ppc.cpp
+++ b/src/cpu/ppc/vm/sharedRuntime_ppc.cpp
@@ -766,6 +766,21 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
// in farg_reg[j] if argument i is the j-th float argument of this call.
//
case T_FLOAT:
+#if defined(LINUX)
+ // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
+ // in the least significant word of an argument slot.
+#if defined(VM_LITTLE_ENDIAN)
+#define FLOAT_WORD_OFFSET_IN_SLOT 0
+#else
+#define FLOAT_WORD_OFFSET_IN_SLOT 1
+#endif
+#elif defined(AIX)
+ // Although AIX runs on big endian CPU, float is in the most
+ // significant word of an argument slot.
+#define FLOAT_WORD_OFFSET_IN_SLOT 0
+#else
+#error "unknown OS"
+#endif
if (freg < Argument::n_float_register_parameters_c) {
// Put float in register ...
reg = farg_reg[freg];
@@ -779,14 +794,14 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
if (arg >= Argument::n_regs_not_on_stack_c) {
// ... and on the stack.
guarantee(regs2 != NULL, "must pass float in register and stack slot");
- VMReg reg2 = VMRegImpl::stack2reg(stk LINUX_ONLY(+1));
+ VMReg reg2 = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
regs2[i].set1(reg2);
stk += inc_stk_for_intfloat;
}
} else {
// Put float on stack.
- reg = VMRegImpl::stack2reg(stk LINUX_ONLY(+1));
+ reg = VMRegImpl::stack2reg(stk + FLOAT_WORD_OFFSET_IN_SLOT);
stk += inc_stk_for_intfloat;
}
regs[i].set1(reg);
diff --git a/src/cpu/sparc/vm/frame_sparc.cpp b/src/cpu/sparc/vm/frame_sparc.cpp
index 6ce05bdfc..7e2e8bd08 100644
--- a/src/cpu/sparc/vm/frame_sparc.cpp
+++ b/src/cpu/sparc/vm/frame_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -342,7 +342,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// constructors
// Construct an unpatchable, deficient frame
-frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
+void frame::init(intptr_t* sp, address pc, CodeBlob* cb) {
#ifdef _LP64
assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
#endif
@@ -364,6 +364,10 @@ frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
#endif // ASSERT
}
+frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
+ init(sp, pc, cb);
+}
+
frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
_sp(sp),
_younger_sp(younger_sp),
@@ -418,6 +422,13 @@ frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpret
}
}
+#ifndef PRODUCT
+// This is a generic constructor which is only used by pns() in debug.cpp.
+frame::frame(void* sp, void* fp, void* pc) {
+ init((intptr_t*)sp, (address)pc, NULL);
+}
+#endif
+
bool frame::is_interpreted_frame() const {
return Interpreter::contains(pc());
}
@@ -436,32 +447,6 @@ void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
}
#endif // CC_INTERP
-
-#ifdef ASSERT
-// Debugging aid
-static frame nth_sender(int n) {
- frame f = JavaThread::current()->last_frame();
-
- for(int i = 0; i < n; ++i)
- f = f.sender((RegisterMap*)NULL);
-
- printf("first frame %d\n", f.is_first_frame() ? 1 : 0);
- printf("interpreted frame %d\n", f.is_interpreted_frame() ? 1 : 0);
- printf("java frame %d\n", f.is_java_frame() ? 1 : 0);
- printf("entry frame %d\n", f.is_entry_frame() ? 1 : 0);
- printf("native frame %d\n", f.is_native_frame() ? 1 : 0);
- if (f.is_compiled_frame()) {
- if (f.is_deoptimized_frame())
- printf("deoptimized frame 1\n");
- else
- printf("compiled frame 1\n");
- }
-
- return f;
-}
-#endif
-
-
frame frame::sender_for_entry_frame(RegisterMap *map) const {
assert(map != NULL, "map must be set");
// Java frame called from C; skip all C frames and return top C
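Annotation: the refactoring above (repeated for x86 in frame_x86 below) hoists the constructor body into init() so the new debug-only frame(void*, void*, void*) used by pns() can reuse it; HotSpot of this era predates C++11 delegating constructors. A hedged sketch of the pattern, using HotSpot's PRODUCT guard:

    #include <cstdint>

    class Frame {
      intptr_t* _sp;
      void init(intptr_t* sp) {                  // shared setup, formerly the ctor body
        _sp = sp;
      }
     public:
      Frame(intptr_t* sp) { init(sp); }          // existing typed constructor
    #ifndef PRODUCT
      Frame(void* sp) { init((intptr_t*)sp); }   // generic debugger entry point
    #endif
    };

    int main() {
      intptr_t stack_word = 0;
      Frame f(&stack_word);
      (void)f;
      return 0;
    }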
diff --git a/src/cpu/sparc/vm/frame_sparc.hpp b/src/cpu/sparc/vm/frame_sparc.hpp
index 51a014872..916444ae6 100644
--- a/src/cpu/sparc/vm/frame_sparc.hpp
+++ b/src/cpu/sparc/vm/frame_sparc.hpp
@@ -164,6 +164,8 @@
enum unpatchable_t { unpatchable };
frame(intptr_t* sp, unpatchable_t, address pc = NULL, CodeBlob* cb = NULL);
+ void init(intptr_t* sp, address pc, CodeBlob* cb);
+
// Walk from sp outward looking for old_sp, and return old_sp's predecessor
// (i.e. return the sp from the frame where old_sp is the fp).
// Register windows are assumed to be flushed for the stack in question.
diff --git a/src/cpu/sparc/vm/globals_sparc.hpp b/src/cpu/sparc/vm/globals_sparc.hpp
index 95f731abe..c692f6d9a 100644
--- a/src/cpu/sparc/vm/globals_sparc.hpp
+++ b/src/cpu/sparc/vm/globals_sparc.hpp
@@ -74,6 +74,8 @@ define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, UseMembar, false);
+define_pd_global(bool, PreserveFramePointer, false);
+
// GC Ergo Flags
define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
diff --git a/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp b/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp
index 7e3804fd3..3518d0b63 100644
--- a/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp
@@ -630,7 +630,12 @@ inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, Register s1, Registe
inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
relocate(a.rspec(offset));
- ldf(w, a.base(), a.disp() + offset, d);
+ if (a.has_index()) {
+ assert(offset == 0, "");
+ ldf(w, a.base(), a.index(), d);
+ } else {
+ ldf(w, a.base(), a.disp() + offset, d);
+ }
}
// returns if membar generates anything, obviously this code should mirror
diff --git a/src/cpu/sparc/vm/sparc.ad b/src/cpu/sparc/vm/sparc.ad
index 3383bc21e..3d43f6b87 100644
--- a/src/cpu/sparc/vm/sparc.ad
+++ b/src/cpu/sparc/vm/sparc.ad
@@ -3760,13 +3760,9 @@ operand immD() %{
interface(CONST_INTER);
%}
+// Double Immediate: +0.0d
operand immD0() %{
-#ifdef _LP64
- // on 64-bit architectures this comparision is faster
predicate(jlong_cast(n->getd()) == 0);
-#else
- predicate((n->getd() == 0) && (fpclass(n->getd()) == FP_PZERO));
-#endif
match(ConD);
op_cost(0);
@@ -3783,9 +3779,9 @@ operand immF() %{
interface(CONST_INTER);
%}
-// Float Immediate: 0
-operand immF0() %{
- predicate((n->getf() == 0) && (fpclass(n->getf()) == FP_PZERO));
+// Float Immediate: +0.0f
+operand immF0() %{
+ predicate(jint_cast(n->getf()) == 0);
match(ConF);
op_cost(0);
diff --git a/src/cpu/sparc/vm/vm_version_sparc.cpp b/src/cpu/sparc/vm/vm_version_sparc.cpp
index 2fd9c396a..664f29e0f 100644
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp
@@ -37,7 +37,7 @@
int VM_Version::_features = VM_Version::unknown_m;
const char* VM_Version::_features_str = "";
-unsigned int VM_Version::_L2_cache_line_size = 0;
+unsigned int VM_Version::_L2_data_cache_line_size = 0;
void VM_Version::initialize() {
_features = determine_features();
@@ -363,7 +363,7 @@ void VM_Version::initialize() {
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
- tty->print_cr("L2 cache line size: %u", L2_cache_line_size());
+ tty->print_cr("L2 data cache line size: %u", L2_data_cache_line_size());
tty->print("Allocation");
if (AllocatePrefetchStyle <= 0) {
tty->print_cr(": no prefetching");
diff --git a/src/cpu/sparc/vm/vm_version_sparc.hpp b/src/cpu/sparc/vm/vm_version_sparc.hpp
index fa823863b..59969ed10 100644
--- a/src/cpu/sparc/vm/vm_version_sparc.hpp
+++ b/src/cpu/sparc/vm/vm_version_sparc.hpp
@@ -96,8 +96,8 @@ protected:
static int _features;
static const char* _features_str;
- static unsigned int _L2_cache_line_size;
- static unsigned int L2_cache_line_size() { return _L2_cache_line_size; }
+ static unsigned int _L2_data_cache_line_size;
+ static unsigned int L2_data_cache_line_size() { return _L2_data_cache_line_size; }
static void print_features();
static int determine_features();
@@ -171,7 +171,7 @@ public:
static const char* cpu_features() { return _features_str; }
// default prefetch block size on sparc
- static intx prefetch_data_size() { return L2_cache_line_size(); }
+ static intx prefetch_data_size() { return L2_data_cache_line_size(); }
// Prefetch
static intx prefetch_copy_interval_in_bytes() {
diff --git a/src/cpu/x86/vm/assembler_x86.hpp b/src/cpu/x86/vm/assembler_x86.hpp
index 2ac9df8c9..fc270dbbd 100644
--- a/src/cpu/x86/vm/assembler_x86.hpp
+++ b/src/cpu/x86/vm/assembler_x86.hpp
@@ -141,8 +141,10 @@ REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
#endif // _LP64
-// JSR 292 fixed register usages:
-REGISTER_DECLARATION(Register, rbp_mh_SP_save, rbp);
+// JSR 292
+// On x86, the SP does not have to be saved when invoking method handle intrinsics
+// or compiled lambda forms. We indicate that by setting rbp_mh_SP_save to noreg.
+REGISTER_DECLARATION(Register, rbp_mh_SP_save, noreg);
// Address is an abstraction used to represent a memory location
// using any of the amd64 addressing modes with one object.
diff --git a/src/cpu/x86/vm/c1_FrameMap_x86.cpp b/src/cpu/x86/vm/c1_FrameMap_x86.cpp
index f75eca728..bec7eed65 100644
--- a/src/cpu/x86/vm/c1_FrameMap_x86.cpp
+++ b/src/cpu/x86/vm/c1_FrameMap_x86.cpp
@@ -343,14 +343,13 @@ LIR_Opr FrameMap::stack_pointer() {
return FrameMap::rsp_opr;
}
-
// JSR 292
+// On x86, there is no need to save the SP, because neither
+// method handle intrinsics, nor compiled lambda forms modify it.
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
- assert(rbp == rbp_mh_SP_save, "must be same register");
- return rbp_opr;
+ return LIR_OprFact::illegalOpr;
}
-
bool FrameMap::validate_frame() {
return true;
}
diff --git a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
index 9dfb27969..6fd6b793a 100644
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
@@ -359,6 +359,9 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_by
generate_stack_overflow_check(bang_size_in_bytes);
push(rbp);
+  if (PreserveFramePointer) {
+    mov(rbp, rsp);
+  }
#ifdef TIERED
// c2 leaves fpu stack dirty. Clean it on entry
if (UseSSE < 2 ) {
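Annotation: the guarded prologue addition above is the core of -XX:+PreserveFramePointer. Right after push(rbp), the generated code copies RSP into RBP, so RBP keeps pointing at the saved RBP and the frame chain stays walkable instead of RBP being a scratch register. A toy emitter showing the guarded shape (the Asm struct is a stand-in, not a HotSpot type):

    #include <cstdio>

    struct Asm {                                // prints instead of emitting bytes
      void push_rbp()     { std::puts("push rbp"); }
      void mov_rbp_rsp()  { std::puts("mov rbp, rsp"); }
      void sub_rsp(int n) { std::printf("sub rsp, %d\n", n); }
    };

    static void build_frame(Asm& a, int framesize, bool preserve_fp) {
      a.push_rbp();
      if (preserve_fp) {
        a.mov_rbp_rsp();  // RBP now points at the saved RBP, linking the frames
      }
      a.sub_rsp(framesize);
    }

    int main() {
      Asm a;
      build_frame(a, 32, /*preserve_fp=*/true);
      return 0;
    }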
diff --git a/src/cpu/x86/vm/c1_Runtime1_x86.cpp b/src/cpu/x86/vm/c1_Runtime1_x86.cpp
index 76303c114..9810f6e1c 100644
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp
@@ -754,14 +754,9 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
// WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
// since we do a leave anyway.
- // Pop the return address since we are possibly changing SP (restoring from BP).
+ // Pop the return address.
__ leave();
__ pop(rcx);
-
- // Restore SP from BP if the exception PC is a method handle call site.
- NOT_LP64(__ get_thread(thread);)
- __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
- __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
__ jmp(rcx); // jump to exception handler
break;
default: ShouldNotReachHere();
@@ -832,11 +827,6 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// the pop is also necessary to simulate the effect of a ret(0)
__ pop(exception_pc);
- // Restore SP from BP if the exception PC is a method handle call site.
- NOT_LP64(__ get_thread(thread);)
- __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
- __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
-
// continue at exception handler (return address removed)
// note: do *not* remove arguments when unwinding the
// activation since the caller assumes having
diff --git a/src/cpu/x86/vm/frame_x86.cpp b/src/cpu/x86/vm/frame_x86.cpp
index 212cdac3d..1a3364313 100644
--- a/src/cpu/x86/vm/frame_x86.cpp
+++ b/src/cpu/x86/vm/frame_x86.cpp
@@ -216,7 +216,8 @@ bool frame::safe_for_sender(JavaThread *thread) {
if (sender_blob->is_nmethod()) {
nmethod* nm = sender_blob->as_nmethod_or_null();
if (nm != NULL) {
- if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc)) {
+ if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
+ nm->method()->is_method_handle_intrinsic()) {
return false;
}
}
@@ -383,10 +384,9 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
-// given unextended SP. The unextended SP might also be the saved SP
-// for MethodHandle call sites.
+// given unextended SP.
#ifdef ASSERT
-void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
+void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
frame fr;
// This is ugly but it's better than to change {get,set}_original_pc
@@ -396,33 +396,23 @@ void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool
address original_pc = nm->get_original_pc(&fr);
assert(nm->insts_contains(original_pc), "original PC must be in nmethod");
- assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
}
#endif
//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
void frame::adjust_unextended_sp() {
- // If we are returning to a compiled MethodHandle call site, the
- // saved_fp will in fact be a saved value of the unextended SP. The
- // simplest way to tell whether we are returning to such a call site
- // is as follows:
+ // On x86, sites calling method handle intrinsics and lambda forms are treated
+ // as any other call site. Therefore, no special action is needed when we are
+ // returning to any of these call sites.
nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null();
if (sender_nm != NULL) {
- // If the sender PC is a deoptimization point, get the original
- // PC. For MethodHandle call site the unextended_sp is stored in
- // saved_fp.
- if (sender_nm->is_deopt_mh_entry(_pc)) {
- DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, _fp));
- _unextended_sp = _fp;
- }
- else if (sender_nm->is_deopt_entry(_pc)) {
+ // If the sender PC is a deoptimization point, get the original PC.
+ if (sender_nm->is_deopt_entry(_pc) ||
+ sender_nm->is_deopt_mh_entry(_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
}
- else if (sender_nm->is_method_handle_return(_pc)) {
- _unextended_sp = _fp;
- }
}
}
@@ -717,3 +707,10 @@ intptr_t* frame::real_fp() const {
assert(! is_compiled_frame(), "unknown compiled frame size");
return fp();
}
+
+#ifndef PRODUCT
+// This is a generic constructor which is only used by pns() in debug.cpp.
+frame::frame(void* sp, void* fp, void* pc) {
+ init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
+}
+#endif
diff --git a/src/cpu/x86/vm/frame_x86.hpp b/src/cpu/x86/vm/frame_x86.hpp
index 8c6a72c2e..1fd5b9c7a 100644
--- a/src/cpu/x86/vm/frame_x86.hpp
+++ b/src/cpu/x86/vm/frame_x86.hpp
@@ -76,11 +76,11 @@
// [locals and parameters ]
// <- sender sp
-// [1] When the c++ interpreter calls a new method it returns to the frame
+// [1] When the C++ interpreter calls a new method it returns to the frame
// manager which allocates a new frame on the stack. In that case there
// is no real callee of this newly allocated frame. The frame manager is
-// aware of the additional frame(s) and will pop them as nested calls
-// complete. Howevers tTo make it look good in the debugger the frame
+// aware of the additional frame(s) and will pop them as nested calls
+// complete. However, to make it look good in the debugger the frame
// manager actually installs a dummy pc pointing to RecursiveInterpreterActivation
// with a fake interpreter_state* parameter to make it easy to debug
// nested calls.
@@ -88,7 +88,7 @@
// Note that contrary to the layout for the assembly interpreter the
// expression stack allocated for the C++ interpreter is full sized.
// However this is not as bad as it seems as the interpreter frame_manager
-// will truncate the unused space on succesive method calls.
+// will truncate the unused space on successive method calls.
//
// ------------------------------ C++ interpreter ----------------------------------------
@@ -172,10 +172,7 @@
#ifdef ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
- static void verify_deopt_original_pc( nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
- static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) {
- verify_deopt_original_pc(nm, unextended_sp, true);
- }
+ static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp);
#endif
public:
@@ -187,6 +184,8 @@
frame(intptr_t* sp, intptr_t* fp);
+ void init(intptr_t* sp, intptr_t* fp, address pc);
+
// accessors for the instance variables
// Note: not necessarily the real 'frame pointer' (see real_fp)
intptr_t* fp() const { return _fp; }
diff --git a/src/cpu/x86/vm/frame_x86.inline.hpp b/src/cpu/x86/vm/frame_x86.inline.hpp
index 9b875b834..a2649fdb1 100644
--- a/src/cpu/x86/vm/frame_x86.inline.hpp
+++ b/src/cpu/x86/vm/frame_x86.inline.hpp
@@ -40,7 +40,7 @@ inline frame::frame() {
_deopt_state = unknown;
}
-inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
+inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
_sp = sp;
_unextended_sp = sp;
_fp = fp;
@@ -58,6 +58,10 @@ inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
}
}
+inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
+ init(sp, fp, pc);
+}
+
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
_sp = sp;
_unextended_sp = unextended_sp;
@@ -89,7 +93,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
// find_blob call. This is also why we can have no asserts on the validity
// of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
// -> pd_last_frame should use a specialized version of pd_last_frame which could
- // call a specilaized frame constructor instead of this one.
+ // call a specialized frame constructor instead of this one.
// Then we could use the assert below. However this assert is of somewhat dubious
// value.
// assert(_pc != NULL, "no pc?");
diff --git a/src/cpu/x86/vm/globals_x86.hpp b/src/cpu/x86/vm/globals_x86.hpp
index 1401997b3..57adca2ff 100644
--- a/src/cpu/x86/vm/globals_x86.hpp
+++ b/src/cpu/x86/vm/globals_x86.hpp
@@ -82,6 +82,8 @@ define_pd_global(uintx, CMSYoungGenPerWorker, 64*M); // default max size of CMS
define_pd_global(uintx, TypeProfileLevel, 111);
+define_pd_global(bool, PreserveFramePointer, false);
+
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
\
develop(bool, IEEEPrecision, true, \
diff --git a/src/cpu/x86/vm/macroAssembler_x86.cpp b/src/cpu/x86/vm/macroAssembler_x86.cpp
index 5857a9350..b3a72ab0e 100644
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp
@@ -6122,6 +6122,10 @@ void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_
// We always push rbp, so that on return to interpreter rbp, will be
// restored correctly and we can correct the stack.
push(rbp);
+    // Save caller's stack pointer into RBP if the frame pointer is preserved.
+    if (PreserveFramePointer) {
+      mov(rbp, rsp);
+    }
// Remove word for ebp
framesize -= wordSize;
@@ -6136,6 +6140,13 @@ void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_
// Save RBP register now.
framesize -= wordSize;
movptr(Address(rsp, framesize), rbp);
+      // Save caller's stack pointer into RBP if the frame pointer is preserved.
+      if (PreserveFramePointer) {
+        movptr(rbp, rsp);
+        if (framesize > 0) {
+          addptr(rbp, framesize);
+        }
+      }
}
if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
@@ -6690,7 +6701,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
subl(cnt2, stride2);
jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
// clean upper bits of YMM registers
- vzeroupper();
+ vpxor(vec1, vec1);
// compare wide vectors tail
bind(COMPARE_WIDE_TAIL);
@@ -6705,7 +6716,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
// Identifies the mismatching (higher or lower)16-bytes in the 32-byte vectors.
bind(VECTOR_NOT_EQUAL);
// clean upper bits of YMM registers
- vzeroupper();
+ vpxor(vec1, vec1);
lea(str1, Address(str1, result, scale));
lea(str2, Address(str2, result, scale));
jmp(COMPARE_16_CHARS);
@@ -6964,7 +6975,8 @@ void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Regist
bind(DONE);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
- vzeroupper();
+ vpxor(vec1, vec1);
+ vpxor(vec2, vec2);
}
}
@@ -7098,7 +7110,8 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
BIND(L_check_fill_8_bytes);
// clean upper bits of YMM registers
- vzeroupper();
+ movdl(xtmp, value);
+ pshufd(xtmp, xtmp, 0);
} else {
// Fill 32-byte chunks
pshufd(xtmp, xtmp, 0);
@@ -7261,7 +7274,11 @@ void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
bind(L_copy_16_chars_exit);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
- vzeroupper();
+ vpxor(tmp2Reg, tmp2Reg);
+ vpxor(tmp3Reg, tmp3Reg);
+ vpxor(tmp4Reg, tmp4Reg);
+ movdl(tmp1Reg, tmp5);
+ pshufd(tmp1Reg, tmp1Reg, 0);
}
subptr(len, 8);
jccb(Assembler::greater, L_copy_8_chars_exit);
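Annotation: every vzeroupper replacement in this file (and in the stub generators below) relies on the same property: a 256-bit vpxor of a register with itself clears the entire YMM register, upper lane included, and unlike vzeroupper it touches only the named register, which is why each site zeroes exactly the vectors it dirtied. A hedged intrinsics illustration (needs AVX2, e.g. -mavx2, matching the UseAVX >= 2 guards at these sites):

    #include <immintrin.h>
    #include <cassert>

    int main() {
      __m256i v = _mm256_set1_epi32(-1);       // dirty all 256 bits
      v = _mm256_xor_si256(v, v);              // vpxor ymm, ymm, ymm -> all zero
      alignas(32) int out[8];
      _mm256_store_si256(reinterpret_cast<__m256i*>(&out[0]), v);
      for (int i = 0; i < 8; ++i) {
        assert(out[i] == 0);                   // upper 128 bits cleared too
      }
      return 0;
    }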
diff --git a/src/cpu/x86/vm/methodHandles_x86.cpp b/src/cpu/x86/vm/methodHandles_x86.cpp
index 42c690f5e..2f1326a62 100644
--- a/src/cpu/x86/vm/methodHandles_x86.cpp
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp
@@ -373,7 +373,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
// member_reg - MemberName that was the trailing argument
// temp1_recv_klass - klass of stacked receiver, if needed
// rsi/r13 - interpreter linkage (if interpreted)
- // rcx, rdx, rsi, rdi, r8, r8 - compiler arguments (if compiled)
+ // rcx, rdx, rsi, rdi, r8 - compiler arguments (if compiled)
Label L_incompatible_class_change_error;
switch (iid) {
diff --git a/src/cpu/x86/vm/runtime_x86_32.cpp b/src/cpu/x86/vm/runtime_x86_32.cpp
index 1cc10d766..36457cb81 100644
--- a/src/cpu/x86/vm/runtime_x86_32.cpp
+++ b/src/cpu/x86/vm/runtime_x86_32.cpp
@@ -126,10 +126,6 @@ void OptoRuntime::generate_exception_blob() {
// rax: exception handler for given <exception oop/exception pc>
- // Restore SP from BP if the exception PC is a MethodHandle call site.
- __ cmpl(Address(rcx, JavaThread::is_method_handle_return_offset()), 0);
- __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
-
// We have a handler in rax, (could be deopt blob)
// rdx - throwing pc, deopt blob will need it.
diff --git a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
index be2bfcfa0..86ce38d24 100644
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
@@ -4017,8 +4017,8 @@ void OptoRuntime::generate_exception_blob() {
// Save callee-saved registers. See x86_64.ad.
- // rbp is an implicitly saved callee saved register (i.e. the calling
- // convention will save restore it in prolog/epilog) Other than that
+ // rbp is an implicitly saved callee saved register (i.e., the calling
+ // convention will save/restore it in the prolog/epilog). Other than that
// there are no callee save registers now that adapter frames are gone.
__ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
@@ -4060,9 +4060,9 @@ void OptoRuntime::generate_exception_blob() {
// Restore callee-saved registers
- // rbp is an implicitly saved callee saved register (i.e. the calling
+ // rbp is an implicitly saved callee-saved register (i.e., the calling
// convention will save restore it in prolog/epilog) Other than that
- // there are no callee save registers no that adapter frames are gone.
+ // there are no callee save registers now that adapter frames are gone.
__ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
@@ -4071,10 +4071,6 @@ void OptoRuntime::generate_exception_blob() {
// rax: exception handler
- // Restore SP from BP if the exception PC is a MethodHandle call site.
- __ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
- __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
-
// We have a handler in rax (could be deopt blob).
__ mov(r8, rax);
diff --git a/src/cpu/x86/vm/stubGenerator_x86_32.cpp b/src/cpu/x86/vm/stubGenerator_x86_32.cpp
index 1622fe5ff..5387ba184 100644
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp
@@ -837,7 +837,8 @@ class StubGenerator: public StubCodeGenerator {
if (UseUnalignedLoadStores && (UseAVX >= 2)) {
// clean upper bits of YMM registers
- __ vzeroupper();
+ __ vpxor(xmm0, xmm0);
+ __ vpxor(xmm1, xmm1);
}
__ addl(qword_count, 8);
__ jccb(Assembler::zero, L_exit);
diff --git a/src/cpu/x86/vm/stubGenerator_x86_64.cpp b/src/cpu/x86/vm/stubGenerator_x86_64.cpp
index 0000146f5..0bb6118d7 100644
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp
@@ -1328,7 +1328,8 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_end);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
- __ vzeroupper();
+ __ vpxor(xmm0, xmm0);
+ __ vpxor(xmm1, xmm1);
}
} else {
// Copy 32-bytes per iteration
@@ -1405,7 +1406,8 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_end);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
- __ vzeroupper();
+ __ vpxor(xmm0, xmm0);
+ __ vpxor(xmm1, xmm1);
}
} else {
// Copy 32-bytes per iteration
diff --git a/src/cpu/x86/vm/vm_version_x86.hpp b/src/cpu/x86/vm/vm_version_x86.hpp
index 1ad94e38b..49c9dba7e 100644
--- a/src/cpu/x86/vm/vm_version_x86.hpp
+++ b/src/cpu/x86/vm/vm_version_x86.hpp
@@ -570,10 +570,12 @@ public:
static uint cores_per_cpu() {
uint result = 1;
if (is_intel()) {
- if (supports_processor_topology()) {
+ bool supports_topology = supports_processor_topology();
+ if (supports_topology) {
result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
_cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
- } else {
+ }
+ if (!supports_topology || result == 0) {
result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
}
} else if (is_amd()) {
@@ -590,7 +592,7 @@ public:
result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
cores_per_cpu();
}
- return result;
+ return (result == 0 ? 1 : result);
}
static intx prefetch_data_size() {
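Annotation: the hardening above guards two failure modes visible in the diff: the CPUID topology ratio can come out 0, in which case the code now retries the legacy leaf-4 count, and threads_per_cpu now clamps its result so it never reports 0. A hedged restatement of the new cores_per_cpu control flow (parameter names are illustrative):

    #include <cassert>

    static unsigned cores_per_cpu(bool supports_topology,
                                  unsigned topo_logical, unsigned topo_per_core,
                                  unsigned legacy_cores_minus_one) {
      unsigned result = 1;
      if (supports_topology) {
        result = topo_logical / topo_per_core;   // CPUID leaf 0xB ratio
      }
      if (!supports_topology || result == 0) {
        result = legacy_cores_minus_one + 1;     // fall back to leaf 4
      }
      return result;
    }

    int main() {
      assert(cores_per_cpu(true, 8, 2, 3) == 4);  // normal topology path
      assert(cores_per_cpu(true, 0, 2, 3) == 4);  // zero ratio triggers fallback
      return 0;
    }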
diff --git a/src/cpu/x86/vm/x86.ad b/src/cpu/x86/vm/x86.ad
index b575a98a8..8d402c1c6 100644
--- a/src/cpu/x86/vm/x86.ad
+++ b/src/cpu/x86/vm/x86.ad
@@ -912,21 +912,6 @@ static inline jdouble replicate8_imm(int con, int width) {
encode %{
- enc_class preserve_SP %{
- debug_only(int off0 = cbuf.insts_size());
- MacroAssembler _masm(&cbuf);
- // RBP is preserved across all calls, even compiled calls.
- // Use it to preserve RSP in places where the callee might change the SP.
- __ movptr(rbp_mh_SP_save, rsp);
- debug_only(int off1 = cbuf.insts_size());
- assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
- %}
-
- enc_class restore_SP %{
- MacroAssembler _masm(&cbuf);
- __ movptr(rsp, rbp_mh_SP_save);
- %}
-
enc_class call_epilog %{
if (VerifyStackAtCalls) {
// Check that stack depth is unchanged: find majik cookie on stack
diff --git a/src/cpu/x86/vm/x86_32.ad b/src/cpu/x86/vm/x86_32.ad
index 63a7a0663..42aba5b21 100644
--- a/src/cpu/x86/vm/x86_32.ad
+++ b/src/cpu/x86/vm/x86_32.ad
@@ -123,50 +123,94 @@ alloc_class chunk0( ECX, EBX, EBP, EDI, EAX, EDX, ESI, ESP,
// 2) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//
+// Class for no registers (empty set).
+reg_class no_reg();
+
// Class for all registers
-reg_class any_reg(EAX, EDX, EBP, EDI, ESI, ECX, EBX, ESP);
+reg_class any_reg_with_ebp(EAX, EDX, EBP, EDI, ESI, ECX, EBX, ESP);
+// Class for all registers (excluding EBP)
+reg_class any_reg_no_ebp(EAX, EDX, EDI, ESI, ECX, EBX, ESP);
+// Dynamic register class that selects at runtime between register classes
+// any_reg_with_ebp and any_reg_no_ebp (depending on the value of the flag PreserveFramePointer).
+// Equivalent to: return PreserveFramePointer ? any_reg_no_ebp : any_reg_with_ebp;
+reg_class_dynamic any_reg(any_reg_no_ebp, any_reg_with_ebp, %{ PreserveFramePointer %});
+
// Class for general registers
-reg_class int_reg(EAX, EDX, EBP, EDI, ESI, ECX, EBX);
-// Class for general registers which may be used for implicit null checks on win95
-// Also safe for use by tailjump. We don't want to allocate in rbp,
-reg_class int_reg_no_rbp(EAX, EDX, EDI, ESI, ECX, EBX);
+reg_class int_reg_with_ebp(EAX, EDX, EBP, EDI, ESI, ECX, EBX);
+// Class for general registers (excluding EBP).
+// This register class can be used for implicit null checks on win95.
+// It is also safe for use by tailjumps (we don't want to allocate in ebp).
+// Used also if the PreserveFramePointer flag is true.
+reg_class int_reg_no_ebp(EAX, EDX, EDI, ESI, ECX, EBX);
+// Dynamic register class that selects between int_reg_no_ebp and int_reg_with_ebp.
+reg_class_dynamic int_reg(int_reg_no_ebp, int_reg_with_ebp, %{ PreserveFramePointer %});
+
// Class of "X" registers
reg_class int_x_reg(EBX, ECX, EDX, EAX);
+
// Class of registers that can appear in an address with no offset.
// EBP and ESP require an extra instruction byte for zero offset.
// Used in fast-unlock
reg_class p_reg(EDX, EDI, ESI, EBX);
-// Class for general registers not including ECX
-reg_class ncx_reg(EAX, EDX, EBP, EDI, ESI, EBX);
-// Class for general registers not including EAX
+
+// Class for general registers excluding ECX
+reg_class ncx_reg_with_ebp(EAX, EDX, EBP, EDI, ESI, EBX);
+// Class for general registers excluding ECX (and EBP)
+reg_class ncx_reg_no_ebp(EAX, EDX, EDI, ESI, EBX);
+// Dynamic register class that selects between ncx_reg_no_ebp and ncx_reg_with_ebp.
+reg_class_dynamic ncx_reg(ncx_reg_no_ebp, ncx_reg_with_ebp, %{ PreserveFramePointer %});
+
+// Class for general registers excluding EAX
reg_class nax_reg(EDX, EDI, ESI, ECX, EBX);
-// Class for general registers not including EAX or EBX.
-reg_class nabx_reg(EDX, EDI, ESI, ECX, EBP);
+
+// Class for general registers excluding EAX and EBX.
+reg_class nabx_reg_with_ebp(EDX, EDI, ESI, ECX, EBP);
+// Class for general registers excluding EAX and EBX (and EBP)
+reg_class nabx_reg_no_ebp(EDX, EDI, ESI, ECX);
+// Dynamic register class that selects between nabx_reg_no_ebp and nabx_reg_with_ebp.
+reg_class_dynamic nabx_reg(nabx_reg_no_ebp, nabx_reg_with_ebp, %{ PreserveFramePointer %});
+
// Class of EAX (for multiply and divide operations)
reg_class eax_reg(EAX);
+
// Class of EBX (for atomic add)
reg_class ebx_reg(EBX);
+
// Class of ECX (for shift and JCXZ operations and cmpLTMask)
reg_class ecx_reg(ECX);
+
// Class of EDX (for multiply and divide operations)
reg_class edx_reg(EDX);
+
// Class of EDI (for synchronization)
reg_class edi_reg(EDI);
+
// Class of ESI (for synchronization)
reg_class esi_reg(ESI);
-// Singleton class for interpreter's stack pointer
-reg_class ebp_reg(EBP);
+
// Singleton class for stack pointer
reg_class sp_reg(ESP);
+
// Singleton class for instruction pointer
// reg_class ip_reg(EIP);
+
// Class of integer register pairs
-reg_class long_reg( EAX,EDX, ECX,EBX, EBP,EDI );
+reg_class long_reg_with_ebp( EAX,EDX, ECX,EBX, EBP,EDI );
+// Class of integer register pairs (excluding EBP and EDI).
+reg_class long_reg_no_ebp( EAX,EDX, ECX,EBX );
+// Dynamic register class that selects between long_reg_no_ebp and long_reg_with_ebp.
+reg_class_dynamic long_reg(long_reg_no_ebp, long_reg_with_ebp, %{ PreserveFramePointer %});
+
// Class of integer register pairs that aligns with calling convention
reg_class eadx_reg( EAX,EDX );
reg_class ebcx_reg( ECX,EBX );
+
// Not AX or DX, used in divides
-reg_class nadx_reg( EBX,ECX,ESI,EDI,EBP );
+reg_class nadx_reg_with_ebp(EBX, ECX, ESI, EDI, EBP);
+// Not AX or DX (nor EBP), used in divides
+reg_class nadx_reg_no_ebp(EBX, ECX, ESI, EDI);
+// Dynamic register class that selects between nadx_reg_no_ebp and nadx_reg_with_ebp.
+reg_class_dynamic nadx_reg(nadx_reg_no_ebp, nadx_reg_with_ebp, %{ PreserveFramePointer %});
// Floating point registers. Notice FPR0 is not a choice.
// FPR0 is not ever allocated; we use clever encodings to fake
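Annotation: the reg_class_dynamic pattern introduced above (and mirrored in x86_64.ad below) makes the allocatable set a runtime choice: with PreserveFramePointer on, EBP drops out of every general-purpose class so the allocator can never clobber the frame link. Conceptually each dynamic class is a predicate picking one of two fixed masks, as this bitmask sketch models (names mirror the .ad classes, but the code is illustrative):

    #include <cstdint>
    #include <cstdio>

    static bool PreserveFramePointer = true;   // the VM flag in the predicate

    enum Reg : uint32_t { EAX = 1u << 0, EDX = 1u << 1, EBP = 1u << 2,
                          EDI = 1u << 3, ESI = 1u << 4, ECX = 1u << 5,
                          EBX = 1u << 6 };

    static const uint32_t int_reg_with_ebp = EAX|EDX|EBP|EDI|ESI|ECX|EBX;
    static const uint32_t int_reg_no_ebp   = EAX|EDX|EDI|ESI|ECX|EBX;

    // What reg_class_dynamic int_reg(...) boils down to at selection time.
    static uint32_t int_reg_mask() {
      return PreserveFramePointer ? int_reg_no_ebp : int_reg_with_ebp;
    }

    int main() {
      std::printf("EBP allocatable: %s\n",
                  (int_reg_mask() & EBP) ? "yes" : "no");
      return 0;
    }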
@@ -240,18 +284,11 @@ static int pre_call_resets_size() {
return size;
}
-static int preserve_SP_size() {
- return 2; // op, rm(reg/reg)
-}
-
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
- int offset = 5 + pre_call_resets_size(); // 5 bytes from start of call to where return address points
- if (_method_handle_invoke)
- offset += preserve_SP_size();
- return offset;
+ return 5 + pre_call_resets_size(); // 5 bytes from start of call to where return address points
}
int MachCallDynamicJavaNode::ret_addr_offset() {
@@ -285,15 +322,6 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
-int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
- current_offset += pre_call_resets_size(); // skip fldcw, if any
- current_offset += preserve_SP_size(); // skip mov rbp, rsp
- current_offset += 1; // skip call opcode byte
- return round_to(current_offset, alignment_required()) - current_offset;
-}
-
-// The address of the call instruction needs to be 4-byte aligned to
-// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
current_offset += pre_call_resets_size(); // skip fldcw, if any
current_offset += 5; // skip MOV instruction
@@ -523,6 +551,10 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
st->print("# stack bang (%d bytes)", bangsize);
st->print("\n\t");
st->print("PUSH EBP\t# Save EBP");
+  if (PreserveFramePointer) {
+    st->print("\n\t");
+    st->print("MOV EBP, ESP\t# Save the caller's SP into EBP");
+  }
if (framesize) {
st->print("\n\t");
st->print("SUB ESP, #%d\t# Create frame",framesize);
@@ -532,6 +564,14 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
st->print("\n\t");
framesize -= wordSize;
st->print("MOV [ESP + #%d], EBP\t# Save EBP",framesize);
+    if (PreserveFramePointer) {
+      st->print("\n\t");
+      st->print("MOV EBP, ESP\t# Save the caller's SP into EBP");
+      if (framesize > 0) {
+        st->print("\n\t");
+        st->print("ADD EBP, #%d", framesize);
+      }
+    }
}
if (VerifyStackAtCalls) {
@@ -1488,7 +1528,7 @@ RegMask Matcher::modL_proj_mask() {
}
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
- return EBP_REG_mask();
+ return NO_REG_mask();
}
// Returns true if the high 32 bits of the value is known to be zero.
@@ -3734,7 +3774,7 @@ operand eRegP() %{
// On windows95, EBP is not safe to use for implicit null tests.
operand eRegP_no_EBP() %{
- constraint(ALLOC_IN_RC(int_reg_no_rbp));
+ constraint(ALLOC_IN_RC(int_reg_no_ebp));
match(RegP);
match(eAXRegP);
match(eBXRegP);
@@ -3823,13 +3863,6 @@ operand eDIRegP(eRegP reg) %{
interface(REG_INTER);
%}
-operand eBPRegP() %{
- constraint(ALLOC_IN_RC(ebp_reg));
- match(RegP);
- format %{ "EBP" %}
- interface(REG_INTER);
-%}
-
operand eRegL() %{
constraint(ALLOC_IN_RC(long_reg));
match(RegL);
@@ -12708,7 +12741,6 @@ instruct cmovFF_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, regF dst,
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
match(CallStaticJava);
- predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth);
ins_cost(300);
@@ -12722,29 +12754,6 @@ instruct CallStaticJavaDirect(method meth) %{
ins_alignment(4);
%}
-// Call Java Static Instruction (method handle version)
-// Note: If this code changes, the corresponding ret_addr_offset() and
-// compute_padding() functions will have to be adjusted.
-instruct CallStaticJavaHandle(method meth, eBPRegP ebp_mh_SP_save) %{
- match(CallStaticJava);
- predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
- effect(USE meth);
- // EBP is saved by all callees (for interpreter stack correction).
- // We use it here for a similar purpose, in {preserve,restore}_SP.
-
- ins_cost(300);
- format %{ "CALL,static/MethodHandle " %}
- opcode(0xE8); /* E8 cd */
- ins_encode( pre_call_resets,
- preserve_SP,
- Java_Static_Call( meth ),
- restore_SP,
- call_epilog,
- post_call_FPU );
- ins_pipe( pipe_slow );
- ins_alignment(4);
-%}
-
// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
diff --git a/src/cpu/x86/vm/x86_64.ad b/src/cpu/x86/vm/x86_64.ad
index 1233dbf77..52bc63507 100644
--- a/src/cpu/x86/vm/x86_64.ad
+++ b/src/cpu/x86/vm/x86_64.ad
@@ -166,55 +166,67 @@ alloc_class chunk0(R10, R10_H,
// 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//
-// Class for all pointer registers (including RSP)
-reg_class any_reg(RAX, RAX_H,
- RDX, RDX_H,
- RBP, RBP_H,
- RDI, RDI_H,
- RSI, RSI_H,
- RCX, RCX_H,
- RBX, RBX_H,
- RSP, RSP_H,
- R8, R8_H,
- R9, R9_H,
- R10, R10_H,
- R11, R11_H,
- R12, R12_H,
- R13, R13_H,
- R14, R14_H,
- R15, R15_H);
-
-// Class for all pointer registers except RSP
-reg_class ptr_reg(RAX, RAX_H,
- RDX, RDX_H,
- RBP, RBP_H,
- RDI, RDI_H,
- RSI, RSI_H,
- RCX, RCX_H,
- RBX, RBX_H,
- R8, R8_H,
- R9, R9_H,
- R10, R10_H,
- R11, R11_H,
- R13, R13_H,
- R14, R14_H);
-
-// Class for all pointer registers except RAX and RSP
-reg_class ptr_no_rax_reg(RDX, RDX_H,
- RBP, RBP_H,
+// Empty register class.
+reg_class no_reg();
+
+// Class for all pointer registers (including RSP and RBP)
+reg_class any_reg_with_rbp(RAX, RAX_H,
+ RDX, RDX_H,
+ RBP, RBP_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ RSP, RSP_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R12, R12_H,
+ R13, R13_H,
+ R14, R14_H,
+ R15, R15_H);
+
+// Class for all pointer registers (including RSP, but excluding RBP)
+reg_class any_reg_no_rbp(RAX, RAX_H,
+ RDX, RDX_H,
RDI, RDI_H,
RSI, RSI_H,
RCX, RCX_H,
RBX, RBX_H,
+ RSP, RSP_H,
R8, R8_H,
R9, R9_H,
R10, R10_H,
R11, R11_H,
+ R12, R12_H,
R13, R13_H,
- R14, R14_H);
-
-reg_class ptr_no_rbp_reg(RDX, RDX_H,
- RAX, RAX_H,
+ R14, R14_H,
+ R15, R15_H);
+
+// Dynamic register class that selects at runtime between register classes
+// any_reg_no_rbp and any_reg_with_rbp (depending on the value of the flag PreserveFramePointer).
+// Equivalent to: return PreserveFramePointer ? any_reg_no_rbp : any_reg_with_rbp;
+reg_class_dynamic any_reg(any_reg_no_rbp, any_reg_with_rbp, %{ PreserveFramePointer %});
+
+// Class for all pointer registers (excluding RSP)
+reg_class ptr_reg_with_rbp(RAX, RAX_H,
+ RDX, RDX_H,
+ RBP, RBP_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Class for all pointer registers (excluding RSP and RBP)
+reg_class ptr_reg_no_rbp(RAX, RAX_H,
+ RDX, RDX_H,
RDI, RDI_H,
RSI, RSI_H,
RCX, RCX_H,
@@ -226,18 +238,66 @@ reg_class ptr_no_rbp_reg(RDX, RDX_H,
R13, R13_H,
R14, R14_H);
-// Class for all pointer registers except RAX, RBX and RSP
-reg_class ptr_no_rax_rbx_reg(RDX, RDX_H,
- RBP, RBP_H,
- RDI, RDI_H,
- RSI, RSI_H,
- RCX, RCX_H,
- R8, R8_H,
- R9, R9_H,
- R10, R10_H,
- R11, R11_H,
- R13, R13_H,
- R14, R14_H);
+// Dynamic register class that selects between ptr_reg_no_rbp and ptr_reg_with_rbp.
+reg_class_dynamic ptr_reg(ptr_reg_no_rbp, ptr_reg_with_rbp, %{ PreserveFramePointer %});
+
+// Class for all pointer registers (excluding RAX and RSP)
+reg_class ptr_no_rax_reg_with_rbp(RDX, RDX_H,
+ RBP, RBP_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Class for all pointer registers (excluding RAX, RSP, and RBP)
+reg_class ptr_no_rax_reg_no_rbp(RDX, RDX_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Dynamic register class that selects between ptr_no_rax_reg_no_rbp and ptr_no_rax_reg_with_rbp.
+reg_class_dynamic ptr_no_rax_reg(ptr_no_rax_reg_no_rbp, ptr_no_rax_reg_with_rbp, %{ PreserveFramePointer %});
+
+// Class for all pointer registers (excluding RAX, RBX, and RSP)
+reg_class ptr_no_rax_rbx_reg_with_rbp(RDX, RDX_H,
+ RBP, RBP_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Class for all pointer registers (excluding RAX, RBX, RSP, and RBP)
+reg_class ptr_no_rax_rbx_reg_no_rbp(RDX, RDX_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Dynamic register class that selects between ptr_no_rax_rbx_reg_no_rbp and ptr_no_rax_rbx_reg_with_rbp.
+reg_class_dynamic ptr_no_rax_rbx_reg(ptr_no_rax_rbx_reg_no_rbp, ptr_no_rax_rbx_reg_with_rbp, %{ PreserveFramePointer %});
// Singleton class for RAX pointer register
reg_class ptr_rax_reg(RAX, RAX_H);
@@ -251,59 +311,29 @@ reg_class ptr_rsi_reg(RSI, RSI_H);
// Singleton class for RDI pointer register
reg_class ptr_rdi_reg(RDI, RDI_H);
-// Singleton class for RBP pointer register
-reg_class ptr_rbp_reg(RBP, RBP_H);
-
// Singleton class for stack pointer
reg_class ptr_rsp_reg(RSP, RSP_H);
// Singleton class for TLS pointer
reg_class ptr_r15_reg(R15, R15_H);
-// Class for all long registers (except RSP)
-reg_class long_reg(RAX, RAX_H,
- RDX, RDX_H,
- RBP, RBP_H,
- RDI, RDI_H,
- RSI, RSI_H,
- RCX, RCX_H,
- RBX, RBX_H,
- R8, R8_H,
- R9, R9_H,
- R10, R10_H,
- R11, R11_H,
- R13, R13_H,
- R14, R14_H);
-
-// Class for all long registers except RAX, RDX (and RSP)
-reg_class long_no_rax_rdx_reg(RBP, RBP_H,
- RDI, RDI_H,
- RSI, RSI_H,
- RCX, RCX_H,
- RBX, RBX_H,
- R8, R8_H,
- R9, R9_H,
- R10, R10_H,
- R11, R11_H,
- R13, R13_H,
- R14, R14_H);
-
-// Class for all long registers except RCX (and RSP)
-reg_class long_no_rcx_reg(RBP, RBP_H,
- RDI, RDI_H,
- RSI, RSI_H,
- RAX, RAX_H,
- RDX, RDX_H,
- RBX, RBX_H,
- R8, R8_H,
- R9, R9_H,
- R10, R10_H,
- R11, R11_H,
- R13, R13_H,
- R14, R14_H);
-
-// Class for all long registers except RAX (and RSP)
-reg_class long_no_rax_reg(RBP, RBP_H,
+// Class for all long registers (excluding RSP)
+reg_class long_reg_with_rbp(RAX, RAX_H,
+ RDX, RDX_H,
+ RBP, RBP_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Class for all long registers (excluding RSP and RBP)
+reg_class long_reg_no_rbp(RAX, RAX_H,
RDX, RDX_H,
RDI, RDI_H,
RSI, RSI_H,
@@ -316,6 +346,67 @@ reg_class long_no_rax_reg(RBP, RBP_H,
R13, R13_H,
R14, R14_H);
+// Dynamic register class that selects between long_reg_no_rbp and long_reg_with_rbp.
+reg_class_dynamic long_reg(long_reg_no_rbp, long_reg_with_rbp, %{ PreserveFramePointer %});
+
+// Class for all long registers (excluding RAX, RDX and RSP)
+reg_class long_no_rax_rdx_reg_with_rbp(RBP, RBP_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Class for all long registers (excluding RAX, RDX, RSP, and RBP)
+reg_class long_no_rax_rdx_reg_no_rbp(RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Dynamic register class that selects between long_no_rax_rdx_reg_no_rbp and long_no_rax_rdx_reg_with_rbp.
+reg_class_dynamic long_no_rax_rdx_reg(long_no_rax_rdx_reg_no_rbp, long_no_rax_rdx_reg_with_rbp, %{ PreserveFramePointer %});
+
+// Class for all long registers (excluding RCX and RSP)
+reg_class long_no_rcx_reg_with_rbp(RBP, RBP_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RAX, RAX_H,
+ RDX, RDX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Class for all long registers (excluding RCX, RSP, and RBP)
+reg_class long_no_rcx_reg_no_rbp(RDI, RDI_H,
+ RSI, RSI_H,
+ RAX, RAX_H,
+ RDX, RDX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Dynamic register class that selects between long_no_rcx_reg_no_rbp and long_no_rcx_reg_with_rbp.
+reg_class_dynamic long_no_rcx_reg(long_no_rcx_reg_no_rbp, long_no_rcx_reg_with_rbp, %{ PreserveFramePointer %});
+
// Singleton class for RAX long register
reg_class long_rax_reg(RAX, RAX_H);
@@ -325,27 +416,27 @@ reg_class long_rcx_reg(RCX, RCX_H);
// Singleton class for RDX long register
reg_class long_rdx_reg(RDX, RDX_H);
-// Class for all int registers (except RSP)
-reg_class int_reg(RAX,
- RDX,
- RBP,
- RDI,
- RSI,
- RCX,
- RBX,
- R8,
- R9,
- R10,
- R11,
- R13,
- R14);
-
-// Class for all int registers except RCX (and RSP)
-reg_class int_no_rcx_reg(RAX,
+// Class for all int registers (excluding RSP)
+reg_class int_reg_with_rbp(RAX,
+ RDX,
+ RBP,
+ RDI,
+ RSI,
+ RCX,
+ RBX,
+ R8,
+ R9,
+ R10,
+ R11,
+ R13,
+ R14);
+
+// Class for all int registers (excluding RSP and RBP)
+reg_class int_reg_no_rbp(RAX,
RDX,
- RBP,
RDI,
RSI,
+ RCX,
RBX,
R8,
R9,
@@ -354,18 +445,66 @@ reg_class int_no_rcx_reg(RAX,
R13,
R14);
-// Class for all int registers except RAX, RDX (and RSP)
-reg_class int_no_rax_rdx_reg(RBP,
- RDI,
- RSI,
- RCX,
- RBX,
- R8,
- R9,
- R10,
- R11,
- R13,
- R14);
+// Dynamic register class that selects between int_reg_no_rbp and int_reg_with_rbp.
+reg_class_dynamic int_reg(int_reg_no_rbp, int_reg_with_rbp, %{ PreserveFramePointer %});
+
+// Class for all int registers (excluding RCX and RSP)
+reg_class int_no_rcx_reg_with_rbp(RAX,
+ RDX,
+ RBP,
+ RDI,
+ RSI,
+ RBX,
+ R8,
+ R9,
+ R10,
+ R11,
+ R13,
+ R14);
+
+// Class for all int registers (excluding RCX, RSP, and RBP)
+reg_class int_no_rcx_reg_no_rbp(RAX,
+ RDX,
+ RDI,
+ RSI,
+ RBX,
+ R8,
+ R9,
+ R10,
+ R11,
+ R13,
+ R14);
+
+// Dynamic register class that selects between int_no_rcx_reg_no_rbp and int_no_rcx_reg_with_rbp.
+reg_class_dynamic int_no_rcx_reg(int_no_rcx_reg_no_rbp, int_no_rcx_reg_with_rbp, %{ PreserveFramePointer %});
+
+// Class for all int registers (excluding RAX, RDX, and RSP)
+reg_class int_no_rax_rdx_reg_with_rbp(RBP,
+ RDI,
+ RSI,
+ RCX,
+ RBX,
+ R8,
+ R9,
+ R10,
+ R11,
+ R13,
+ R14);
+
+// Class for all int registers (excluding RAX, RDX, RSP, and RBP)
+reg_class int_no_rax_rdx_reg_no_rbp(RDI,
+ RSI,
+ RCX,
+ RBX,
+ R8,
+ R9,
+ R10,
+ R11,
+ R13,
+ R14);
+
+// Dynamic register class that selects between int_no_rax_rdx_reg_no_rbp and int_no_rax_rdx_reg_with_rbp.
+reg_class_dynamic int_no_rax_rdx_reg(int_no_rax_rdx_reg_no_rbp, int_no_rax_rdx_reg_with_rbp, %{ PreserveFramePointer %});
// Singleton class for RAX int register
reg_class int_rax_reg(RAX);
@@ -396,9 +535,6 @@ source %{
#define __ _masm.
-static int preserve_SP_size() {
- return 3; // rex.w, op, rm(reg/reg)
-}
static int clear_avx_size() {
return (Compile::current()->max_vector_size() > 16) ? 3 : 0; // vzeroupper
}
@@ -409,9 +545,7 @@ static int clear_avx_size() {
int MachCallStaticJavaNode::ret_addr_offset()
{
int offset = 5; // 5 bytes from start of call to where return address points
- offset += clear_avx_size();
- if (_method_handle_invoke)
- offset += preserve_SP_size();
+ offset += clear_avx_size();
return offset;
}
@@ -450,16 +584,6 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
-int CallStaticJavaHandleNode::compute_padding(int current_offset) const
-{
- current_offset += preserve_SP_size(); // skip mov rbp, rsp
- current_offset += clear_avx_size(); // skip vzeroupper
- current_offset += 1; // skip call opcode byte
- return round_to(current_offset, alignment_required()) - current_offset;
-}
-
-// The address of the call instruction needs to be 4-byte aligned to
-// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
{
current_offset += clear_avx_size(); // skip vzeroupper
@@ -724,6 +848,10 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
st->print("# stack bang (%d bytes)", bangsize);
st->print("\n\t");
st->print("pushq rbp\t# Save rbp");
+ if (PreserveFramePointer) {
+ st->print("\n\t");
+ st->print("movq rbp, rsp\t# Save the caller's SP into rbp");
+ }
if (framesize) {
st->print("\n\t");
st->print("subq rsp, #%d\t# Create frame",framesize);
@@ -732,7 +860,15 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
st->print("subq rsp, #%d\t# Create frame",framesize);
st->print("\n\t");
framesize -= wordSize;
- st->print("movq [rsp + #%d], rbp\t# Save rbp",framesize);
+ st->print("movq [rsp + #%d], rbp\t# Save rbp",framesize);
+ if (PreserveFramePointer) {
+ st->print("\n\t");
+ st->print("movq rbp, rsp\t# Save the caller's SP into rbp");
+ if (framesize > 0) {
+ st->print("\n\t");
+ st->print("addq rbp, #%d", framesize);
+ }
+ }
}
if (VerifyStackAtCalls) {
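The point of keeping the caller's SP in RBP is that frame-pointer-based unwinders can then walk compiled frames without extra metadata. A generic sketch of such a walk, assuming the usual x86_64 layout of [saved rbp][return pc] at each frame base (illustrative only, not HotSpot source):

    struct Frame { Frame* saved_rbp; void* ret_pc; };
    // Follow the chain of saved frame pointers, visiting each return address.
    void walk_stack(Frame* fp, void (*visit)(void*)) {
      while (fp != nullptr) {
        visit(fp->ret_pc);    // this frame's return address
        fp = fp->saved_rbp;   // hop to the caller's frame
      }
    }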
@@ -1598,8 +1734,9 @@ RegMask Matcher::modL_proj_mask() {
return LONG_RDX_REG_mask();
}
+// Register into which the SP is saved on method handle invokes. Not used on x86_64.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
- return PTR_RBP_REG_mask();
+ return NO_REG_mask();
}
%}
@@ -3202,7 +3339,7 @@ operand no_rax_rdx_RegI()
// Pointer Register
operand any_RegP()
%{
- constraint(ALLOC_IN_RC(any_reg));
+ constraint(ALLOC_IN_RC(any_reg));
match(RegP);
match(rax_RegP);
match(rbx_RegP);
@@ -3224,8 +3361,8 @@ operand rRegP()
match(rbx_RegP);
match(rdi_RegP);
match(rsi_RegP);
- match(rbp_RegP);
- match(r15_RegP); // See Q&A below about r15_RegP.
+ match(rbp_RegP); // See Q&A below about
+ match(r15_RegP); // r15_RegP and rbp_RegP.
format %{ %}
interface(REG_INTER);
@@ -3241,11 +3378,14 @@ operand rRegN() %{
// Question: Why is r15_RegP (the read-only TLS register) a match for rRegP?
// Answer: Operand match rules govern the DFA as it processes instruction inputs.
-// It's fine for an instruction input which expects rRegP to match a r15_RegP.
+// It's fine for an instruction input that expects rRegP to match an r15_RegP.
// The output of an instruction is controlled by the allocator, which respects
// register class masks, not match rules. Unless an instruction mentions
// r15_RegP or any_RegP explicitly as its output, r15 will not be considered
// by the allocator as an input.
+// The same logic applies to rbp_RegP being a match for rRegP: if PreserveFramePointer==true,
+// RBP is used as a proper frame pointer and is not included in ptr_reg. As a
+// result, RBP is not included in the output of the instruction either.
operand no_rax_RegP()
%{
@@ -3259,9 +3399,11 @@ operand no_rax_RegP()
interface(REG_INTER);
%}
+// This operand is not allowed to use RBP even if
+// RBP is not used to hold the frame pointer.
operand no_rbp_RegP()
%{
- constraint(ALLOC_IN_RC(ptr_no_rbp_reg));
+ constraint(ALLOC_IN_RC(ptr_reg_no_rbp));
match(RegP);
match(rbx_RegP);
match(rsi_RegP);
@@ -3338,16 +3480,6 @@ operand rdi_RegP()
interface(REG_INTER);
%}
-operand rbp_RegP()
-%{
- constraint(ALLOC_IN_RC(ptr_rbp_reg));
- match(RegP);
- match(rRegP);
-
- format %{ %}
- interface(REG_INTER);
-%}
-
operand r15_RegP()
%{
constraint(ALLOC_IN_RC(ptr_r15_reg));
@@ -11414,7 +11546,6 @@ instruct safePoint_poll_far(rFlagsReg cr, rRegP poll)
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
match(CallStaticJava);
- predicate(!((CallStaticJavaNode*) n)->is_method_handle_invoke());
effect(USE meth);
ins_cost(300);
@@ -11425,27 +11556,6 @@ instruct CallStaticJavaDirect(method meth) %{
ins_alignment(4);
%}
-// Call Java Static Instruction (method handle version)
-// Note: If this code changes, the corresponding ret_addr_offset() and
-// compute_padding() functions will have to be adjusted.
-instruct CallStaticJavaHandle(method meth, rbp_RegP rbp_mh_SP_save) %{
- match(CallStaticJava);
- predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke());
- effect(USE meth);
- // RBP is saved by all callees (for interpreter stack correction).
- // We use it here for a similar purpose, in {preserve,restore}_SP.
-
- ins_cost(300);
- format %{ "call,static/MethodHandle " %}
- opcode(0xE8); /* E8 cd */
- ins_encode(clear_avx, preserve_SP,
- Java_Static_Call(meth),
- restore_SP,
- call_epilog);
- ins_pipe(pipe_slow);
- ins_alignment(4);
-%}
-
// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
diff --git a/src/cpu/zero/vm/cppInterpreter_zero.cpp b/src/cpu/zero/vm/cppInterpreter_zero.cpp
index 9272724ad..4de3dae32 100644
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp
@@ -730,7 +730,7 @@ InterpreterFrame *InterpreterFrame::build(Method* const method, TRAPS) {
if (method->is_static())
object = method->constants()->pool_holder()->java_mirror();
else
- object = (oop) locals[0];
+ object = (oop) (void*)locals[0];
monitor->set_obj(object);
}
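The extra (void*) step is presumably there because a direct integral-to-oop conversion is not always well-formed when oop is a class type; routing through void* makes both halves ordinary conversions. A hedged sketch with a stand-in type:

    #include <cstdint>
    typedef void* oop_like;                  // stand-in for HotSpot's oop
    intptr_t slot = 0;                       // an interpreter locals slot
    oop_like object = (oop_like)(void*)slot; // mirrors the patched two-step cast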
diff --git a/src/cpu/zero/vm/frame_zero.cpp b/src/cpu/zero/vm/frame_zero.cpp
index 4be75fdc3..38317525e 100644
--- a/src/cpu/zero/vm/frame_zero.cpp
+++ b/src/cpu/zero/vm/frame_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -216,7 +216,7 @@ void frame::zero_print_on_error(int frame_index,
valuebuf[buflen - 1] = '\0';
// Print the result
- st->print_cr(" " PTR_FORMAT ": %-21s = %s", addr, fieldbuf, valuebuf);
+ st->print_cr(" " PTR_FORMAT ": %-21s = %s", p2i(addr), fieldbuf, valuebuf);
}
}
@@ -441,3 +441,10 @@ intptr_t *frame::initial_deoptimization_info() {
// unused... but returns fp() to minimize changes introduced by 7087445
return fp();
}
+
+#ifndef PRODUCT
+// This is a generic constructor which is only used by pns() in debug.cpp.
+frame::frame(void* sp, void* fp, void* pc) {
+ Unimplemented();
+}
+#endif
diff --git a/src/cpu/zero/vm/frame_zero.inline.hpp b/src/cpu/zero/vm/frame_zero.inline.hpp
index 8c9685998..45368f625 100644
--- a/src/cpu/zero/vm/frame_zero.inline.hpp
+++ b/src/cpu/zero/vm/frame_zero.inline.hpp
@@ -26,6 +26,8 @@
#ifndef CPU_ZERO_VM_FRAME_ZERO_INLINE_HPP
#define CPU_ZERO_VM_FRAME_ZERO_INLINE_HPP
+#include "code/codeCache.hpp"
+
// Constructors
inline frame::frame() {
diff --git a/src/cpu/zero/vm/globals_zero.hpp b/src/cpu/zero/vm/globals_zero.hpp
index 0f88bba85..016540d2c 100644
--- a/src/cpu/zero/vm/globals_zero.hpp
+++ b/src/cpu/zero/vm/globals_zero.hpp
@@ -61,6 +61,8 @@ define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS
define_pd_global(uintx, TypeProfileLevel, 0);
+define_pd_global(bool, PreserveFramePointer, false);
+
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
#endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP
diff --git a/src/cpu/zero/vm/stack_zero.cpp b/src/cpu/zero/vm/stack_zero.cpp
index 33b0551b5..747199f54 100644
--- a/src/cpu/zero/vm/stack_zero.cpp
+++ b/src/cpu/zero/vm/stack_zero.cpp
@@ -30,7 +30,9 @@
int ZeroStack::suggest_size(Thread *thread) const {
assert(needs_setup(), "already set up");
- return align_size_down(abi_stack_available(thread) / 2, wordSize);
+ int abi_available = abi_stack_available(thread);
+ assert(abi_available >= 0, "available abi stack must be >= 0");
+ return align_size_down(abi_available / 2, wordSize);
}
void ZeroStack::handle_overflow(TRAPS) {
diff --git a/src/cpu/zero/vm/stack_zero.inline.hpp b/src/cpu/zero/vm/stack_zero.inline.hpp
index f0387bb7b..0f868823f 100644
--- a/src/cpu/zero/vm/stack_zero.inline.hpp
+++ b/src/cpu/zero/vm/stack_zero.inline.hpp
@@ -48,9 +48,11 @@ inline void ZeroStack::overflow_check(int required_words, TRAPS) {
// to use under normal circumstances. Note that the returned
// value can be negative.
inline int ZeroStack::abi_stack_available(Thread *thread) const {
- int stack_used = thread->stack_base() - (address) &stack_used;
+ guarantee(Thread::current() == thread, "should run in the same thread");
+ int stack_used = thread->stack_base() - (address) &stack_used
+ + (StackYellowPages+StackRedPages+StackShadowPages) * os::vm_page_size();
int stack_free = thread->stack_size() - stack_used;
- return stack_free - shadow_pages_size();
+ return stack_free;
}
#endif // CPU_ZERO_VM_STACK_ZERO_INLINE_HPP
diff --git a/src/os/aix/vm/os_aix.cpp b/src/os/aix/vm/os_aix.cpp
index 9a65f741a..4cd28b868 100644
--- a/src/os/aix/vm/os_aix.cpp
+++ b/src/os/aix/vm/os_aix.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -3727,6 +3727,11 @@ void os::Aix::check_signal_handler(int sig) {
tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
// No need to check this sig any longer
sigaddset(&check_signal_done, sig);
+ // When running under a non-interactive shell, SHUTDOWN2_SIGNAL is reassigned to SIG_IGN
+ if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
+ tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
+ exception_name(sig, buf, O_BUFLEN));
+ }
} else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
@@ -3987,11 +3992,6 @@ jint os::init_2(void) {
return JNI_OK;
}
-// this is called at the end of vm_initialization
-void os::init_3(void) {
- return;
-}
-
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if (!guard_memory((char*)_polling_page, Aix::page_size())) {
diff --git a/src/os/aix/vm/perfMemory_aix.cpp b/src/os/aix/vm/perfMemory_aix.cpp
index 7d64b5f83..be6e855d9 100644
--- a/src/os/aix/vm/perfMemory_aix.cpp
+++ b/src/os/aix/vm/perfMemory_aix.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -201,6 +201,7 @@ static pid_t filename_to_pid(const char* filename) {
// the backing store files. Returns true if the directory is considered
// a secure location. Returns false if the statbuf is a symbolic link or
// if an error occurred.
+//
static bool is_statbuf_secure(struct stat *statp) {
if (S_ISLNK(statp->st_mode) || !S_ISDIR(statp->st_mode)) {
// The path represents a link or some non-directory file type,
@@ -209,15 +210,18 @@ static bool is_statbuf_secure(struct stat *statp) {
return false;
}
// We have an existing directory, check if the permissions are safe.
+ //
if ((statp->st_mode & (S_IWGRP|S_IWOTH)) != 0) {
// The directory is open for writing and could be subjected
// to a symlink or a hard link attack. Declare it insecure.
+ //
return false;
}
- // See if the uid of the directory matches the effective uid of the process.
- //
- if (statp->st_uid != geteuid()) {
+ // If the user is not root, see whether the uid of the directory matches the effective uid of the process.
+ uid_t euid = geteuid();
+ if ((euid != 0) && (statp->st_uid != euid)) {
// The directory was not created by this user, declare it insecure.
+ //
return false;
}
return true;
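The relaxed check reflects that root may legitimately own directories created on behalf of other users. Condensed, the ownership predicate is now:

    #include <sys/stat.h>
    #include <unistd.h>
    // Ownership is only enforced for non-root users.
    static bool owner_is_secure(const struct stat* statp) {
      uid_t euid = geteuid();
      return (euid == 0) || (statp->st_uid == euid);
    }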
@@ -228,6 +232,7 @@ static bool is_statbuf_secure(struct stat *statp) {
// the backing store files. Returns true if the directory exists
// and is considered a secure location. Returns false if the path
// is a symbolic link or if an error occurred.
+//
static bool is_directory_secure(const char* path) {
struct stat statbuf;
int result = 0;
@@ -454,13 +459,27 @@ static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) {
*saved_cwd_fd = result;
}
- // Set the current directory to dirname by using the fd of the directory.
+ // Set the current directory to dirname by using the fd of the directory, and
+ // handle errors; otherwise shared-memory files would be created in the cwd.
result = fchdir(fd);
-
- return dirp;
+ if (result == OS_ERR) {
+ if (PrintMiscellaneous && Verbose) {
+ warning("could not change to directory %s", dirname);
+ }
+ if (*saved_cwd_fd != -1) {
+ ::close(*saved_cwd_fd);
+ *saved_cwd_fd = -1;
+ }
+ // Close the directory.
+ os::closedir(dirp);
+ return NULL;
+ } else {
+ return dirp;
+ }
}
// Close the directory and restore the current working directory.
+//
static void close_directory_secure_cwd(DIR* dirp, int saved_cwd_fd) {
int result;
diff --git a/src/os/bsd/dtrace/hotspot.d b/src/os/bsd/dtrace/hotspot.d
index 0e4802b51..996e1d782 100644
--- a/src/os/bsd/dtrace/hotspot.d
+++ b/src/os/bsd/dtrace/hotspot.d
@@ -47,8 +47,8 @@ provider hotspot {
probe mem__pool__gc__end(
char*, uintptr_t, char*, uintptr_t,
uintptr_t, uintptr_t, uintptr_t, uintptr_t);
- probe thread__probe__start(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
- probe thread__probe__stop(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+ probe thread__start(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+ probe thread__stop(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
probe thread__sleep__begin(long long);
probe thread__sleep__end(int);
probe thread__yield();
@@ -68,7 +68,7 @@ provider hotspot {
probe monitor__contended__entered(uintptr_t, uintptr_t, char*, uintptr_t);
probe monitor__contended__exit(uintptr_t, uintptr_t, char*, uintptr_t);
probe monitor__wait(uintptr_t, uintptr_t, char*, uintptr_t, uintptr_t);
- probe monitor__probe__waited(uintptr_t, uintptr_t, char*, uintptr_t);
+ probe monitor__waited(uintptr_t, uintptr_t, char*, uintptr_t);
probe monitor__notify(uintptr_t, uintptr_t, char*, uintptr_t);
probe monitor__notifyAll(uintptr_t, uintptr_t, char*, uintptr_t);
diff --git a/src/os/bsd/vm/jsig.c b/src/os/bsd/vm/jsig.c
index a8d98a084..a3ea0fdd4 100644
--- a/src/os/bsd/vm/jsig.c
+++ b/src/os/bsd/vm/jsig.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
+#include <string.h>
#define MAXSIGNUM 32
#define MASK(sig) ((unsigned int)1 << sig)
@@ -43,6 +44,9 @@
static struct sigaction sact[MAXSIGNUM]; /* saved signal handlers */
static unsigned int jvmsigs = 0; /* signals used by jvm */
+static pthread_key_t reentry_flag_key;
+static pthread_once_t reentry_key_init_once = PTHREAD_ONCE_INIT;
+
/* used to synchronize the installation of signal handlers */
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
@@ -59,6 +63,15 @@ static sigaction_t os_sigaction = 0; /* os's version of sigaction() */
static bool jvm_signal_installing = false;
static bool jvm_signal_installed = false;
+#define check_status(cmd) \
+ do { \
+ int status = (cmd); \
+ if (status != 0) { \
+ printf("error %s (%d) in " #cmd "\n", strerror(status), status); \
+ exit(1); \
+ } \
+ } while (0)
+
static void signal_lock() {
pthread_mutex_lock(&mutex);
/* When the jvm is installing its set of signal handlers, threads
@@ -74,8 +87,15 @@ static void signal_unlock() {
pthread_mutex_unlock(&mutex);
}
+static void reentry_tls_init() {
+ // value for reentry_flag_key will default to NULL (false)
+ check_status(pthread_key_create(&reentry_flag_key, NULL));
+}
+
static sa_handler_t call_os_signal(int sig, sa_handler_t disp,
bool is_sigset) {
+ sa_handler_t res;
+
if (os_signal == NULL) {
if (!is_sigset) {
os_signal = (signal_t)dlsym(RTLD_NEXT, "signal");
@@ -87,7 +107,12 @@ static sa_handler_t call_os_signal(int sig, sa_handler_t disp,
exit(0);
}
}
- return (*os_signal)(sig, disp);
+ check_status(pthread_once(&reentry_key_init_once, reentry_tls_init));
+ // set reentry_flag_key to non-NULL to mark reentry
+ check_status(pthread_setspecific(reentry_flag_key, &res));
+ res = (*os_signal)(sig, disp);
+ check_status(pthread_setspecific(reentry_flag_key, NULL));
+ return res;
}
static void save_signal_handler(int sig, sa_handler_t disp) {
@@ -161,6 +186,11 @@ int sigaction(int sig, const struct sigaction *act, struct sigaction *oact) {
bool sigused;
struct sigaction oldAct;
+ check_status(pthread_once(&reentry_key_init_once, reentry_tls_init));
+ if (pthread_getspecific(reentry_flag_key) != NULL) {
+ return call_os_sigaction(sig, act, oact);
+ }
+
signal_lock();
sigused = (MASK(sig) & jvmsigs) != 0;
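The TLS flag exists because BSD's signal() can be implemented on top of sigaction(): while the interposer is forwarding a signal() call, the nested sigaction() must go straight to the OS rather than be intercepted again. A self-contained sketch of the guard pattern (names hypothetical):

    #include <pthread.h>
    #include <stddef.h>

    static pthread_key_t flag_key;
    static pthread_once_t flag_once = PTHREAD_ONCE_INIT;
    static void flag_init() { pthread_key_create(&flag_key, NULL); }

    static int interposed_call() {
      pthread_once(&flag_once, flag_init);
      if (pthread_getspecific(flag_key) != NULL) {
        return 1;                               // reentered: take the raw OS path
      }
      pthread_setspecific(flag_key, &flag_key); // any non-NULL value marks reentry
      /* ... forward to the real libc entry point ... */
      pthread_setspecific(flag_key, NULL);      // clear the mark on the way out
      return 0;
    }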
diff --git a/src/os/bsd/vm/os_bsd.cpp b/src/os/bsd/vm/os_bsd.cpp
index e1c53c5b4..58b58967c 100644
--- a/src/os/bsd/vm/os_bsd.cpp
+++ b/src/os/bsd/vm/os_bsd.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1197,12 +1197,18 @@ pid_t os::Bsd::gettid() {
guarantee(retval != 0, "just checking");
return retval;
-#elif __FreeBSD__
+#else
+ #ifdef __FreeBSD__
retval = syscall(SYS_thr_self);
-#elif __OpenBSD__
+ #else
+ #ifdef __OpenBSD__
retval = syscall(SYS_getthrid);
-#elif __NetBSD__
+ #else
+ #ifdef __NetBSD__
retval = (pid_t) syscall(SYS__lwp_self);
+ #endif
+ #endif
+ #endif
#endif
if (retval == -1) {
@@ -3545,6 +3551,11 @@ void os::Bsd::check_signal_handler(int sig) {
tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
// No need to check this sig any longer
sigaddset(&check_signal_done, sig);
+ // When running under a non-interactive shell, SHUTDOWN2_SIGNAL is reassigned to SIG_IGN
+ if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
+ tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
+ exception_name(sig, buf, O_BUFLEN));
+ }
} else if(os::Bsd::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Bsd::get_our_sigflags(sig)) {
tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
tty->print("expected:" PTR32_FORMAT, os::Bsd::get_our_sigflags(sig));
@@ -3745,9 +3756,6 @@ jint os::init_2(void)
return JNI_OK;
}
-// this is called at the end of vm_initialization
-void os::init_3(void) { }
-
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if( !guard_memory((char*)_polling_page, Bsd::page_size()) )
diff --git a/src/os/bsd/vm/perfMemory_bsd.cpp b/src/os/bsd/vm/perfMemory_bsd.cpp
index 5ee992ec0..f25c39d9e 100644
--- a/src/os/bsd/vm/perfMemory_bsd.cpp
+++ b/src/os/bsd/vm/perfMemory_bsd.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -217,9 +217,9 @@ static bool is_statbuf_secure(struct stat *statp) {
//
return false;
}
- // See if the uid of the directory matches the effective uid of the process.
- //
- if (statp->st_uid != geteuid()) {
+ // If the user is not root, see whether the uid of the directory matches the effective uid of the process.
+ uid_t euid = geteuid();
+ if ((euid != 0) && (statp->st_uid != euid)) {
// The directory was not created by this user, declare it insecure.
//
return false;
@@ -375,10 +375,23 @@ static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) {
*saved_cwd_fd = result;
}
- // Set the current directory to dirname by using the fd of the directory.
+ // Set the current directory to dirname by using the fd of the directory, and
+ // handle errors; otherwise shared-memory files would be created in the cwd.
result = fchdir(fd);
-
- return dirp;
+ if (result == OS_ERR) {
+ if (PrintMiscellaneous && Verbose) {
+ warning("could not change to directory %s", dirname);
+ }
+ if (*saved_cwd_fd != -1) {
+ ::close(*saved_cwd_fd);
+ *saved_cwd_fd = -1;
+ }
+ // Close the directory.
+ os::closedir(dirp);
+ return NULL;
+ } else {
+ return dirp;
+ }
}
// Close the directory and restore the current working directory.
diff --git a/src/os/linux/vm/os_linux.cpp b/src/os/linux/vm/os_linux.cpp
index 3a24939a1..4f7c38ab3 100644
--- a/src/os/linux/vm/os_linux.cpp
+++ b/src/os/linux/vm/os_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -160,35 +160,6 @@ static pthread_mutex_t dl_mutex;
// Declarations
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
-#ifdef JAVASE_EMBEDDED
-class MemNotifyThread: public Thread {
- friend class VMStructs;
- public:
- virtual void run();
-
- private:
- static MemNotifyThread* _memnotify_thread;
- int _fd;
-
- public:
-
- // Constructor
- MemNotifyThread(int fd);
-
- // Tester
- bool is_memnotify_thread() const { return true; }
-
- // Printing
- char* name() const { return (char*)"Linux MemNotify Thread"; }
-
- // Returns the single instance of the MemNotifyThread
- static MemNotifyThread* memnotify_thread() { return _memnotify_thread; }
-
- // Create and start the single instance of MemNotifyThread
- static void start();
-};
-#endif // JAVASE_EMBEDDED
-
// utility functions
static int SR_initialize();
@@ -238,44 +209,27 @@ bool os::have_special_privileges() {
#ifndef SYS_gettid
// i386: 224, ia64: 1105, amd64: 186, sparc 143
-#ifdef __ia64__
-#define SYS_gettid 1105
-#elif __i386__
-#define SYS_gettid 224
-#elif __amd64__
-#define SYS_gettid 186
-#elif __sparc__
-#define SYS_gettid 143
-#else
-#error define gettid for the arch
-#endif
+ #ifdef __ia64__
+ #define SYS_gettid 1105
+ #else
+ #ifdef __i386__
+ #define SYS_gettid 224
+ #else
+ #ifdef __amd64__
+ #define SYS_gettid 186
+ #else
+ #ifdef __sparc__
+ #define SYS_gettid 143
+ #else
+ #error define gettid for the arch
+ #endif
+ #endif
+ #endif
+ #endif
#endif
// Cpu architecture string
-#if defined(ZERO)
-static char cpu_arch[] = ZERO_LIBARCH;
-#elif defined(IA64)
-static char cpu_arch[] = "ia64";
-#elif defined(IA32)
-static char cpu_arch[] = "i386";
-#elif defined(AMD64)
-static char cpu_arch[] = "amd64";
-#elif defined(ARM)
-static char cpu_arch[] = "arm";
-#elif defined(PPC32)
-static char cpu_arch[] = "ppc";
-#elif defined(PPC64)
-static char cpu_arch[] = "ppc64";
-#elif defined(SPARC)
-# ifdef _LP64
-static char cpu_arch[] = "sparcv9";
-# else
-static char cpu_arch[] = "sparc";
-# endif
-#else
-#error Add appropriate cpu_arch setting
-#endif
-
+static char cpu_arch[] = HOTSPOT_LIB_ARCH;
// pid_t gettid()
//
@@ -4681,6 +4635,11 @@ void os::Linux::check_signal_handler(int sig) {
tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
// No need to check this sig any longer
sigaddset(&check_signal_done, sig);
+ // When running under a non-interactive shell, SHUTDOWN2_SIGNAL is reassigned to SIG_IGN
+ if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
+ tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
+ exception_name(sig, buf, O_BUFLEN));
+ }
} else if(os::Linux::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Linux::get_our_sigflags(sig)) {
tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
tty->print("expected:" PTR32_FORMAT, os::Linux::get_our_sigflags(sig));
@@ -4936,17 +4895,6 @@ jint os::init_2(void)
return JNI_OK;
}
-// this is called at the end of vm_initialization
-void os::init_3(void) {
-#ifdef JAVASE_EMBEDDED
- // Start the MemNotifyThread
- if (LowMemoryProtection) {
- MemNotifyThread::start();
- }
- return;
-#endif
-}
-
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if( !guard_memory((char*)_polling_page, Linux::page_size()) )
@@ -5974,9 +5922,11 @@ void Parker::unpark() {
status = pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant");
} else {
+ // must capture the correct index before unlocking
+ int index = _cur_index;
status = pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant");
- status = pthread_cond_signal (&_cond[_cur_index]);
+ status = pthread_cond_signal (&_cond[index]);
assert (status == 0, "invariant");
}
} else {
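The unpark change is a snapshot-under-lock fix: once the mutex is released, the parked thread may rewrite _cur_index, so the value must be captured first. In miniature:

    #include <pthread.h>
    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  conds[2] = {PTHREAD_COND_INITIALIZER,
                                       PTHREAD_COND_INITIALIZER};
    static int cur_index = 0;              // written by the parked thread under m

    void signal_parker() {
      pthread_mutex_lock(&m);
      int index = cur_index;               // capture while still holding the lock
      pthread_mutex_unlock(&m);
      pthread_cond_signal(&conds[index]);  // the snapshot cannot change under us
    }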
@@ -5992,14 +5942,6 @@ void Parker::unpark() {
extern char** environ;
-#ifndef __NR_fork
-#define __NR_fork IA32_ONLY(2) IA64_ONLY(not defined) AMD64_ONLY(57)
-#endif
-
-#ifndef __NR_execve
-#define __NR_execve IA32_ONLY(11) IA64_ONLY(1033) AMD64_ONLY(59)
-#endif
-
// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
@@ -6007,13 +5949,7 @@ extern char** environ;
int os::fork_and_exec(char* cmd) {
const char * argv[4] = {"sh", "-c", cmd, NULL};
- // fork() in LinuxThreads/NPTL is not async-safe. It needs to run
- // pthread_atfork handlers and reset pthread library. All we need is a
- // separate process to execve. Make a direct syscall to fork process.
- // On IA64 there's no fork syscall, we have to use fork() and hope for
- // the best...
- pid_t pid = NOT_IA64(syscall(__NR_fork);)
- IA64_ONLY(fork();)
+ pid_t pid = fork();
if (pid < 0) {
// fork failed
@@ -6022,15 +5958,7 @@ int os::fork_and_exec(char* cmd) {
} else if (pid == 0) {
// child process
- // execve() in LinuxThreads will call pthread_kill_other_threads_np()
- // first to kill every thread on the thread list. Because this list is
- // not reset by fork() (see notes above), execve() will instead kill
- // every thread in the parent process. We know this is the only thread
- // in the new process, so make a system call directly.
- // IA64 should use normal execve() from glibc to match the glibc fork()
- // above.
- NOT_IA64(syscall(__NR_execve, "/bin/sh", argv, environ);)
- IA64_ONLY(execve("/bin/sh", (char* const*)argv, environ);)
+ execve("/bin/sh", (char* const*)argv, environ);
// execve failed
_exit(-1);
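With LinuxThreads gone, plain fork()/execve() are adequate here even from a signal handler. The full pattern, sketched together with the parent-side reaping that the hunk's unchanged context does not show (so the waitpid() half is an assumption):

    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>
    extern char** environ;

    int run_cmd(const char* cmd) {
      const char* argv[4] = {"sh", "-c", cmd, NULL};
      pid_t pid = fork();
      if (pid < 0) return -1;                      // fork failed
      if (pid == 0) {                              // child: replace the image
        execve("/bin/sh", (char* const*)argv, environ);
        _exit(-1);                                 // only reached if execve failed
      }
      int status = 0;
      if (waitpid(pid, &status, 0) < 0) return -1; // parent: reap the child
      return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
    }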
@@ -6123,83 +6051,6 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
return strlen(buffer);
}
-#ifdef JAVASE_EMBEDDED
-//
-// A thread to watch the '/dev/mem_notify' device, which will tell us when the OS is running low on memory.
-//
-MemNotifyThread* MemNotifyThread::_memnotify_thread = NULL;
-
-// ctor
-//
-MemNotifyThread::MemNotifyThread(int fd): Thread() {
- assert(memnotify_thread() == NULL, "we can only allocate one MemNotifyThread");
- _fd = fd;
-
- if (os::create_thread(this, os::os_thread)) {
- _memnotify_thread = this;
- os::set_priority(this, NearMaxPriority);
- os::start_thread(this);
- }
-}
-
-// Where all the work gets done
-//
-void MemNotifyThread::run() {
- assert(this == memnotify_thread(), "expected the singleton MemNotifyThread");
-
- // Set up the select arguments
- fd_set rfds;
- if (_fd != -1) {
- FD_ZERO(&rfds);
- FD_SET(_fd, &rfds);
- }
-
- // Now wait for the mem_notify device to wake up
- while (1) {
- // Wait for the mem_notify device to signal us..
- int rc = select(_fd+1, _fd != -1 ? &rfds : NULL, NULL, NULL, NULL);
- if (rc == -1) {
- perror("select!\n");
- break;
- } else if (rc) {
- //ssize_t free_before = os::available_memory();
- //tty->print ("Notified: Free: %dK \n",os::available_memory()/1024);
-
- // The kernel is telling us there is not much memory left...
- // try to do something about that
-
- // If we are not already in a GC, try one.
- if (!Universe::heap()->is_gc_active()) {
- Universe::heap()->collect(GCCause::_allocation_failure);
-
- //ssize_t free_after = os::available_memory();
- //tty->print ("Post-Notify: Free: %dK\n",free_after/1024);
- //tty->print ("GC freed: %dK\n", (free_after - free_before)/1024);
- }
- // We might want to do something like the following if we find the GC's are not helping...
- // Universe::heap()->size_policy()->set_gc_time_limit_exceeded(true);
- }
- }
-}
-
-//
-// See if the /dev/mem_notify device exists, and if so, start a thread to monitor it.
-//
-void MemNotifyThread::start() {
- int fd;
- fd = open ("/dev/mem_notify", O_RDONLY, 0);
- if (fd < 0) {
- return;
- }
-
- if (memnotify_thread() == NULL) {
- new MemNotifyThread(fd);
- }
-}
-
-#endif // JAVASE_EMBEDDED
-
-
/////////////// Unit tests ///////////////
#ifndef PRODUCT
diff --git a/src/os/linux/vm/perfMemory_linux.cpp b/src/os/linux/vm/perfMemory_linux.cpp
index 9beb1113e..8510abbac 100644
--- a/src/os/linux/vm/perfMemory_linux.cpp
+++ b/src/os/linux/vm/perfMemory_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -217,9 +217,9 @@ static bool is_statbuf_secure(struct stat *statp) {
//
return false;
}
- // See if the uid of the directory matches the effective uid of the process.
- //
- if (statp->st_uid != geteuid()) {
+ // If the user is not root, see whether the uid of the directory matches the effective uid of the process.
+ uid_t euid = geteuid();
+ if ((euid != 0) && (statp->st_uid != euid)) {
// The directory was not created by this user, declare it insecure.
//
return false;
@@ -374,10 +374,23 @@ static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) {
*saved_cwd_fd = result;
}
- // Set the current directory to dirname by using the fd of the directory.
+ // Set the current directory to dirname by using the fd of the directory, and
+ // handle errors; otherwise shared-memory files would be created in the cwd.
result = fchdir(fd);
-
- return dirp;
+ if (result == OS_ERR) {
+ if (PrintMiscellaneous && Verbose) {
+ warning("could not change to directory %s", dirname);
+ }
+ if (*saved_cwd_fd != -1) {
+ ::close(*saved_cwd_fd);
+ *saved_cwd_fd = -1;
+ }
+ // Close the directory.
+ os::closedir(dirp);
+ return NULL;
+ } else {
+ return dirp;
+ }
}
// Close the directory and restore the current working directory.
diff --git a/src/os/solaris/vm/jvm_solaris.h b/src/os/solaris/vm/jvm_solaris.h
index 57b32ac16..f1fa075bc 100644
--- a/src/os/solaris/vm/jvm_solaris.h
+++ b/src/os/solaris/vm/jvm_solaris.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,9 @@
* JNI conversion, which should be sorted out later.
*/
+#define __USE_LEGACY_PROTOTYPES__
#include <dirent.h> /* For DIR */
+#undef __USE_LEGACY_PROTOTYPES__
#include <sys/param.h> /* For MAXPATHLEN */
#include <sys/socket.h> /* For socklen_t */
#include <unistd.h> /* For F_OK, R_OK, W_OK */
diff --git a/src/os/solaris/vm/os_solaris.cpp b/src/os/solaris/vm/os_solaris.cpp
index c74dc70c6..2b4e33c66 100644
--- a/src/os/solaris/vm/os_solaris.cpp
+++ b/src/os/solaris/vm/os_solaris.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -178,75 +178,6 @@ extern "C" {
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
-// Thread Local Storage
-// This is common to all Solaris platforms so it is defined here,
-// in this common file.
-// The declarations are in the os_cpu threadLS*.hpp files.
-//
-// Static member initialization for TLS
-Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
-
-#ifndef PRODUCT
-#define _PCT(n,d) ((100.0*(double)(n))/(double)(d))
-
-int ThreadLocalStorage::_tcacheHit = 0;
-int ThreadLocalStorage::_tcacheMiss = 0;
-
-void ThreadLocalStorage::print_statistics() {
- int total = _tcacheMiss+_tcacheHit;
- tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
- _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
-}
-#undef _PCT
-#endif // PRODUCT
-
-Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
- int index) {
- Thread *thread = get_thread_slow();
- if (thread != NULL) {
- address sp = os::current_stack_pointer();
- guarantee(thread->_stack_base == NULL ||
- (sp <= thread->_stack_base &&
- sp >= thread->_stack_base - thread->_stack_size) ||
- is_error_reported(),
- "sp must be inside of selected thread stack");
-
- thread->set_self_raw_id(raw_id); // mark for quick retrieval
- _get_thread_cache[ index ] = thread;
- }
- return thread;
-}
-
-
-static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
-#define NO_CACHED_THREAD ((Thread*)all_zero)
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-
- // Store the new value before updating the cache to prevent a race
- // between get_thread_via_cache_slowly() and this store operation.
- os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-
- // Update thread cache with new thread if setting on thread create,
- // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
- uintptr_t raw = pd_raw_thread_id();
- int ix = pd_cache_index(raw);
- _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
-}
-
-void ThreadLocalStorage::pd_init() {
- for (int i = 0; i < _pd_cache_size; i++) {
- _get_thread_cache[i] = NO_CACHED_THREAD;
- }
-}
-
-// Invalidate all the caches (happens to be the same as pd_init).
-void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
-
-#undef NO_CACHED_THREAD
-
-// END Thread Local Storage
-
static inline size_t adjust_stack_size(address base, size_t size) {
if ((ssize_t)size < 0) {
// 4759953: Compensate for ridiculous stack size.
@@ -1473,64 +1404,6 @@ int os::current_process_id() {
return (int)(_initial_pid ? _initial_pid : getpid());
}
-int os::allocate_thread_local_storage() {
- // %%% in Win32 this allocates a memory segment pointed to by a
- // register. Dan Stein can implement a similar feature in
- // Solaris. Alternatively, the VM can do the same thing
- // explicitly: malloc some storage and keep the pointer in a
- // register (which is part of the thread's context) (or keep it
- // in TLS).
- // %%% In current versions of Solaris, thr_self and TSD can
- // be accessed via short sequences of displaced indirections.
- // The value of thr_self is available as %g7(36).
- // The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
- // assuming that the current thread already has a value bound to k.
- // It may be worth experimenting with such access patterns,
- // and later having the parameters formally exported from a Solaris
- // interface. I think, however, that it will be faster to
- // maintain the invariant that %g2 always contains the
- // JavaThread in Java code, and have stubs simply
- // treat %g2 as a caller-save register, preserving it in a %lN.
- thread_key_t tk;
- if (thr_keycreate( &tk, NULL ) )
- fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
- "(%s)", strerror(errno)));
- return int(tk);
-}
-
-void os::free_thread_local_storage(int index) {
- // %%% don't think we need anything here
- // if ( pthread_key_delete((pthread_key_t) tk) )
- // fatal("os::free_thread_local_storage: pthread_key_delete failed");
-}
-
-#define SMALLINT 32 // libthread allocate for tsd_common is a version specific
- // small number - point is NO swap space available
-void os::thread_local_storage_at_put(int index, void* value) {
- // %%% this is used only in threadLocalStorage.cpp
- if (thr_setspecific((thread_key_t)index, value)) {
- if (errno == ENOMEM) {
- vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
- "thr_setspecific: out of swap space");
- } else {
- fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
- "(%s)", strerror(errno)));
- }
- } else {
- ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
- }
-}
-
-// This function could be called before TLS is initialized, for example, when
-// VM receives an async signal or when VM causes a fatal error during
-// initialization. Return NULL if thr_getspecific() fails.
-void* os::thread_local_storage_at(int index) {
- // %%% this is used only in threadLocalStorage.cpp
- void* r = NULL;
- return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
-}
-
-
// gethrtime() should be monotonic according to the documentation,
// but some virtualized platforms are known to break this guarantee.
// getTimeNanos() must be guaranteed not to move backwards, so we
@@ -2168,7 +2041,9 @@ void os::print_memory_info(outputStream* st) {
st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
st->cr();
- (void) check_addr0(st);
+ if (VMError::fatal_error_in_progress()) {
+ (void) check_addr0(st);
+ }
}
void os::print_siginfo(outputStream* st, void* siginfo) {
@@ -2696,29 +2571,30 @@ void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
}
}
+size_t os::Solaris::page_size_for_alignment(size_t alignment) {
+ assert(is_size_aligned(alignment, (size_t) vm_page_size()),
+ err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
+ alignment, (size_t) vm_page_size()));
+
+ for (int i = 0; _page_sizes[i] != 0; i++) {
+ if (is_size_aligned(alignment, _page_sizes[i])) {
+ return _page_sizes[i];
+ }
+ }
+
+ return (size_t) vm_page_size();
+}
+
int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
size_t alignment_hint, bool exec) {
int err = Solaris::commit_memory_impl(addr, bytes, exec);
- if (err == 0) {
- if (UseLargePages && (alignment_hint > (size_t)vm_page_size())) {
- // If the large page size has been set and the VM
- // is using large pages, use the large page size
- // if it is smaller than the alignment hint. This is
- // a case where the VM wants to use a larger alignment size
- // for its own reasons but still want to use large pages
- // (which is what matters to setting the mpss range.
- size_t page_size = 0;
- if (large_page_size() < alignment_hint) {
- assert(UseLargePages, "Expected to be here for large page use only");
- page_size = large_page_size();
- } else {
- // If the alignment hint is less than the large page
- // size, the VM wants a particular alignment (thus the hint)
- // for internal reasons. Try to set the mpss range using
- // the alignment_hint.
- page_size = alignment_hint;
- }
- // Since this is a hint, ignore any failures.
+ if (err == 0 && UseLargePages && alignment_hint > 0) {
+ assert(is_size_aligned(bytes, alignment_hint),
+ err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));
+
+ // The syscall memcntl requires an exact page size (see man memcntl for details).
+ size_t page_size = page_size_for_alignment(alignment_hint);
+ if (page_size > (size_t) vm_page_size()) {
(void)Solaris::setup_large_pages(addr, bytes, page_size);
}
}
@@ -3251,7 +3127,22 @@ void os::large_page_init() {
}
}
+bool os::Solaris::is_valid_page_size(size_t bytes) {
+ for (int i = 0; _page_sizes[i] != 0; i++) {
+ if (_page_sizes[i] == bytes) {
+ return true;
+ }
+ }
+ return false;
+}
+
bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
+ assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
+ assert(is_ptr_aligned((void*) start, align),
+ err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
+ assert(is_size_aligned(bytes, align),
+ err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));
+
// Signal to OS that we want large pages for addresses
// from addr, addr + bytes
struct memcntl_mha mpss_struct;
@@ -4577,6 +4468,11 @@ void os::Solaris::check_signal_handler(int sig) {
tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
// No need to check this sig any longer
sigaddset(&check_signal_done, sig);
+ // When running under a non-interactive shell, SHUTDOWN2_SIGNAL is reassigned to SIG_IGN
+ if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
+ tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
+ exception_name(sig, buf, O_BUFLEN));
+ }
} else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
@@ -5194,10 +5090,6 @@ jint os::init_2(void) {
return JNI_OK;
}
-void os::init_3(void) {
- return;
-}
-
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
diff --git a/src/os/solaris/vm/os_solaris.hpp b/src/os/solaris/vm/os_solaris.hpp
index 048534691..a22f0e9fe 100644
--- a/src/os/solaris/vm/os_solaris.hpp
+++ b/src/os/solaris/vm/os_solaris.hpp
@@ -110,6 +110,8 @@ class Solaris {
static meminfo_func_t _meminfo;
// Large Page Support
+ static bool is_valid_page_size(size_t bytes);
+ static size_t page_size_for_alignment(size_t alignment);
static bool setup_large_pages(caddr_t start, size_t bytes, size_t align);
static void init_thread_fpu_state(void);
diff --git a/src/os/solaris/vm/perfMemory_solaris.cpp b/src/os/solaris/vm/perfMemory_solaris.cpp
index 996b6d889..a90623e74 100644
--- a/src/os/solaris/vm/perfMemory_solaris.cpp
+++ b/src/os/solaris/vm/perfMemory_solaris.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -219,9 +219,9 @@ static bool is_statbuf_secure(struct stat *statp) {
//
return false;
}
- // See if the uid of the directory matches the effective uid of the process.
- //
- if (statp->st_uid != geteuid()) {
+ // If the user is not root, see whether the uid of the directory matches the effective uid of the process.
+ uid_t euid = geteuid();
+ if ((euid != 0) && (statp->st_uid != euid)) {
// The directory was not created by this user, declare it insecure.
//
return false;
@@ -377,10 +377,23 @@ static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) {
*saved_cwd_fd = result;
}
- // Set the current directory to dirname by using the fd of the directory.
+ // Set the current directory to dirname by using the fd of the directory, and
+ // handle errors; otherwise shared-memory files would be created in the cwd.
result = fchdir(fd);
-
- return dirp;
+ if (result == OS_ERR) {
+ if (PrintMiscellaneous && Verbose) {
+ warning("could not change to directory %s", dirname);
+ }
+ if (*saved_cwd_fd != -1) {
+ ::close(*saved_cwd_fd);
+ *saved_cwd_fd = -1;
+ }
+ // Close the directory.
+ os::closedir(dirp);
+ return NULL;
+ } else {
+ return dirp;
+ }
}
// Close the directory and restore the current working directory.
diff --git a/src/os/solaris/vm/thread_solaris.inline.hpp b/src/os/solaris/vm/thread_solaris.inline.hpp
index d7d6d378f..48c85aaf7 100644
--- a/src/os/solaris/vm/thread_solaris.inline.hpp
+++ b/src/os/solaris/vm/thread_solaris.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,19 +40,12 @@
// For SPARC, to avoid excessive register window spill-fill faults,
// we aggressively inline these routines.
-inline Thread* ThreadLocalStorage::thread() {
- // don't use specialized code if +UseMallocOnly -- may confuse Purify et al.
- debug_only(if (UseMallocOnly) return get_thread_slow(););
+inline void ThreadLocalStorage::set_thread(Thread* thread) {
+ _thr_current = thread;
+}
- uintptr_t raw = pd_raw_thread_id();
- int ix = pd_cache_index(raw);
- Thread* candidate = ThreadLocalStorage::_get_thread_cache[ix];
- if (candidate->self_raw_id() == raw) {
- // hit
- return candidate;
- } else {
- return ThreadLocalStorage::get_thread_via_cache_slowly(raw, ix);
- }
+inline Thread* ThreadLocalStorage::thread() {
+ return _thr_current;
}
#endif // OS_SOLARIS_VM_THREAD_SOLARIS_INLINE_HPP
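The removed per-thread cache (see the os_solaris.cpp hunk above) is superseded by a compiler-level thread-local. Conceptually the whole mechanism now reduces to this sketch (the actual field lives in ThreadLocalStorage; the definition is assumed to sit in one translation unit):

    class Thread;                          // opaque stand-in
    extern __thread Thread* _thr_current;  // one slot per thread, no cache needed
    inline void set_current_thread(Thread* t) { _thr_current = t; }
    inline Thread* current_thread()           { return _thr_current; }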
diff --git a/src/os/windows/vm/os_windows.cpp b/src/os/windows/vm/os_windows.cpp
index c2839a187..e329c9fd6 100644
--- a/src/os/windows/vm/os_windows.cpp
+++ b/src/os/windows/vm/os_windows.cpp
@@ -110,11 +110,13 @@ static FILETIME process_user_time;
static FILETIME process_kernel_time;
#ifdef _M_IA64
-#define __CPU__ ia64
-#elif _M_AMD64
-#define __CPU__ amd64
+ #define __CPU__ ia64
#else
-#define __CPU__ i486
+ #ifdef _M_AMD64
+ #define __CPU__ amd64
+ #else
+ #define __CPU__ i486
+ #endif
#endif
// save DLL module handle, used by GetModuleFileName
@@ -2138,20 +2140,22 @@ LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler
// at the beginning of the target bundle.
exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
-#elif _M_AMD64
+#else
+ #ifdef _M_AMD64
// Do not blow up if no thread info available.
if (thread) {
thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
}
// Set pc to handler
exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
-#else
+ #else
// Do not blow up if no thread info available.
if (thread) {
thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
}
// Set pc to handler
exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
+ #endif
#endif
// Continue the execution
@@ -2250,7 +2254,8 @@ LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
// (division by zero is handled explicitly)
#ifdef _M_IA64
assert(0, "Fix Handle_IDiv_Exception");
-#elif _M_AMD64
+#else
+ #ifdef _M_AMD64
PCONTEXT ctx = exceptionInfo->ContextRecord;
address pc = (address)ctx->Rip;
assert(pc[0] == 0xF7, "not an idiv opcode");
@@ -2261,7 +2266,7 @@ LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
ctx->Rax = (DWORD)min_jint; // result
ctx->Rdx = (DWORD)0; // remainder
// Continue the execution
-#else
+ #else
PCONTEXT ctx = exceptionInfo->ContextRecord;
address pc = (address)ctx->Eip;
assert(pc[0] == 0xF7, "not an idiv opcode");
@@ -2272,6 +2277,7 @@ LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
ctx->Eax = (DWORD)min_jint; // result
ctx->Edx = (DWORD)0; // remainder
// Continue the execution
+ #endif
#endif
return EXCEPTION_CONTINUE_EXECUTION;
}
@@ -2351,10 +2357,12 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
// information is saved in the Unix format.
address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
-#elif _M_AMD64
- address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
+ #ifdef _M_AMD64
+ address pc = (address) exceptionInfo->ContextRecord->Rip;
+ #else
address pc = (address) exceptionInfo->ContextRecord->Eip;
+ #endif
#endif
Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady
@@ -4062,10 +4070,6 @@ jint os::init_2(void) {
return JNI_OK;
}
-void os::init_3(void) {
- return;
-}
-
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
DWORD old_status;
diff --git a/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp b/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp
index 4ed293446..6bc5f1e10 100644
--- a/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp
+++ b/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp
@@ -238,7 +238,13 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
// operation. Note that some platforms only support this with the
// limitation that the only valid value to store is the immediate
// constant 1. There is a test for this in JNI_CreateJavaVM().
- return __sync_lock_test_and_set (dest, exchange_value);
+ jint result = __sync_lock_test_and_set (dest, exchange_value);
+ // All atomic operations are expected to be full memory barriers
+ // (see atomic.hpp). However, __sync_lock_test_and_set is not
+ // a full memory barrier, but an acquire barrier. Hence, this added
+ // barrier.
+ __sync_synchronize();
+ return result;
#endif // M68K
#endif // ARM
}
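GCC documents __sync_lock_test_and_set as an acquire barrier only, so the added __sync_synchronize() is what upgrades the exchange to the full fence that Atomic promises. The minimal shape of the fix:

    // Exchange with full-fence semantics on top of GCC's acquire-only builtin.
    static inline int full_fence_xchg(volatile int* dest, int value) {
      int old = __sync_lock_test_and_set(dest, value); // acquire barrier only
      __sync_synchronize();                            // full memory barrier
      return old;
    }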
@@ -251,7 +257,9 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
#ifdef M68K
return m68k_lock_test_and_set(dest, exchange_value);
#else
- return __sync_lock_test_and_set (dest, exchange_value);
+ intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
+ __sync_synchronize();
+ return result;
#endif // M68K
#endif // ARM
}
diff --git a/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp b/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp
index f2195c49b..1192f551d 100644
--- a/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp
+++ b/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -10,7 +10,7 @@
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file hat
+ * version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
@@ -290,6 +290,7 @@ JVM_handle_linux_signal(int sig,
goto report_and_die;
}
+ CodeBlob *cb = NULL;
// Handle signal from NativeJump::patch_verified_entry().
if (( TrapBasedNotEntrantChecks && sig == SIGTRAP && nativeInstruction_at(pc)->is_sigtrap_zombie_not_entrant()) ||
(!TrapBasedNotEntrantChecks && sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant())) {
@@ -305,7 +306,10 @@ JVM_handle_linux_signal(int sig,
// especially when we try to read from the safepoint polling page. So the check
// (address)info->si_addr == os::get_standard_polling_page()
// doesn't work for us. We use:
- ((NativeInstruction*)pc)->is_safepoint_poll()) {
+ ((NativeInstruction*)pc)->is_safepoint_poll() &&
+ CodeCache::contains((void*) pc) &&
+ ((cb = CodeCache::find_blob(pc)) != NULL) &&
+ cb->is_nmethod()) {
if (TraceTraps) {
tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc));
}
diff --git a/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp b/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp
index 4af6c5236..0b992632b 100644
--- a/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp
+++ b/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,8 @@
#include "runtime/os.hpp"
#include "vm_version_sparc.hpp"
-static bool detect_niagara() {
- char cpu[128];
+static bool cpuinfo_field_contains(const char* field, const char* value) {
+ char line[1024];
bool rv = false;
FILE* fp = fopen("/proc/cpuinfo", "r");
@@ -35,9 +35,10 @@ static bool detect_niagara() {
return rv;
}
- while (!feof(fp)) {
- if (fscanf(fp, "cpu\t\t: %100[^\n]", &cpu) == 1) {
- if (strstr(cpu, "Niagara") != NULL) {
+ while (fgets(line, sizeof(line), fp) != NULL) {
+ assert(strlen(line) < sizeof(line) - 1, "buffer line[1024] is too small.");
+ if (strncmp(line, field, strlen(field)) == 0) {
+ if (strstr(line, value) != NULL) {
rv = true;
}
break;
@@ -45,10 +46,21 @@ static bool detect_niagara() {
}
fclose(fp);
-
return rv;
}
+static bool detect_niagara() {
+ return cpuinfo_field_contains("cpu", "Niagara");
+}
+
+static bool detect_M_family() {
+ return cpuinfo_field_contains("cpu", "SPARC-M");
+}
+
+static bool detect_blkinit() {
+ return cpuinfo_field_contains("cpucaps", "blkinit");
+}
+
int VM_Version::platform_features(int features) {
// Default to generic v9
features = generic_v9_m;
@@ -58,5 +70,14 @@ int VM_Version::platform_features(int features) {
features = niagara1_m | T_family_m;
}
+ if (detect_M_family()) {
+ NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on M family");)
+ features = sun4v_m | generic_v9_m | M_family_m | T_family_m;
+ }
+
+ if (detect_blkinit()) {
+ features |= blk_init_instructions_m;
+ }
+
return features;
}
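
For context, a standalone sketch with illustrative /proc/cpuinfo lines, not part of the patch: the rewritten helper scans line by line, requires the field name to anchor the start of a line, and then looks for the value anywhere in that line:

  #include <cstdio>
  #include <cstring>

  static bool field_contains(const char* line, const char* field, const char* value) {
    return strncmp(line, field, strlen(field)) == 0 && strstr(line, value) != NULL;
  }

  int main() {
    printf("%d\n", field_contains("cpu\t\t: UltraSparc T5 (Niagara5)", "cpu", "Niagara"));         // 1
    printf("%d\n", field_contains("cpucaps\t: flush,stbar,muldiv,blkinit", "cpucaps", "blkinit")); // 1
    printf("%d\n", field_contains("pmu\t\t: niagara5", "cpu", "Niagara"));                         // 0
    return 0;
  }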
diff --git a/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp b/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp
index 33ce50dc7..b7aca02e0 100644
--- a/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp
+++ b/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008, 2011 Red Hat, Inc.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -232,7 +232,13 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
// operation. Note that some platforms only support this with the
// limitation that the only valid value to store is the immediate
// constant 1. There is a test for this in JNI_CreateJavaVM().
- return __sync_lock_test_and_set (dest, exchange_value);
+ jint result = __sync_lock_test_and_set (dest, exchange_value);
+ // All atomic operations are expected to be full memory barriers
+ // (see atomic.hpp). However, __sync_lock_test_and_set is not
+ // a full memory barrier, but an acquire barrier. Hence, this added
+ // barrier.
+ __sync_synchronize();
+ return result;
#endif // M68K
#endif // ARM
}
@@ -245,7 +251,9 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
#ifdef M68K
return m68k_lock_test_and_set(dest, exchange_value);
#else
- return __sync_lock_test_and_set (dest, exchange_value);
+ intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
+ __sync_synchronize();
+ return result;
#endif // M68K
#endif // ARM
}
diff --git a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp
index f5f44e0a7..1d069c6de 100644
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp
+++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp
@@ -55,8 +55,8 @@
#include "utilities/vmError.hpp"
address os::current_stack_pointer() {
- address dummy = (address) &dummy;
- return dummy;
+  // return the frame address of the current function
+ return (address)__builtin_frame_address(0);
}
frame os::get_sender_for_C_frame(frame* fr) {
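
For context, assuming GCC or Clang, not part of the patch: the removed code returned the address of a local variable, which the optimizer is free to place unpredictably; __builtin_frame_address(0) yields the frame address of the current function, a stable in-frame approximation of the stack pointer:

  #include <cstdio>

  int main() {
    void* fp = __builtin_frame_address(0);  // frame address of main's own frame
    printf("%p\n", fp);
    return 0;
  }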
diff --git a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp
index d8c08fd4a..1785dc374 100644
--- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp
+++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp
@@ -264,7 +264,7 @@ frame os::current_frame() {
CAST_FROM_FN_PTR(address, os::current_frame));
if (os::is_first_C_frame(&myframe)) {
// stack is not walkable
- return frame(NULL, NULL, NULL);
+ return frame(NULL, NULL, false);
} else {
return os::get_sender_for_C_frame(&myframe);
}
diff --git a/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.cpp b/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.cpp
index d07db6a5c..30210a453 100644
--- a/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.cpp
+++ b/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,19 +26,26 @@
#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
-// Provides an entry point we can link against and
-// a buffer we can emit code into. The buffer is
-// filled by ThreadLocalStorage::generate_code_for_get_thread
-// and called from ThreadLocalStorage::thread()
+// True thread-local variable
+__thread Thread * ThreadLocalStorage::_thr_current = NULL;
-#include <sys/systeminfo.h>
+// Implementations needed to support the shared API
-// The portable TLS mechanism (get_thread_via_cache) is enough on SPARC.
-// There is no need for hand-assembling a special function.
-void ThreadLocalStorage::generate_code_for_get_thread() {
+void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
+
+bool ThreadLocalStorage::_initialized = false;
+
+void ThreadLocalStorage::init() {
+ _initialized = true;
}
-void ThreadLocalStorage::set_thread_in_slot (Thread * self) {}
+bool ThreadLocalStorage::is_initialized() {
+ return _initialized;
+}
+
+Thread* ThreadLocalStorage::get_thread_slow() {
+ return thread();
+}
extern "C" Thread* get_thread() {
return ThreadLocalStorage::thread();
diff --git a/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.hpp b/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.hpp
index 98a8fc58c..e3d96c87a 100644
--- a/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.hpp
+++ b/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,47 +25,15 @@
#ifndef OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP
#define OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP
-public:
- // Java Thread - force inlining
- static inline Thread* thread() ;
+// Solaris-specific implementation involves simple, direct use
+// of a compiler-based thread-local variable
private:
- static Thread* _get_thread_cache[]; // index by [(raw_id>>9)^(raw_id>>20) % _pd_cache_size]
- static Thread* get_thread_via_cache_slowly(uintptr_t raw_id, int index);
+ static __thread Thread * _thr_current;
- NOT_PRODUCT(static int _tcacheHit;)
- NOT_PRODUCT(static int _tcacheMiss;)
+ static bool _initialized; // needed for shared API
public:
-
- // Print cache hit/miss statistics
- static void print_statistics() PRODUCT_RETURN;
-
- enum Constants {
- _pd_cache_size = 256*2 // projected typical # of threads * 2
- };
-
- static void set_thread_in_slot (Thread *) ;
-
- static uintptr_t pd_raw_thread_id() {
- return _raw_thread_id();
- }
-
- static int pd_cache_index(uintptr_t raw_id) {
- // Hash function: From email from Dave:
- // The hash function deserves an explanation. %g7 points to libthread's
- // "thread" structure. On T1 the thread structure is allocated on the
- // user's stack (yes, really!) so the ">>20" handles T1 where the JVM's
- // stack size is usually >= 1Mb. The ">>9" is for T2 where Roger allocates
- // globs of thread blocks contiguously. The "9" has to do with the
- // expected size of the T2 thread structure. If these constants are wrong
- // the worst thing that'll happen is that the hit rate for heavily threaded
- // apps won't be as good as it could be. If you want to burn another
- // shift+xor you could mix together _all of the %g7 bits to form the hash,
- // but I think that's excessive. Making the change above changed the
- // T$ miss rate on SpecJBB (on a 16X system) from about 3% to imperceptible.
- uintptr_t ix = (int) (((raw_id >> 9) ^ (raw_id >> 20)) % _pd_cache_size);
- return ix;
- }
+ static inline Thread* thread();
#endif // OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP
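
For context, a standalone sketch with illustrative names, not part of the patch: the rework above trades the hashed thread cache for a compiler-managed thread-local slot, so reading the current Thread* becomes a single TLS load:

  #include <cstdio>

  struct Thread { int id; };

  static __thread Thread* _thr_current = NULL;  // one slot per OS thread

  int main() {
    Thread t = { 7 };
    _thr_current = &t;                 // set when a thread attaches to the VM
    printf("%d\n", _thr_current->id);  // direct TLS read; no cache, no hash
    return 0;
  }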
diff --git a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp
index 56c942096..859a15692 100644
--- a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp
+++ b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp
@@ -34,8 +34,7 @@
#include <dlfcn.h>
#include <link.h>
-extern "C" static int PICL_get_l1_data_cache_line_size_helper(picl_nodehdl_t nodeh, void *result);
-extern "C" static int PICL_get_l2_cache_line_size_helper(picl_nodehdl_t nodeh, void *result);
+extern "C" static int PICL_visit_cpu_helper(picl_nodehdl_t nodeh, void *result);
// Functions from the library we need (signatures should match those in picl.h)
extern "C" {
@@ -128,60 +127,87 @@ class PICL {
bool is_inconsistent() { return _state == INCONSISTENT; }
void set_inconsistent() { _state = INCONSISTENT; }
- static int visit(picl_nodehdl_t nodeh, const char* name, void *arg) {
- UniqueValueVisitor *state = static_cast<UniqueValueVisitor*>(arg);
- PICL* picl = state->_picl;
- assert(!state->is_inconsistent(), "Precondition");
+ bool visit(picl_nodehdl_t nodeh, const char* name) {
+ assert(!is_inconsistent(), "Precondition");
int curr;
- if (picl->get_int_property(nodeh, name, &curr) == PICL_SUCCESS) {
- if (!state->is_assigned()) { // first iteration
- state->set_value(curr);
- } else if (curr != state->value()) { // following iterations
- state->set_inconsistent();
+ if (_picl->get_int_property(nodeh, name, &curr) == PICL_SUCCESS) {
+ if (!is_assigned()) { // first iteration
+ set_value(curr);
+ } else if (curr != value()) { // following iterations
+ set_inconsistent();
}
+ return true;
}
- if (state->is_inconsistent()) {
+ return false;
+ }
+ };
+
+ class CPUVisitor {
+ UniqueValueVisitor _l1_visitor;
+ UniqueValueVisitor _l2_visitor;
+ int _limit; // number of times visit() can be run
+ public:
+ CPUVisitor(PICL *picl, int limit) : _l1_visitor(picl), _l2_visitor(picl), _limit(limit) {}
+ static int visit(picl_nodehdl_t nodeh, void *arg) {
+ CPUVisitor *cpu_visitor = static_cast<CPUVisitor*>(arg);
+ UniqueValueVisitor* l1_visitor = cpu_visitor->l1_visitor();
+ UniqueValueVisitor* l2_visitor = cpu_visitor->l2_visitor();
+ if (!l1_visitor->is_inconsistent()) {
+ l1_visitor->visit(nodeh, "l1-dcache-line-size");
+ }
+ static const char* l2_data_cache_line_property_name = NULL;
+ // On the first visit determine the name of the l2 cache line size property and memoize it.
+ if (l2_data_cache_line_property_name == NULL) {
+ assert(!l2_visitor->is_inconsistent(), "First iteration cannot be inconsistent");
+ l2_data_cache_line_property_name = "l2-cache-line-size";
+ if (!l2_visitor->visit(nodeh, l2_data_cache_line_property_name)) {
+ l2_data_cache_line_property_name = "l2-dcache-line-size";
+ l2_visitor->visit(nodeh, l2_data_cache_line_property_name);
+ }
+ } else {
+ if (!l2_visitor->is_inconsistent()) {
+ l2_visitor->visit(nodeh, l2_data_cache_line_property_name);
+ }
+ }
+
+ if (l1_visitor->is_inconsistent() && l2_visitor->is_inconsistent()) {
+ return PICL_WALK_TERMINATE;
+ }
+ cpu_visitor->_limit--;
+ if (cpu_visitor->_limit <= 0) {
return PICL_WALK_TERMINATE;
}
return PICL_WALK_CONTINUE;
}
+ UniqueValueVisitor* l1_visitor() { return &_l1_visitor; }
+ UniqueValueVisitor* l2_visitor() { return &_l2_visitor; }
};
-
int _L1_data_cache_line_size;
- int _L2_cache_line_size;
+ int _L2_data_cache_line_size;
public:
- static int get_l1_data_cache_line_size(picl_nodehdl_t nodeh, void *state) {
- return UniqueValueVisitor::visit(nodeh, "l1-dcache-line-size", state);
- }
- static int get_l2_cache_line_size(picl_nodehdl_t nodeh, void *state) {
- return UniqueValueVisitor::visit(nodeh, "l2-cache-line-size", state);
+ static int visit_cpu(picl_nodehdl_t nodeh, void *state) {
+ return CPUVisitor::visit(nodeh, state);
}
- PICL() : _L1_data_cache_line_size(0), _L2_cache_line_size(0), _dl_handle(NULL) {
+ PICL(bool is_fujitsu, bool is_sun4v) : _L1_data_cache_line_size(0), _L2_data_cache_line_size(0), _dl_handle(NULL) {
if (!open_library()) {
return;
}
if (_picl_initialize() == PICL_SUCCESS) {
picl_nodehdl_t rooth;
if (_picl_get_root(&rooth) == PICL_SUCCESS) {
- UniqueValueVisitor L1_state(this);
- // Visit all "cpu" class instances
- _picl_walk_tree_by_class(rooth, "cpu", &L1_state, PICL_get_l1_data_cache_line_size_helper);
- if (L1_state.is_initial()) { // Still initial, iteration found no values
- // Try walk all "core" class instances, it might be a Fujitsu machine
- _picl_walk_tree_by_class(rooth, "core", &L1_state, PICL_get_l1_data_cache_line_size_helper);
+ const char* cpu_class = "cpu";
+ // If it's a Fujitsu machine, it's a "core"
+ if (is_fujitsu) {
+ cpu_class = "core";
}
- if (L1_state.is_assigned()) { // Is there a value?
- _L1_data_cache_line_size = L1_state.value();
+ CPUVisitor cpu_visitor(this, (is_sun4v && !is_fujitsu) ? 1 : os::processor_count());
+ _picl_walk_tree_by_class(rooth, cpu_class, &cpu_visitor, PICL_visit_cpu_helper);
+ if (cpu_visitor.l1_visitor()->is_assigned()) { // Is there a value?
+ _L1_data_cache_line_size = cpu_visitor.l1_visitor()->value();
}
-
- UniqueValueVisitor L2_state(this);
- _picl_walk_tree_by_class(rooth, "cpu", &L2_state, PICL_get_l2_cache_line_size_helper);
- if (L2_state.is_initial()) {
- _picl_walk_tree_by_class(rooth, "core", &L2_state, PICL_get_l2_cache_line_size_helper);
- }
- if (L2_state.is_assigned()) {
- _L2_cache_line_size = L2_state.value();
+ if (cpu_visitor.l2_visitor()->is_assigned()) {
+ _L2_data_cache_line_size = cpu_visitor.l2_visitor()->value();
}
}
_picl_shutdown();
@@ -190,14 +216,12 @@ public:
}
unsigned int L1_data_cache_line_size() const { return _L1_data_cache_line_size; }
- unsigned int L2_cache_line_size() const { return _L2_cache_line_size; }
+ unsigned int L2_data_cache_line_size() const { return _L2_data_cache_line_size; }
};
-extern "C" static int PICL_get_l1_data_cache_line_size_helper(picl_nodehdl_t nodeh, void *result) {
- return PICL::get_l1_data_cache_line_size(nodeh, result);
-}
-extern "C" static int PICL_get_l2_cache_line_size_helper(picl_nodehdl_t nodeh, void *result) {
- return PICL::get_l2_cache_line_size(nodeh, result);
+
+extern "C" static int PICL_visit_cpu_helper(picl_nodehdl_t nodeh, void *result) {
+ return PICL::visit_cpu(nodeh, result);
}
template<typename FuncType>
@@ -470,8 +494,8 @@ int VM_Version::platform_features(int features) {
}
// Figure out cache line sizes using PICL
- PICL picl;
- _L2_cache_line_size = picl.L2_cache_line_size();
+ PICL picl((features & sparc64_family_m) != 0, (features & sun4v_m) != 0);
+ _L2_data_cache_line_size = picl.L2_data_cache_line_size();
return features;
}
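
For context, a standalone sketch with stand-in names, not part of the patch: the CPUVisitor memoizes which l2 property name the firmware exposes. It probes the first node for "l2-cache-line-size", falls back to "l2-dcache-line-size", and reuses whichever name matched on every later node:

  #include <cstdio>
  #include <cstring>

  static const char* probe(const char* node_props, const char* name) {
    return strstr(node_props, name);  // stand-in for a PICL property lookup
  }

  int main() {
    static const char* l2_name = NULL;  // memoized property name
    const char* first_node = "l1-dcache-line-size l2-dcache-line-size";
    if (l2_name == NULL) {
      l2_name = "l2-cache-line-size";
      if (probe(first_node, l2_name) == NULL) {
        l2_name = "l2-dcache-line-size";  // fall back once, on the first visit
      }
    }
    printf("%s\n", l2_name);  // l2-dcache-line-size
    return 0;
  }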
diff --git a/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp b/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp
index 203da611c..d4c0fecca 100644
--- a/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp
+++ b/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,11 +23,10 @@
*/
#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"
-
+#include "runtime/thread.inline.hpp"
void MacroAssembler::int3() {
push(rax);
@@ -39,98 +38,32 @@ void MacroAssembler::int3() {
pop(rax);
}
-#define __ _masm->
-#ifndef _LP64
-static void slow_call_thr_specific(MacroAssembler* _masm, Register thread) {
-
- // slow call to of thr_getspecific
- // int thr_getspecific(thread_key_t key, void **value);
- // Consider using pthread_getspecific instead.
-
-__ push(0); // allocate space for return value
- if (thread != rax) __ push(rax); // save rax, if caller still wants it
-__ push(rcx); // save caller save
-__ push(rdx); // save caller save
- if (thread != rax) {
-__ lea(thread, Address(rsp, 3 * sizeof(int))); // address of return value
- } else {
-__ lea(thread, Address(rsp, 2 * sizeof(int))); // address of return value
- }
-__ push(thread); // and pass the address
-__ push(ThreadLocalStorage::thread_index()); // the key
-__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
-__ increment(rsp, 2 * wordSize);
-__ pop(rdx);
-__ pop(rcx);
- if (thread != rax) __ pop(rax);
-__ pop(thread);
-
-}
-#else
-static void slow_call_thr_specific(MacroAssembler* _masm, Register thread) {
- // slow call to of thr_getspecific
- // int thr_getspecific(thread_key_t key, void **value);
- // Consider using pthread_getspecific instead.
-
+// This is simply a call to ThreadLocalStorage::thread()
+void MacroAssembler::get_thread(Register thread) {
if (thread != rax) {
-__ push(rax);
+ push(rax);
}
-__ push(0); // space for return value
-__ push(rdi);
-__ push(rsi);
-__ lea(rsi, Address(rsp, 16)); // pass return value address
-__ push(rdx);
-__ push(rcx);
-__ push(r8);
-__ push(r9);
-__ push(r10);
- // XXX
-__ mov(r10, rsp);
-__ andptr(rsp, -16);
-__ push(r10);
-__ push(r11);
+ push(rdi);
+ push(rsi);
+ push(rdx);
+ push(rcx);
+ push(r8);
+ push(r9);
+ push(r10);
+ push(r11);
-__ movl(rdi, ThreadLocalStorage::thread_index());
-__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ThreadLocalStorage::thread)));
-__ pop(r11);
-__ pop(rsp);
-__ pop(r10);
-__ pop(r9);
-__ pop(r8);
-__ pop(rcx);
-__ pop(rdx);
-__ pop(rsi);
-__ pop(rdi);
-__ pop(thread); // load return value
+ pop(r11);
+ pop(r10);
+ pop(r9);
+ pop(r8);
+ pop(rcx);
+ pop(rdx);
+ pop(rsi);
+ pop(rdi);
if (thread != rax) {
-__ pop(rax);
- }
-}
-#endif //LP64
-
-void MacroAssembler::get_thread(Register thread) {
-
- int segment = NOT_LP64(Assembler::GS_segment) LP64_ONLY(Assembler::FS_segment);
- // Try to emit a Solaris-specific fast TSD/TLS accessor.
- ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_getTlsAccessMode ();
- if (tlsMode == ThreadLocalStorage::pd_tlsAccessIndirect) { // T1
- // Use thread as a temporary: mov r, gs:[0]; mov r, [r+tlsOffset]
- emit_int8 (segment);
- // ExternalAddress doesn't work because it can't take NULL
- AddressLiteral null(0, relocInfo::none);
- movptr (thread, null);
- movptr(thread, Address(thread, ThreadLocalStorage::pd_getTlsOffset())) ;
- return ;
- } else
- if (tlsMode == ThreadLocalStorage::pd_tlsAccessDirect) { // T2
- // mov r, gs:[tlsOffset]
- emit_int8 (segment);
- AddressLiteral tls_off((address)ThreadLocalStorage::pd_getTlsOffset(), relocInfo::none);
- movptr (thread, tls_off);
- return ;
+ movl(thread, rax);
+ pop(rax);
}
-
- slow_call_thr_specific(this, thread);
-
}
diff --git a/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.cpp b/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.cpp
index e2ce144a3..30210a453 100644
--- a/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.cpp
+++ b/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,167 +26,27 @@
#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
-#ifdef AMD64
-extern "C" Thread* fs_load(ptrdiff_t tlsOffset);
-extern "C" intptr_t fs_thread();
-#else
-// From solaris_i486.s
-extern "C" Thread* gs_load(ptrdiff_t tlsOffset);
-extern "C" intptr_t gs_thread();
-#endif // AMD64
+// True thread-local variable
+__thread Thread * ThreadLocalStorage::_thr_current = NULL;
-// tlsMode encoding:
-//
-// pd_tlsAccessUndefined : uninitialized
-// pd_tlsAccessSlow : not available
-// pd_tlsAccessIndirect :
-// old-style indirect access - present in "T1" libthread.
-// use thr_slot_sync_allocate() to attempt to allocate a slot.
-// pd_tlsAccessDirect :
-// new-style direct access - present in late-model "T2" libthread.
-// Allocate the offset (slot) via _thr_slot_offset() or by
-// defining an IE- or LE-mode TLS/TSD slot in the launcher and then passing
-// that offset into libjvm.so.
-// See http://sac.eng/Archives/CaseLog/arc/PSARC/2003/159/.
-//
-// Note that we have a capability gap - some early model T2 forms
-// (e.g., unpatched S9) have neither _thr_slot_sync_allocate() nor
-// _thr_slot_offset(). In that case we revert to the usual
-// thr_getspecific accessor.
-//
+// Implementations needed to support the shared API
-static ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_tlsAccessUndefined ;
-static ptrdiff_t tlsOffset = 0 ;
-static thread_key_t tlsKey ;
+void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
-typedef int (*TSSA_Entry) (ptrdiff_t *, int, int) ;
-typedef ptrdiff_t (*TSO_Entry) (int) ;
+bool ThreadLocalStorage::_initialized = false;
-ThreadLocalStorage::pd_tlsAccessMode ThreadLocalStorage::pd_getTlsAccessMode ()
-{
- guarantee (tlsMode != pd_tlsAccessUndefined, "tlsMode not set") ;
- return tlsMode ;
+void ThreadLocalStorage::init() {
+ _initialized = true;
}
-ptrdiff_t ThreadLocalStorage::pd_getTlsOffset () {
- guarantee (tlsMode != pd_tlsAccessUndefined, "tlsMode not set") ;
- return tlsOffset ;
+bool ThreadLocalStorage::is_initialized() {
+ return _initialized;
}
-// TODO: Consider the following improvements:
-//
-// 1. Convert from thr_*specific* to pthread_*specific*.
-// The pthread_ forms are slightly faster. Also, the
-// pthread_ forms have a pthread_key_delete() API which
-// would aid in clean JVM shutdown and the eventual goal
-// of permitting a JVM to reinstantiate itself withing a process.
-//
-// 2. See ThreadLocalStorage::init(). We end up allocating
-// two TLS keys during VM startup. That's benign, but we could collapse
-// down to one key without too much trouble.
-//
-// 3. MacroAssembler::get_thread() currently emits calls to thr_getspecific().
-// Modify get_thread() to call Thread::current() instead.
-//
-// 4. Thread::current() currently uses a cache keyed by %gs:[0].
-// (The JVM has PSARC permission to use %g7/%gs:[0]
-// as an opaque temporally unique thread identifier).
-// For C++ access to a thread's reflexive "self" pointer we
-// should consider using one of the following:
-// a. a radix tree keyed by %esp - as in EVM.
-// This requires two loads (the 2nd dependent on the 1st), but
-// is easily inlined and doesn't require a "miss" slow path.
-// b. a fast TLS/TSD slot allocated by _thr_slot_offset
-// or _thr_slot_sync_allocate.
-//
-// 5. 'generate_code_for_get_thread' is a misnomer.
-// We should change it to something more general like
-// pd_ThreadSelf_Init(), for instance.
-//
-
-static void AllocateTLSOffset ()
-{
- int rslt ;
- TSSA_Entry tssa ;
- TSO_Entry tso ;
- ptrdiff_t off ;
-
- guarantee (tlsMode == ThreadLocalStorage::pd_tlsAccessUndefined, "tlsMode not set") ;
- tlsMode = ThreadLocalStorage::pd_tlsAccessSlow ;
- tlsOffset = 0 ;
-#ifndef AMD64
-
- tssa = (TSSA_Entry) dlsym (RTLD_DEFAULT, "thr_slot_sync_allocate") ;
- if (tssa != NULL) {
- off = -1 ;
- rslt = (*tssa)(&off, NULL, NULL) ; // (off,dtor,darg)
- if (off != -1) {
- tlsOffset = off ;
- tlsMode = ThreadLocalStorage::pd_tlsAccessIndirect ;
- return ;
- }
- }
-
- rslt = thr_keycreate (&tlsKey, NULL) ;
- if (rslt != 0) {
- tlsMode = ThreadLocalStorage::pd_tlsAccessSlow ; // revert to slow mode
- return ;
- }
-
- tso = (TSO_Entry) dlsym (RTLD_DEFAULT, "_thr_slot_offset") ;
- if (tso != NULL) {
- off = (*tso)(tlsKey) ;
- if (off >= 0) {
- tlsOffset = off ;
- tlsMode = ThreadLocalStorage::pd_tlsAccessDirect ;
- return ;
- }
- }
-
- // Failure: Too bad ... we've allocated a TLS slot we don't need and there's
- // no provision in the ABI for returning the slot.
- //
- // If we didn't find a slot then then:
- // 1. We might be on liblwp.
- // 2. We might be on T2 libthread, but all "fast" slots are already
- // consumed
- // 3. We might be on T1, and all TSD (thr_slot_sync_allocate) slots are
- // consumed.
- // 4. We might be on T2 libthread, but it's be re-architected
- // so that fast slots are no longer g7-relative.
- //
-
- tlsMode = ThreadLocalStorage::pd_tlsAccessSlow ;
- return ;
-#endif // AMD64
-}
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
- AllocateTLSOffset() ;
+Thread* ThreadLocalStorage::get_thread_slow() {
+ return thread();
}
-void ThreadLocalStorage::set_thread_in_slot(Thread *thread) {
- guarantee (tlsMode != pd_tlsAccessUndefined, "tlsMode not set") ;
- if (tlsMode == pd_tlsAccessIndirect) {
-#ifdef AMD64
- intptr_t tbase = fs_thread();
-#else
- intptr_t tbase = gs_thread();
-#endif // AMD64
- *((Thread**) (tbase + tlsOffset)) = thread ;
- } else
- if (tlsMode == pd_tlsAccessDirect) {
- thr_setspecific (tlsKey, (void *) thread) ;
- // set with thr_setspecific and then readback with gs_load to validate.
-#ifdef AMD64
- guarantee (thread == fs_load(tlsOffset), "tls readback failure") ;
-#else
- guarantee (thread == gs_load(tlsOffset), "tls readback failure") ;
-#endif // AMD64
- }
-}
-
-
extern "C" Thread* get_thread() {
return ThreadLocalStorage::thread();
}
diff --git a/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.hpp b/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.hpp
index 05a9e7c25..4f8da7bcb 100644
--- a/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.hpp
+++ b/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,61 +25,15 @@
#ifndef OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP
#define OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP
-// Processor dependent parts of ThreadLocalStorage
+// Solaris-specific implementation involves simple, direct use
+// of a compiler-based thread-local variable
private:
- static Thread* _get_thread_cache[]; // index by [(raw_id>>9)^(raw_id>>20) % _pd_cache_size]
- static Thread* get_thread_via_cache_slowly(uintptr_t raw_id, int index);
+ static __thread Thread * _thr_current;
- NOT_PRODUCT(static int _tcacheHit;)
- NOT_PRODUCT(static int _tcacheMiss;)
+ static bool _initialized; // needed for shared API
public:
- // Cache hit/miss statistics
- static void print_statistics() PRODUCT_RETURN;
-
- enum Constants {
-#ifdef AMD64
- _pd_cache_size = 256*2 // projected typical # of threads * 2
-#else
- _pd_cache_size = 128*2 // projected typical # of threads * 2
-#endif // AMD64
- };
-
- enum pd_tlsAccessMode {
- pd_tlsAccessUndefined = -1,
- pd_tlsAccessSlow = 0,
- pd_tlsAccessIndirect = 1,
- pd_tlsAccessDirect = 2
- } ;
-
- static void set_thread_in_slot (Thread *) ;
-
- static pd_tlsAccessMode pd_getTlsAccessMode () ;
- static ptrdiff_t pd_getTlsOffset () ;
-
- static uintptr_t pd_raw_thread_id() {
-#ifdef _GNU_SOURCE
-#ifdef AMD64
- uintptr_t rv;
- __asm__ __volatile__ ("movq %%fs:0, %0" : "=r"(rv));
- return rv;
-#else
- return gs_thread();
-#endif // AMD64
-#else //_GNU_SOURCE
- return _raw_thread_id();
-#endif //_GNU_SOURCE
- }
-
- static int pd_cache_index(uintptr_t raw_id) {
- // Copied from the sparc version. Dave said it should also work fine
- // for solx86.
- int ix = (int) (((raw_id >> 9) ^ (raw_id >> 20)) % _pd_cache_size);
- return ix;
- }
-
- // Java Thread
static inline Thread* thread();
#endif // OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP
diff --git a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java
index ecd17b309..d2b6b5511 100644
--- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java
+++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,12 +24,12 @@
/**
* A SAX based parser of LogCompilation output from HotSpot. It takes a complete
- * @author never
*/
package com.sun.hotspot.tools.compiler;
import java.io.FileReader;
+import java.io.PrintStream;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Collections;
@@ -133,6 +133,44 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
}
};
+ class Jvms {
+ Jvms(Method method, int bci) {
+ this.method = method;
+ this.bci = bci;
+ }
+ final public Method method;
+ final public int bci;
+ final public String toString() {
+ return "@" + bci + " " + method;
+ }
+ }
+
+ class LockElimination extends BasicLogEvent {
+
+ ArrayList<Jvms> jvms = new ArrayList<Jvms>(1);
+ final String kind;
+ final String classId;
+ final String tagName;
+ LockElimination(String tagName, double start, String id, String kind, String classId) {
+ super(start, id);
+ this.kind = kind;
+ this.classId = classId;
+ this.tagName = tagName;
+ }
+
+ @Override
+ public void print(PrintStream stream) {
+ stream.printf("%s %s %s %s %.3f ", getId(), tagName, kind, classId, getStart());
+ stream.print(jvms.toString());
+ stream.print("\n");
+ }
+
+ void addJVMS(Method method, int bci) {
+ jvms.add(new Jvms(method, bci));
+ }
+
+ }
+
private ArrayList<LogEvent> events = new ArrayList<LogEvent>();
private HashMap<String, String> types = new HashMap<String, String>();
@@ -145,6 +183,7 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
private Compilation compile;
private CallSite site;
private Stack<Phase> phaseStack = new Stack<Phase>();
+ private LockElimination currentLockElimination;
private UncommonTrapEvent currentTrap;
private Stack<CallSite> late_inline_scope;
@@ -188,7 +227,12 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
}
LogParser log = new LogParser();
- p.parse(new InputSource(reader), log);
+ try {
+ p.parse(new InputSource(reader), log);
+ } catch (Throwable th) {
+ th.printStackTrace();
+ // Carry on with what we've got...
+ }
// Associate compilations with their NMethods
for (NMethod nm : log.nmethods.values()) {
@@ -370,6 +414,15 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
// uncommon trap inserted during parsing.
// ignore for now
}
+ } else if (qname.startsWith("eliminate_lock")) {
+ String id = atts.getValue("compile_id");
+ if (id != null) {
+ id = makeId(atts);
+ String kind = atts.getValue("kind");
+ String classId = atts.getValue("class_id");
+ currentLockElimination = new LockElimination(qname, Double.parseDouble(search(atts, "stamp")), id, kind, classId);
+ events.add(currentLockElimination);
+ }
} else if (qname.equals("late_inline")) {
late_inline_scope = new Stack<CallSite>();
site = new CallSite(-999, method(search(atts, "method")));
@@ -378,13 +431,14 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
// <jvms bci='4' method='java/io/DataInputStream readChar ()C' bytes='40' count='5815' iicount='20815'/>
if (currentTrap != null) {
currentTrap.addJVMS(atts.getValue("method"), Integer.parseInt(atts.getValue("bci")));
+ } else if (currentLockElimination != null) {
+ currentLockElimination.addJVMS(method(atts.getValue("method")), Integer.parseInt(atts.getValue("bci")));
} else if (late_inline_scope != null) {
bci = Integer.parseInt(search(atts, "bci"));
site = new CallSite(bci, method(search(atts, "method")));
late_inline_scope.push(site);
} else {
// Ignore <eliminate_allocation type='667'>,
- // <eliminate_lock lock='1'>,
// <replace_string_concat arguments='2' string_alloc='0' multiple='0'>
}
} else if (qname.equals("nmethod")) {
@@ -437,6 +491,8 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
scopes.pop();
} else if (qname.equals("uncommon_trap")) {
currentTrap = null;
+ } else if (qname.startsWith("eliminate_lock")) {
+ currentLockElimination = null;
} else if (qname.equals("late_inline")) {
// Populate late inlining info.
@@ -445,8 +501,8 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
CallSite caller = late_inline_scope.pop();
Method m = compile.getMethod();
if (m != caller.getMethod()) {
- System.out.println(m);
- System.out.println(caller.getMethod() + " bci: " + bci);
+ System.err.println(m);
+ System.err.println(caller.getMethod() + " bci: " + bci);
throw new InternalError("call site and late_inline info don't match");
}
diff --git a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/UncommonTrapEvent.java b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/UncommonTrapEvent.java
index 9aa69c425..231e1e4ff 100644
--- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/UncommonTrapEvent.java
+++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/UncommonTrapEvent.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,7 @@ class UncommonTrapEvent extends BasicLogEvent {
}
public void print(PrintStream stream) {
- stream.printf("%s uncommon trap %s %s\n", getId(), getReason(), getAction());
+ stream.printf("%s uncommon trap %.3f %s %s\n", getId(), getStart(), getReason(), getAction());
stream.print(getJvms());
}
diff --git a/src/share/vm/adlc/adlparse.cpp b/src/share/vm/adlc/adlparse.cpp
index 9e3091cff..54bff63ad 100644
--- a/src/share/vm/adlc/adlparse.cpp
+++ b/src/share/vm/adlc/adlparse.cpp
@@ -800,6 +800,7 @@ void ADLParser::reg_parse(void) {
}
if (strcmp(token,"reg_def")==0) { reg_def_parse(); }
else if (strcmp(token,"reg_class")==0) { reg_class_parse(); }
+ else if (strcmp(token, "reg_class_dynamic") == 0) { reg_class_dynamic_parse(); }
else if (strcmp(token,"alloc_class")==0) { alloc_class_parse(); }
else if (strcmp(token,"#define")==0) { preproc_define(); }
else { parse_err(SYNERR, "bad token %s inside register block.\n", token); break; }
@@ -2323,11 +2324,12 @@ void ADLParser::reg_class_parse(void) {
// Debug Stuff
if (_AD._adl_debug >1) fprintf(stderr,"Register Class: %s\n", cname);
- RegClass *reg_class = _AD._register->addRegClass(cname);
-
- // Collect registers in class
skipws();
if (_curchar == '(') {
+ // A register list is defined for the register class.
+ // Collect registers into a generic RegClass register class.
+ RegClass* reg_class = _AD._register->addRegClass<RegClass>(cname);
+
next_char(); // Skip '('
skipws();
while (_curchar != ')') {
@@ -2352,12 +2354,15 @@ void ADLParser::reg_class_parse(void) {
}
next_char(); // Skip closing ')'
} else if (_curchar == '%') {
+ // A code snippet is defined for the register class.
+ // Collect the code snippet into a CodeSnippetRegClass register class.
+ CodeSnippetRegClass* reg_class = _AD._register->addRegClass<CodeSnippetRegClass>(cname);
char *code = find_cpp_block("reg class");
if (code == NULL) {
parse_err(SYNERR, "missing code declaration for reg class.\n");
return;
}
- reg_class->_user_defined = code;
+ reg_class->set_code_snippet(code);
return;
}
@@ -2374,6 +2379,87 @@ void ADLParser::reg_class_parse(void) {
return;
}
+//------------------------------reg_class_dynamic_parse------------------------
+void ADLParser::reg_class_dynamic_parse(void) {
+ char *cname; // Name of dynamic register class being defined
+
+ // Get register class name
+ skipws();
+ cname = get_ident();
+ if (cname == NULL) {
+ parse_err(SYNERR, "missing dynamic register class name after 'reg_class_dynamic'\n");
+ return;
+ }
+
+ if (_AD._adl_debug > 1) {
+ fprintf(stdout, "Dynamic Register Class: %s\n", cname);
+ }
+
+ skipws();
+ if (_curchar != '(') {
+ parse_err(SYNERR, "missing '(' at the beginning of reg_class_dynamic definition\n");
+ return;
+ }
+ next_char();
+ skipws();
+
+ // Collect two register classes and the C++ code representing the condition code used to
+ // select between the two classes into a ConditionalRegClass register class.
+ ConditionalRegClass* reg_class = _AD._register->addRegClass<ConditionalRegClass>(cname);
+ int i;
+ for (i = 0; i < 2; i++) {
+ char* name = get_ident();
+ if (name == NULL) {
+ parse_err(SYNERR, "missing class identifier inside reg_class_dynamic list.\n");
+ return;
+ }
+ RegClass* rc = _AD._register->getRegClass(name);
+ if (rc == NULL) {
+ parse_err(SEMERR, "unknown identifier %s inside reg_class_dynamic list.\n", name);
+ } else {
+ reg_class->set_rclass_at_index(i, rc);
+ }
+
+ skipws();
+ if (_curchar == ',') {
+ next_char();
+ skipws();
+ } else {
+ parse_err(SYNERR, "missing separator ',' inside reg_class_dynamic list.\n");
+ }
+ }
+
+ // Collect the condition code.
+ skipws();
+ if (_curchar == '%') {
+ char* code = find_cpp_block("reg class dynamic");
+ if (code == NULL) {
+ parse_err(SYNERR, "missing code declaration for reg_class_dynamic.\n");
+ return;
+ }
+ reg_class->set_condition_code(code);
+ } else {
+ parse_err(SYNERR, "missing %% at the beginning of code block in reg_class_dynamic definition\n");
+ return;
+ }
+
+ skipws();
+ if (_curchar != ')') {
+ parse_err(SYNERR, "missing ')' at the end of reg_class_dynamic definition\n");
+ return;
+ }
+ next_char();
+
+ skipws();
+ if (_curchar != ';') {
+ parse_err(SYNERR, "missing ';' at the end of reg_class_dynamic definition.\n");
+ return;
+ }
+ next_char(); // Skip trailing ';'
+
+ return;
+}
+
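
For reference, the input shape this routine accepts, mirroring the example documented in formsopt.hpp later in this patch:

  reg_class_dynamic actual_dflt_reg(dflt_reg, low_reg,
                                    %{ VM_Version::has_vfp3_32() }%
                                    );

Two previously defined register classes come first, followed by a %{ ... }% block holding the C++ condition that picks between them at runtime.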
//------------------------------alloc_class_parse------------------------------
void ADLParser::alloc_class_parse(void) {
char *name; // Name of allocation class being defined
diff --git a/src/share/vm/adlc/adlparse.hpp b/src/share/vm/adlc/adlparse.hpp
index 3ea00a249..f52116026 100644
--- a/src/share/vm/adlc/adlparse.hpp
+++ b/src/share/vm/adlc/adlparse.hpp
@@ -53,6 +53,8 @@ class ConstructRule;
// ***** Register Section *****
class RegDef;
class RegClass;
+class CodeSnippetRegClass;
+class ConditionalRegClass;
class AllocClass;
class ResourceForm;
// ***** Pipeline Section *****
@@ -127,6 +129,7 @@ protected:
// Parse components of the register section
void reg_def_parse(void); // Parse register definition
void reg_class_parse(void); // Parse register class definition
+ void reg_class_dynamic_parse(void); // Parse dynamic register class definition
void alloc_class_parse(void); // Parse allocation class definition
// Parse components of the definition section
diff --git a/src/share/vm/adlc/archDesc.cpp b/src/share/vm/adlc/archDesc.cpp
index 69345fdac..801592658 100644
--- a/src/share/vm/adlc/archDesc.cpp
+++ b/src/share/vm/adlc/archDesc.cpp
@@ -934,7 +934,7 @@ char *ArchDesc::stack_or_reg_mask(OperandForm &opForm) {
void ArchDesc::set_stack_or_reg(const char *reg_class_name) {
if( _register ) {
RegClass *reg_class = _register->getRegClass(reg_class_name);
- reg_class->_stack_or_reg = true;
+ reg_class->set_stack_version(true);
}
}
diff --git a/src/share/vm/adlc/forms.hpp b/src/share/vm/adlc/forms.hpp
index 63e367dd7..073ca4c81 100644
--- a/src/share/vm/adlc/forms.hpp
+++ b/src/share/vm/adlc/forms.hpp
@@ -68,6 +68,8 @@ class Opcode;
class InsEncode;
class RegDef;
class RegClass;
+class CodeSnippetRegClass;
+class ConditionalRegClass;
class AllocClass;
class ResourceForm;
class PipeClassForm;
diff --git a/src/share/vm/adlc/formsopt.cpp b/src/share/vm/adlc/formsopt.cpp
index 07d70fc6c..ac8ffff29 100644
--- a/src/share/vm/adlc/formsopt.cpp
+++ b/src/share/vm/adlc/formsopt.cpp
@@ -47,13 +47,19 @@ void RegisterForm::addRegDef(char *name, char *callingConv, char *c_conv,
}
// record a new register class
-RegClass *RegisterForm::addRegClass(const char *className) {
- RegClass *regClass = new RegClass(className);
+template <typename T>
+T* RegisterForm::addRegClass(const char* className) {
+ T* regClass = new T(className);
_rclasses.addName(className);
- _regClass.Insert(className,regClass);
+ _regClass.Insert(className, regClass);
return regClass;
}
+// Explicit instantiation for all supported register classes.
+template RegClass* RegisterForm::addRegClass<RegClass>(const char* className);
+template CodeSnippetRegClass* RegisterForm::addRegClass<CodeSnippetRegClass>(const char* className);
+template ConditionalRegClass* RegisterForm::addRegClass<ConditionalRegClass>(const char* className);
+
// record a new register class
AllocClass *RegisterForm::addAllocClass(char *className) {
AllocClass *allocClass = new AllocClass(className);
@@ -67,9 +73,9 @@ AllocClass *RegisterForm::addAllocClass(char *className) {
void RegisterForm::addSpillRegClass() {
// Stack slots start at the next available even register number.
_reg_ctr = (_reg_ctr+7) & ~7;
- const char *rc_name = "stack_slots";
- RegClass *reg_class = new RegClass(rc_name);
- reg_class->_stack_or_reg = true;
+ const char *rc_name = "stack_slots";
+ RegClass* reg_class = new RegClass(rc_name);
+ reg_class->set_stack_version(true);
_rclasses.addName(rc_name);
_regClass.Insert(rc_name,reg_class);
}
@@ -224,9 +230,11 @@ void RegDef::output(FILE *fp) { // Write info to output files
//------------------------------RegClass---------------------------------------
// Construct a register class into which registers will be inserted
-RegClass::RegClass(const char *classid) : _stack_or_reg(false), _classid(classid), _regDef(cmpstr,hashstr, Form::arena),
- _user_defined(NULL)
-{
+RegClass::RegClass(const char* classid) : _stack_or_reg(false), _classid(classid), _regDef(cmpstr, hashstr, Form::arena) {
+}
+
+RegClass::~RegClass() {
+ delete _classid;
}
// record a register in this class
@@ -305,6 +313,91 @@ void RegClass::output(FILE *fp) { // Write info to output files
fprintf(fp,"--- done with entries for reg_class %s\n\n",_classid);
}
+void RegClass::declare_register_masks(FILE* fp) {
+ const char* prefix = "";
+ const char* rc_name_to_upper = toUpper(_classid);
+ fprintf(fp, "extern const RegMask _%s%s_mask;\n", prefix, rc_name_to_upper);
+ fprintf(fp, "inline const RegMask &%s%s_mask() { return _%s%s_mask; }\n", prefix, rc_name_to_upper, prefix, rc_name_to_upper);
+ if (_stack_or_reg) {
+ fprintf(fp, "extern const RegMask _%sSTACK_OR_%s_mask;\n", prefix, rc_name_to_upper);
+ fprintf(fp, "inline const RegMask &%sSTACK_OR_%s_mask() { return _%sSTACK_OR_%s_mask; }\n", prefix, rc_name_to_upper, prefix, rc_name_to_upper);
+ }
+ delete[] rc_name_to_upper;
+}
+
+void RegClass::build_register_masks(FILE* fp) {
+ int len = RegisterForm::RegMask_Size();
+ const char *prefix = "";
+ const char* rc_name_to_upper = toUpper(_classid);
+ fprintf(fp, "const RegMask _%s%s_mask(", prefix, rc_name_to_upper);
+
+ int i;
+ for(i = 0; i < len - 1; i++) {
+ fprintf(fp," 0x%x,", regs_in_word(i, false));
+ }
+ fprintf(fp," 0x%x );\n", regs_in_word(i, false));
+
+ if (_stack_or_reg) {
+ fprintf(fp, "const RegMask _%sSTACK_OR_%s_mask(", prefix, rc_name_to_upper);
+ for(i = 0; i < len - 1; i++) {
+ fprintf(fp," 0x%x,", regs_in_word(i, true));
+ }
+ fprintf(fp," 0x%x );\n", regs_in_word(i, true));
+ }
+ delete[] rc_name_to_upper;
+}
+
+//------------------------------CodeSnippetRegClass---------------------------
+CodeSnippetRegClass::CodeSnippetRegClass(const char* classid) : RegClass(classid), _code_snippet(NULL) {
+}
+
+CodeSnippetRegClass::~CodeSnippetRegClass() {
+ delete _code_snippet;
+}
+
+void CodeSnippetRegClass::declare_register_masks(FILE* fp) {
+ const char* prefix = "";
+ const char* rc_name_to_upper = toUpper(_classid);
+ fprintf(fp, "inline const RegMask &%s%s_mask() { %s }\n", prefix, rc_name_to_upper, _code_snippet);
+ delete[] rc_name_to_upper;
+}
+
+//------------------------------ConditionalRegClass---------------------------
+ConditionalRegClass::ConditionalRegClass(const char *classid) : RegClass(classid), _condition_code(NULL) {
+}
+
+ConditionalRegClass::~ConditionalRegClass() {
+ delete _condition_code;
+}
+
+void ConditionalRegClass::declare_register_masks(FILE* fp) {
+ const char* prefix = "";
+ const char* rc_name_to_upper = toUpper(_classid);
+ const char* rclass_0_to_upper = toUpper(_rclasses[0]->_classid);
+ const char* rclass_1_to_upper = toUpper(_rclasses[1]->_classid);
+ fprintf(fp, "inline const RegMask &%s%s_mask() {"
+ " return (%s) ?"
+ " %s%s_mask() :"
+ " %s%s_mask(); }\n",
+ prefix, rc_name_to_upper,
+ _condition_code,
+ prefix, rclass_0_to_upper,
+ prefix, rclass_1_to_upper);
+ if (_stack_or_reg) {
+ fprintf(fp, "inline const RegMask &%sSTACK_OR_%s_mask() {"
+ " return (%s) ?"
+ " %sSTACK_OR_%s_mask() :"
+ " %sSTACK_OR_%s_mask(); }\n",
+ prefix, rc_name_to_upper,
+ _condition_code,
+ prefix, rclass_0_to_upper,
+ prefix, rclass_1_to_upper);
+ }
+ delete[] rc_name_to_upper;
+ delete[] rclass_0_to_upper;
+ delete[] rclass_1_to_upper;
+ return;
+}
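
For context, a standalone sketch with hypothetical names and mask values, not part of the patch: the accessor emitted above performs a pure dispatch, returning one of two masks already built for ordinary register classes, which is why build_register_masks() generates nothing for the dynamic class:

  #include <cstdio>

  struct RegMask { unsigned bits; };

  static const RegMask DFLT_REG_mask_     = { 0xFFFF };  // mask built for dflt_reg
  static const RegMask DFLT_LOW_REG_mask_ = { 0x00FF };  // mask built for low_reg
  static bool has_vfp3_32 = false;  // stand-in for the VM_Version query

  // Shape of the generated accessor: select between precomputed masks at runtime.
  inline const RegMask& ACTUAL_DFLT_REG_mask() {
    return has_vfp3_32 ? DFLT_REG_mask_ : DFLT_LOW_REG_mask_;
  }

  int main() {
    printf("0x%x\n", ACTUAL_DFLT_REG_mask().bits);  // 0xff
    return 0;
  }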
//------------------------------AllocClass-------------------------------------
AllocClass::AllocClass(char *classid) : _classid(classid), _regDef(cmpstr,hashstr, Form::arena) {
diff --git a/src/share/vm/adlc/formsopt.hpp b/src/share/vm/adlc/formsopt.hpp
index b6108d488..8ba8e64d9 100644
--- a/src/share/vm/adlc/formsopt.hpp
+++ b/src/share/vm/adlc/formsopt.hpp
@@ -60,6 +60,8 @@ class Opcode;
class InsEncode;
class RegDef;
class RegClass;
+class CodeSnippetRegClass;
+class ConditionalRegClass;
class AllocClass;
class ResourceForm;
class PipeClassForm;
@@ -98,7 +100,8 @@ public:
void addRegDef(char *regName, char *callingConv, char *c_conv,
char * idealtype, char *encoding, char* concreteName);
- RegClass *addRegClass(const char *className);
+ template<typename T> T* addRegClass(const char* className);
+
AllocClass *addAllocClass(char *allocName);
void addSpillRegClass();
@@ -154,17 +157,28 @@ public:
};
//------------------------------RegClass---------------------------------------
+// Generic register class. This register class is the internal representation
+// for the following .ad file format:
+//
+// reg_class ptr(RAX, RBX, ...);
+//
+// where ptr is the name of the register class, and RAX and RBX are registers.
+//
+// This register class allows registers to be spilled onto the stack. Spilling
+// is allowed if the field _stack_or_reg is true.
class RegClass : public Form {
public:
// Public Data
const char *_classid; // Name of class
NameList _regDefs; // List of registers in class
Dict _regDef; // Dictionary of registers in class
+protected:
bool _stack_or_reg; // Allowed on any stack slot
- char* _user_defined;
+public:
// Public Methods
RegClass(const char *classid);// Constructor
+ virtual ~RegClass();
void addReg(RegDef *regDef); // Add a register to this class
@@ -183,6 +197,115 @@ public:
void dump(); // Debug printer
void output(FILE *fp); // Write info to output files
+
+ virtual bool has_stack_version() {
+ return _stack_or_reg;
+ }
+ virtual void set_stack_version(bool flag) {
+ _stack_or_reg = flag;
+ }
+
+ virtual void declare_register_masks(FILE* fp);
+ virtual void build_register_masks(FILE* fp);
+};
+
+//------------------------------CodeSnippetRegClass----------------------------
+// Register class that has a user-defined C++ code snippet attached to it
+// to determine at runtime which register class to use. This register class is
+// the internal representation for the following .ad file format:
+//
+// reg_class actual_dflt_reg %{
+// if (VM_Version::has_vfp3_32()) {
+// return DFLT_REG_mask();
+// } else {
+// return DFLT_LOW_REG_mask();
+// }
+// %}
+//
+// where DFLT_REG_mask() and DFLT_LOW_REG_mask() are the internal names of the
+// masks of register classes dflt_reg and dflt_low_reg.
+//
+// The attached code snippet can also select between more than two register classes.
+// This register class can, however, be used only if the register class is not
+// cisc-spillable (i.e., the registers of this class are not allowed on the stack,
+// which is equivalent to _stack_or_reg being false).
+class CodeSnippetRegClass : public RegClass {
+protected:
+ char* _code_snippet;
+public:
+ CodeSnippetRegClass(const char* classid);// Constructor
+ ~CodeSnippetRegClass();
+
+ void set_code_snippet(char* code) {
+ _code_snippet = code;
+ }
+ char* code_snippet() {
+ return _code_snippet;
+ }
+ void set_stack_version(bool flag) {
+ assert(false, "User defined register classes are not allowed to spill to the stack.");
+ }
+ void declare_register_masks(FILE* fp);
+ void build_register_masks(FILE* fp) {
+ // We do not need to generate register masks because we select at runtime
+ // between register masks generated for other register classes.
+ return;
+ }
+};
+
+//------------------------------ConditionalRegClass----------------------------
+// Register class that has two register classes and a runtime condition attached
+// to it. The condition is evaluated at runtime and one of the two attached
+// register classes is selected. This register class is the internal
+// representation for the following .ad format:
+//
+// reg_class_dynamic actual_dflt_reg(dflt_reg, low_reg,
+// %{ VM_Version::has_vfp3_32() }%
+// );
+//
+// This example is equivalent to the example used with the CodeSnippetRegClass
+// register class. A ConditionalRegClass also works if a register class is cisc-spillable
+// (i.e., _stack_or_reg is true), but it can select only between two register classes.
+class ConditionalRegClass : public RegClass {
+protected:
+ // reference to condition code
+ char* _condition_code; // C++ condition code to dynamically determine which register class to use.
+
+ // Example syntax (equivalent to previous example):
+ //
+  // reg_class_dynamic actual_dflt_reg(dflt_reg, low_reg,
+ // %{ VM_Version::has_vfp3_32() }%
+ // );
+ // reference to conditional register classes
+ RegClass* _rclasses[2]; // 0 is the register class selected if the condition code returns true
+ // 1 is the register class selected if the condition code returns false
+public:
+ ConditionalRegClass(const char* classid);// Constructor
+ ~ConditionalRegClass();
+
+ virtual void set_stack_version(bool flag) {
+ RegClass::set_stack_version(flag);
+ assert((_rclasses[0] != NULL), "Register class NULL for condition code == true");
+ assert((_rclasses[1] != NULL), "Register class NULL for condition code == false");
+ _rclasses[0]->set_stack_version(flag);
+ _rclasses[1]->set_stack_version(flag);
+ }
+ void declare_register_masks(FILE* fp);
+ void build_register_masks(FILE* fp) {
+ // We do not need to generate register masks because we select at runtime
+ // between register masks generated for other register classes.
+ return;
+ }
+ void set_rclass_at_index(int index, RegClass* rclass) {
+ assert((0 <= index && index < 2), "Condition code can select only between two register classes");
+ _rclasses[index] = rclass;
+ }
+ void set_condition_code(char* code) {
+ _condition_code = code;
+ }
+ char* condition_code() {
+ return _condition_code;
+ }
};
//------------------------------AllocClass-------------------------------------
diff --git a/src/share/vm/adlc/formssel.hpp b/src/share/vm/adlc/formssel.hpp
index 340db8f43..0a38a24a5 100644
--- a/src/share/vm/adlc/formssel.hpp
+++ b/src/share/vm/adlc/formssel.hpp
@@ -59,6 +59,8 @@ class Opcode;
class InsEncode;
class RegDef;
class RegClass;
+class CodeSnippetRegClass;
+class ConditionalRegClass;
class AllocClass;
class ResourceForm;
class PipeDesc;
diff --git a/src/share/vm/adlc/output_c.cpp b/src/share/vm/adlc/output_c.cpp
index 1d0913fe7..f53dbd582 100644
--- a/src/share/vm/adlc/output_c.cpp
+++ b/src/share/vm/adlc/output_c.cpp
@@ -155,26 +155,9 @@ void ArchDesc::declare_register_masks(FILE *fp_hpp) {
fprintf(fp_hpp,"// Register masks, one for each register class.\n");
_register->_rclasses.reset();
for (rc_name = NULL; (rc_name = _register->_rclasses.iter()) != NULL;) {
- const char *prefix = "";
RegClass *reg_class = _register->getRegClass(rc_name);
assert(reg_class, "Using an undefined register class");
-
- const char* rc_name_to_upper = toUpper(rc_name);
-
- if (reg_class->_user_defined == NULL) {
- fprintf(fp_hpp, "extern const RegMask _%s%s_mask;\n", prefix, rc_name_to_upper);
- fprintf(fp_hpp, "inline const RegMask &%s%s_mask() { return _%s%s_mask; }\n", prefix, rc_name_to_upper, prefix, rc_name_to_upper);
- } else {
- fprintf(fp_hpp, "inline const RegMask &%s%s_mask() { %s }\n", prefix, rc_name_to_upper, reg_class->_user_defined);
- }
-
- if (reg_class->_stack_or_reg) {
- assert(reg_class->_user_defined == NULL, "no user defined reg class here");
- fprintf(fp_hpp, "extern const RegMask _%sSTACK_OR_%s_mask;\n", prefix, rc_name_to_upper);
- fprintf(fp_hpp, "inline const RegMask &%sSTACK_OR_%s_mask() { return _%sSTACK_OR_%s_mask; }\n", prefix, rc_name_to_upper, prefix, rc_name_to_upper);
- }
- delete[] rc_name_to_upper;
-
+ reg_class->declare_register_masks(fp_hpp);
}
}
}
@@ -190,35 +173,9 @@ void ArchDesc::build_register_masks(FILE *fp_cpp) {
fprintf(fp_cpp,"// Register masks, one for each register class.\n");
_register->_rclasses.reset();
for (rc_name = NULL; (rc_name = _register->_rclasses.iter()) != NULL;) {
- const char *prefix = "";
RegClass *reg_class = _register->getRegClass(rc_name);
assert(reg_class, "Using an undefined register class");
-
- if (reg_class->_user_defined != NULL) {
- continue;
- }
-
- int len = RegisterForm::RegMask_Size();
- const char* rc_name_to_upper = toUpper(rc_name);
- fprintf(fp_cpp, "const RegMask _%s%s_mask(", prefix, rc_name_to_upper);
-
- {
- int i;
- for(i = 0; i < len - 1; i++) {
- fprintf(fp_cpp," 0x%x,", reg_class->regs_in_word(i, false));
- }
- fprintf(fp_cpp," 0x%x );\n", reg_class->regs_in_word(i, false));
- }
-
- if (reg_class->_stack_or_reg) {
- int i;
- fprintf(fp_cpp, "const RegMask _%sSTACK_OR_%s_mask(", prefix, rc_name_to_upper);
- for(i = 0; i < len - 1; i++) {
- fprintf(fp_cpp," 0x%x,",reg_class->regs_in_word(i, true));
- }
- fprintf(fp_cpp," 0x%x );\n",reg_class->regs_in_word(i, true));
- }
- delete[] rc_name_to_upper;
+ reg_class->build_register_masks(fp_cpp);
}
}
}
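The output_c.cpp hunks above replace two long inline special cases with a single virtual call per register class, so each RegClass subtype owns its own emission logic. A compressed sketch of the pattern, with simplified types rather than the real adlc classes:

    #include <cstdio>

    class RegClassSketch {
    public:
      virtual ~RegClassSketch() {}
      // Default behavior: emit an extern mask constant plus an inline accessor.
      virtual void declare_register_masks(FILE* fp) {
        fprintf(fp, "extern const RegMask _RC_mask;\n");
        fprintf(fp, "inline const RegMask& RC_mask() { return _RC_mask; }\n");
      }
    };

    class CodeSnippetRegClassSketch : public RegClassSketch {
    public:
      // User-defined classes emit the user's snippet instead of a constant.
      virtual void declare_register_masks(FILE* fp) {
        fprintf(fp, "inline const RegMask& RC_mask() { /* user snippet */ }\n");
      }
    };

    int main() {
      RegClassSketch base;
      CodeSnippetRegClassSketch snippet;
      RegClassSketch* rcs[2] = { &base, &snippet };
      for (int i = 0; i < 2; i++) {
        rcs[i]->declare_register_masks(stdout);  // one virtual call per class
      }
      return 0;
    }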
diff --git a/src/share/vm/c1/c1_GraphBuilder.cpp b/src/share/vm/c1/c1_GraphBuilder.cpp
index c11c1251d..cbb6255bc 100644
--- a/src/share/vm/c1/c1_GraphBuilder.cpp
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp
@@ -4064,7 +4064,7 @@ bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
ValueType* type = apop()->type();
if (type->is_constant()) {
ciMethod* target = type->as_ObjectType()->constant_value()->as_member_name()->get_vmtarget();
- // If the target is another method handle invoke try recursivly to get
+ // If the target is another method handle invoke, try to recursively get
// a better target.
if (target->is_method_handle_intrinsic()) {
if (try_method_handle_inline(target)) {
diff --git a/src/share/vm/c1/c1_LIR.cpp b/src/share/vm/c1/c1_LIR.cpp
index f60190ae6..a6ba98853 100644
--- a/src/share/vm/c1/c1_LIR.cpp
+++ b/src/share/vm/c1/c1_LIR.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -142,16 +142,11 @@ LIR_Address::Scale LIR_Address::scale(BasicType type) {
#ifndef PRODUCT
-void LIR_Address::verify() const {
+void LIR_Address::verify0() const {
#if defined(SPARC) || defined(PPC)
assert(scale() == times_1, "Scaled addressing mode not available on SPARC/PPC and should not be used");
assert(disp() == 0 || index()->is_illegal(), "can't have both");
#endif
-#ifdef ARM
- assert(disp() == 0 || index()->is_illegal(), "can't have both");
- // Note: offsets higher than 4096 must not be rejected here. They can
- // be handled by the back-end or will be rejected if not.
-#endif
#ifdef _LP64
assert(base()->is_cpu_register(), "wrong base operand");
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
@@ -459,7 +454,7 @@ void LIR_OpRTCall::verify() const {
//-------------------visits--------------------------
// complete rework of LIR instruction visitor.
-// The virtual calls for each instruction type is replaced by a big
+// The virtual call for each instruction type is replaced by a big
// switch that adds the operands for each instruction
void LIR_OpVisitState::visit(LIR_Op* op) {
@@ -828,7 +823,8 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
}
if (opJavaCall->_info) do_info(opJavaCall->_info);
- if (opJavaCall->is_method_handle_invoke()) {
+ if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr &&
+ opJavaCall->is_method_handle_invoke()) {
opJavaCall->_method_handle_invoke_SP_save_opr = FrameMap::method_handle_invoke_SP_save_opr();
do_temp(opJavaCall->_method_handle_invoke_SP_save_opr);
}
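The LIR_OpVisitState hunk above records the SP-save operand only when the platform actually provides one. A standalone sketch of the guard, with simplified stand-ins for LIR_Opr and FrameMap:

    #include <cassert>

    typedef int LIR_OprSketch;
    static const LIR_OprSketch illegal_opr = -1;

    // A port that needs no SP preservation returns the illegal operand here.
    static LIR_OprSketch method_handle_invoke_SP_save_opr() { return illegal_opr; }

    static bool record_sp_save(bool is_method_handle_invoke) {
      return method_handle_invoke_SP_save_opr() != illegal_opr &&
             is_method_handle_invoke;
    }

    int main() {
      assert(!record_sp_save(true));  // this sketch's platform opted out
      return 0;
    }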
diff --git a/src/share/vm/c1/c1_LIR.hpp b/src/share/vm/c1/c1_LIR.hpp
index e26c280c3..19acfb37e 100644
--- a/src/share/vm/c1/c1_LIR.hpp
+++ b/src/share/vm/c1/c1_LIR.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_C1_C1_LIR_HPP
#define SHARE_VM_C1_C1_LIR_HPP
+#include "c1/c1_Defs.hpp"
#include "c1/c1_ValueType.hpp"
#include "oops/method.hpp"
@@ -561,7 +562,13 @@ class LIR_Address: public LIR_OprPtr {
virtual BasicType type() const { return _type; }
virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
- void verify() const PRODUCT_RETURN;
+ void verify0() const PRODUCT_RETURN;
+#if defined(LIR_ADDRESS_PD_VERIFY) && !defined(PRODUCT)
+ void pd_verify() const;
+ void verify() const { pd_verify(); }
+#else
+ void verify() const { verify0(); }
+#endif
static Scale scale(BasicType type);
};
@@ -610,19 +617,15 @@ class LIR_OprFact: public AllStatic {
LIR_OprDesc::float_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::single_size); }
-#if defined(ARM)
- static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
- static LIR_Opr single_softfp(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::float_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
- static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
-#endif
-#ifdef SPARC
+#if defined(C1_LIR_MD_HPP)
+# include C1_LIR_MD_HPP
+#elif defined(SPARC)
static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
(reg2 << LIR_OprDesc::reg2_shift) |
LIR_OprDesc::double_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::double_size); }
-#endif
-#ifdef X86
+#elif defined(X86)
static LIR_Opr double_fpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
(reg << LIR_OprDesc::reg2_shift) |
LIR_OprDesc::double_type |
@@ -640,8 +643,7 @@ class LIR_OprFact: public AllStatic {
LIR_OprDesc::fpu_register |
LIR_OprDesc::double_size |
LIR_OprDesc::is_xmm_mask); }
-#endif // X86
-#ifdef PPC
+#elif defined(PPC)
static LIR_Opr double_fpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
(reg << LIR_OprDesc::reg2_shift) |
LIR_OprDesc::double_type |
@@ -1214,10 +1216,8 @@ class LIR_OpJavaCall: public LIR_OpCall {
// JSR 292 support.
bool is_invokedynamic() const { return code() == lir_dynamic_call; }
bool is_method_handle_invoke() const {
- return
- method()->is_compiled_lambda_form() // Java-generated adapter
- ||
- method()->is_method_handle_intrinsic(); // JVM-generated MH intrinsic
+ return method()->is_compiled_lambda_form() || // Java-generated lambda form
+ method()->is_method_handle_intrinsic(); // JVM-generated MH intrinsic
}
intptr_t vtable_offset() const {
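The LIR_OprFact hunk above converts scattered #ifdef ARM blocks into an include hook: a port defines C1_LIR_MD_HPP to name its own header, which is textually spliced in at that spot. A self-contained sketch of the mechanism; the macro name, -D value, and fallback function below are illustrative only:

    // Build a port with: g++ -DMD_HOOK_HPP='"my_port.hpp"' sketch.cpp
    #include <cstdio>

    #if defined(MD_HOOK_HPP)
    # include MD_HOOK_HPP            // the port's definitions replace the default
    #else
    static int double_fpu(int reg) { return reg; }  // shared default definition
    #endif

    int main() {
      printf("%d\n", double_fpu(7));
      return 0;
    }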
diff --git a/src/share/vm/c1/c1_LIRGenerator.cpp b/src/share/vm/c1/c1_LIRGenerator.cpp
index 22a018b93..237cfccf8 100644
--- a/src/share/vm/c1/c1_LIRGenerator.cpp
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "c1/c1_Defs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
@@ -46,10 +47,7 @@
#define __ gen()->lir()->
#endif
-// TODO: ARM - Use some recognizable constant which still fits architectural constraints
-#ifdef ARM
-#define PATCHED_ADDR (204)
-#else
+#ifndef PATCHED_ADDR
#define PATCHED_ADDR (max_jint)
#endif
@@ -1599,25 +1597,9 @@ void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc*
}
assert(addr->is_register(), "must be a register at this point");
-#ifdef ARM
- // TODO: ARM - move to platform-dependent code
- LIR_Opr tmp = FrameMap::R14_opr;
- if (VM_Version::supports_movw()) {
- __ move((LIR_Opr)card_table_base, tmp);
- } else {
- __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
- }
-
- CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
- LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
- if(((int)ct->byte_map_base & 0xff) == 0) {
- __ move(tmp, card_addr);
- } else {
- LIR_Opr tmp_zero = new_register(T_INT);
- __ move(LIR_OprFact::intConst(0), tmp_zero);
- __ move(tmp_zero, card_addr);
- }
-#else // ARM
+#ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
+ CardTableModRef_post_barrier_helper(addr, card_table_base);
+#else
LIR_Opr tmp = new_pointer_register();
if (TwoOperandLIRForm) {
__ move(addr, tmp);
@@ -1633,7 +1615,7 @@ void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc*
new LIR_Address(tmp, load_constant(card_table_base),
T_BYTE));
}
-#endif // ARM
+#endif
}
@@ -2121,7 +2103,7 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
} else {
#ifdef X86
addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
-#elif defined(ARM)
+#elif defined(GENERATE_ADDRESS_IS_PREFERRED)
addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
#else
if (index_op->is_illegal() || log2_scale == 0) {
@@ -2175,6 +2157,9 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
LIR_Opr base_op = base.result();
LIR_Opr index_op = idx.result();
+#ifdef GENERATE_ADDRESS_IS_PREFERRED
+ LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type());
+#else
#ifndef _LP64
if (base_op->type() == T_LONG) {
base_op = new_register(T_INT);
@@ -2204,10 +2189,19 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
if (log2_scale != 0) {
// temporary fix (platform dependent code without shift on Intel would be better)
// TODO: ARM also allows embedded shift in the address
- __ shift_left(index_op, log2_scale, index_op);
+ LIR_Opr tmp = new_pointer_register();
+ if (TwoOperandLIRForm) {
+ __ move(index_op, tmp);
+ index_op = tmp;
+ }
+ __ shift_left(index_op, log2_scale, tmp);
+ if (!TwoOperandLIRForm) {
+ index_op = tmp;
+ }
}
LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
+#endif // !GENERATE_ADDRESS_IS_PREFERRED
__ move(value.result(), addr);
}
@@ -2561,7 +2555,7 @@ void LIRGenerator::do_Goto(Goto* x) {
// need to free up storage used for OSR entry point
LIR_Opr osrBuffer = block()->next()->operand();
BasicTypeList signature;
- signature.append(T_INT);
+ signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
__ move(osrBuffer, cc->args()->at(0));
__ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
@@ -2901,7 +2895,7 @@ LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
// g) lock result registers and emit call operation
//
// Before issuing a call, we must spill-save all values on stack
-// that are in caller-save register. "spill-save" moves thos registers
+// that are in caller-save register. "spill-save" moves those registers
// either in a free callee-save register or spills them if no free
// callee save register is available.
//
@@ -2909,7 +2903,7 @@ LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
// - if invoked between e) and f), we may lock callee save
// register in "spill-save" that destroys the receiver register
// before f) is executed
-// - if we rearange the f) to be earlier, by loading %o0, it
+// - if we rearrange f) to be earlier (by loading %o0) it
// may destroy a value on the stack that is currently in %o0
// and is waiting to be spilled
// - if we keep the receiver locked while doing spill-save,
@@ -2942,14 +2936,16 @@ void LIRGenerator::do_Invoke(Invoke* x) {
assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
// JSR 292
- // Preserve the SP over MethodHandle call sites.
+ // Preserve the SP over MethodHandle call sites, if needed.
ciMethod* target = x->target();
bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
target->is_method_handle_intrinsic() ||
target->is_compiled_lambda_form());
if (is_method_handle_invoke) {
info->set_is_method_handle_invoke(true);
- __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
+ if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
+ __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
+ }
}
switch (x->code()) {
@@ -2989,8 +2985,9 @@ void LIRGenerator::do_Invoke(Invoke* x) {
}
// JSR 292
- // Restore the SP after MethodHandle call sites.
- if (is_method_handle_invoke) {
+ // Restore the SP after MethodHandle call sites, if needed.
+ if (is_method_handle_invoke
+ && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
__ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
}
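The do_UnsafePutRaw hunk above fixes a clobbering hazard: on two-operand architectures the destination of shift_left must equal its first source, so shifting index_op in place could destroy a value that is still live. A scalar model of the corrected sequence (the LIR registers and TwoOperandLIRForm are simulated with plain ints and a bool):

    #include <cassert>

    int main() {
      const bool two_operand_form = true;   // e.g. x86; RISC-style ports use false
      int index_op = 5;
      const int log2_scale = 3;
      int tmp;                              // stands in for new_pointer_register()

      if (two_operand_form) {
        tmp = index_op;                     // __ move(index_op, tmp)
        index_op = tmp;
      }
      tmp = index_op << log2_scale;         // __ shift_left(index_op, scale, tmp)
      if (!two_operand_form) {
        index_op = tmp;
      }
      // In both forms the scaled index ends up in tmp; the original index
      // register is never shifted in place.
      assert(tmp == 40);
      return 0;
    }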
diff --git a/src/share/vm/c1/c1_LIRGenerator.hpp b/src/share/vm/c1/c1_LIRGenerator.hpp
index 69d9a0e50..cd55b43c0 100644
--- a/src/share/vm/c1/c1_LIRGenerator.hpp
+++ b/src/share/vm/c1/c1_LIRGenerator.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -277,6 +277,9 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
void CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
+#ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
+ void CardTableModRef_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base);
+#endif
static LIR_Opr result_register_for(ValueType* type, bool callee = false);
@@ -550,6 +553,10 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
#ifdef ASSERT
virtual void do_Assert (Assert* x);
#endif
+
+#ifdef C1_LIRGENERATOR_MD_HPP
+#include C1_LIRGENERATOR_MD_HPP
+#endif
};
diff --git a/src/share/vm/c1/c1_LinearScan.cpp b/src/share/vm/c1/c1_LinearScan.cpp
index abee3ac59..108868868 100644
--- a/src/share/vm/c1/c1_LinearScan.cpp
+++ b/src/share/vm/c1/c1_LinearScan.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2138,7 +2138,7 @@ LIR_Opr LinearScan::calc_operand_for_interval(const Interval* interval) {
assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
LIR_Opr result = LIR_OprFact::double_fpu(interval->assigned_regHi() - pd_first_fpu_reg, assigned_reg - pd_first_fpu_reg);
-#elif defined(ARM)
+#elif defined(ARM32)
assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
@@ -2727,7 +2727,7 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
#ifdef SPARC
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
#endif
-#ifdef ARM
+#ifdef ARM32
assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)");
#endif
#ifdef PPC
diff --git a/src/share/vm/c1/c1_Runtime1.cpp b/src/share/vm/c1/c1_Runtime1.cpp
index cb7adc269..ca6a35e39 100644
--- a/src/share/vm/c1/c1_Runtime1.cpp
+++ b/src/share/vm/c1/c1_Runtime1.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1085,7 +1085,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
#ifdef ARM
if((load_klass_or_mirror_patch_id ||
stub_id == Runtime1::load_appendix_patching_id) &&
- !VM_Version::supports_movw()) {
+ nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
nmethod* nm = CodeCache::find_nmethod(instr_pc);
address addr = NULL;
assert(nm != NULL, "invalid nmethod_pc");
diff --git a/src/share/vm/c1/c1_ValueType.cpp b/src/share/vm/c1/c1_ValueType.cpp
index 0aebd036a..5f86a8b93 100644
--- a/src/share/vm/c1/c1_ValueType.cpp
+++ b/src/share/vm/c1/c1_ValueType.cpp
@@ -153,7 +153,19 @@ ValueType* as_ValueType(ciConstant value) {
case T_FLOAT : return new FloatConstant (value.as_float ());
case T_DOUBLE : return new DoubleConstant(value.as_double());
case T_ARRAY : // fall through (ciConstant doesn't have an array accessor)
- case T_OBJECT : return new ObjectConstant(value.as_object());
+ case T_OBJECT : {
+ // TODO: Common the code with GraphBuilder::load_constant?
+ ciObject* obj = value.as_object();
+ if (obj->is_null_object())
+ return objectNull;
+ if (obj->is_loaded()) {
+ if (obj->is_array())
+ return new ArrayConstant(obj->as_array());
+ else if (obj->is_instance())
+ return new InstanceConstant(obj->as_instance());
+ }
+ return new ObjectConstant(obj);
+ }
}
ShouldNotReachHere();
return illegalType;
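The as_ValueType hunk above narrows object constants to the most specific constant kind C1 can exploit. A sketch of the dispatch, with simplified stand-ins for the ci* object model:

    #include <cassert>

    enum ConstantKind { K_NULL, K_ARRAY, K_INSTANCE, K_OBJECT };

    struct ObjSketch { bool is_null, is_loaded, is_array, is_instance; };

    // Mirrors the new T_OBJECT/T_ARRAY branch: null first, then loaded arrays
    // and instances, with a generic ObjectConstant as the fallback.
    static ConstantKind as_constant_kind(const ObjSketch& obj) {
      if (obj.is_null) return K_NULL;
      if (obj.is_loaded) {
        if (obj.is_array)    return K_ARRAY;
        if (obj.is_instance) return K_INSTANCE;
      }
      return K_OBJECT;
    }

    int main() {
      const ObjSketch unloaded = { false, false, false, false };
      assert(as_constant_kind(unloaded) == K_OBJECT);
      return 0;
    }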
diff --git a/src/share/vm/ci/bcEscapeAnalyzer.cpp b/src/share/vm/ci/bcEscapeAnalyzer.cpp
index 4c0cd3d80..4c4db3e85 100644
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp
+++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp
@@ -42,7 +42,7 @@
#define TRACE_BCEA(level, code)
#endif
-// Maintain a map of which aguments a local variable or
+// Maintain a map of which arguments a local variable or
// stack slot may contain. In addition to tracking
// arguments, it tracks two special values, "allocated"
// which represents any object allocated in the current
@@ -318,14 +318,16 @@ void BCEscapeAnalyzer::invoke(StateInfo &state, Bytecodes::Code code, ciMethod*
bool must_record_dependencies = false;
for (i = arg_size - 1; i >= 0; i--) {
ArgumentMap arg = state.raw_pop();
- if (!is_argument(arg))
+ // Check if callee arg is a caller arg or an allocated object
+ bool allocated = arg.contains_allocated();
+ if (!(is_argument(arg) || allocated))
continue;
for (int j = 0; j < _arg_size; j++) {
if (arg.contains(j)) {
_arg_modified[j] |= analyzer._arg_modified[i];
}
}
- if (!is_arg_stack(arg)) {
+ if (!(is_arg_stack(arg) || allocated)) {
// arguments have already been recognized as escaping
} else if (analyzer.is_arg_stack(i) && !analyzer.is_arg_returned(i)) {
set_method_escape(arg);
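In the bcEscapeAnalyzer hunk above, callee argument effects are now applied not just to caller arguments but also to objects allocated in the current method. A toy model of the widened filter, with ArgumentMap reduced to two fields:

    #include <cassert>

    struct ArgumentMapSketch {
      unsigned arg_bits;  // which caller arguments may flow into this slot
      bool allocated;     // slot may hold an object allocated in this method
      bool contains_allocated() const { return allocated; }
    };

    static bool is_argument(const ArgumentMapSketch& a) { return a.arg_bits != 0; }

    // Before the fix the 'allocated' case fell through the early 'continue';
    // now such slots stay in the analysis.
    static bool worth_tracking(const ArgumentMapSketch& a) {
      return is_argument(a) || a.contains_allocated();
    }

    int main() {
      const ArgumentMapSketch fresh = { 0u, true };
      assert(worth_tracking(fresh));
      return 0;
    }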
diff --git a/src/share/vm/ci/ciField.cpp b/src/share/vm/ci/ciField.cpp
index 58c191710..7dd09a98e 100644
--- a/src/share/vm/ci/ciField.cpp
+++ b/src/share/vm/ci/ciField.cpp
@@ -187,6 +187,14 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
// Even if general trusting is disabled, trust system-built closures in these packages.
if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke"))
return true;
+ // Trust Atomic*FieldUpdaters: they are very important for performance, and make up one
+ // more reason not to use Unsafe, if their final fields are trusted. See more in JDK-8140483.
+ if (holder->name() == ciSymbol::java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl() ||
+ holder->name() == ciSymbol::java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater() ||
+ holder->name() == ciSymbol::java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater() ||
+ holder->name() == ciSymbol::java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl()) {
+ return true;
+ }
return TrustFinalNonStaticFields;
}
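The ciField hunk above hard-codes trust for a handful of java.util.concurrent.atomic updater implementation classes (see JDK-8140483). The check is just a name allowlist; a standalone sketch follows, with the inner-class spellings inferred from the vmSymbols names above, so treat them as illustrative:

    #include <cstring>

    static bool is_trusted_updater(const char* name) {
      static const char* const trusted[] = {
        "java/util/concurrent/atomic/AtomicIntegerFieldUpdater$AtomicIntegerFieldUpdaterImpl",
        "java/util/concurrent/atomic/AtomicLongFieldUpdater$CASUpdater",
        "java/util/concurrent/atomic/AtomicLongFieldUpdater$LockedUpdater",
        "java/util/concurrent/atomic/AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl",
      };
      for (unsigned i = 0; i < sizeof(trusted) / sizeof(trusted[0]); i++) {
        if (strcmp(name, trusted[i]) == 0) return true;
      }
      return false;
    }

    int main() {
      return is_trusted_updater(
          "java/util/concurrent/atomic/AtomicLongFieldUpdater$CASUpdater") ? 0 : 1;
    }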
diff --git a/src/share/vm/ci/ciMethod.cpp b/src/share/vm/ci/ciMethod.cpp
index ae5dd763a..1f66105a9 100644
--- a/src/share/vm/ci/ciMethod.cpp
+++ b/src/share/vm/ci/ciMethod.cpp
@@ -689,7 +689,8 @@ ciKlass* ciMethod::parameter_profiled_type(int i) {
// via assert_unique_concrete_method or assert_leaf_type.
ciMethod* ciMethod::find_monomorphic_target(ciInstanceKlass* caller,
ciInstanceKlass* callee_holder,
- ciInstanceKlass* actual_recv) {
+ ciInstanceKlass* actual_recv,
+ bool check_access) {
check_is_loaded();
if (actual_recv->is_interface()) {
@@ -697,7 +698,7 @@ ciMethod* ciMethod::find_monomorphic_target(ciInstanceKlass* caller,
return NULL;
}
- ciMethod* root_m = resolve_invoke(caller, actual_recv);
+ ciMethod* root_m = resolve_invoke(caller, actual_recv, check_access);
if (root_m == NULL) {
// Something went wrong looking up the actual receiver method.
return NULL;
@@ -776,7 +777,7 @@ ciMethod* ciMethod::find_monomorphic_target(ciInstanceKlass* caller,
//
// Given a known receiver klass, find the target for the call.
// Return NULL if the call has no target or the target is abstract.
-ciMethod* ciMethod::resolve_invoke(ciKlass* caller, ciKlass* exact_receiver) {
+ciMethod* ciMethod::resolve_invoke(ciKlass* caller, ciKlass* exact_receiver, bool check_access) {
check_is_loaded();
VM_ENTRY_MARK;
@@ -793,9 +794,9 @@ ciMethod* ciMethod::resolve_invoke(ciKlass* caller, ciKlass* exact_receiver) {
||
InstanceKlass::cast(h_recv())->is_linked() && !exact_receiver->is_interface()) {
if (holder()->is_interface()) {
- m = LinkResolver::resolve_interface_call_or_null(h_recv, h_resolved, h_name, h_signature, caller_klass);
+ m = LinkResolver::resolve_interface_call_or_null(h_recv, h_resolved, h_name, h_signature, caller_klass, check_access);
} else {
- m = LinkResolver::resolve_virtual_call_or_null(h_recv, h_resolved, h_name, h_signature, caller_klass);
+ m = LinkResolver::resolve_virtual_call_or_null(h_recv, h_resolved, h_name, h_signature, caller_klass, check_access);
}
}
diff --git a/src/share/vm/ci/ciMethod.hpp b/src/share/vm/ci/ciMethod.hpp
index 59d2cdff1..af66c4f2d 100644
--- a/src/share/vm/ci/ciMethod.hpp
+++ b/src/share/vm/ci/ciMethod.hpp
@@ -172,9 +172,9 @@ class ciMethod : public ciMetadata {
// Code size for inlining decisions.
int code_size_for_inlining();
- bool caller_sensitive() { return get_Method()->caller_sensitive(); }
- bool force_inline() { return get_Method()->force_inline(); }
- bool dont_inline() { return get_Method()->dont_inline(); }
+ bool caller_sensitive() const { return get_Method()->caller_sensitive(); }
+ bool force_inline() const { return get_Method()->force_inline(); }
+ bool dont_inline() const { return get_Method()->dont_inline(); }
int comp_level();
int highest_osr_comp_level();
@@ -248,11 +248,12 @@ class ciMethod : public ciMetadata {
// its calling environment.
ciMethod* find_monomorphic_target(ciInstanceKlass* caller,
ciInstanceKlass* callee_holder,
- ciInstanceKlass* actual_receiver);
+ ciInstanceKlass* actual_receiver,
+ bool check_access = true);
// Given a known receiver klass, find the target for the call.
// Return NULL if the call has no target or is abstract.
- ciMethod* resolve_invoke(ciKlass* caller, ciKlass* exact_receiver);
+ ciMethod* resolve_invoke(ciKlass* caller, ciKlass* exact_receiver, bool check_access = true);
// Find the proper vtable index to invoke this method.
int resolve_vtable_index(ciKlass* caller, ciKlass* receiver);
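The ciMethod signature changes above follow a common evolution pattern: the new check_access parameter defaults to true, so every existing caller keeps its behavior and only new call sites opt out. A minimal illustration with hypothetical names:

    #include <cassert>

    // resolve() stands in for resolve_invoke(); -1 models a failed lookup.
    static int resolve(int receiver, bool check_access = true) {
      if (check_access && receiver < 0) return -1;  // access check fails
      return receiver;
    }

    int main() {
      assert(resolve(-7) == -1);                          // old call sites
      assert(resolve(-7, /*check_access=*/false) == -7);  // new opt-out sites
      return 0;
    }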
diff --git a/src/share/vm/ci/ciTypeFlow.cpp b/src/share/vm/ci/ciTypeFlow.cpp
index d78eb1456..4707cc96e 100644
--- a/src/share/vm/ci/ciTypeFlow.cpp
+++ b/src/share/vm/ci/ciTypeFlow.cpp
@@ -36,6 +36,7 @@
#include "interpreter/bytecodes.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/compile.hpp"
+#include "opto/node.hpp"
#include "runtime/deoptimization.hpp"
#include "utilities/growableArray.hpp"
diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp
index b3b033691..c388e2b4f 100644
--- a/src/share/vm/classfile/classFileParser.cpp
+++ b/src/share/vm/classfile/classFileParser.cpp
@@ -1783,6 +1783,10 @@ ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_d
if (_location != _in_method) break; // only allow for methods
if (!privileged) break; // only allow in privileged code
return _method_DontInline;
+ case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_InjectedProfile_signature):
+ if (_location != _in_method) break; // only allow for methods
+ if (!privileged) break; // only allow in privileged code
+ return _method_InjectedProfile;
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Compiled_signature):
if (_location != _in_method) break; // only allow for methods
if (!privileged) break; // only allow in privileged code
@@ -1824,6 +1828,8 @@ void ClassFileParser::MethodAnnotationCollector::apply_to(methodHandle m) {
m->set_force_inline(true);
if (has_annotation(_method_DontInline))
m->set_dont_inline(true);
+ if (has_annotation(_method_InjectedProfile))
+ m->set_has_injected_profile(true);
if (has_annotation(_method_LambdaForm_Compiled) && m->intrinsic_id() == vmIntrinsics::_none)
m->set_intrinsic_id(vmIntrinsics::_compiledLambdaForm);
if (has_annotation(_method_LambdaForm_Hidden))
@@ -4415,9 +4421,15 @@ void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) {
Method* m = k->lookup_method(vmSymbols::finalize_method_name(),
vmSymbols::void_method_signature());
if (m != NULL && !m->is_empty_method()) {
- f = true;
+ f = true;
+ }
+
+ // The spec doesn't prevent an agent from redefining an empty finalizer.
+ // Although that is generally a bad idea and the redefined finalizer
+ // will not work as expected, we shouldn't abort the VM in this case.
+ if (!k->has_redefined_this_or_super()) {
+ assert(f == k->has_finalizer(), "inconsistent has_finalizer");
}
- assert(f == k->has_finalizer(), "inconsistent has_finalizer");
#endif
// Check if this klass supports the java.lang.Cloneable interface
diff --git a/src/share/vm/classfile/classFileParser.hpp b/src/share/vm/classfile/classFileParser.hpp
index 0e0c741fc..74367f5b9 100644
--- a/src/share/vm/classfile/classFileParser.hpp
+++ b/src/share/vm/classfile/classFileParser.hpp
@@ -126,6 +126,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
_method_CallerSensitive,
_method_ForceInline,
_method_DontInline,
+ _method_InjectedProfile,
_method_LambdaForm_Compiled,
_method_LambdaForm_Hidden,
_sun_misc_Contended,
diff --git a/src/share/vm/classfile/classLoader.cpp b/src/share/vm/classfile/classLoader.cpp
index f10627b6e..ef5766e18 100644
--- a/src/share/vm/classfile/classLoader.cpp
+++ b/src/share/vm/classfile/classLoader.cpp
@@ -414,30 +414,30 @@ void ClassLoader::exit_with_path_failure(const char* error, const char* message)
}
#endif
-void ClassLoader::trace_class_path(const char* msg, const char* name) {
+void ClassLoader::trace_class_path(outputStream* out, const char* msg, const char* name) {
if (!TraceClassPaths) {
return;
}
if (msg) {
- tty->print("%s", msg);
+ out->print("%s", msg);
}
if (name) {
if (strlen(name) < 256) {
- tty->print("%s", name);
+ out->print("%s", name);
} else {
// For very long paths, we need to print each character separately,
// as print_cr() has a length limit
while (name[0] != '\0') {
- tty->print("%c", name[0]);
+ out->print("%c", name[0]);
name++;
}
}
}
if (msg && msg[0] == '[') {
- tty->print_cr("]");
+ out->print_cr("]");
} else {
- tty->cr();
+ out->cr();
}
}
@@ -583,7 +583,7 @@ void ClassLoader::setup_bootstrap_search_path() {
// Don't print sys_class_path - this is the bootcp of this current VM process, not necessarily
// the same as the bootcp of the shared archive.
} else {
- trace_class_path("[Bootstrap loader class path=", sys_class_path);
+ trace_class_path(tty, "[Bootstrap loader class path=", sys_class_path);
}
#if INCLUDE_CDS
if (DumpSharedSpaces) {
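The classLoader.cpp hunk above is plain dependency injection: the destination stream becomes a parameter instead of the global tty. A standalone sketch with FILE* standing in for outputStream:

    #include <cstdio>

    static void trace_class_path_sketch(FILE* out, const char* msg,
                                        const char* name = 0) {
      if (msg)  fprintf(out, "%s", msg);
      if (name) fprintf(out, "%s", name);
      if (msg && msg[0] == '[') fprintf(out, "]\n");
      else                      fprintf(out, "\n");
    }

    int main() {
      // Callers can now route tracing to any stream, not just the tty.
      trace_class_path_sketch(stdout, "[Bootstrap loader class path=", "/tmp/rt");
      trace_class_path_sketch(stderr, "archive path msg");
      return 0;
    }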
diff --git a/src/share/vm/classfile/classLoader.hpp b/src/share/vm/classfile/classLoader.hpp
index 38063e1a4..27f5068f7 100644
--- a/src/share/vm/classfile/classLoader.hpp
+++ b/src/share/vm/classfile/classLoader.hpp
@@ -346,7 +346,7 @@ class ClassLoader: AllStatic {
static void exit_with_path_failure(const char* error, const char* message);
#endif
- static void trace_class_path(const char* msg, const char* name = NULL);
+ static void trace_class_path(outputStream* out, const char* msg, const char* name = NULL);
// VM monitoring and management support
static jlong classloader_time_ms();
diff --git a/src/share/vm/classfile/classLoaderData.cpp b/src/share/vm/classfile/classLoaderData.cpp
index c4e272e3e..2e852f318 100644
--- a/src/share/vm/classfile/classLoaderData.cpp
+++ b/src/share/vm/classfile/classLoaderData.cpp
@@ -320,27 +320,6 @@ void ClassLoaderData::unload() {
}
}
-#ifdef ASSERT
-class AllAliveClosure : public OopClosure {
- BoolObjectClosure* _is_alive_closure;
- bool _found_dead;
- public:
- AllAliveClosure(BoolObjectClosure* is_alive_closure) : _is_alive_closure(is_alive_closure), _found_dead(false) {}
- template <typename T> void do_oop_work(T* p) {
- T heap_oop = oopDesc::load_heap_oop(p);
- if (!oopDesc::is_null(heap_oop)) {
- oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- if (!_is_alive_closure->do_object_b(obj)) {
- _found_dead = true;
- }
- }
- }
- void do_oop(oop* p) { do_oop_work<oop>(p); }
- void do_oop(narrowOop* p) { do_oop_work<narrowOop>(p); }
- bool found_dead() { return _found_dead; }
-};
-#endif
-
oop ClassLoaderData::keep_alive_object() const {
assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
return is_anonymous() ? _klasses->java_mirror() : class_loader();
@@ -350,15 +329,6 @@ bool ClassLoaderData::is_alive(BoolObjectClosure* is_alive_closure) const {
bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
|| is_alive_closure->do_object_b(keep_alive_object());
-#ifdef ASSERT
- if (alive) {
- AllAliveClosure all_alive_closure(is_alive_closure);
- KlassToOopClosure klass_closure(&all_alive_closure);
- const_cast<ClassLoaderData*>(this)->oops_do(&all_alive_closure, &klass_closure, false);
- assert(!all_alive_closure.found_dead(), err_msg("Found dead oop in alive cld: " PTR_FORMAT, p2i(this)));
- }
-#endif
-
return alive;
}
@@ -912,7 +882,7 @@ Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass)
}
Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
- Klass* head = (Klass*)_next_klass;
+ Klass* head = _next_klass;
while (head != NULL) {
Klass* next = next_klass_in_cldg(head);
diff --git a/src/share/vm/classfile/classLoaderData.hpp b/src/share/vm/classfile/classLoaderData.hpp
index c7b07b095..e12512dc5 100644
--- a/src/share/vm/classfile/classLoaderData.hpp
+++ b/src/share/vm/classfile/classLoaderData.hpp
@@ -307,7 +307,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
// An iterator that distributes Klasses to parallel worker threads.
class ClassLoaderDataGraphKlassIteratorAtomic : public StackObj {
- volatile Klass* _next_klass;
+ Klass* volatile _next_klass;
public:
ClassLoaderDataGraphKlassIteratorAtomic();
Klass* next_klass();
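The one-token move in the iterator field above is significant: "volatile Klass*" declares a pointer to volatile data, while "Klass* volatile" makes the pointer variable itself volatile, which is what an atomically advanced cursor needs. A compilable demonstration of the distinction:

    struct KlassSketch { int id; };

    volatile KlassSketch* to_volatile_data;   // the object pointed at is volatile
    KlassSketch* volatile volatile_pointer;   // the pointer itself is volatile

    int main() {
      KlassSketch k = { 1 };
      to_volatile_data = &k;   // reads through it are volatile reads of k
      volatile_pointer = &k;   // every load/store of the pointer is volatile
      return volatile_pointer->id - 1;
    }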
diff --git a/src/share/vm/classfile/defaultMethods.cpp b/src/share/vm/classfile/defaultMethods.cpp
index 1b56ff2fb..6c4abf1ca 100644
--- a/src/share/vm/classfile/defaultMethods.cpp
+++ b/src/share/vm/classfile/defaultMethods.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -731,10 +731,12 @@ class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
Method* m = iklass->find_method(_method_name, _method_signature);
// private interface methods are not candidates for default methods
// invokespecial to private interface methods doesn't use default method logic
+ // private class methods are not candidates for default methods;
+ // private methods do not override default methods, so we need to perform
+ // default method inheritance without including private methods
// The overpasses are your supertypes' errors, we do not include them
// future: take access controls into account for superclass methods
- if (m != NULL && !m->is_static() && !m->is_overpass() &&
- (!iklass->is_interface() || m->is_public())) {
+ if (m != NULL && !m->is_static() && !m->is_overpass() && !m->is_private()) {
if (_family == NULL) {
_family = new StatefulMethodFamily();
}
@@ -745,6 +747,9 @@ class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
} else {
// This is the rule that methods in classes "win" (bad word) over
// methods in interfaces. This works because of single inheritance
+ // private methods in classes do not "win"; they will be found
+ // first when searching, but overriding for invokevirtual needs
+ // to find default method candidates for the same signature
_family->set_target_if_empty(m);
}
}
@@ -1091,6 +1096,7 @@ static void merge_in_new_methods(InstanceKlass* klass,
}
// update idnum for new location
merged_methods->at(i)->set_method_idnum(i);
+ merged_methods->at(i)->set_orig_method_idnum(i);
}
// Verify correct order
diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/javaClasses.cpp
index d5869a757..c245cc58f 100644
--- a/src/share/vm/classfile/javaClasses.cpp
+++ b/src/share/vm/classfile/javaClasses.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -124,7 +124,7 @@ compute_offset(int &dest_offset,
tty->print_cr(" name: %s, sig: %s, flags: %08x", fs.name()->as_C_string(), fs.signature()->as_C_string(), fs.access_flags().as_int());
}
#endif //PRODUCT
- fatal("Invalid layout of preloaded class");
+ vm_exit_during_initialization("Invalid layout of preloaded class: use -XX:+TraceClassLoading to see the origin of the problem class");
}
dest_offset = fd.offset();
}
@@ -782,6 +782,22 @@ Symbol* java_lang_Class::as_signature(oop java_class, bool intern_if_not_found,
return name;
}
+// Returns the Java name for this Java mirror (Resource allocated)
+// See Klass::external_name().
+// For primitive type Java mirrors, its type name is returned.
+const char* java_lang_Class::as_external_name(oop java_class) {
+ assert(java_lang_Class::is_instance(java_class), "must be a Class object");
+ const char* name = NULL;
+ if (is_primitive(java_class)) {
+ name = type2name(primitive_type(java_class));
+ } else {
+ name = as_Klass(java_class)->external_name();
+ }
+ if (name == NULL) {
+ name = "<null>";
+ }
+ return name;
+}
Klass* java_lang_Class::array_klass(oop java_class) {
Klass* k = ((Klass*)java_class->metadata_field(_array_klass_offset));
@@ -1278,7 +1294,8 @@ static inline int version_at(unsigned int merged) {
}
static inline bool version_matches(Method* method, int version) {
- return (method->constants()->version() == version && version < MAX_VERSION);
+ assert(version < MAX_VERSION, "version is too big");
+ return method != NULL && (method->constants()->version() == version);
}
static inline int get_line_number(Method* method, int bci) {
@@ -1308,6 +1325,7 @@ class BacktraceBuilder: public StackObj {
typeArrayOop _methods;
typeArrayOop _bcis;
objArrayOop _mirrors;
+ typeArrayOop _cprefs; // needed to insulate method name against redefinition
int _index;
No_Safepoint_Verifier _nsv;
@@ -1315,8 +1333,9 @@ class BacktraceBuilder: public StackObj {
enum {
trace_methods_offset = java_lang_Throwable::trace_methods_offset,
- trace_bcis_offset = java_lang_Throwable::trace_bcis_offset,
+ trace_bcis_offset = java_lang_Throwable::trace_bcis_offset,
trace_mirrors_offset = java_lang_Throwable::trace_mirrors_offset,
+ trace_cprefs_offset = java_lang_Throwable::trace_cprefs_offset,
trace_next_offset = java_lang_Throwable::trace_next_offset,
trace_size = java_lang_Throwable::trace_size,
trace_chunk_size = java_lang_Throwable::trace_chunk_size
@@ -1338,9 +1357,14 @@ class BacktraceBuilder: public StackObj {
assert(mirrors != NULL, "mirror array should be initialized in backtrace");
return mirrors;
}
+ static typeArrayOop get_cprefs(objArrayHandle chunk) {
+ typeArrayOop cprefs = typeArrayOop(chunk->obj_at(trace_cprefs_offset));
+ assert(cprefs != NULL, "cprefs array should be initialized in backtrace");
+ return cprefs;
+ }
// constructor for new backtrace
- BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _mirrors(NULL) {
+ BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _mirrors(NULL), _cprefs(NULL) {
expand(CHECK);
_backtrace = _head;
_index = 0;
@@ -1350,6 +1374,7 @@ class BacktraceBuilder: public StackObj {
_methods = get_methods(backtrace);
_bcis = get_bcis(backtrace);
_mirrors = get_mirrors(backtrace);
+ _cprefs = get_cprefs(backtrace);
assert(_methods->length() == _bcis->length() &&
_methods->length() == _mirrors->length(),
"method and source information arrays should match");
@@ -1375,17 +1400,22 @@ class BacktraceBuilder: public StackObj {
objArrayOop mirrors = oopFactory::new_objectArray(trace_chunk_size, CHECK);
objArrayHandle new_mirrors(THREAD, mirrors);
+ typeArrayOop cprefs = oopFactory::new_shortArray(trace_chunk_size, CHECK);
+ typeArrayHandle new_cprefs(THREAD, cprefs);
+
if (!old_head.is_null()) {
old_head->obj_at_put(trace_next_offset, new_head());
}
new_head->obj_at_put(trace_methods_offset, new_methods());
new_head->obj_at_put(trace_bcis_offset, new_bcis());
new_head->obj_at_put(trace_mirrors_offset, new_mirrors());
+ new_head->obj_at_put(trace_cprefs_offset, new_cprefs());
_head = new_head();
_methods = new_methods();
_bcis = new_bcis();
_mirrors = new_mirrors();
+ _cprefs = new_cprefs();
_index = 0;
}
@@ -1405,8 +1435,9 @@ class BacktraceBuilder: public StackObj {
method = mhandle();
}
- _methods->short_at_put(_index, method->method_idnum());
+ _methods->short_at_put(_index, method->orig_method_idnum());
_bcis->int_at_put(_index, merge_bci_and_version(bci, method->constants()->version()));
+ _cprefs->short_at_put(_index, method->name_index());
// We need to save the mirrors in the backtrace to keep the class
// from being unloaded while we still have this stack trace.
@@ -1419,27 +1450,26 @@ class BacktraceBuilder: public StackObj {
// Print stack trace element to resource allocated buffer
char* java_lang_Throwable::print_stack_element_to_buffer(Handle mirror,
- int method_id, int version, int bci) {
+ int method_id, int version, int bci, int cpref) {
// Get strings and string lengths
InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(mirror()));
const char* klass_name = holder->external_name();
int buf_len = (int)strlen(klass_name);
- // The method id may point to an obsolete method, can't get more stack information
- Method* method = holder->method_with_idnum(method_id);
- if (method == NULL) {
- char* buf = NEW_RESOURCE_ARRAY(char, buf_len + 64);
- // This is what the java code prints in this case - added Redefined
- sprintf(buf, "\tat %s.null (Redefined)", klass_name);
- return buf;
- }
+ Method* method = holder->method_with_orig_idnum(method_id, version);
- char* method_name = method->name()->as_C_string();
+ // The method can be NULL if the requested class version is gone
+ Symbol* sym = (method != NULL) ? method->name() : holder->constants()->symbol_at(cpref);
+ char* method_name = sym->as_C_string();
buf_len += (int)strlen(method_name);
+ // Use a specific ik version as a holder since the mirror might
+ // refer to a version that is now obsolete and no longer accessible
+ // via the previous versions list.
+ holder = holder->get_klass_version(version);
char* source_file_name = NULL;
- if (version_matches(method, version)) {
+ if (holder != NULL) {
Symbol* source = holder->source_file_name();
if (source != NULL) {
source_file_name = source->as_C_string();
@@ -1481,17 +1511,18 @@ char* java_lang_Throwable::print_stack_element_to_buffer(Handle mirror,
}
void java_lang_Throwable::print_stack_element(outputStream *st, Handle mirror,
- int method_id, int version, int bci) {
+ int method_id, int version, int bci, int cpref) {
ResourceMark rm;
- char* buf = print_stack_element_to_buffer(mirror, method_id, version, bci);
+ char* buf = print_stack_element_to_buffer(mirror, method_id, version, bci, cpref);
st->print_cr("%s", buf);
}
void java_lang_Throwable::print_stack_element(outputStream *st, methodHandle method, int bci) {
Handle mirror = method->method_holder()->java_mirror();
- int method_id = method->method_idnum();
+ int method_id = method->orig_method_idnum();
int version = method->constants()->version();
- print_stack_element(st, mirror, method_id, version, bci);
+ int cpref = method->name_index();
+ print_stack_element(st, mirror, method_id, version, bci, cpref);
}
const char* java_lang_Throwable::no_stack_trace_message() {
@@ -1516,6 +1547,7 @@ void java_lang_Throwable::print_stack_trace(oop throwable, outputStream* st) {
typeArrayHandle methods (THREAD, BacktraceBuilder::get_methods(result));
typeArrayHandle bcis (THREAD, BacktraceBuilder::get_bcis(result));
objArrayHandle mirrors (THREAD, BacktraceBuilder::get_mirrors(result));
+ typeArrayHandle cprefs (THREAD, BacktraceBuilder::get_cprefs(result));
int length = methods()->length();
for (int index = 0; index < length; index++) {
@@ -1525,7 +1557,8 @@ void java_lang_Throwable::print_stack_trace(oop throwable, outputStream* st) {
int method = methods->short_at(index);
int version = version_at(bcis->int_at(index));
int bci = bci_at(bcis->int_at(index));
- print_stack_element(st, mirror, method, version, bci);
+ int cpref = cprefs->short_at(index);
+ print_stack_element(st, mirror, method, version, bci, cpref);
}
result = objArrayHandle(THREAD, objArrayOop(result->obj_at(trace_next_offset)));
}
@@ -1809,29 +1842,30 @@ oop java_lang_Throwable::get_stack_trace_element(oop throwable, int index, TRAPS
if (chunk == NULL) {
THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
}
- // Get method id, bci, version and mirror from chunk
+ // Get method id, bci, version, mirror and cpref from chunk
typeArrayOop methods = BacktraceBuilder::get_methods(chunk);
typeArrayOop bcis = BacktraceBuilder::get_bcis(chunk);
objArrayOop mirrors = BacktraceBuilder::get_mirrors(chunk);
+ typeArrayOop cprefs = BacktraceBuilder::get_cprefs(chunk);
assert(methods != NULL && bcis != NULL && mirrors != NULL, "sanity check");
int method = methods->short_at(chunk_index);
int version = version_at(bcis->int_at(chunk_index));
int bci = bci_at(bcis->int_at(chunk_index));
+ int cpref = cprefs->short_at(chunk_index);
Handle mirror(THREAD, mirrors->obj_at(chunk_index));
// Chunk can be partially full
if (mirror.is_null()) {
THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
}
-
- oop element = java_lang_StackTraceElement::create(mirror, method, version, bci, CHECK_0);
+ oop element = java_lang_StackTraceElement::create(mirror, method, version, bci, cpref, CHECK_0);
return element;
}
oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
- int version, int bci, TRAPS) {
+ int version, int bci, int cpref, TRAPS) {
// Allocate java.lang.StackTraceElement instance
Klass* k = SystemDictionary::StackTraceElement_klass();
assert(k != NULL, "must be loaded in 1.4+");
@@ -1848,17 +1882,13 @@ oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
oop classname = StringTable::intern((char*) str, CHECK_0);
java_lang_StackTraceElement::set_declaringClass(element(), classname);
- Method* method = holder->method_with_idnum(method_id);
- // Method on stack may be obsolete because it was redefined so cannot be
- // found by idnum.
- if (method == NULL) {
- // leave name and fileName null
- java_lang_StackTraceElement::set_lineNumber(element(), -1);
- return element();
- }
+ Method* method = holder->method_with_orig_idnum(method_id, version);
+
+ // The method can be NULL if the requested class version is gone
+ Symbol* sym = (method != NULL) ? method->name() : holder->constants()->symbol_at(cpref);
// Fill in method name
- oop methodname = StringTable::intern(method->name(), CHECK_0);
+ oop methodname = StringTable::intern(sym, CHECK_0);
java_lang_StackTraceElement::set_methodName(element(), methodname);
if (!version_matches(method, version)) {
@@ -1867,6 +1897,11 @@ oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
java_lang_StackTraceElement::set_lineNumber(element(), -1);
} else {
// Fill in source file name and line number.
+ // Use a specific ik version as a holder since the mirror might
+ // refer to a version that is now obsolete and no longer accessible
+ // via the previous versions list.
+ holder = holder->get_klass_version(version);
+ assert(holder != NULL, "sanity check");
Symbol* source = holder->source_file_name();
if (ShowHiddenFrames && source == NULL)
source = vmSymbols::unknown_class_name();
@@ -1881,8 +1916,9 @@ oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
oop java_lang_StackTraceElement::create(methodHandle method, int bci, TRAPS) {
Handle mirror (THREAD, method->method_holder()->java_mirror());
- int method_id = method->method_idnum();
- return create(mirror, method_id, method->constants()->version(), bci, THREAD);
+ int method_id = method->orig_method_idnum();
+ int cpref = method->name_index();
+ return create(mirror, method_id, method->constants()->version(), bci, cpref, THREAD);
}
void java_lang_reflect_AccessibleObject::compute_offsets() {
@@ -2781,33 +2817,6 @@ bool java_lang_invoke_MemberName::is_method(oop mname) {
return (flags(mname) & (MN_IS_METHOD | MN_IS_CONSTRUCTOR)) > 0;
}
-#if INCLUDE_JVMTI
-// Can be executed on VM thread only
-void java_lang_invoke_MemberName::adjust_vmtarget(oop mname, Method* old_method,
- Method* new_method, bool* trace_name_printed) {
- assert(is_method(mname), "wrong type");
- assert(Thread::current()->is_VM_thread(), "not VM thread");
-
- Method* target = (Method*)mname->address_field(_vmtarget_offset);
- if (target == old_method) {
- mname->address_field_put(_vmtarget_offset, (address)new_method);
-
- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
- if (!(*trace_name_printed)) {
- // RC_TRACE_MESG macro has an embedded ResourceMark
- RC_TRACE_MESG(("adjust: name=%s",
- old_method->method_holder()->external_name()));
- *trace_name_printed = true;
- }
- // RC_TRACE macro has an embedded ResourceMark
- RC_TRACE(0x00400000, ("MemberName method update: %s(%s)",
- new_method->name()->as_C_string(),
- new_method->signature()->as_C_string()));
- }
- }
-}
-#endif // INCLUDE_JVMTI
-
void java_lang_invoke_MemberName::set_vmtarget(oop mname, Metadata* ref) {
assert(is_instance(mname), "wrong type");
// check the type of the vmtarget
@@ -3581,7 +3590,7 @@ int InjectedField::compute_offset() {
tty->print_cr(" name: %s, sig: %s, flags: %08x", fs.name()->as_C_string(), fs.signature()->as_C_string(), fs.access_flags().as_int());
}
#endif //PRODUCT
- fatal("Invalid layout of preloaded class");
+ vm_exit_during_initialization("Invalid layout of preloaded class: use -XX:+TraceClassLoading to see the origin of the problem class");
return -1;
}
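A note on the backtrace plumbing above: each chunk packs the bci and the constant-pool version into a single int (merge_bci_and_version / version_at / bci_at), and the new cprefs array adds the method-name constant-pool index so a frame can still be named after its class version is redefined away. A sketch of the packing; the 8-bit version split is an assumption for illustration, not the real constant:

    #include <cassert>

    static const int VERSION_BITS = 8;                   // assumed split
    static const int VERSION_MASK = (1 << VERSION_BITS) - 1;

    static int merge_bci_and_version(int bci, int version) {
      return (bci << VERSION_BITS) | (version & VERSION_MASK);
    }
    static int version_at(int merged) { return merged & VERSION_MASK; }
    static int bci_at(int merged)     { return merged >> VERSION_BITS; }

    int main() {
      const int merged = merge_bci_and_version(42, 3);
      assert(bci_at(merged) == 42 && version_at(merged) == 3);
      return 0;
    }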
diff --git a/src/share/vm/classfile/javaClasses.hpp b/src/share/vm/classfile/javaClasses.hpp
index 7b041ee22..5e497e6aa 100644
--- a/src/share/vm/classfile/javaClasses.hpp
+++ b/src/share/vm/classfile/javaClasses.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -270,6 +270,7 @@ class java_lang_Class : AllStatic {
}
static Symbol* as_signature(oop java_class, bool intern_if_not_found, TRAPS);
static void print_signature(oop java_class, outputStream *st);
+ static const char* as_external_name(oop java_class);
// Testing
static bool is_instance(oop obj) {
return obj != NULL && obj->klass() == SystemDictionary::Class_klass();
@@ -484,8 +485,9 @@ class java_lang_Throwable: AllStatic {
trace_methods_offset = 0,
trace_bcis_offset = 1,
trace_mirrors_offset = 2,
- trace_next_offset = 3,
- trace_size = 4,
+ trace_cprefs_offset = 3,
+ trace_next_offset = 4,
+ trace_size = 5,
trace_chunk_size = 32
};
@@ -496,7 +498,7 @@ class java_lang_Throwable: AllStatic {
static int static_unassigned_stacktrace_offset;
// Printing
- static char* print_stack_element_to_buffer(Handle mirror, int method, int version, int bci);
+ static char* print_stack_element_to_buffer(Handle mirror, int method, int version, int bci, int cpref);
// StackTrace (programmatic access, new since 1.4)
static void clear_stacktrace(oop throwable);
// No stack trace available
@@ -517,7 +519,7 @@ class java_lang_Throwable: AllStatic {
static oop message(Handle throwable);
static void set_message(oop throwable, oop value);
static void print_stack_element(outputStream *st, Handle mirror, int method,
- int version, int bci);
+ int version, int bci, int cpref);
static void print_stack_element(outputStream *st, methodHandle method, int bci);
static void print_stack_usage(Handle stream);
@@ -1095,10 +1097,6 @@ class java_lang_invoke_MemberName: AllStatic {
static Metadata* vmtarget(oop mname);
static void set_vmtarget(oop mname, Metadata* target);
-#if INCLUDE_JVMTI
- static void adjust_vmtarget(oop mname, Method* old_method, Method* new_method,
- bool* trace_name_printed);
-#endif // INCLUDE_JVMTI
static intptr_t vmindex(oop mname);
static void set_vmindex(oop mname, intptr_t index);
@@ -1326,7 +1324,7 @@ class java_lang_StackTraceElement: AllStatic {
static void set_lineNumber(oop element, int value);
// Create an instance of StackTraceElement
- static oop create(Handle mirror, int method, int version, int bci, TRAPS);
+ static oop create(Handle mirror, int method, int version, int bci, int cpref, TRAPS);
static oop create(methodHandle method, int bci, TRAPS);
// Debugging
diff --git a/src/share/vm/classfile/sharedPathsMiscInfo.cpp b/src/share/vm/classfile/sharedPathsMiscInfo.cpp
index 79457a0e5..500f92235 100644
--- a/src/share/vm/classfile/sharedPathsMiscInfo.cpp
+++ b/src/share/vm/classfile/sharedPathsMiscInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -67,7 +67,7 @@ bool SharedPathsMiscInfo::read(void* ptr, size_t size) {
}
bool SharedPathsMiscInfo::fail(const char* msg, const char* name) {
- ClassLoader::trace_class_path(msg, name);
+ ClassLoader::trace_class_path(tty, msg, name);
MetaspaceShared::set_archive_loading_failed();
return false;
}
diff --git a/src/share/vm/classfile/sharedPathsMiscInfo.hpp b/src/share/vm/classfile/sharedPathsMiscInfo.hpp
index 3f5264831..882fed01a 100644
--- a/src/share/vm/classfile/sharedPathsMiscInfo.hpp
+++ b/src/share/vm/classfile/sharedPathsMiscInfo.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@ protected:
bool read(void* ptr, size_t size);
static void trace_class_path(const char* msg, const char* name = NULL) {
- ClassLoader::trace_class_path(msg, name);
+ ClassLoader::trace_class_path(tty, msg, name);
}
protected:
static bool fail(const char* msg, const char* name = NULL);
diff --git a/src/share/vm/classfile/stackMapTable.cpp b/src/share/vm/classfile/stackMapTable.cpp
index f74adbe31..1c92b61e5 100644
--- a/src/share/vm/classfile/stackMapTable.cpp
+++ b/src/share/vm/classfile/stackMapTable.cpp
@@ -186,7 +186,6 @@ VerificationType StackMapReader::parse_verification_type(u1* flags, TRAPS) {
u2 offset = _stream->get_u2(THREAD);
if (offset >= _code_length ||
_code_data[offset] != ClassVerifier::NEW_OFFSET) {
- ResourceMark rm(THREAD);
_verifier->class_format_error(
"StackMapTable format error: bad offset for Uninitialized");
return VerificationType::bogus_type();
diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp
index 84bac6aa5..46b88470b 100644
--- a/src/share/vm/classfile/systemDictionary.cpp
+++ b/src/share/vm/classfile/systemDictionary.cpp
@@ -2349,9 +2349,6 @@ methodHandle SystemDictionary::find_method_handle_invoker(Symbol* name,
assert(!THREAD->is_Compiler_thread(), "");
Handle method_type =
SystemDictionary::find_method_handle_type(signature, accessing_klass, CHECK_(empty));
- if (false) { // FIXME: Decide if the Java upcall should resolve signatures.
- method_type = java_lang_String::create_from_symbol(signature, CHECK_(empty));
- }
KlassHandle mh_klass = SystemDictionary::MethodHandle_klass();
int ref_kind = JVM_REF_invokeVirtual;
@@ -2383,6 +2380,24 @@ methodHandle SystemDictionary::find_method_handle_invoker(Symbol* name,
return unpack_method_and_appendix(mname, accessing_klass, appendix_box, appendix_result, THREAD);
}
+// Decide if we can globally cache a lookup of this class, to be returned to any client that asks.
+// We must ensure that all class loaders everywhere will reach this class, for any client.
+// This is a safe bet for public classes in java.lang, such as Object and String.
+// We also include public classes in java.lang.invoke, because they appear frequently in system-level method types.
+// Out of an abundance of caution, we do not include any other classes, not even for packages like java.util.
+static bool is_always_visible_class(oop mirror) {
+ Klass* klass = java_lang_Class::as_Klass(mirror);
+ if (klass->oop_is_objArray()) {
+ klass = ObjArrayKlass::cast(klass)->bottom_klass(); // check element type
+ }
+ if (klass->oop_is_typeArray()) {
+ return true; // primitive array
+ }
+ assert(klass->oop_is_instance(), klass->external_name());
+ return klass->is_public() &&
+ (InstanceKlass::cast(klass)->is_same_class_package(SystemDictionary::Object_klass()) || // java.lang
+ InstanceKlass::cast(klass)->is_same_class_package(SystemDictionary::MethodHandle_klass())); // java.lang.invoke
+}
// Ask Java code to find or construct a java.lang.invoke.MethodType for the given
// signature, as interpreted relative to the given class loader.
@@ -2405,32 +2420,33 @@ Handle SystemDictionary::find_method_handle_type(Symbol* signature,
}
Handle class_loader, protection_domain;
- bool is_on_bcp = true; // keep this true as long as we can materialize from the boot classloader
+ if (accessing_klass.not_null()) {
+ class_loader = Handle(THREAD, InstanceKlass::cast(accessing_klass())->class_loader());
+ protection_domain = Handle(THREAD, InstanceKlass::cast(accessing_klass())->protection_domain());
+ }
+ bool can_be_cached = true;
int npts = ArgumentCount(signature).size();
objArrayHandle pts = oopFactory::new_objArray(SystemDictionary::Class_klass(), npts, CHECK_(empty));
int arg = 0;
- Handle rt; // the return type from the signature
+ Handle rt; // the return type from the signature
ResourceMark rm(THREAD);
for (SignatureStream ss(signature); !ss.is_done(); ss.next()) {
oop mirror = NULL;
- if (is_on_bcp) {
- // Note: class_loader & protection_domain are both null at this point.
- mirror = ss.as_java_mirror(class_loader, protection_domain,
+ if (can_be_cached) {
+ // Use neutral class loader to lookup candidate classes to be placed in the cache.
+ mirror = ss.as_java_mirror(Handle(), Handle(),
SignatureStream::ReturnNull, CHECK_(empty));
- if (mirror == NULL) {
- // fall back from BCP to accessing_klass
- if (accessing_klass.not_null()) {
- class_loader = Handle(THREAD, InstanceKlass::cast(accessing_klass())->class_loader());
- protection_domain = Handle(THREAD, InstanceKlass::cast(accessing_klass())->protection_domain());
- }
- is_on_bcp = false;
+ if (mirror == NULL || (ss.is_object() && !is_always_visible_class(mirror))) {
+ // Fall back to accessing_klass context.
+ can_be_cached = false;
}
}
- if (!is_on_bcp) {
+ if (!can_be_cached) {
// Resolve, throwing a real error if it doesn't work.
mirror = ss.as_java_mirror(class_loader, protection_domain,
SignatureStream::NCDFError, CHECK_(empty));
}
+ assert(!oopDesc::is_null(mirror), ss.as_symbol(THREAD)->as_C_string());
if (ss.at_return_type())
rt = Handle(THREAD, mirror);
else
@@ -2462,7 +2478,7 @@ Handle SystemDictionary::find_method_handle_type(Symbol* signature,
&args, CHECK_(empty));
Handle method_type(THREAD, (oop) result.get_jobject());
- if (is_on_bcp) {
+ if (can_be_cached) {
// We can cache this MethodType inside the JVM.
MutexLocker ml(SystemDictionary_lock, THREAD);
spe = invoke_method_table()->find_entry(index, hash, signature, null_iid);
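
A minimal standalone sketch (not HotSpot code; toy types and names throughout) of the caching rule the two hunks above establish: a signature component may appear in a globally cached MethodType only if it is a primitive array or a public class in java.lang or java.lang.invoke; anything else forces resolution in the accessing klass's context and disables caching.

    #include <iostream>
    #include <string>

    // Toy predicate mirroring is_always_visible_class(): a component of a
    // cached MethodType must be a primitive array, or a public class in
    // java.lang or java.lang.invoke. Everything else disables global caching.
    static bool is_always_visible(const std::string& pkg, bool is_public,
                                  bool is_primitive_array) {
      if (is_primitive_array) {
        return true;                       // e.g. int[], long[][]
      }
      return is_public && (pkg == "java.lang" || pkg == "java.lang.invoke");
    }

    int main() {
      std::cout << is_always_visible("java.lang", true, false)          // 1
                << is_always_visible("java.lang.invoke", true, false)   // 1
                << is_always_visible("java.util", true, false)          // 0
                << std::endl;
      return 0;
    }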
diff --git a/src/share/vm/classfile/verifier.cpp b/src/share/vm/classfile/verifier.cpp
index 9a059b2d1..ed65f0d78 100644
--- a/src/share/vm/classfile/verifier.cpp
+++ b/src/share/vm/classfile/verifier.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -655,6 +655,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
bool this_uninit = false; // Set to true when invokespecial <init> initialized 'this'
+ bool verified_exc_handlers = false;
// Merge with the next instruction
{
@@ -686,6 +687,18 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
}
}
+ // Look for possible jump target in exception handlers and see if it
+ // matches current_frame. Do this check here for astore*, dstore*,
+ // fstore*, istore*, and lstore* opcodes because they can change the type
+ // state by adding a local. JVM Spec says that the incoming type state
+ // should be used for this check. So, do the check here before a possible
+ // local is added to the type state.
+ if (Bytecodes::is_store_into_local(opcode) && bci >= ex_min && bci < ex_max) {
+ verify_exception_handler_targets(
+ bci, this_uninit, &current_frame, &stackmap_table, CHECK_VERIFY(this));
+ verified_exc_handlers = true;
+ }
+
switch (opcode) {
case Bytecodes::_nop :
no_control_flow = false; break;
@@ -1662,9 +1675,13 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
} // end switch
} // end Merge with the next instruction
- // Look for possible jump target in exception handlers and see if it
- // matches current_frame
- if (bci >= ex_min && bci < ex_max) {
+ // Look for possible jump target in exception handlers and see if it matches
+ // current_frame. Don't do this check if it has already been done (for
+ // [a,d,f,i,l]store* opcodes). This check cannot be done earlier because
+ // opcodes, such as invokespecial, may set the this_uninit flag.
+ assert(!(verified_exc_handlers && this_uninit),
+ "Exception handler targets got verified before this_uninit got set");
+ if (!verified_exc_handlers && bci >= ex_min && bci < ex_max) {
verify_exception_handler_targets(
bci, this_uninit, &current_frame, &stackmap_table, CHECK_VERIFY(this));
}
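
A runnable toy (invented Opcode enum, simplified control flow) of the ordering established above: store opcodes check exception-handler targets against the incoming type state and record that in verified_exc_handlers, while opcodes that may set this_uninit are checked only afterwards; the assert documents that the two cases never overlap.

    #include <cassert>
    #include <cstdio>

    enum Opcode { ISTORE, INVOKESPECIAL };

    static void verify_handlers(const char* when) {
      std::printf("exception handlers verified %s the opcode\n", when);
    }

    static void simulate(Opcode op, bool in_try_block, bool* this_uninit) {
      bool verified_exc_handlers = false;
      if (op == ISTORE && in_try_block) {
        verify_handlers("before");            // uses the incoming type state
        verified_exc_handlers = true;
      }
      if (op == INVOKESPECIAL) {
        *this_uninit = true;                  // only known after simulation
      }
      // The early check never coexists with this_uninit set by this opcode.
      assert(!(verified_exc_handlers && *this_uninit));
      if (!verified_exc_handlers && in_try_block) {
        verify_handlers("after");             // sees this_uninit when relevant
      }
    }

    int main() {
      bool this_uninit = false;
      simulate(ISTORE, true, &this_uninit);
      simulate(INVOKESPECIAL, true, &this_uninit);
      return 0;
    }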
@@ -2232,14 +2249,20 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
}
// Look at the method's handlers. If the bci is in the handler's try block
-// then check if the handler_pc is already on the stack. If not, push it.
+// then check if the handler_pc is already on the stack. If not, push it
+// unless the handler has already been scanned.
void ClassVerifier::push_handlers(ExceptionTable* exhandlers,
+ GrowableArray<u4>* handler_list,
GrowableArray<u4>* handler_stack,
u4 bci) {
int exlength = exhandlers->length();
for(int x = 0; x < exlength; x++) {
if (bci >= exhandlers->start_pc(x) && bci < exhandlers->end_pc(x)) {
- handler_stack->append_if_missing(exhandlers->handler_pc(x));
+ u4 exhandler_pc = exhandlers->handler_pc(x);
+ if (!handler_list->contains(exhandler_pc)) {
+ handler_stack->append_if_missing(exhandler_pc);
+ handler_list->append(exhandler_pc);
+ }
}
}
}
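
Toy version of push_handlers() with std::vector standing in for GrowableArray: handler_stack drives the scan while handler_list remembers every pc ever pushed, so a handler protected by its own try block is scanned at most once and ends_in_athrow() cannot loop.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    typedef std::pair<uint32_t, uint32_t> TryRange;   // [start_pc, end_pc)

    static void push_handlers(const std::vector<TryRange>& ranges,
                              const std::vector<uint32_t>& handler_pcs,
                              std::vector<uint32_t>* handler_list,
                              std::vector<uint32_t>* handler_stack,
                              uint32_t bci) {
      for (size_t i = 0; i < ranges.size(); i++) {
        if (bci >= ranges[i].first && bci < ranges[i].second) {
          uint32_t pc = handler_pcs[i];
          if (std::find(handler_list->begin(), handler_list->end(), pc) ==
              handler_list->end()) {
            handler_stack->push_back(pc);   // schedule for scanning
            handler_list->push_back(pc);    // remember: scan only once
          }
        }
      }
    }

    int main() {
      // One handler whose pc (10) lies inside its own try block [0, 20).
      std::vector<TryRange> ranges(1, TryRange(0, 20));
      std::vector<uint32_t> handler_pcs(1, 10);
      std::vector<uint32_t> list, stack;
      push_handlers(ranges, handler_pcs, &list, &stack, 5);   // pushed
      push_handlers(ranges, handler_pcs, &list, &stack, 10);  // not re-pushed
      std::printf("stack size: %zu\n", stack.size());         // 1
      return 0;
    }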
@@ -2257,6 +2280,10 @@ bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
GrowableArray<u4>* bci_stack = new GrowableArray<u4>(30);
// Create stack for handlers for try blocks containing this handler.
GrowableArray<u4>* handler_stack = new GrowableArray<u4>(30);
+ // Create list of handlers that have been pushed onto the handler_stack
+ // so that handlers embedded inside of their own TRY blocks only get
+ // scanned once.
+ GrowableArray<u4>* handler_list = new GrowableArray<u4>(30);
// Create list of visited branch opcodes (goto* and if*).
GrowableArray<u4>* visited_branches = new GrowableArray<u4>(30);
ExceptionTable exhandlers(_method());
@@ -2275,7 +2302,7 @@ bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
// If the bytecode is in a TRY block, push its handlers so they
// will get parsed.
- push_handlers(&exhandlers, handler_stack, bci);
+ push_handlers(&exhandlers, handler_list, handler_stack, bci);
switch (opcode) {
case Bytecodes::_if_icmpeq:
@@ -2488,8 +2515,7 @@ void ClassVerifier::verify_invoke_init(
// of the current class.
VerificationType objectref_type = new_class_type;
if (name_in_supers(ref_class_type.name(), current_class())) {
- Klass* ref_klass = load_class(
- ref_class_type.name(), CHECK_VERIFY(this));
+ Klass* ref_klass = load_class(ref_class_type.name(), CHECK);
Method* m = InstanceKlass::cast(ref_klass)->uncached_lookup_method(
vmSymbols::object_initializer_name(),
cp->signature_ref_at(bcs->get_index_u2()), Klass::normal);
diff --git a/src/share/vm/classfile/verifier.hpp b/src/share/vm/classfile/verifier.hpp
index e08a2a4bc..96b6abe99 100644
--- a/src/share/vm/classfile/verifier.hpp
+++ b/src/share/vm/classfile/verifier.hpp
@@ -305,9 +305,10 @@ class ClassVerifier : public StackObj {
bool* this_uninit, constantPoolHandle cp, StackMapTable* stackmap_table,
TRAPS);
- // Used by ends_in_athrow() to push all handlers that contain bci onto
- // the handler_stack, if the handler is not already on the stack.
+ // Used by ends_in_athrow() to push all handlers that contain bci onto the
+ // handler_stack, if the handler has not already been pushed on the stack.
void push_handlers(ExceptionTable* exhandlers,
+ GrowableArray<u4>* handler_list,
GrowableArray<u4>* handler_stack,
u4 bci);
diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSymbols.hpp
index 2a438dccd..222d2b067 100644
--- a/src/share/vm/classfile/vmSymbols.hpp
+++ b/src/share/vm/classfile/vmSymbols.hpp
@@ -203,7 +203,11 @@
template(java_lang_StackTraceElement, "java/lang/StackTraceElement") \
\
/* Concurrency support */ \
- template(java_util_concurrent_locks_AbstractOwnableSynchronizer, "java/util/concurrent/locks/AbstractOwnableSynchronizer") \
+ template(java_util_concurrent_locks_AbstractOwnableSynchronizer, "java/util/concurrent/locks/AbstractOwnableSynchronizer") \
+ template(java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicIntegerFieldUpdater$AtomicIntegerFieldUpdaterImpl") \
+ template(java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$CASUpdater") \
+ template(java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$LockedUpdater") \
+ template(java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl") \
template(sun_misc_Contended_signature, "Lsun/misc/Contended;") \
\
/* class symbols needed by intrinsics */ \
@@ -244,7 +248,6 @@
template(returnType_name, "returnType") \
template(signature_name, "signature") \
template(slot_name, "slot") \
- template(selectAlternative_name, "selectAlternative") \
\
/* Support for annotations (JDK 1.5 and above) */ \
\
@@ -279,6 +282,7 @@
template(java_lang_invoke_LambdaForm, "java/lang/invoke/LambdaForm") \
template(java_lang_invoke_ForceInline_signature, "Ljava/lang/invoke/ForceInline;") \
template(java_lang_invoke_DontInline_signature, "Ljava/lang/invoke/DontInline;") \
+ template(java_lang_invoke_InjectedProfile_signature, "Ljava/lang/invoke/InjectedProfile;") \
template(java_lang_invoke_Stable_signature, "Ljava/lang/invoke/Stable;") \
template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;") \
@@ -296,8 +300,7 @@
template(setTarget_signature, "(Ljava/lang/invoke/MethodHandle;)V") \
NOT_LP64( do_alias(intptr_signature, int_signature) ) \
LP64_ONLY( do_alias(intptr_signature, long_signature) ) \
- template(selectAlternative_signature, "(ZLjava/lang/invoke/MethodHandle;Ljava/lang/invoke/MethodHandle;)Ljava/lang/invoke/MethodHandle;") \
- \
+ \
/* common method and field names */ \
template(object_initializer_name, "<init>") \
template(class_initializer_name, "<clinit>") \
@@ -868,6 +871,12 @@
do_name( fullFence_name, "fullFence") \
do_alias( fullFence_signature, void_method_signature) \
\
+ /* Custom branch frequencies profiling support for JSR292 */ \
+ do_class(java_lang_invoke_MethodHandleImpl, "java/lang/invoke/MethodHandleImpl") \
+ do_intrinsic(_profileBoolean, java_lang_invoke_MethodHandleImpl, profileBoolean_name, profileBoolean_signature, F_S) \
+ do_name( profileBoolean_name, "profileBoolean") \
+ do_signature(profileBoolean_signature, "(Z[I)Z") \
+ \
/* unsafe memory references (there are a lot of them...) */ \
do_signature(getObject_signature, "(Ljava/lang/Object;J)Ljava/lang/Object;") \
do_signature(putObject_signature, "(Ljava/lang/Object;JLjava/lang/Object;)V") \
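
For illustration only: given the "(Z[I)Z" signature registered above, a plausible reading of the profileBoolean intrinsic is a pass-through that counts taken/not-taken outcomes in a two-slot array. The real implementation lives in java.lang.invoke.MethodHandleImpl; this C++ toy only models the assumed semantics.

    #include <cstdio>

    // Pass the value through unchanged while counting both outcomes in a
    // two-slot array, matching the "(Z[I)Z" shape. Assumed semantics only.
    static bool profileBoolean(bool result, int counters[2]) {
      counters[result ? 1 : 0]++;
      return result;
    }

    int main() {
      int counters[2] = {0, 0};
      for (int i = 0; i < 10; i++) {
        profileBoolean(i % 3 == 0, counters);
      }
      std::printf("false: %d, true: %d\n", counters[0], counters[1]);  // 6, 4
      return 0;
    }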
diff --git a/src/share/vm/code/codeCache.cpp b/src/share/vm/code/codeCache.cpp
index eef8b4db5..c9059d73c 100644
--- a/src/share/vm/code/codeCache.cpp
+++ b/src/share/vm/code/codeCache.cpp
@@ -521,15 +521,17 @@ void CodeCache::gc_prologue() {
void CodeCache::gc_epilogue() {
assert_locked_or_safepoint(CodeCache_lock);
- FOR_ALL_ALIVE_BLOBS(cb) {
- if (cb->is_nmethod()) {
- nmethod *nm = (nmethod*)cb;
- assert(!nm->is_unloaded(), "Tautology");
- if (needs_cache_clean()) {
- nm->cleanup_inline_caches();
+ NOT_DEBUG(if (needs_cache_clean())) {
+ FOR_ALL_ALIVE_BLOBS(cb) {
+ if (cb->is_nmethod()) {
+ nmethod *nm = (nmethod*)cb;
+ assert(!nm->is_unloaded(), "Tautology");
+ DEBUG_ONLY(if (needs_cache_clean())) {
+ nm->cleanup_inline_caches();
+ }
+ DEBUG_ONLY(nm->verify());
+ DEBUG_ONLY(nm->verify_oop_relocations());
}
- DEBUG_ONLY(nm->verify());
- DEBUG_ONLY(nm->verify_oop_relocations());
}
}
set_needs_cache_clean(false);
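
A self-contained illustration of the NOT_DEBUG/DEBUG_ONLY arrangement used in gc_epilogue() above, with the macros re-defined locally so the sketch compiles on its own: in product builds the needs_cache_clean() filter is hoisted to guard the whole walk, while in debug builds the walk always runs so verification still happens, and only the cleaning step stays guarded.

    #include <cstdio>

    #ifdef ASSERT
    #define DEBUG_ONLY(code) code
    #define NOT_DEBUG(code)
    #else
    #define DEBUG_ONLY(code)
    #define NOT_DEBUG(code) code
    #endif

    int main() {
      bool needs_cache_clean = false;
      // Product build: the whole loop is guarded and skipped here.
      // Debug build: the loop always runs so verification still happens,
      // and only the cleaning step is guarded.
      NOT_DEBUG(if (needs_cache_clean)) {
        for (int i = 0; i < 3; i++) {
          DEBUG_ONLY(if (needs_cache_clean)) {
            std::puts("cleaning inline caches");
          }
          DEBUG_ONLY(std::puts("verifying nmethod");)
        }
      }
      return 0;
    }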
@@ -734,27 +736,6 @@ int CodeCache::mark_for_deoptimization(Method* dependee) {
return number_of_marked_CodeBlobs;
}
-void CodeCache::make_marked_nmethods_zombies() {
- assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
- FOR_ALL_ALIVE_NMETHODS(nm) {
- if (nm->is_marked_for_deoptimization()) {
-
- // If the nmethod has already been made non-entrant and it can be converted
- // then zombie it now. Otherwise make it non-entrant and it will eventually
- // be zombied when it is no longer seen on the stack. Note that the nmethod
- // might be "entrant" and not on the stack and so could be zombied immediately
- // but we can't tell because we don't track it on stack until it becomes
- // non-entrant.
-
- if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
- nm->make_zombie();
- } else {
- nm->make_not_entrant();
- }
- }
- }
-}
-
void CodeCache::make_marked_nmethods_not_entrant() {
assert_locked_or_safepoint(CodeCache_lock);
FOR_ALL_ALIVE_NMETHODS(nm) {
diff --git a/src/share/vm/code/codeCache.hpp b/src/share/vm/code/codeCache.hpp
index 966304fce..f098284a3 100644
--- a/src/share/vm/code/codeCache.hpp
+++ b/src/share/vm/code/codeCache.hpp
@@ -179,7 +179,6 @@ class CodeCache : AllStatic {
static void mark_all_nmethods_for_deoptimization();
static int mark_for_deoptimization(Method* dependee);
- static void make_marked_nmethods_zombies();
static void make_marked_nmethods_not_entrant();
// tells how many nmethods have dependencies
diff --git a/src/share/vm/code/compiledIC.cpp b/src/share/vm/code/compiledIC.cpp
index 77e435e47..478a6cf58 100644
--- a/src/share/vm/code/compiledIC.cpp
+++ b/src/share/vm/code/compiledIC.cpp
@@ -155,6 +155,14 @@ address CompiledIC::stub_address() const {
return _ic_call->destination();
}
+// Clears the IC stub if the compiled IC is in transition state
+void CompiledIC::clear_ic_stub() {
+ if (is_in_transition_state()) {
+ ICStub* stub = ICStub_from_destination_address(stub_address());
+ stub->clear();
+ }
+}
+
//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
@@ -279,6 +287,7 @@ bool CompiledIC::is_call_to_compiled() const {
assert( is_c1_method ||
!is_monomorphic ||
is_optimized() ||
+ !caller->is_alive() ||
(cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
return is_monomorphic;
@@ -313,7 +322,7 @@ bool CompiledIC::is_call_to_interpreted() const {
}
-void CompiledIC::set_to_clean() {
+void CompiledIC::set_to_clean(bool in_use) {
assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked() , "MT-unsafe call");
if (TraceInlineCacheClearing || TraceICs) {
tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
@@ -329,17 +338,14 @@ void CompiledIC::set_to_clean() {
// A zombie transition will always be safe, since the metadata has already been set to NULL, so
// we only need to patch the destination
- bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();
+ bool safe_transition = !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();
if (safe_transition) {
// Kill any leftover stub we might have too
- if (is_in_transition_state()) {
- ICStub* old_stub = ICStub_from_destination_address(stub_address());
- old_stub->clear();
- }
+ clear_ic_stub();
if (is_optimized()) {
- set_ic_destination(entry);
- } else {
+ set_ic_destination(entry);
+ } else {
set_ic_destination_and_value(entry, (void*)NULL);
}
} else {
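
Toy model (ToyIC is an invented stand-in for CompiledIC, at_safepoint() is stubbed) of the refactored transition logic above: clear_ic_stub() releases the stub only when the IC is mid-transition, and set_to_clean(in_use) now treats a dead caller (!in_use) as a safe transition, matching the relaxed assert in is_call_to_compiled().

    #include <cstdio>

    struct ToyIC {
      bool in_transition;
      static bool at_safepoint() { return false; }
      void clear_ic_stub() {
        if (in_transition) {           // only a transitioning IC owns a stub
          std::puts("IC stub released");
          in_transition = false;
        }
      }
      void set_to_clean(bool in_use = true) {
        // A dead caller makes the transition safe even outside a safepoint.
        bool safe_transition = !in_use || at_safepoint();
        if (safe_transition) {
          clear_ic_stub();
          std::puts("IC set to clean");
        }
      }
    };

    int main() {
      ToyIC ic = { true };
      ic.set_to_clean(false);   // caller nmethod is no longer alive
      return 0;
    }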
diff --git a/src/share/vm/code/compiledIC.hpp b/src/share/vm/code/compiledIC.hpp
index f2207cc2e..b797329ac 100644
--- a/src/share/vm/code/compiledIC.hpp
+++ b/src/share/vm/code/compiledIC.hpp
@@ -228,8 +228,9 @@ class CompiledIC: public ResourceObj {
//
// They all takes a TRAP argument, since they can cause a GC if the inline-cache buffer is full.
//
- void set_to_clean(); // Can only be called during a safepoint operation
+ void set_to_clean(bool in_use = true);
void set_to_monomorphic(CompiledICInfo& info);
+ void clear_ic_stub();
// Returns true if successful and false otherwise. The call can fail if memory
// allocation in the code cache fails.
diff --git a/src/share/vm/code/dependencies.cpp b/src/share/vm/code/dependencies.cpp
index a2f567798..7317036d6 100644
--- a/src/share/vm/code/dependencies.cpp
+++ b/src/share/vm/code/dependencies.cpp
@@ -811,7 +811,13 @@ class ClassHierarchyWalker {
assert((uint)n <= (uint)_num_participants, "oob");
Method* fm = _found_methods[n];
assert(n == _num_participants || fm != NULL, "proper usage");
- assert(fm == NULL || fm->method_holder() == _participants[n], "sanity");
+ if (fm != NULL && fm->method_holder() != _participants[n]) {
+ // Default methods from interfaces can be added to classes. In
+ // that case the holder of the method is not the class but the
+ // interface where it's defined.
+ assert(fm->is_default_method(), "sanity");
+ return NULL;
+ }
return fm;
}
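
A compact sketch, with stand-in Klass/Method structs, of the guard added to ClassHierarchyWalker::found_method() above: a holder mismatch is tolerated only for default methods inherited from interfaces, and such methods are filtered out rather than reported.

    #include <cstdio>

    struct Klass  { const char* name; };                 // invented stand-ins
    struct Method { Klass* holder; bool is_default; };

    // A holder mismatch is legitimate only for interface default methods,
    // and such methods are filtered out instead of being reported.
    static Method* found_method(Method* fm, Klass* participant) {
      if (fm != nullptr && fm->holder != participant) {
        if (!fm->is_default) {
          std::puts("unexpected holder");                // would assert in VM
        }
        return nullptr;
      }
      return fm;
    }

    int main() {
      Klass iface = { "I" };
      Klass klass = { "C" };
      Method dm = { &iface, true };                      // default method of I
      std::puts(found_method(&dm, &klass) ? "reported" : "filtered");
      return 0;
    }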
diff --git a/src/share/vm/code/nmethod.cpp b/src/share/vm/code/nmethod.cpp
index b31ede05e..6ea39aea9 100644
--- a/src/share/vm/code/nmethod.cpp
+++ b/src/share/vm/code/nmethod.cpp
@@ -1148,9 +1148,20 @@ void nmethod::clear_inline_caches() {
}
}
+// Clear ICStubs of all compiled ICs
+void nmethod::clear_ic_stubs() {
+ assert_locked_or_safepoint(CompiledIC_lock);
+ RelocIterator iter(this);
+ while(iter.next()) {
+ if (iter.type() == relocInfo::virtual_call_type) {
+ CompiledIC* ic = CompiledIC_at(&iter);
+ ic->clear_ic_stub();
+ }
+ }
+}
-void nmethod::cleanup_inline_caches() {
+void nmethod::cleanup_inline_caches() {
assert_locked_or_safepoint(CompiledIC_lock);
// If the method is not entrant or zombie then a JMP is plastered over the
@@ -1166,7 +1177,8 @@ void nmethod::cleanup_inline_caches() {
// In fact, why are we bothering to look at oops in a non-entrant method??
}
- // Find all calls in an nmethod, and clear the ones that points to zombie methods
+ // Find all calls in an nmethod and clear the ones that point to non-entrant,
+ // zombie and unloaded nmethods.
ResourceMark rm;
RelocIterator iter(this, low_boundary);
while(iter.next()) {
@@ -1178,8 +1190,8 @@ void nmethod::cleanup_inline_caches() {
CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
if( cb != NULL && cb->is_nmethod() ) {
nmethod* nm = (nmethod*)cb;
- // Clean inline caches pointing to both zombie and not_entrant methods
- if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
+ // Clean inline caches pointing to zombie, non-entrant and unloaded methods
+ if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
}
break;
}
@@ -1188,7 +1200,7 @@ void nmethod::cleanup_inline_caches() {
CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
if( cb != NULL && cb->is_nmethod() ) {
nmethod* nm = (nmethod*)cb;
- // Clean inline caches pointing to both zombie and not_entrant methods
+ // Clean inline caches pointing to zombie, non-entrant and unloaded methods
if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
}
break;
@@ -1279,7 +1291,7 @@ void nmethod::mark_as_seen_on_stack() {
// Tell if a non-entrant method can be converted to a zombie (i.e.,
// there are no activations on the stack, not in use by the VM,
// and not in use by the ServiceThread)
-bool nmethod::can_not_entrant_be_converted() {
+bool nmethod::can_convert_to_zombie() {
assert(is_not_entrant(), "must be a non-entrant method");
// Since the nmethod sweeper only does partial sweep the sweeper's traversal
@@ -1607,7 +1619,11 @@ void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
// During GC the is_alive closure is non-NULL, and is used to
// determine liveness of dependees that need to be updated.
if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
- InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
+ // The GC defers deletion of this entry, since there might be multiple threads
+ // iterating over the _dependencies graph. Other call paths are single-threaded
+ // and may delete it immediately.
+ bool delete_immediately = is_alive == NULL;
+ InstanceKlass::cast(klass)->remove_dependent_nmethod(this, delete_immediately);
}
}
}
@@ -2695,7 +2711,7 @@ void nmethod::verify() {
// Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
// seems odd.
- if( is_zombie() || is_not_entrant() )
+ if (is_zombie() || is_not_entrant() || is_unloaded())
return;
// Make sure all the entry points are correctly aligned for patching.
diff --git a/src/share/vm/code/nmethod.hpp b/src/share/vm/code/nmethod.hpp
index 7a3922164..b7d689005 100644
--- a/src/share/vm/code/nmethod.hpp
+++ b/src/share/vm/code/nmethod.hpp
@@ -577,6 +577,7 @@ public:
// Inline cache support
void clear_inline_caches();
+ void clear_ic_stubs();
void cleanup_inline_caches();
bool inlinecache_check_contains(address addr) const {
return (addr >= code_begin() && addr < verified_entry_point());
@@ -604,7 +605,7 @@ public:
// See comment at definition of _last_seen_on_stack
void mark_as_seen_on_stack();
- bool can_not_entrant_be_converted();
+ bool can_convert_to_zombie();
// Evolution support. We make old (discarded) compiled methods point to new Method*s.
void set_method(Method* method) { _method = method; }
diff --git a/src/share/vm/code/vmreg.hpp b/src/share/vm/code/vmreg.hpp
index 1ef07ec19..eb26fb6e2 100644
--- a/src/share/vm/code/vmreg.hpp
+++ b/src/share/vm/code/vmreg.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,25 +32,17 @@
#ifdef COMPILER2
#include "opto/adlcVMDeps.hpp"
#include "utilities/ostream.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined ADGLOBALS_MD_HPP
+# include ADGLOBALS_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/adGlobals_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/adGlobals_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/adGlobals_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/adGlobals_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "adfiles/adGlobals_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/adGlobals_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/adGlobals_ppc_64.hpp"
#endif
#endif
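
Sketch of the include-selection pattern introduced above, using a hypothetical AD_NAME macro in place of ADGLOBALS_MD_HPP so the example compiles without HotSpot's generated adfiles: an externally injected definition (e.g. g++ -DAD_NAME='"adGlobals_arm.hpp"' from a port's makefile) takes precedence over the built-in #elif chain.

    #include <cstdio>

    // AD_NAME is a hypothetical stand-in for ADGLOBALS_MD_HPP; an external
    // -D definition wins, otherwise the built-in chain picks a default,
    // mirroring the #if/#elif ordering in the hunk above.
    #if defined AD_NAME
    // externally injected: nothing to do
    #elif defined(__x86_64__) || defined(_M_X64)
    # define AD_NAME "adGlobals_x86_64.hpp"
    #else
    # define AD_NAME "adGlobals_zero.hpp"
    #endif

    int main() {
      std::printf("selected ad file: %s\n", AD_NAME);
      return 0;
    }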
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
index 91d935708..de5555fb7 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -598,7 +598,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_collector_policy(cp),
_should_unload_classes(CMSClassUnloadingEnabled),
_concurrent_cycles_since_last_unload(0),
- _roots_scanning_options(SharedHeap::SO_None),
+ _roots_scanning_options(GenCollectedHeap::SO_None),
_inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
_intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
@@ -3068,7 +3068,7 @@ void CMSCollector::verify_after_remark_work_1() {
gch->gen_process_roots(_cmsGen->level(),
true, // younger gens are roots
true, // activate StrongRootsScope
- SharedHeap::ScanningOption(roots_scanning_options()),
+ GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
NULL,
@@ -3136,7 +3136,7 @@ void CMSCollector::verify_after_remark_work_2() {
gch->gen_process_roots(_cmsGen->level(),
true, // younger gens are roots
true, // activate StrongRootsScope
- SharedHeap::ScanningOption(roots_scanning_options()),
+ GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
NULL,
@@ -3327,7 +3327,7 @@ bool ConcurrentMarkSweepGeneration::is_too_full() const {
void CMSCollector::setup_cms_unloading_and_verification_state() {
const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
|| VerifyBeforeExit;
- const int rso = SharedHeap::SO_AllCodeCache;
+ const int rso = GenCollectedHeap::SO_AllCodeCache;
// We set the proper root for this CMS cycle here.
if (should_unload_classes()) { // Should unload classes this cycle
@@ -3339,9 +3339,11 @@ void CMSCollector::setup_cms_unloading_and_verification_state() {
// Not unloading classes this cycle
assert(!should_unload_classes(), "Inconsistency!");
+ // If we are not unloading classes then add SO_AllCodeCache to root
+ // scanning options.
+ add_root_scanning_option(rso);
+
if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
- // Include symbols, strings and code cache elements to prevent their resurrection.
- add_root_scanning_option(rso);
set_verifying(true);
} else if (verifying() && !should_verify) {
// We were verifying, but some verification flags got disabled.
@@ -3753,7 +3755,7 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
gch->gen_process_roots(_cmsGen->level(),
true, // younger gens are roots
true, // activate StrongRootsScope
- SharedHeap::ScanningOption(roots_scanning_options()),
+ GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
NULL,
@@ -5254,13 +5256,13 @@ void CMSParInitialMarkTask::work(uint worker_id) {
gch->gen_process_roots(_collector->_cmsGen->level(),
false, // yg was scanned above
false, // this is parallel code
- SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+ GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
&par_mri_cl,
NULL,
&cld_closure);
assert(_collector->should_unload_classes()
- || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
+ || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
_timer.stop();
if (PrintCMSStatistics != 0) {
@@ -5390,14 +5392,14 @@ void CMSParRemarkTask::work(uint worker_id) {
gch->gen_process_roots(_collector->_cmsGen->level(),
false, // yg was scanned above
false, // this is parallel code
- SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+ GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
&par_mrias_cl,
NULL,
NULL); // The dirty klasses will be handled below
assert(_collector->should_unload_classes()
- || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
+ || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
_timer.stop();
if (PrintCMSStatistics != 0) {
@@ -5982,14 +5984,14 @@ void CMSCollector::do_remark_non_parallel() {
gch->gen_process_roots(_cmsGen->level(),
true, // younger gens as roots
false, // use the local StrongRootsScope
- SharedHeap::ScanningOption(roots_scanning_options()),
+ GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&mrias_cl,
NULL,
NULL); // The dirty klasses will be handled below
assert(should_unload_classes()
- || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
+ || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
}
@@ -6634,7 +6636,6 @@ void CMSCollector::reset(bool asynch) {
}
void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
- gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
TraceCollectorStats tcs(counters());
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp
index 982f7c103..7dac0c2ae 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -130,8 +130,8 @@ class VM_CMS_Final_Remark: public VM_CMS_Operation {
class VM_GenCollectFullConcurrent: public VM_GC_Operation {
bool _disabled_icms;
public:
- VM_GenCollectFullConcurrent(unsigned int gc_count_before,
- unsigned int full_gc_count_before,
+ VM_GenCollectFullConcurrent(uint gc_count_before,
+ uint full_gc_count_before,
GCCause::Cause gc_cause)
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
_disabled_icms(false)
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp
index 1a46ed3f5..2a0a563d5 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
nonstatic_field(LinearAllocBlock, _word_size, size_t) \
nonstatic_field(AFLBinaryTreeDictionary, _total_size, size_t) \
nonstatic_field(CompactibleFreeListSpace, _dictionary, AFLBinaryTreeDictionary*) \
- nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0], FreeList<FreeChunk>) \
+ nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0], AdaptiveFreeList<FreeChunk>) \
nonstatic_field(CompactibleFreeListSpace, _smallLinearAllocBlock, LinearAllocBlock)
diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
index 85b1ff5d4..92261912a 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -114,7 +114,7 @@ void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
}
size_t CMBitMap::compute_size(size_t heap_size) {
- return heap_size / mark_distance();
+ return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}
size_t CMBitMap::mark_distance() {
@@ -2640,24 +2640,41 @@ void ConcurrentMark::swapMarkBitMaps() {
_nextMarkBitMap = (CMBitMap*) temp;
}
-class CMObjectClosure;
-
-// Closure for iterating over objects, currently only used for
-// processing SATB buffers.
-class CMObjectClosure : public ObjectClosure {
+// Closure for marking entries in SATB buffers.
+class CMSATBBufferClosure : public SATBBufferClosure {
private:
CMTask* _task;
+ G1CollectedHeap* _g1h;
-public:
- void do_object(oop obj) {
- _task->deal_with_reference(obj);
+ // This is very similar to CMTask::deal_with_reference, but with
+ // more relaxed requirements for the argument, so this must be more
+ // circumspect about treating the argument as an object.
+ void do_entry(void* entry) const {
+ _task->increment_refs_reached();
+ HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
+ if (entry < hr->next_top_at_mark_start()) {
+ // Until we get here, we don't know whether entry refers to a valid
+ // object; it could instead have been a stale reference.
+ oop obj = static_cast<oop>(entry);
+ assert(obj->is_oop(true /* ignore mark word */),
+ err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)));
+ _task->make_reference_grey(obj, hr);
+ }
}
- CMObjectClosure(CMTask* task) : _task(task) { }
+public:
+ CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
+ : _task(task), _g1h(g1h) { }
+
+ virtual void do_buffer(void** buffer, size_t size) {
+ for (size_t i = 0; i < size; ++i) {
+ do_entry(buffer[i]);
+ }
+ }
};
class G1RemarkThreadsClosure : public ThreadClosure {
- CMObjectClosure _cm_obj;
+ CMSATBBufferClosure _cm_satb_cl;
G1CMOopClosure _cm_cl;
MarkingCodeBlobClosure _code_cl;
int _thread_parity;
@@ -2665,7 +2682,9 @@ class G1RemarkThreadsClosure : public ThreadClosure {
public:
G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
- _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
+ _cm_satb_cl(task, g1h),
+ _cm_cl(g1h, g1h->concurrent_mark(), task),
+ _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
_thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
void do_thread(Thread* thread) {
@@ -2681,11 +2700,11 @@ class G1RemarkThreadsClosure : public ThreadClosure {
// live by the SATB invariant but other oops recorded in nmethods may behave differently.
jt->nmethods_do(&_code_cl);
- jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
+ jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
}
} else if (thread->is_VM_thread()) {
if (thread->claim_oops_do(_is_par, _thread_parity)) {
- JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
+ JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
}
}
}
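
Toy version (ToyRegion is invented) of the do_entry()/do_buffer() logic above: an SATB entry is a raw pointer and may be stale, so it is treated as an object only when it lies below the region's next-top-at-mark-start (NTAMS).

    #include <cstddef>
    #include <cstdio>

    struct ToyRegion { char* ntams; };  // next-top-at-mark-start boundary

    // An SATB entry may be a stale pointer; only entries below NTAMS are
    // guaranteed to be valid objects worth greying.
    static void do_buffer(void** buffer, size_t size, const ToyRegion& hr) {
      for (size_t i = 0; i < size; ++i) {
        char* entry = static_cast<char*>(buffer[i]);
        if (entry < hr.ntams) {
          std::printf("greying entry %zu\n", i);
        }  // else: stale reference, skip it
      }
    }

    int main() {
      char heap[16];
      ToyRegion hr = { heap + 8 };
      void* buf[2] = { heap + 4, heap + 12 };   // one live, one stale
      do_buffer(buf, 2, hr);
      return 0;
    }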
@@ -3059,9 +3078,7 @@ ConcurrentMark::claim_region(uint worker_id) {
#ifndef PRODUCT
enum VerifyNoCSetOopsPhase {
VerifyNoCSetOopsStack,
- VerifyNoCSetOopsQueues,
- VerifyNoCSetOopsSATBCompleted,
- VerifyNoCSetOopsSATBThread
+ VerifyNoCSetOopsQueues
};
class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
@@ -3074,8 +3091,6 @@ private:
switch (_phase) {
case VerifyNoCSetOopsStack: return "Stack";
case VerifyNoCSetOopsQueues: return "Queue";
- case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
- case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers";
default: ShouldNotReachHere();
}
return NULL;
@@ -3102,7 +3117,7 @@ public:
virtual void do_oop(narrowOop* p) {
// We should not come across narrow oops while scanning marking
- // stacks and SATB buffers.
+ // stacks.
ShouldNotReachHere();
}
@@ -3111,10 +3126,7 @@ public:
}
};
-void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
- bool verify_enqueued_buffers,
- bool verify_thread_buffers,
- bool verify_fingers) {
+void ConcurrentMark::verify_no_cset_oops() {
assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
if (!G1CollectedHeap::heap()->mark_in_progress()) {
return;
@@ -3122,65 +3134,47 @@ void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
VerifyNoCSetOopsClosure cl;
- if (verify_stacks) {
- // Verify entries on the global mark stack
- cl.set_phase(VerifyNoCSetOopsStack);
- _markStack.oops_do(&cl);
+ // Verify entries on the global mark stack
+ cl.set_phase(VerifyNoCSetOopsStack);
+ _markStack.oops_do(&cl);
- // Verify entries on the task queues
- for (uint i = 0; i < _max_worker_id; i += 1) {
- cl.set_phase(VerifyNoCSetOopsQueues, i);
- CMTaskQueue* queue = _task_queues->queue(i);
- queue->oops_do(&cl);
- }
- }
-
- SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
-
- // Verify entries on the enqueued SATB buffers
- if (verify_enqueued_buffers) {
- cl.set_phase(VerifyNoCSetOopsSATBCompleted);
- satb_qs.iterate_completed_buffers_read_only(&cl);
- }
-
- // Verify entries on the per-thread SATB buffers
- if (verify_thread_buffers) {
- cl.set_phase(VerifyNoCSetOopsSATBThread);
- satb_qs.iterate_thread_buffers_read_only(&cl);
- }
-
- if (verify_fingers) {
- // Verify the global finger
- HeapWord* global_finger = finger();
- if (global_finger != NULL && global_finger < _heap_end) {
- // The global finger always points to a heap region boundary. We
- // use heap_region_containing_raw() to get the containing region
- // given that the global finger could be pointing to a free region
- // which subsequently becomes continues humongous. If that
- // happens, heap_region_containing() will return the bottom of the
- // corresponding starts humongous region and the check below will
- // not hold any more.
- // Since we always iterate over all regions, we might get a NULL HeapRegion
- // here.
- HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
- guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
- err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
- p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
- }
-
- // Verify the task fingers
- assert(parallel_marking_threads() <= _max_worker_id, "sanity");
- for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
- CMTask* task = _tasks[i];
- HeapWord* task_finger = task->finger();
- if (task_finger != NULL && task_finger < _heap_end) {
- // See above note on the global finger verification.
- HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
- guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
- !task_hr->in_collection_set(),
- err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
- p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
- }
+ // Verify entries on the task queues
+ for (uint i = 0; i < _max_worker_id; i += 1) {
+ cl.set_phase(VerifyNoCSetOopsQueues, i);
+ CMTaskQueue* queue = _task_queues->queue(i);
+ queue->oops_do(&cl);
+ }
+
+ // Verify the global finger
+ HeapWord* global_finger = finger();
+ if (global_finger != NULL && global_finger < _heap_end) {
+ // The global finger always points to a heap region boundary. We
+ // use heap_region_containing_raw() to get the containing region
+ // given that the global finger could be pointing to a free region
+ // which subsequently becomes continues humongous. If that
+ // happens, heap_region_containing() will return the bottom of the
+ // corresponding starts humongous region and the check below will
+ // not hold any more.
+ // Since we always iterate over all regions, we might get a NULL HeapRegion
+ // here.
+ HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
+ guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
+ err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
+ p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
+ }
+
+ // Verify the task fingers
+ assert(parallel_marking_threads() <= _max_worker_id, "sanity");
+ for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
+ CMTask* task = _tasks[i];
+ HeapWord* task_finger = task->finger();
+ if (task_finger != NULL && task_finger < _heap_end) {
+ // See above note on the global finger verification.
+ HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
+ guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
+ !task_hr->in_collection_set(),
+ err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
+ p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
}
}
}
@@ -3510,22 +3504,29 @@ void ConcurrentMark::print_finger() {
}
#endif
-void CMTask::scan_object(oop obj) {
+template<bool scan>
+inline void CMTask::process_grey_object(oop obj) {
+ assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
if (_cm->verbose_high()) {
- gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
+ gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT,
_worker_id, p2i((void*) obj));
}
size_t obj_size = obj->size();
_words_scanned += obj_size;
- obj->oop_iterate(_cm_oop_closure);
+ if (scan) {
+ obj->oop_iterate(_cm_oop_closure);
+ }
statsOnly( ++_objs_scanned );
check_limits();
}
+template void CMTask::process_grey_object<true>(oop);
+template void CMTask::process_grey_object<false>(oop);
+
// Closure for iteration over bitmaps
class CMBitMapClosure : public BitMapClosure {
private:
@@ -3994,34 +3995,18 @@ void CMTask::drain_satb_buffers() {
// very counter productive if it did that. :-)
_draining_satb_buffers = true;
- CMObjectClosure oc(this);
+ CMSATBBufferClosure satb_cl(this, _g1h);
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
- if (G1CollectedHeap::use_parallel_gc_threads()) {
- satb_mq_set.set_par_closure(_worker_id, &oc);
- } else {
- satb_mq_set.set_closure(&oc);
- }
// This keeps claiming and applying the closure to completed buffers
// until we run out of buffers or we need to abort.
- if (G1CollectedHeap::use_parallel_gc_threads()) {
- while (!has_aborted() &&
- satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
- if (_cm->verbose_medium()) {
- gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
- }
- statsOnly( ++_satb_buffers_processed );
- regular_clock_call();
- }
- } else {
- while (!has_aborted() &&
- satb_mq_set.apply_closure_to_completed_buffer()) {
- if (_cm->verbose_medium()) {
- gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
- }
- statsOnly( ++_satb_buffers_processed );
- regular_clock_call();
+ while (!has_aborted() &&
+ satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
+ if (_cm->verbose_medium()) {
+ gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
}
+ statsOnly( ++_satb_buffers_processed );
+ regular_clock_call();
}
_draining_satb_buffers = false;
@@ -4030,12 +4015,6 @@ void CMTask::drain_satb_buffers() {
concurrent() ||
satb_mq_set.completed_buffers_num() == 0, "invariant");
- if (G1CollectedHeap::use_parallel_gc_threads()) {
- satb_mq_set.set_par_closure(_worker_id, NULL);
- } else {
- satb_mq_set.set_closure(NULL);
- }
-
// again, this was a potentially expensive operation, decrease the
// limits to get the regular clock call early
decrease_limits();
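
A sketch of the process_grey_object<scan> specialization introduced in this file, with a toy object type: the <false> instantiation still accounts for the object's size but skips the reference walk, which is what makes the typeArray fast path cheap.

    #include <cstddef>
    #include <cstdio>

    struct ToyObj { size_t size; bool is_typeArray; };  // stand-in for oop

    // The <false> instantiation does the size bookkeeping but skips the
    // reference iteration: a typeArray holds no oops, nothing to scan.
    template <bool scan>
    static void process_grey_object(const ToyObj& obj, size_t* words_scanned) {
      *words_scanned += obj.size;
      if (scan) {
        std::printf("iterating %zu words of references\n", obj.size);
      }
    }

    int main() {
      size_t words = 0;
      process_grey_object<true>(ToyObj{10, false}, &words);   // regular object
      process_grey_object<false>(ToyObj{100, true}, &words);  // primitive array
      std::printf("accounted %zu words total\n", words);      // 110
      return 0;
    }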
diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.hpp b/src/share/vm/gc_implementation/g1/concurrentMark.hpp
index 8a1120e73..4c6262415 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp
@@ -793,14 +793,9 @@ public:
}
// Verify that there are no CSet oops on the stacks (taskqueues /
- // global mark stack), enqueued SATB buffers, per-thread SATB
- // buffers, and fingers (global / per-task). The boolean parameters
- // decide which of the above data structures to verify. If marking
- // is not in progress, it's a no-op.
- void verify_no_cset_oops(bool verify_stacks,
- bool verify_enqueued_buffers,
- bool verify_thread_buffers,
- bool verify_fingers) PRODUCT_RETURN;
+ // global mark stack) and fingers (global / per-task).
+ // If marking is not in progress, it's a no-op.
+ void verify_no_cset_oops() PRODUCT_RETURN;
bool isPrevMarked(oop p) const {
assert(p != NULL && p->is_oop(), "expected an oop");
@@ -1108,6 +1103,12 @@ private:
void regular_clock_call();
bool concurrent() { return _concurrent; }
+ // Test whether obj might have already been passed over by the
+ // mark bitmap scan, and so needs to be pushed onto the mark stack.
+ bool is_below_finger(oop obj, HeapWord* global_finger) const;
+
+ template<bool scan> void process_grey_object(oop obj);
+
public:
// It resets the task; it should be called right at the beginning of
// a marking phase.
@@ -1155,12 +1156,22 @@ public:
void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
- // It grays the object by marking it and, if necessary, pushing it
- // on the local queue
+ // Increment the number of references this task has visited.
+ void increment_refs_reached() { ++_refs_reached; }
+
+ // Grey the object by marking it. If not already marked, push it on
+ // the local queue if below the finger.
+ // Precondition: obj is in region.
+ // Precondition: obj is below region's NTAMS.
+ inline void make_reference_grey(oop obj, HeapRegion* region);
+
+ // Grey the object (by calling make_reference_grey) if required,
+ // e.g. obj is below its containing region's NTAMS.
+ // Precondition: obj is a valid heap object.
inline void deal_with_reference(oop obj);
// It scans an object and visits its children.
- void scan_object(oop obj);
+ void scan_object(oop obj) { process_grey_object<true>(obj); }
// It pushes an object on the local queue.
inline void push(oop obj);
diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp
index 0557cdbc6..4e84d49cd 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp
@@ -259,14 +259,87 @@ inline void CMTask::push(oop obj) {
++_local_pushes );
}
-// This determines whether the method below will check both the local
-// and global fingers when determining whether to push on the stack a
-// gray object (value 1) or whether it will only check the global one
-// (value 0). The tradeoffs are that the former will be a bit more
-// accurate and possibly push less on the stack, but it might also be
-// a little bit slower.
+inline bool CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
+ // If obj is above the global finger, then the mark bitmap scan
+ // will find it later, and no push is needed. Similarly, if we have
+ // a current region and obj is between the local finger and the
+ // end of the current region, then no push is needed. The tradeoff
+ // of checking both vs only checking the global finger is that the
+ // local check will be more accurate and so result in fewer pushes,
+ // but may also be a little slower.
+ HeapWord* objAddr = (HeapWord*)obj;
+ if (_finger != NULL) {
+ // We have a current region.
+
+ // Finger and region values are all NULL or all non-NULL. We
+ // use _finger to check since we immediately use its value.
+ assert(_curr_region != NULL, "invariant");
+ assert(_region_limit != NULL, "invariant");
+ assert(_region_limit <= global_finger, "invariant");
+
+ // True if obj is less than the local finger, or is between
+ // the region limit and the global finger.
+ if (objAddr < _finger) {
+ return true;
+ } else if (objAddr < _region_limit) {
+ return false;
+ } // Else check global finger.
+ }
+ // Check global finger.
+ return objAddr < global_finger;
+}
+
+inline void CMTask::make_reference_grey(oop obj, HeapRegion* hr) {
+ if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) {
+
+ if (_cm->verbose_high()) {
+ gclog_or_tty->print_cr("[%u] marked object " PTR_FORMAT,
+ _worker_id, p2i(obj));
+ }
-#define _CHECK_BOTH_FINGERS_ 1
+ // No OrderAccess:store_load() is needed. It is implicit in the
+ // CAS done in CMBitMap::parMark() call in the routine above.
+ HeapWord* global_finger = _cm->finger();
+
+ // We only need to push a newly grey object on the mark
+ // stack if it is in a section of memory the mark bitmap
+ // scan has already examined. Mark bitmap scanning
+ // maintains progress "fingers" for determining that.
+ //
+ // Notice that the global finger might be moving forward
+ // concurrently. This is not a problem. In the worst case, we
+ // mark the object while it is above the global finger and, by
+ // the time we read the global finger, it has moved forward
+ // past this object. In this case, the object will probably
+ // be visited when a task is scanning the region and will also
+ // be pushed on the stack. So, some duplicate work, but no
+ // correctness problems.
+ if (is_below_finger(obj, global_finger)) {
+ if (obj->is_typeArray()) {
+ // Immediately process arrays of primitive types, rather
+ // than pushing on the mark stack. This keeps us from
+ // adding humongous objects to the mark stack that might
+ // be reclaimed before the entry is processed - see
+ // selection of candidates for eager reclaim of humongous
+ // objects. The cost of the additional type test is
+ // mitigated by avoiding a trip through the mark stack,
+ // by only doing a bookkeeping update and avoiding the
+ // actual scan of the object - a typeArray contains no
+ // references, and the metadata is built-in.
+ process_grey_object<false>(obj);
+ } else {
+ if (_cm->verbose_high()) {
+ gclog_or_tty->print_cr("[%u] below a finger (local: " PTR_FORMAT
+ ", global: " PTR_FORMAT ") pushing "
+ PTR_FORMAT " on mark stack",
+ _worker_id, p2i(_finger),
+ p2i(global_finger), p2i(obj));
+ }
+ push(obj);
+ }
+ }
+ }
+}
inline void CMTask::deal_with_reference(oop obj) {
if (_cm->verbose_high()) {
@@ -274,7 +347,7 @@ inline void CMTask::deal_with_reference(oop obj) {
_worker_id, p2i((void*) obj));
}
- ++_refs_reached;
+ increment_refs_reached();
HeapWord* objAddr = (HeapWord*) obj;
assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
@@ -286,62 +359,7 @@ inline void CMTask::deal_with_reference(oop obj) {
// anything with it).
HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
if (!hr->obj_allocated_since_next_marking(obj)) {
- if (_cm->verbose_high()) {
- gclog_or_tty->print_cr("[%u] "PTR_FORMAT" is not considered marked",
- _worker_id, p2i((void*) obj));
- }
-
- // we need to mark it first
- if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) {
- // No OrderAccess:store_load() is needed. It is implicit in the
- // CAS done in CMBitMap::parMark() call in the routine above.
- HeapWord* global_finger = _cm->finger();
-
-#if _CHECK_BOTH_FINGERS_
- // we will check both the local and global fingers
-
- if (_finger != NULL && objAddr < _finger) {
- if (_cm->verbose_high()) {
- gclog_or_tty->print_cr("[%u] below the local finger ("PTR_FORMAT"), "
- "pushing it", _worker_id, p2i(_finger));
- }
- push(obj);
- } else if (_curr_region != NULL && objAddr < _region_limit) {
- // do nothing
- } else if (objAddr < global_finger) {
- // Notice that the global finger might be moving forward
- // concurrently. This is not a problem. In the worst case, we
- // mark the object while it is above the global finger and, by
- // the time we read the global finger, it has moved forward
- // passed this object. In this case, the object will probably
- // be visited when a task is scanning the region and will also
- // be pushed on the stack. So, some duplicate work, but no
- // correctness problems.
-
- if (_cm->verbose_high()) {
- gclog_or_tty->print_cr("[%u] below the global finger "
- "("PTR_FORMAT"), pushing it",
- _worker_id, p2i(global_finger));
- }
- push(obj);
- } else {
- // do nothing
- }
-#else // _CHECK_BOTH_FINGERS_
- // we will only check the global finger
-
- if (objAddr < global_finger) {
- // see long comment above
-
- if (_cm->verbose_high()) {
- gclog_or_tty->print_cr("[%u] below the global finger "
- "("PTR_FORMAT"), pushing it",
- _worker_id, p2i(global_finger));
- }
- push(obj);
- }
-#endif // _CHECK_BOTH_FINGERS_
- }
+ make_reference_grey(obj, hr);
}
}
}
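
Standalone model of is_below_finger() using plain char* arithmetic in place of HeapWord*: a push is needed only when the object lies behind the local finger, or past the current region's limit but behind the global finger.

    #include <cstdio>

    // char* stands in for HeapWord*; all pointers point into one toy array.
    static bool is_below_finger(const char* obj, const char* local_finger,
                                const char* region_limit,
                                const char* global_finger) {
      if (local_finger != nullptr) {          // we have a current region
        if (obj < local_finger) return true;  // scan already passed it: push
        if (obj < region_limit) return false; // scan will still reach it
      }
      return obj < global_finger;             // fall back to the global finger
    }

    int main() {
      char heap[100];
      const char* lf = heap + 20;   // local finger
      const char* rl = heap + 40;   // region limit
      const char* gf = heap + 60;   // global finger
      std::printf("%d %d %d\n",
                  is_below_finger(heap + 10, lf, rl, gf),   // 1: below local
                  is_below_finger(heap + 30, lf, rl, gf),   // 0: current region
                  is_below_finger(heap + 50, lf, rl, gf));  // 1: below global
      return 0;
    }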
diff --git a/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp b/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp
index 9d2f06255..697a40275 100644
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp
@@ -254,25 +254,23 @@ void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
bool force) {
assert(!force, "not supported for GC alloc regions");
- return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
+ return _g1h->new_gc_alloc_region(word_size, count(), InCSetState::Young);
}
void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
- _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
- GCAllocForSurvived);
+ _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Young);
}
HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
bool force) {
assert(!force, "not supported for GC alloc regions");
- return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
+ return _g1h->new_gc_alloc_region(word_size, count(), InCSetState::Old);
}
void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
- _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
- GCAllocForTenured);
+ _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Old);
}
HeapRegion* OldGCAllocRegion::release() {
diff --git a/src/share/vm/gc_implementation/g1/g1Allocator.cpp b/src/share/vm/gc_implementation/g1/g1Allocator.cpp
index 2e223fcc1..0d1ab8411 100644
--- a/src/share/vm/gc_implementation/g1/g1Allocator.cpp
+++ b/src/share/vm/gc_implementation/g1/g1Allocator.cpp
@@ -59,7 +59,7 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
!(retained_region->top() == retained_region->end()) &&
!retained_region->is_empty() &&
!retained_region->isHumongous()) {
- retained_region->record_top_and_timestamp();
+ retained_region->record_timestamp();
// The retained region was added to the old region set when it was
// retired. We have to remove it now, since we don't allow regions
// we allocate to in the region sets. We'll re-add it later, when
@@ -94,6 +94,9 @@ void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, Evacuat
// want either way so no reason to check explicitly for either
// condition.
_retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
+ if (_retained_old_gc_alloc_region != NULL) {
+ _retained_old_gc_alloc_region->record_retained_region();
+ }
if (ResizePLAB) {
_g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
@@ -110,15 +113,16 @@ void G1DefaultAllocator::abandon_gc_alloc_regions() {
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
ParGCAllocBuffer(gclab_word_size), _retired(true) { }
-HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
- HeapWord* obj = NULL;
- size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
+HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
+ size_t word_sz,
+ AllocationContext_t context) {
+ size_t gclab_word_size = _g1h->desired_plab_sz(dest);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
- G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
+ G1ParGCAllocBuffer* alloc_buf = alloc_buffer(dest, context);
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false /* end_of_gc */, false /* retain */);
- HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
+ HeapWord* buf = _g1h->par_allocate_during_gc(dest, gclab_word_size, context);
if (buf == NULL) {
return NULL; // Let caller handle allocation failure.
}
@@ -126,30 +130,33 @@ HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz
alloc_buf->set_word_size(gclab_word_size);
alloc_buf->set_buf(buf);
- obj = alloc_buf->allocate(word_sz);
+ HeapWord* const obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, "buffer was definitely big enough...");
+ return obj;
} else {
- obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
+ return _g1h->par_allocate_during_gc(dest, word_sz, context);
}
- return obj;
}
G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
- G1ParGCAllocator(g1h),
- _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
- _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {
-
- _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
- _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
-
+ G1ParGCAllocator(g1h),
+ _surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young)),
+ _tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {
+ for (uint state = 0; state < InCSetState::Num; state++) {
+ _alloc_buffers[state] = NULL;
+ }
+ _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
+ _alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}
void G1DefaultParGCAllocator::retire_alloc_buffers() {
- for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
- size_t waste = _alloc_buffers[ap]->words_remaining();
- add_to_alloc_buffer_waste(waste);
- _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
- true /* end_of_gc */,
- false /* retain */);
+ for (uint state = 0; state < InCSetState::Num; state++) {
+ G1ParGCAllocBuffer* const buf = _alloc_buffers[state];
+ if (buf != NULL) {
+ add_to_alloc_buffer_waste(buf->words_remaining());
+ buf->flush_stats_and_retire(_g1h->alloc_buffer_stats(state),
+ true /* end_of_gc */,
+ false /* retain */);
+ }
}
}
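
Toy rendering of the policy in allocate_direct_or_new_plab() above, with made-up constants standing in for desired_plab_sz() and ParallelGCBufferWastePct: a request small relative to the PLAB retires and refills the PLAB; a large request goes straight to the region so most of a fresh PLAB is not wasted.

    #include <cstddef>
    #include <cstdio>

    // Made-up constants stand in for desired_plab_sz() and the
    // ParallelGCBufferWastePct flag.
    static const size_t gclab_word_size = 1024;
    static const size_t waste_pct      = 10;

    static const char* allocate_direct_or_new_plab(size_t word_sz) {
      if (word_sz * 100 < gclab_word_size * waste_pct) {
        return "retire current PLAB, refill, bump-allocate inside it";
      }
      return "allocate directly in the region";
    }

    int main() {
      std::printf("64 words:  %s\n", allocate_direct_or_new_plab(64));
      std::printf("512 words: %s\n", allocate_direct_or_new_plab(512));
      return 0;
    }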
diff --git a/src/share/vm/gc_implementation/g1/g1Allocator.hpp b/src/share/vm/gc_implementation/g1/g1Allocator.hpp
index 151342c47..b32a4191d 100644
--- a/src/share/vm/gc_implementation/g1/g1Allocator.hpp
+++ b/src/share/vm/gc_implementation/g1/g1Allocator.hpp
@@ -27,14 +27,9 @@
#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
-enum GCAllocPurpose {
- GCAllocForTenured,
- GCAllocForSurvived,
- GCAllocPurposeCount
-};
-
// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {
friend class VMStructs;
@@ -178,20 +173,40 @@ class G1ParGCAllocator : public CHeapObj<mtGC> {
protected:
G1CollectedHeap* _g1h;
+ // The survivor alignment in effect in bytes.
+ // == 0 : don't align survivors
+ // != 0 : align survivors to that alignment
+ // These values were chosen to favor the non-alignment case since some
+ // architectures have special compare-against-zero instructions.
+ const uint _survivor_alignment_bytes;
+
size_t _alloc_buffer_waste;
size_t _undo_waste;
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
- HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);
-
virtual void retire_alloc_buffers() = 0;
- virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;
+ virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
+
+ // Calculate the survivor space object alignment in bytes. Returns that or 0 if
+ // there are no restrictions on survivor alignment.
+ static uint calc_survivor_alignment_bytes() {
+ assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
+ if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
+ // No need to align objects in the survivors differently, return 0
+ // which means "survivor alignment is not used".
+ return 0;
+ } else {
+ assert(SurvivorAlignmentInBytes > 0, "sanity");
+ return SurvivorAlignmentInBytes;
+ }
+ }
public:
G1ParGCAllocator(G1CollectedHeap* g1h) :
- _g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
+ _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
+ _alloc_buffer_waste(0), _undo_waste(0) {
}
static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
@@ -199,24 +214,40 @@ public:
size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
size_t undo_waste() { return _undo_waste; }
- HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
- HeapWord* obj = NULL;
- if (purpose == GCAllocForSurvived) {
- obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
+ // Allocate word_sz words in dest, either directly into the regions or by
+ // allocating a new PLAB. Returns the address of the allocated memory, NULL if
+ // not successful.
+ HeapWord* allocate_direct_or_new_plab(InCSetState dest,
+ size_t word_sz,
+ AllocationContext_t context);
+
+ // Allocate word_sz words in the PLAB of dest. Returns the address of the
+ // allocated memory, NULL if not successful.
+ HeapWord* plab_allocate(InCSetState dest,
+ size_t word_sz,
+ AllocationContext_t context) {
+ G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
+ if (_survivor_alignment_bytes == 0) {
+ return buffer->allocate(word_sz);
} else {
- obj = alloc_buffer(purpose, context)->allocate(word_sz);
+ return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
}
+ }
+
+ HeapWord* allocate(InCSetState dest, size_t word_sz,
+ AllocationContext_t context) {
+ HeapWord* const obj = plab_allocate(dest, word_sz, context);
if (obj != NULL) {
return obj;
}
- return allocate_slow(purpose, word_sz, context);
+ return allocate_direct_or_new_plab(dest, word_sz, context);
}
- void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
- if (alloc_buffer(purpose, context)->contains(obj)) {
- assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
+ void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
+ if (alloc_buffer(dest, context)->contains(obj)) {
+ assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
"should contain whole object");
- alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
+ alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
} else {
CollectedHeap::fill_with_object(obj, word_sz);
add_to_undo_waste(word_sz);
@@ -227,13 +258,17 @@ public:
class G1DefaultParGCAllocator : public G1ParGCAllocator {
G1ParGCAllocBuffer _surviving_alloc_buffer;
G1ParGCAllocBuffer _tenured_alloc_buffer;
- G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+ G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];
public:
G1DefaultParGCAllocator(G1CollectedHeap* g1h);
- virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
- return _alloc_buffers[purpose];
+ virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) {
+ assert(dest.is_valid(),
+ err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
+ assert(_alloc_buffers[dest.value()] != NULL,
+ err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
+ return _alloc_buffers[dest.value()];
}
virtual void retire_alloc_buffers();
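
Taken together, plab_allocate() and allocate() above form a classic fast path/slow path split: bump-allocate inside the current PLAB, and only fall back to allocate_direct_or_new_plab() on failure, with the alignment test ordered so the common unaligned case stays cheap. A simplified sketch of the fast path, with stand-in types (not the real G1ParGCAllocBuffer):

#include <cstddef>
#include <cstdint>

typedef uint64_t HeapWord;                 // stand-in for HotSpot's HeapWord

struct Plab {
  HeapWord* _cur;
  HeapWord* _end;

  HeapWord* allocate(size_t words) {       // plain bump allocation
    if (_cur + words > _end) return nullptr;
    HeapWord* res = _cur;
    _cur += words;
    return res;
  }

  // Pad up to align_bytes (assumed a power of two and a multiple of the
  // word size), then allocate; the padding becomes buffer waste.
  HeapWord* allocate_aligned(size_t words, unsigned align_bytes) {
    uintptr_t p = reinterpret_cast<uintptr_t>(_cur);
    uintptr_t aligned = (p + align_bytes - 1) & ~uintptr_t(align_bytes - 1);
    size_t pad = (aligned - p) / sizeof(HeapWord);
    if (_cur + pad + words > _end) return nullptr;
    _cur += pad;                           // the real code fills the gap with a dummy object
    return allocate(words);
  }
};

// Mirrors the shape of plab_allocate(): the == 0 test keeps the common
// "no survivor alignment" case on the cheapest path.
HeapWord* plab_allocate(Plab* buf, size_t words, unsigned survivor_alignment_bytes) {
  if (survivor_alignment_bytes == 0) {
    return buf->allocate(words);
  }
  return buf->allocate_aligned(words, survivor_alignment_bytes);
}
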
diff --git a/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp
index 955e0487e..88a673574 100644
--- a/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp
@@ -75,7 +75,7 @@ protected:
assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
err_msg("end mapping area address must be a multiple of mapping granularity %zd, is " PTR_FORMAT,
mapping_granularity_in_bytes, p2i(end)));
- size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
+ size_t num_target_elems = pointer_delta(end, bottom, mapping_granularity_in_bytes);
idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
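
pointer_delta(end, bottom, n) is HotSpot's way of expressing "how many n-byte granules lie between two addresses"; the old expression computed the same count via word arithmetic. A hedged sketch of its semantics, not the exact library definition:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Sketch of a pointer_delta-style helper: byte distance between two
// addresses, scaled down by a granule size given in bytes.
inline size_t pointer_delta(const void* left, const void* right, size_t granule_bytes) {
  return (reinterpret_cast<uintptr_t>(left) -
          reinterpret_cast<uintptr_t>(right)) / granule_bytes;
}

int main() {
  char area[4096];
  // With a 1 KiB mapping granularity, a 4 KiB range covers 4 target
  // elements -- exactly what num_target_elems computes above.
  size_t n = pointer_delta(area + sizeof(area), area, 1024);
  printf("granules: %zu\n", n);
  assert(n == 4);
  return 0;
}
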
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index b7490f6b1..f5212f059 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,7 @@
#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
+#include "gc_implementation/g1/g1RootProcessor.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
@@ -85,18 +86,6 @@ size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
// apply to TLAB allocation, which is not part of this interface: it
// is done by clients of this interface.)
-// Notes on implementation of parallelism in different tasks.
-//
-// G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
-// The number of GC workers is passed to heap_region_par_iterate_chunked().
-// It does use run_task() which sets _n_workers in the task.
-// G1ParTask executes g1_process_roots() ->
-// SharedHeap::process_roots() which calls eventually to
-// CardTableModRefBS::par_non_clean_card_iterate_work() which uses
-// SequentialSubTasksDone. SharedHeap::process_roots() also
-// directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
-//
-
// Local to this file.
class RefineCardTableEntryClosure: public CardTableEntryClosure {
@@ -364,7 +353,7 @@ void YoungList::print() {
HeapRegion* lists[] = {_head, _survivor_head};
const char* names[] = {"YOUNG", "SURVIVOR"};
- for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
+ for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
HeapRegion *curr = lists[list];
if (curr == NULL)
@@ -838,8 +827,8 @@ HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
assert_heap_not_locked_and_not_at_safepoint();
assert(!isHumongous(word_size), "we do not allow humongous TLABs");
- unsigned int dummy_gc_count_before;
- int dummy_gclocker_retry_count = 0;
+ uint dummy_gc_count_before;
+ uint dummy_gclocker_retry_count = 0;
return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
}
@@ -849,8 +838,8 @@ G1CollectedHeap::mem_allocate(size_t word_size,
assert_heap_not_locked_and_not_at_safepoint();
// Loop until the allocation is satisfied, or unsatisfied after GC.
- for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
- unsigned int gc_count_before;
+ for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
+ uint gc_count_before;
HeapWord* result = NULL;
if (!isHumongous(word_size)) {
@@ -902,8 +891,8 @@ G1CollectedHeap::mem_allocate(size_t word_size,
HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
AllocationContext_t context,
- unsigned int *gc_count_before_ret,
- int* gclocker_retry_count_ret) {
+ uint* gc_count_before_ret,
+ uint* gclocker_retry_count_ret) {
// Make sure you read the note in attempt_allocation_humongous().
assert_heap_not_locked_and_not_at_safepoint();
@@ -920,7 +909,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
HeapWord* result = NULL;
for (int try_count = 1; /* we'll return */; try_count += 1) {
bool should_try_gc;
- unsigned int gc_count_before;
+ uint gc_count_before;
{
MutexLockerEx x(Heap_lock);
@@ -964,7 +953,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
if (should_try_gc) {
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded,
- GCCause::_g1_inc_collection_pause);
+ GCCause::_g1_inc_collection_pause);
if (result != NULL) {
assert(succeeded, "only way to get back a non-NULL result");
return result;
@@ -1018,8 +1007,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
}
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
- unsigned int * gc_count_before_ret,
- int* gclocker_retry_count_ret) {
+ uint* gc_count_before_ret,
+ uint* gclocker_retry_count_ret) {
// The structure of this method has a lot of similarities to
// attempt_allocation_slow(). The reason these two were not merged
// into a single one is that such a method would require several "if
@@ -1052,7 +1041,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
HeapWord* result = NULL;
for (int try_count = 1; /* we'll return */; try_count += 1) {
bool should_try_gc;
- unsigned int gc_count_before;
+ uint gc_count_before;
{
MutexLockerEx x(Heap_lock);
@@ -1090,7 +1079,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded,
- GCCause::_g1_humongous_allocation);
+ GCCause::_g1_humongous_allocation);
if (result != NULL) {
assert(succeeded, "only way to get back a non-NULL result");
return result;
@@ -1297,7 +1286,6 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
// Timing
assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
- gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
{
@@ -1855,7 +1843,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_is_alive_closure_stw(this),
_ref_processor_cm(NULL),
_ref_processor_stw(NULL),
- _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
_bot_shared(NULL),
_evac_failure_scan_stack(NULL),
_mark_in_progress(false),
@@ -1866,7 +1853,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
_old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
_humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
- _humongous_is_live(),
+ _humongous_reclaim_candidates(),
_has_humongous_reclaim_candidates(false),
_free_regions_coming(false),
_young_list(new YoungList(this)),
@@ -1878,6 +1865,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_old_marking_cycles_started(0),
_old_marking_cycles_completed(0),
_concurrent_cycle_started(false),
+ _heap_summary_sent(false),
_in_cset_fast_test(),
_dirty_cards_region_list(NULL),
_worker_cset_start_region(NULL),
@@ -1888,9 +1876,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
_g1h = this;
- if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
- vm_exit_during_initialization("Failed necessary allocation.");
- }
_allocator = G1Allocator::create_allocator(_g1h);
_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
@@ -1902,7 +1887,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
assert(n_rem_sets > 0, "Invariant.");
_worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
- _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
+ _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
_evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
for (int i = 0; i < n_queues; i++) {
@@ -1919,6 +1904,26 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
guarantee(_task_queues != NULL, "task_queues allocation failure.");
}
+G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
+ size_t size,
+ size_t translation_factor) {
+ size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
+ // Allocate a new reserved space, preferring to use large pages.
+ ReservedSpace rs(size, preferred_page_size);
+ G1RegionToSpaceMapper* result =
+ G1RegionToSpaceMapper::create_mapper(rs,
+ size,
+ rs.alignment(),
+ HeapRegion::GrainBytes,
+ translation_factor,
+ mtGC);
+ if (TracePageSizes) {
+ gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
+ description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
+ }
+ return result;
+}
+
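
create_aux_memory_mapper() folds several copies of the reserve-then-map boilerplate into one helper parameterized by a translation factor, i.e. how many heap bytes one byte of the auxiliary table covers. A back-of-the-envelope sketch of the sizing arithmetic with assumed example factors (512-byte cards, one mark bit per 8-byte word); the real values come from G1BlockOffsetSharedArray::N_bytes, CMBitMap::mark_distance(), and friends:

#include <cstddef>
#include <cstdio>

// One table byte covers 'bytes_per_table_byte' bytes of heap.
static size_t aux_table_size(size_t heap_bytes, size_t bytes_per_table_byte) {
  return heap_bytes / bytes_per_table_byte;
}

int main() {
  const size_t heap = 1024u * 1024 * 1024;                       // 1 GiB heap
  printf("card table:  %zu bytes\n", aux_table_size(heap, 512)); // 2 MiB
  printf("card counts: %zu bytes\n", aux_table_size(heap, 512)); // 2 MiB
  printf("mark bitmap: %zu bytes\n", aux_table_size(heap, 64));  // 1 bit per word = heap/64
  return 0;
}
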
jint G1CollectedHeap::initialize() {
CollectedHeap::pre_initialize();
os::enable_vtime();
@@ -1992,57 +1997,35 @@ jint G1CollectedHeap::initialize() {
ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
G1RegionToSpaceMapper* heap_storage =
G1RegionToSpaceMapper::create_mapper(g1_rs,
+ g1_rs.size(),
UseLargePages ? os::large_page_size() : os::vm_page_size(),
HeapRegion::GrainBytes,
1,
mtJavaHeap);
heap_storage->set_mapping_changed_listener(&_listener);
- // Reserve space for the block offset table. We do not support automatic uncommit
- // for the card table at this time. BOT only.
- ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+ // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
G1RegionToSpaceMapper* bot_storage =
- G1RegionToSpaceMapper::create_mapper(bot_rs,
- os::vm_page_size(),
- HeapRegion::GrainBytes,
- G1BlockOffsetSharedArray::N_bytes,
- mtGC);
+ create_aux_memory_mapper("Block offset table",
+ G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
+ G1BlockOffsetSharedArray::N_bytes);
ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
G1RegionToSpaceMapper* cardtable_storage =
- G1RegionToSpaceMapper::create_mapper(cardtable_rs,
- os::vm_page_size(),
- HeapRegion::GrainBytes,
- G1BlockOffsetSharedArray::N_bytes,
- mtGC);
+ create_aux_memory_mapper("Card table",
+ G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
+ G1BlockOffsetSharedArray::N_bytes);
- // Reserve space for the card counts table.
- ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
G1RegionToSpaceMapper* card_counts_storage =
- G1RegionToSpaceMapper::create_mapper(card_counts_rs,
- os::vm_page_size(),
- HeapRegion::GrainBytes,
- G1BlockOffsetSharedArray::N_bytes,
- mtGC);
+ create_aux_memory_mapper("Card counts table",
+ G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
+ G1BlockOffsetSharedArray::N_bytes);
- // Reserve space for prev and next bitmap.
size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
-
- ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
G1RegionToSpaceMapper* prev_bitmap_storage =
- G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
- os::vm_page_size(),
- HeapRegion::GrainBytes,
- CMBitMap::mark_distance(),
- mtGC);
-
- ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
+ create_aux_memory_mapper("Prev Bitmap", bitmap_size, CMBitMap::mark_distance());
G1RegionToSpaceMapper* next_bitmap_storage =
- G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
- os::vm_page_size(),
- HeapRegion::GrainBytes,
- CMBitMap::mark_distance(),
- mtGC);
+ create_aux_memory_mapper("Next Bitmap", bitmap_size, CMBitMap::mark_distance());
_hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
g1_barrier_set()->initialize(cardtable_storage);
@@ -2065,8 +2048,14 @@ jint G1CollectedHeap::initialize() {
_g1h = this;
- _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
- _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
+ {
+ HeapWord* start = _hrm.reserved().start();
+ HeapWord* end = _hrm.reserved().end();
+ size_t granularity = HeapRegion::GrainBytes;
+
+ _in_cset_fast_test.initialize(start, end, granularity);
+ _humongous_reclaim_candidates.initialize(start, end, granularity);
+ }
// Create the ConcurrentMark data structure and thread.
// (Must do this late, so that "max_regions" is defined.)
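
Both tables initialized above are biased mapped arrays: an address is turned into a slot index by dividing by the granularity and subtracting a bias, so the array only spans the reserved heap range. A simplified model of that indexing (the real G1BiasedMappedArray adds verification and listener plumbing):

#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified model of a biased, region-granularity bool table.
class RegionFlagTable {
  std::vector<bool> _flags;
  uintptr_t _bias = 0;        // granule index of the first covered address
  size_t _granularity = 1;    // bytes covered per slot (HeapRegion::GrainBytes)

  size_t slot(const void* addr) const {
    return reinterpret_cast<uintptr_t>(addr) / _granularity - _bias;
  }

public:
  void initialize(const void* start, const void* end, size_t granularity) {
    _granularity = granularity;
    _bias = reinterpret_cast<uintptr_t>(start) / granularity;
    size_t n = (reinterpret_cast<uintptr_t>(end) -
                reinterpret_cast<uintptr_t>(start)) / granularity;
    _flags.assign(n, false);
  }

  void set(const void* addr, bool v) { _flags.at(slot(addr)) = v; }
  bool get(const void* addr) const  { return _flags.at(slot(addr)); }
};
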
@@ -2158,11 +2147,6 @@ void G1CollectedHeap::stop() {
}
}
-void G1CollectedHeap::clear_humongous_is_live_table() {
- guarantee(G1ReclaimDeadHumongousObjectsAtYoungGC, "Should only be called if true");
- _humongous_is_live.clear();
-}
-
size_t G1CollectedHeap::conservative_max_heap_alignment() {
return HeapRegion::max_region_size();
}
@@ -2299,11 +2283,11 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
- int n_completed_buffers = 0;
+ size_t n_completed_buffers = 0;
while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
n_completed_buffers++;
}
- g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
+ g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
dcqs.clear_n_completed_buffers();
assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}
@@ -2347,6 +2331,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
case GCCause::_g1_humongous_allocation: return true;
case GCCause::_update_allocation_context_stats_inc: return true;
+ case GCCause::_wb_conc_mark: return true;
default: return false;
}
}
@@ -2448,13 +2433,24 @@ void G1CollectedHeap::register_concurrent_cycle_end() {
_gc_timer_cm->register_gc_end();
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
+ // Clear state variables to prepare for the next concurrent cycle.
_concurrent_cycle_started = false;
+ _heap_summary_sent = false;
}
}
void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
if (_concurrent_cycle_started) {
- trace_heap_after_gc(_gc_tracer_cm);
+ // This function can be called when:
+ //  * the cleanup pause is run,
+ //  * the concurrent cycle is aborted before the cleanup pause, or
+ //  * the concurrent cycle is aborted after the cleanup pause,
+ //    but before the concurrent cycle end has been registered.
+ // Make sure that we only send the heap information once.
+ if (!_heap_summary_sent) {
+ trace_heap_after_gc(_gc_tracer_cm);
+ _heap_summary_sent = true;
+ }
}
}
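
The new _heap_summary_sent flag makes the after-cycle trace idempotent across those three call paths; register_concurrent_cycle_end() re-arms it for the next cycle. The pattern in isolation, with stand-ins for the tracer calls:

// Once-per-cycle notification, tolerant of being reached from multiple
// shutdown paths. The tracer call is elided.
struct CycleState {
  bool started = false;
  bool summary_sent = false;

  void trace_after_cycle() {
    if (started && !summary_sent) {   // first caller wins
      // trace_heap_after_gc(...);
      summary_sent = true;
    }
  }

  void cycle_end() {
    started = false;
    summary_sent = false;             // re-arm for the next cycle
  }
};
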
@@ -2477,9 +2473,9 @@ G1YCType G1CollectedHeap::yc_type() {
void G1CollectedHeap::collect(GCCause::Cause cause) {
assert_heap_not_locked();
- unsigned int gc_count_before;
- unsigned int old_marking_count_before;
- unsigned int full_gc_count_before;
+ uint gc_count_before;
+ uint old_marking_count_before;
+ uint full_gc_count_before;
bool retry_gc;
do {
@@ -3292,11 +3288,12 @@ void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
- process_all_roots(true, // activate StrongRootsScope
- SO_AllCodeCache, // roots scanning options
- &rootsCl,
- &cldCl,
- &blobsCl);
+ {
+ G1RootProcessor root_processor(this);
+ root_processor.process_all_roots(&rootsCl,
+ &cldCl,
+ &blobsCl);
+ }
bool failures = rootsCl.failures() || codeRootsCl.failures();
@@ -3616,7 +3613,7 @@ void G1CollectedHeap::gc_epilogue(bool full) {
}
HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
- unsigned int gc_count_before,
+ uint gc_count_before,
bool* succeeded,
GCCause::Cause gc_cause) {
assert_heap_not_locked_and_not_at_safepoint();
@@ -3671,18 +3668,73 @@ size_t G1CollectedHeap::cards_scanned() {
return g1_rem_set()->cardsScanned();
}
-bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
- HeapRegion* region = region_at(index);
- assert(region->startsHumongous(), "Must start a humongous object");
- return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
-}
-
class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
private:
size_t _total_humongous;
size_t _candidate_humongous;
+
+ DirtyCardQueue _dcq;
+
+ // We don't nominate objects with many remembered set entries, on
+ // the assumption that such objects are likely still live.
+ bool is_remset_small(HeapRegion* region) const {
+ HeapRegionRemSet* const rset = region->rem_set();
+ return G1EagerReclaimHumongousObjectsWithStaleRefs
+ ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
+ : rset->is_empty();
+ }
+
+ bool is_typeArray_region(HeapRegion* region) const {
+ return oop(region->bottom())->is_typeArray();
+ }
+
+ bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
+ assert(region->startsHumongous(), "Must start a humongous object");
+
+ // Candidate selection must satisfy the following constraints
+ // while concurrent marking is in progress:
+ //
+ // * In order to maintain SATB invariants, an object must not be
+ // reclaimed if it was allocated before the start of marking and
+ // has not had its references scanned. Such an object must have
+ // its references (including type metadata) scanned to ensure no
+ // live objects are missed by the marking process. Objects
+ // allocated after the start of concurrent marking don't need to
+ // be scanned.
+ //
+ // * An object must not be reclaimed if it is on the concurrent
+ // mark stack. Objects allocated after the start of concurrent
+ // marking are never pushed on the mark stack.
+ //
+ // Nominating only objects allocated after the start of concurrent
+ // marking is sufficient to meet both constraints. This may miss
+ // some objects that satisfy the constraints, but the marking data
+ // structures don't support efficiently performing the needed
+ // additional tests or scrubbing of the mark stack.
+ //
+ // However, we presently only nominate is_typeArray() objects.
+ // A humongous object containing references induces remembered
+ // set entries on other regions. In order to reclaim such an
+ // object, those remembered sets would need to be cleaned up.
+ //
+ // We also treat is_typeArray() objects specially, allowing them
+ // to be reclaimed even if allocated before the start of
+ // concurrent mark. For this we rely on mark stack insertion to
+ // exclude is_typeArray() objects, preventing reclaiming an object
+ // that is in the mark stack. We also rely on the metadata for
+ // such objects to be built-in and so ensured to be kept live.
+ // Frequent allocation and drop of large binary blobs is an
+ // important use case for eager reclaim, and this special handling
+ // may reduce needed headroom.
+
+ return is_typeArray_region(region) && is_remset_small(region);
+ }
+
public:
- RegisterHumongousWithInCSetFastTestClosure() : _total_humongous(0), _candidate_humongous(0) {
+ RegisterHumongousWithInCSetFastTestClosure()
+ : _total_humongous(0),
+ _candidate_humongous(0),
+ _dcq(&JavaThread::dirty_card_queue_set()) {
}
virtual bool doHeapRegion(HeapRegion* r) {
@@ -3691,14 +3743,33 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
}
G1CollectedHeap* g1h = G1CollectedHeap::heap();
- uint region_idx = r->hrm_index();
- bool is_candidate = !g1h->humongous_region_is_always_live(region_idx);
- // Is_candidate already filters out humongous regions with some remembered set.
- // This will not lead to humongous object that we mistakenly keep alive because
- // during young collection the remembered sets will only be added to.
+ bool is_candidate = humongous_region_is_candidate(g1h, r);
+ uint rindex = r->hrm_index();
+ g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
if (is_candidate) {
- g1h->register_humongous_region_with_in_cset_fast_test(region_idx);
_candidate_humongous++;
+ g1h->register_humongous_region_with_in_cset_fast_test(rindex);
+ // is_candidate has already filtered out humongous objects with large remembered sets.
+ // If a humongous object has a few remembered set entries, we simply flush those
+ // entries into the DCQS. That will result in automatic re-evaluation of the
+ // remembered set entries during the following evacuation phase.
+ if (!r->rem_set()->is_empty()) {
+ guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
+ "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
+ G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
+ HeapRegionRemSetIterator hrrs(r->rem_set());
+ size_t card_index;
+ while (hrrs.has_next(card_index)) {
+ jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
+ if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
+ *card_ptr = CardTableModRefBS::dirty_card_val();
+ _dcq.enqueue(card_ptr);
+ }
+ }
+ r->rem_set()->clear_locked();
+ }
+ assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
}
_total_humongous++;
@@ -3707,23 +3778,29 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
size_t total_humongous() const { return _total_humongous; }
size_t candidate_humongous() const { return _candidate_humongous; }
+
+ void flush_rem_set_entries() { _dcq.flush(); }
};
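
Condensed, the nomination policy implemented by this closure is a conjunction of two cheap tests on the region. A standalone restatement with stand-in types; the flag values below are assumed for illustration, the real defaults live elsewhere in the G1 globals:

// Stand-ins for the region/remset queries used by the closure above.
struct RemSet {
  size_t occupied;
  bool is_empty() const { return occupied == 0; }
  bool occupancy_less_or_equal_than(size_t n) const { return occupied <= n; }
};

struct Region {
  RemSet rset;
  bool starts_type_array;   // oop(bottom())->is_typeArray() in the real code
};

// Mirrors the flags referenced in the diff; values assumed.
static const bool   ReclaimWithStaleRefs = true; // G1EagerReclaimHumongousObjectsWithStaleRefs
static const size_t SparseRegionEntries  = 4;    // G1RSetSparseRegionEntries

static bool is_remset_small(const Region& r) {
  return ReclaimWithStaleRefs
       ? r.rset.occupancy_less_or_equal_than(SparseRegionEntries)
       : r.rset.is_empty();
}

static bool humongous_region_is_candidate(const Region& r) {
  return r.starts_type_array && is_remset_small(r);
}
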
void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
- if (!G1ReclaimDeadHumongousObjectsAtYoungGC) {
- g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0, 0);
+ if (!G1EagerReclaimHumongousObjects) {
+ g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
return;
}
+ double time = os::elapsed_counter();
+ // Collect reclaim candidate information and register candidates with cset.
RegisterHumongousWithInCSetFastTestClosure cl;
heap_region_iterate(&cl);
- g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(cl.total_humongous(),
+
+ time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
+ g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
+ cl.total_humongous(),
cl.candidate_humongous());
_has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
- if (_has_humongous_reclaim_candidates) {
- clear_humongous_is_live_table();
- }
+ // Finally, flush all collected remembered set entries into the global DCQS to be re-checked.
+ cl.flush_rem_set_entries();
}
void
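
The timing wrapped around the iteration above converts a raw os::elapsed_counter() tick delta into milliseconds by dividing by os::elapsed_frequency(). A self-contained equivalent of that measurement using <chrono>:

#include <chrono>
#include <cstdio>

// Measure a phase in milliseconds, the same shape as the arithmetic above:
// (end_ticks - start_ticks) / ticks_per_second * 1000.
int main() {
  using clock = std::chrono::steady_clock;
  clock::time_point start = clock::now();

  volatile long sink = 0;                  // stand-in for heap_region_iterate(&cl)
  for (long i = 0; i < 1000000; i++) sink = sink + i;

  double ms = std::chrono::duration<double, std::milli>(clock::now() - start).count();
  printf("phase took %.3f ms\n", ms);
  return 0;
}
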
@@ -3901,10 +3978,10 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
- int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+ uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
workers()->active_workers() : 1);
double pause_start_sec = os::elapsedTime();
- g1_policy()->phase_times()->note_gc_start(active_workers);
+ g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
log_gc_header();
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
@@ -4014,15 +4091,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
register_humongous_regions_with_in_cset_fast_test();
+ assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
+
_cm->note_start_of_gc();
- // We should not verify the per-thread SATB buffers given that
- // we have not filtered them yet (we'll do so during the
- // GC). We also call this after finalize_cset() to
+ // We call this after finalize_cset() to
// ensure that the CSet has been finalized.
- _cm->verify_no_cset_oops(true /* verify_stacks */,
- true /* verify_enqueued_buffers */,
- false /* verify_thread_buffers */,
- true /* verify_fingers */);
+ _cm->verify_no_cset_oops();
if (_hr_printer.is_active()) {
HeapRegion* hr = g1_policy()->collection_set();
@@ -4045,16 +4119,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// Actually do the work...
evacuate_collection_set(evacuation_info);
- // We do this to mainly verify the per-thread SATB buffers
- // (which have been filtered by now) since we didn't verify
- // them earlier. No point in re-checking the stacks / enqueued
- // buffers given that the CSet has not changed since last time
- // we checked.
- _cm->verify_no_cset_oops(false /* verify_stacks */,
- false /* verify_enqueued_buffers */,
- true /* verify_thread_buffers */,
- true /* verify_fingers */);
-
free_collection_set(g1_policy()->collection_set(), evacuation_info);
eagerly_reclaim_humongous_regions();
@@ -4137,10 +4201,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// We redo the verification but now wrt to the new CSet which
// has just got initialized after the previous CSet was freed.
- _cm->verify_no_cset_oops(true /* verify_stacks */,
- true /* verify_enqueued_buffers */,
- true /* verify_thread_buffers */,
- true /* verify_fingers */);
+ _cm->verify_no_cset_oops();
_cm->note_end_of_gc();
// This timing is only used by the ergonomics to handle our pause target.
@@ -4243,29 +4304,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
return true;
}
-size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
-{
- size_t gclab_word_size;
- switch (purpose) {
- case GCAllocForSurvived:
- gclab_word_size = _survivor_plab_stats.desired_plab_sz();
- break;
- case GCAllocForTenured:
- gclab_word_size = _old_plab_stats.desired_plab_sz();
- break;
- default:
- assert(false, "unknown GCAllocPurpose");
- gclab_word_size = _old_plab_stats.desired_plab_sz();
- break;
- }
-
- // Prevent humongous PLAB sizes for two reasons:
- // * PLABs are allocated using a similar paths as oops, but should
- // never be in a humongous region
- // * Allowing humongous PLABs needlessly churns the region free lists
- return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
-}
-
void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
_drain_in_progress = false;
set_evac_failure_closure(cl);
@@ -4405,35 +4443,6 @@ void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
}
}
-HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
- size_t word_size,
- AllocationContext_t context) {
- if (purpose == GCAllocForSurvived) {
- HeapWord* result = survivor_attempt_allocation(word_size, context);
- if (result != NULL) {
- return result;
- } else {
- // Let's try to allocate in the old gen in case we can fit the
- // object there.
- return old_attempt_allocation(word_size, context);
- }
- } else {
- assert(purpose == GCAllocForTenured, "sanity");
- HeapWord* result = old_attempt_allocation(word_size, context);
- if (result != NULL) {
- return result;
- } else {
- // Let's try to allocate in the survivors in case we can fit the
- // object there.
- return survivor_attempt_allocation(word_size, context);
- }
- }
-
- ShouldNotReachHere();
- // Trying to keep some compilers happy.
- return NULL;
-}
-
void G1ParCopyHelper::mark_object(oop obj) {
assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
@@ -4476,14 +4485,14 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
assert(_worker_id == _par_scan_state->queue_num(), "sanity");
- G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
-
- if (state == G1CollectedHeap::InCSet) {
+ const InCSetState state = _g1->in_cset_state(obj);
+ if (state.is_in_cset()) {
oop forwardee;
- if (obj->is_forwarded()) {
- forwardee = obj->forwardee();
+ markOop m = obj->mark();
+ if (m->is_marked()) {
+ forwardee = (oop) m->decode_pointer();
} else {
- forwardee = _par_scan_state->copy_to_survivor_space(obj);
+ forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
}
assert(forwardee != NULL, "forwardee should not be NULL");
oopDesc::encode_store_heap_oop(p, forwardee);
@@ -4497,7 +4506,7 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
do_klass_barrier(p, forwardee);
}
} else {
- if (state == G1CollectedHeap::IsHumongous) {
+ if (state.is_humongous()) {
_g1->set_humongous_is_live(obj);
}
// The object is not in collection set. If we're a root scanning
@@ -4582,60 +4591,11 @@ class G1KlassScanClosure : public KlassClosure {
}
};
-class G1CodeBlobClosure : public CodeBlobClosure {
- class HeapRegionGatheringOopClosure : public OopClosure {
- G1CollectedHeap* _g1h;
- OopClosure* _work;
- nmethod* _nm;
-
- template <typename T>
- void do_oop_work(T* p) {
- _work->do_oop(p);
- T oop_or_narrowoop = oopDesc::load_heap_oop(p);
- if (!oopDesc::is_null(oop_or_narrowoop)) {
- oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
- HeapRegion* hr = _g1h->heap_region_containing_raw(o);
- assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
- hr->add_strong_code_root(_nm);
- }
- }
-
- public:
- HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
-
- void do_oop(oop* o) {
- do_oop_work(o);
- }
-
- void do_oop(narrowOop* o) {
- do_oop_work(o);
- }
-
- void set_nm(nmethod* nm) {
- _nm = nm;
- }
- };
-
- HeapRegionGatheringOopClosure _oc;
-public:
- G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
-
- void do_code_blob(CodeBlob* cb) {
- nmethod* nm = cb->as_nmethod_or_null();
- if (nm != NULL) {
- if (!nm->test_set_oops_do_mark()) {
- _oc.set_nm(nm);
- nm->oops_do(&_oc);
- nm->fix_oop_relocations();
- }
- }
- }
-};
-
class G1ParTask : public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
RefToScanQueueSet *_queues;
+ G1RootProcessor* _root_processor;
ParallelTaskTerminator _terminator;
uint _n_workers;
@@ -4643,10 +4603,11 @@ protected:
Mutex* stats_lock() { return &_stats_lock; }
public:
- G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
+ G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor)
: AbstractGangTask("G1 collection"),
_g1h(g1h),
_queues(task_queues),
+ _root_processor(root_processor),
_terminator(0, _queues),
_stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
{}
@@ -4660,13 +4621,7 @@ public:
ParallelTaskTerminator* terminator() { return &_terminator; }
virtual void set_for_termination(int active_workers) {
- // This task calls set_n_termination() in par_non_clean_card_iterate_work()
- // in the young space (_par_seq_tasks) in the G1 heap
- // for SequentialSubTasksDone.
- // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
- // both of which need setting by set_n_termination().
- _g1h->SharedHeap::set_n_termination(active_workers);
- _g1h->set_n_termination(active_workers);
+ _root_processor->set_num_workers(active_workers);
terminator()->reset_for_reuse(active_workers);
_n_workers = active_workers;
}
@@ -4703,8 +4658,7 @@ public:
void work(uint worker_id) {
if (worker_id >= _n_workers) return; // no work needed this round
- double start_time_ms = os::elapsedTime() * 1000.0;
- _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
+ _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, os::elapsedTime());
{
ResourceMark rm;
@@ -4736,24 +4690,21 @@ public:
false, // Process all klasses.
true); // Need to claim CLDs.
- G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
- G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
- // IM Weak code roots are handled later.
-
OopClosure* strong_root_cl;
OopClosure* weak_root_cl;
CLDClosure* strong_cld_cl;
CLDClosure* weak_cld_cl;
- CodeBlobClosure* strong_code_cl;
+
+ bool trace_metadata = false;
if (_g1h->g1_policy()->during_initial_mark_pause()) {
// We also need to mark copied objects.
strong_root_cl = &scan_mark_root_cl;
strong_cld_cl = &scan_mark_cld_cl;
- strong_code_cl = &scan_mark_code_cl;
if (ClassUnloadingWithConcurrentMark) {
weak_root_cl = &scan_mark_weak_root_cl;
weak_cld_cl = &scan_mark_weak_cld_cl;
+ trace_metadata = true;
} else {
weak_root_cl = &scan_mark_root_cl;
weak_cld_cl = &scan_mark_cld_cl;
@@ -4763,31 +4714,32 @@ public:
weak_root_cl = &scan_only_root_cl;
strong_cld_cl = &scan_only_cld_cl;
weak_cld_cl = &scan_only_cld_cl;
- strong_code_cl = &scan_only_code_cl;
}
-
- G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
-
pss.start_strong_roots();
- _g1h->g1_process_roots(strong_root_cl,
- weak_root_cl,
- &push_heap_rs_cl,
- strong_cld_cl,
- weak_cld_cl,
- strong_code_cl,
- worker_id);
+ _root_processor->evacuate_roots(strong_root_cl,
+ weak_root_cl,
+ strong_cld_cl,
+ weak_cld_cl,
+ trace_metadata,
+ worker_id);
+
+ G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
+ _root_processor->scan_remembered_sets(&push_heap_rs_cl,
+ weak_root_cl,
+ worker_id);
pss.end_strong_roots();
{
double start = os::elapsedTime();
G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
evac.do_void();
- double elapsed_ms = (os::elapsedTime()-start)*1000.0;
- double term_ms = pss.term_time()*1000.0;
- _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
- _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
+ double elapsed_sec = os::elapsedTime() - start;
+ double term_sec = pss.term_time();
+ _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
+ _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
+ _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, pss.term_attempts());
}
_g1h->g1_policy()->record_thread_age_table(pss.age_table());
_g1h->update_surviving_young_words(pss.surviving_young_words()+1);
@@ -4803,100 +4755,10 @@ public:
// destructors are executed here and are included as part of the
// "GC Worker Time".
}
-
- double end_time_ms = os::elapsedTime() * 1000.0;
- _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
+ _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
}
};
-// *** Common G1 Evacuation Stuff
-
-// This method is run in a GC worker.
-
-void
-G1CollectedHeap::
-g1_process_roots(OopClosure* scan_non_heap_roots,
- OopClosure* scan_non_heap_weak_roots,
- OopsInHeapRegionClosure* scan_rs,
- CLDClosure* scan_strong_clds,
- CLDClosure* scan_weak_clds,
- CodeBlobClosure* scan_strong_code,
- uint worker_i) {
-
- // First scan the shared roots.
- double ext_roots_start = os::elapsedTime();
- double closure_app_time_sec = 0.0;
-
- bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
- bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
-
- BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
- BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
-
- process_roots(false, // no scoping; this is parallel code
- SharedHeap::SO_None,
- &buf_scan_non_heap_roots,
- &buf_scan_non_heap_weak_roots,
- scan_strong_clds,
- // Unloading Initial Marks handle the weak CLDs separately.
- (trace_metadata ? NULL : scan_weak_clds),
- scan_strong_code);
-
- // Now the CM ref_processor roots.
- if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
- // We need to treat the discovered reference lists of the
- // concurrent mark ref processor as roots and keep entries
- // (which are added by the marking threads) on them live
- // until they can be processed at the end of marking.
- ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
- }
-
- if (trace_metadata) {
- // Barrier to make sure all workers passed
- // the strong CLD and strong nmethods phases.
- active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
-
- // Now take the complement of the strong CLDs.
- ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
- }
-
- // Finish up any enqueued closure apps (attributed as object copy time).
- buf_scan_non_heap_roots.done();
- buf_scan_non_heap_weak_roots.done();
-
- double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
- + buf_scan_non_heap_weak_roots.closure_app_seconds();
-
- g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
-
- double ext_root_time_ms =
- ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
-
- g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
-
- // During conc marking we have to filter the per-thread SATB buffers
- // to make sure we remove any oops into the CSet (which will show up
- // as implicitly live).
- double satb_filtering_ms = 0.0;
- if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
- if (mark_in_progress()) {
- double satb_filter_start = os::elapsedTime();
-
- JavaThread::satb_mark_queue_set().filter_thread_buffers();
-
- satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
- }
- }
- g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
-
- // Now scan the complement of the collection set.
- G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
-
- g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
-
- _process_strong_tasks->all_tasks_completed();
-}
-
class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
private:
BoolObjectClosure* _is_alive;
@@ -5184,12 +5046,8 @@ class G1KlassCleaningTask : public StackObj {
public:
void clean_klass(InstanceKlass* ik) {
- ik->clean_implementors_list(_is_alive);
- ik->clean_method_data(_is_alive);
+ ik->clean_weak_instanceklass_links(_is_alive);
- // G1 specific cleanup work that has
- // been moved here to be done in parallel.
- ik->clean_dependent_nmethods();
if (JvmtiExport::has_redefined_a_class()) {
InstanceKlass::purge_previous_versions(ik);
}
@@ -5310,7 +5168,8 @@ class G1RedirtyLoggedCardsTask : public AbstractGangTask {
G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
virtual void work(uint worker_id) {
- double start_time = os::elapsedTime();
+ G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->g1_policy()->phase_times();
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
RedirtyLoggedCardTableEntryClosure cl;
if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
@@ -5319,9 +5178,7 @@ class G1RedirtyLoggedCardsTask : public AbstractGangTask {
_queue->apply_closure_to_all_completed_buffers(&cl);
}
- G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
- timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
- timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
+ phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_processed());
}
};
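
G1GCParPhaseTimesTracker turns the old manual start/stop bookkeeping into an RAII scope: construction records the start time and destruction attributes the elapsed time to the phase, so every exit path is covered. A minimal sketch of that shape with a stand-in recorder:

#include <chrono>

enum Phase { RedirtyCards, NumPhases };

// Stand-in for G1GCPhaseTimes.
struct PhaseTimes {
  double secs[NumPhases] = {};
  void record_time_secs(Phase p, double s) { secs[p] = s; }
};

class PhaseTracker {                       // cf. G1GCParPhaseTimesTracker
  PhaseTimes* _times;
  Phase _phase;
  std::chrono::steady_clock::time_point _start;
public:
  PhaseTracker(PhaseTimes* t, Phase p)
      : _times(t), _phase(p), _start(std::chrono::steady_clock::now()) {}
  ~PhaseTracker() {                        // timing stops when the scope closes
    std::chrono::duration<double> d = std::chrono::steady_clock::now() - _start;
    _times->record_time_secs(_phase, d.count());
  }
};

void work(PhaseTimes* times) {
  PhaseTracker x(times, RedirtyCards);     // mirrors the tracker in work() above
  // ... redirty the logged cards ...
}
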
@@ -5382,17 +5239,17 @@ public:
oop obj = *p;
assert(obj != NULL, "the caller should have filtered out NULL values");
- G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
- if (cset_state == G1CollectedHeap::InNeither) {
+ const InCSetState cset_state = _g1->in_cset_state(obj);
+ if (!cset_state.is_in_cset_or_humongous()) {
return;
}
- if (cset_state == G1CollectedHeap::InCSet) {
+ if (cset_state.is_in_cset()) {
assert( obj->is_forwarded(), "invariant" );
*p = obj->forwardee();
} else {
assert(!obj->is_forwarded(), "invariant" );
- assert(cset_state == G1CollectedHeap::IsHumongous,
- err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
+ assert(cset_state.is_humongous(),
+ err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state.value()));
_g1->set_humongous_is_live(obj);
}
}
@@ -5885,7 +5742,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
n_workers = 1;
}
- G1ParTask g1_par_task(this, _task_queues);
init_for_evac_failure(NULL);
@@ -5896,7 +5752,8 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
double end_par_time_sec;
{
- StrongRootsScope srs(this);
+ G1RootProcessor root_processor(this);
+ G1ParTask g1_par_task(this, _task_queues, &root_processor);
// InitialMark needs claim bits to keep track of the marked-through CLDs.
if (g1_policy()->during_initial_mark_pause()) {
ClassLoaderDataGraph::clear_claimed_marks();
@@ -5917,18 +5774,20 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
end_par_time_sec = os::elapsedTime();
// Closing the inner scope will execute the destructor
- // for the StrongRootsScope object. We record the current
+ // for the G1RootProcessor object. We record the current
// elapsed time before closing the scope so that time
- // taken for the SRS destructor is NOT included in the
+ // taken for the destructor is NOT included in the
// reported parallel time.
}
+ G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
+
double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
- g1_policy()->phase_times()->record_par_time(par_time_ms);
+ phase_times->record_par_time(par_time_ms);
double code_root_fixup_time_ms =
(os::elapsedTime() - end_par_time_sec) * 1000.0;
- g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
+ phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
set_par_threads(0);
@@ -5939,14 +5798,15 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
// not copied during the pause.
process_discovered_references(n_workers);
- // Weak root processing.
- {
+ if (G1StringDedup::is_enabled()) {
+ double fixup_start = os::elapsedTime();
+
G1STWIsAliveClosure is_alive(this);
G1KeepAliveClosure keep_alive(this);
- JNIHandles::weak_oops_do(&is_alive, &keep_alive);
- if (G1StringDedup::is_enabled()) {
- G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
- }
+ G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, phase_times);
+
+ double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
+ phase_times->record_string_dedup_fixup_time(fixup_time_ms);
}
_allocator->release_gc_alloc_regions(n_workers, evacuation_info);
@@ -6214,6 +6074,70 @@ void G1CollectedHeap::check_bitmaps(const char* caller) {
heap_region_iterate(&cl);
guarantee(!cl.failures(), "bitmap verification");
}
+
+class G1CheckCSetFastTableClosure : public HeapRegionClosure {
+ private:
+ bool _failures;
+ public:
+ G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
+
+ virtual bool doHeapRegion(HeapRegion* hr) {
+ uint i = hr->hrm_index();
+ InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
+ if (hr->isHumongous()) {
+ if (hr->in_collection_set()) {
+ gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
+ _failures = true;
+ return true;
+ }
+ if (cset_state.is_in_cset()) {
+ gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
+ _failures = true;
+ return true;
+ }
+ if (hr->continuesHumongous() && cset_state.is_humongous()) {
+ gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
+ _failures = true;
+ return true;
+ }
+ } else {
+ if (cset_state.is_humongous()) {
+ gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
+ _failures = true;
+ return true;
+ }
+ if (hr->in_collection_set() != cset_state.is_in_cset()) {
+ gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
+ hr->in_collection_set(), cset_state.value(), i);
+ _failures = true;
+ return true;
+ }
+ if (cset_state.is_in_cset()) {
+ if (hr->is_young() != (cset_state.is_young())) {
+ gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
+ hr->is_young(), cset_state.value(), i);
+ _failures = true;
+ return true;
+ }
+ if (hr->is_old() != (cset_state.is_old())) {
+ gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
+ hr->is_old(), cset_state.value(), i);
+ _failures = true;
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ bool failures() const { return _failures; }
+};
+
+bool G1CollectedHeap::check_cset_fast_test() {
+ G1CheckCSetFastTableClosure cl;
+ _hrm.iterate(&cl);
+ return !cl.failures();
+}
#endif // PRODUCT
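
check_cset_fast_test() follows the usual HotSpot visitor shape: a closure that records failures, returns true from the per-region callback to abort the iteration early, and is compiled only into non-PRODUCT builds. The pattern in miniature, with stand-in region data:

#include <cstdio>
#include <vector>

struct Region { unsigned index; bool in_cset; bool table_says_in_cset; };

// Visitor in the style of HeapRegionClosure: returning true from the
// per-region callback aborts the scan at the first inconsistency.
struct CheckClosure {
  bool failures = false;
  bool do_region(const Region& r) {
    if (r.in_cset != r.table_says_in_cset) {
      printf("## cset inconsistency for region %u\n", r.index);
      failures = true;
      return true;    // abort
    }
    return false;     // keep going
  }
};

bool check_cset_fast_test(const std::vector<Region>& regions) {
  CheckClosure cl;
  for (const Region& r : regions) {
    if (cl.do_region(r)) break;
  }
  return !cl.failures;
}
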
void G1CollectedHeap::cleanUpCardTable() {
@@ -6409,47 +6333,47 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
// are completely up-to-date wrt to references to the humongous object.
//
// Other implementation considerations:
- // - never consider object arrays: while they are a valid target, they have not
- // been observed to be used as temporary objects.
- // - they would also pose considerable effort for cleaning up the the remembered
- // sets.
- // While this cleanup is not strictly necessary to be done (or done instantly),
- // given that their occurrence is very low, this saves us this additional
- // complexity.
+ // - never consider object arrays at this time, because cleaning up their
+ // remembered sets would take considerable effort. That cleanup is needed
+ // because stale remembered set entries might reference locations that
+ // are currently allocated into.
uint region_idx = r->hrm_index();
- if (g1h->humongous_is_live(region_idx) ||
- g1h->humongous_region_is_always_live(region_idx)) {
+ if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
+ !r->rem_set()->is_empty()) {
- if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
- gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
- r->isHumongous(),
+ if (G1TraceEagerReclaimHumongousObjects) {
+ gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d reclaim candidate %d type array %d",
region_idx,
+ obj->size()*HeapWordSize,
+ r->bottom(),
+ r->region_num(),
r->rem_set()->occupied(),
r->rem_set()->strong_code_roots_list_length(),
next_bitmap->isMarked(r->bottom()),
- g1h->humongous_is_live(region_idx),
- obj->is_objArray()
+ g1h->is_humongous_reclaim_candidate(region_idx),
+ obj->is_typeArray()
);
}
return false;
}
- guarantee(!obj->is_objArray(),
- err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
+ guarantee(obj->is_typeArray(),
+ err_msg("Only eagerly reclaiming type arrays is supported, but the object "
+ PTR_FORMAT " is not.",
r->bottom()));
- if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
- gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
- r->isHumongous(),
- r->bottom(),
+ if (G1TraceEagerReclaimHumongousObjects) {
+ gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d reclaim candidate %d type array %d",
region_idx,
+ obj->size()*HeapWordSize,
+ r->bottom(),
r->region_num(),
r->rem_set()->occupied(),
r->rem_set()->strong_code_roots_list_length(),
next_bitmap->isMarked(r->bottom()),
- g1h->humongous_is_live(region_idx),
- obj->is_objArray()
+ g1h->is_humongous_reclaim_candidate(region_idx),
+ obj->is_typeArray()
);
}
// Need to clear mark bit of the humongous object if already set.
@@ -6480,7 +6404,8 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
assert_at_safepoint(true);
- if (!G1ReclaimDeadHumongousObjectsAtYoungGC || !_has_humongous_reclaim_candidates) {
+ if (!G1EagerReclaimHumongousObjects ||
+ (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) {
g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
return;
}
@@ -6793,20 +6718,20 @@ void G1CollectedHeap::set_par_threads() {
HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
uint count,
- GCAllocPurpose ap) {
+ InCSetState dest) {
assert(FreeList_lock->owned_by_self(), "pre-condition");
- if (count < g1_policy()->max_regions(ap)) {
- bool survivor = (ap == GCAllocForSurvived);
+ if (count < g1_policy()->max_regions(dest)) {
+ const bool is_survivor = (dest.is_young());
HeapRegion* new_alloc_region = new_region(word_size,
- !survivor,
+ !is_survivor,
true /* do_expand */);
if (new_alloc_region != NULL) {
// We really only need to do this for old regions given that we
// should never scan survivors. But it doesn't hurt to do it
// for survivors too.
- new_alloc_region->record_top_and_timestamp();
- if (survivor) {
+ new_alloc_region->record_timestamp();
+ if (is_survivor) {
new_alloc_region->set_survivor();
_hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
check_bitmaps("Survivor Region Allocation", new_alloc_region);
@@ -6818,8 +6743,6 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
bool during_im = g1_policy()->during_initial_mark_pause();
new_alloc_region->note_start_of_copying(during_im);
return new_alloc_region;
- } else {
- g1_policy()->note_alloc_region_limit_reached(ap);
}
}
return NULL;
@@ -6827,11 +6750,11 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes,
- GCAllocPurpose ap) {
+ InCSetState dest) {
bool during_im = g1_policy()->during_initial_mark_pause();
alloc_region->note_end_of_copying(during_im);
g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
- if (ap == GCAllocForSurvived) {
+ if (dest.is_young()) {
young_list()->add_survivor_region(alloc_region);
} else {
_old_set.add(alloc_region);
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index 6aaa9dd5f..500390af1 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
+#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
@@ -212,6 +213,9 @@ class G1CollectedHeap : public SharedHeap {
// Other related classes.
friend class G1MarkSweep;
+ // Testing classes.
+ friend class G1CheckCSetFastTableClosure;
+
private:
// The one and only G1CollectedHeap, so static functions can find it.
static G1CollectedHeap* _g1h;
@@ -229,7 +233,6 @@ private:
// It keeps track of the humongous regions.
HeapRegionSet _humongous_set;
- void clear_humongous_is_live_table();
void eagerly_reclaim_humongous_regions();
// The number of regions we could create by expansion.
@@ -299,22 +302,26 @@ private:
// Helper for monitoring and management support.
G1MonitoringSupport* _g1mm;
- // Records whether the region at the given index is kept live by roots or
- // references from the young generation.
- class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
+ // Records whether the region at the given index is (still) a
+ // candidate for eager reclaim. Only valid for humongous start
+ // regions; other regions have unspecified values. Humongous start
+ // regions are initialized at start of collection pause, with
+ // candidates removed from the set as they are found reachable from
+ // roots or the young generation.
+ class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
protected:
bool default_value() const { return false; }
public:
void clear() { G1BiasedMappedArray<bool>::clear(); }
- void set_live(uint region) {
- set_by_index(region, true);
+ void set_candidate(uint region, bool value) {
+ set_by_index(region, value);
}
- bool is_live(uint region) {
+ bool is_candidate(uint region) {
return get_by_index(region);
}
};
- HumongousIsLiveBiasedMappedArray _humongous_is_live;
+ HumongousReclaimCandidates _humongous_reclaim_candidates;
// Stores whether during humongous object registration we found candidate regions.
// If not, we can skip a few steps.
bool _has_humongous_reclaim_candidates;
@@ -339,13 +346,14 @@ private:
// Keeps track of how many "old marking cycles" (i.e., Full GCs or
// concurrent cycles) we have started.
- volatile unsigned int _old_marking_cycles_started;
+ volatile uint _old_marking_cycles_started;
// Keeps track of how many "old marking cycles" (i.e., Full GCs or
// concurrent cycles) we have completed.
- volatile unsigned int _old_marking_cycles_completed;
+ volatile uint _old_marking_cycles_completed;
bool _concurrent_cycle_started;
+ bool _heap_summary_sent;
// This is a non-product method that is helpful for testing. It is
// called at the end of a GC and artificially expands the heap by
@@ -362,6 +370,12 @@ private:
// heap after a compaction.
void print_hrm_post_compaction();
+ // Create a memory mapper for auxiliary data structures of the given size and
+ // translation factor.
+ static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
+ size_t size,
+ size_t translation_factor);
+
double verify(bool guard, const char* msg);
void verify_before_gc();
void verify_after_gc();
@@ -510,22 +524,22 @@ protected:
// the mutator alloc region without taking the Heap_lock. This
// should only be used for non-humongous allocations.
inline HeapWord* attempt_allocation(size_t word_size,
- unsigned int* gc_count_before_ret,
- int* gclocker_retry_count_ret);
+ uint* gc_count_before_ret,
+ uint* gclocker_retry_count_ret);
// Second-level mutator allocation attempt: take the Heap_lock and
// retry the allocation attempt, potentially scheduling a GC
// pause. This should only be used for non-humongous allocations.
HeapWord* attempt_allocation_slow(size_t word_size,
AllocationContext_t context,
- unsigned int* gc_count_before_ret,
- int* gclocker_retry_count_ret);
+ uint* gc_count_before_ret,
+ uint* gclocker_retry_count_ret);
// Takes the Heap_lock and attempts a humongous allocation. It can
// potentially schedule a GC pause.
HeapWord* attempt_allocation_humongous(size_t word_size,
- unsigned int* gc_count_before_ret,
- int* gclocker_retry_count_ret);
+ uint* gc_count_before_ret,
+ uint* gclocker_retry_count_ret);
// Allocation attempt that should be called during safepoints (e.g.,
// at the end of a successful GC). expect_null_mutator_alloc_region
@@ -545,15 +559,9 @@ protected:
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be a humongous - it must fit into a single heap region.
- HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
- size_t word_size,
- AllocationContext_t context);
-
- HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
- HeapRegion* alloc_region,
- bool par,
- size_t word_size);
-
+ inline HeapWord* par_allocate_during_gc(InCSetState dest,
+ size_t word_size,
+ AllocationContext_t context);
// Ensure that no further allocations can happen in "r", bearing in mind
// that parallel threads might be attempting allocations.
void par_allocate_remaining_space(HeapRegion* r);
@@ -575,9 +583,9 @@ protected:
// For GC alloc regions.
HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
- GCAllocPurpose ap);
+ InCSetState dest);
void retire_gc_alloc_region(HeapRegion* alloc_region,
- size_t allocated_bytes, GCAllocPurpose ap);
+ size_t allocated_bytes, InCSetState dest);
// - if explicit_gc is true, the GC is for a System.gc() or a heap
// inspection request and should collect the entire heap
@@ -638,26 +646,11 @@ public:
// (Rounds up to a HeapRegion boundary.)
bool expand(size_t expand_bytes);
- // Returns the PLAB statistics given a purpose.
- PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
- PLABStats* stats = NULL;
-
- switch (purpose) {
- case GCAllocForSurvived:
- stats = &_survivor_plab_stats;
- break;
- case GCAllocForTenured:
- stats = &_old_plab_stats;
- break;
- default:
- assert(false, "unrecognized GCAllocPurpose");
- }
-
- return stats;
- }
+ // Returns the PLAB statistics for a given destination.
+ inline PLABStats* alloc_buffer_stats(InCSetState dest);
- // Determines PLAB size for a particular allocation purpose.
- size_t desired_plab_sz(GCAllocPurpose purpose);
+ // Determines PLAB size for a given destination.
+ inline size_t desired_plab_sz(InCSetState dest);
inline AllocationContextStats& allocation_context_stats();
@@ -665,15 +658,15 @@ public:
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
- inline void set_humongous_is_live(oop obj);
+ // Modify the reclaim candidate set and test for presence.
+ // These are only valid for starts_humongous regions.
+ inline void set_humongous_reclaim_candidate(uint region, bool value);
+ inline bool is_humongous_reclaim_candidate(uint region);
- bool humongous_is_live(uint region) {
- return _humongous_is_live.is_live(region);
- }
+ // Remove from the reclaim candidate set. Also remove from the
+ // collection set so that later encounters avoid the slow path.
+ inline void set_humongous_is_live(oop obj);
- // Returns whether the given region (which must be a humongous (start) region)
- // is to be considered conservatively live regardless of any other conditions.
- bool humongous_region_is_always_live(uint index);
// Register the given region to be part of the collection set.
inline void register_humongous_region_with_in_cset_fast_test(uint index);
// Register regions with humongous objects (actually on the start region) in
@@ -681,8 +674,11 @@ public:
void register_humongous_regions_with_in_cset_fast_test();
// We register a region with the fast "in collection set" test. We
// simply set to true the array slot corresponding to this region.
- void register_region_with_in_cset_fast_test(HeapRegion* r) {
- _in_cset_fast_test.set_in_cset(r->hrm_index());
+ void register_young_region_with_in_cset_fast_test(HeapRegion* r) {
+ _in_cset_fast_test.set_in_young(r->hrm_index());
+ }
+ void register_old_region_with_in_cset_fast_test(HeapRegion* r) {
+ _in_cset_fast_test.set_in_old(r->hrm_index());
}
// This is a fast test on whether a reference points into the
@@ -714,7 +710,7 @@ public:
// +ExplicitGCInvokesConcurrent).
void increment_old_marking_cycles_completed(bool concurrent);
- unsigned int old_marking_cycles_completed() {
+ uint old_marking_cycles_completed() {
return _old_marking_cycles_completed;
}
@@ -773,7 +769,7 @@ protected:
// methods that call do_collection_pause() release the Heap_lock
// before the call, so it's easy to read gc_count_before just before.
HeapWord* do_collection_pause(size_t word_size,
- unsigned int gc_count_before,
+ uint gc_count_before,
bool* succeeded,
GCCause::Cause gc_cause);
@@ -812,22 +808,6 @@ protected:
// statistics or updating free lists.
void abandon_collection_set(HeapRegion* cs_head);
- // Applies "scan_non_heap_roots" to roots outside the heap,
- // "scan_rs" to roots inside the heap (having done "set_region" to
- // indicate the region in which the root resides),
- // and does "scan_metadata" If "scan_rs" is
- // NULL, then this step is skipped. The "worker_i"
- // param is for use with parallel roots processing, and should be
- // the "i" of the calling parallel worker thread's work(i) function.
- // In the sequential case this param will be ignored.
- void g1_process_roots(OopClosure* scan_non_heap_roots,
- OopClosure* scan_non_heap_weak_roots,
- OopsInHeapRegionClosure* scan_rs,
- CLDClosure* scan_strong_clds,
- CLDClosure* scan_weak_clds,
- CodeBlobClosure* scan_strong_code,
- uint worker_i);
-
// The concurrent marker (and the thread it runs in.)
ConcurrentMark* _cm;
ConcurrentMarkThread* _cmThread;
@@ -1012,23 +992,12 @@ protected:
// The heap region entry for a given worker is valid iff
// the associated time stamp value matches the current value
// of G1CollectedHeap::_gc_time_stamp.
- unsigned int* _worker_cset_start_region_time_stamp;
-
- enum G1H_process_roots_tasks {
- G1H_PS_filter_satb_buffers,
- G1H_PS_refProcessor_oops_do,
- // Leave this one last.
- G1H_PS_NumElements
- };
-
- SubTasksDone* _process_strong_tasks;
+ uint* _worker_cset_start_region_time_stamp;
volatile bool _free_regions_coming;
public:
- SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
-
void set_refine_cte_cl_concurrency(bool concurrent);
RefToScanQueue *task_queue(int i) const;
@@ -1061,21 +1030,11 @@ public:
// Initialize weak reference processing.
virtual void ref_processing_init();
- void set_par_threads(uint t) {
- SharedHeap::set_par_threads(t);
- // Done in SharedHeap but oddly there are
- // two _process_strong_tasks's in a G1CollectedHeap
- // so do it here too.
- _process_strong_tasks->set_n_threads(t);
- }
-
+ // Explicitly import set_par_threads into this scope
+ using SharedHeap::set_par_threads;
// Set _n_par_threads according to a policy TBD.
void set_par_threads();
- void set_n_termination(int t) {
- _process_strong_tasks->set_n_threads(t);
- }
-
virtual CollectedHeap::Name kind() const {
return CollectedHeap::G1CollectedHeap;
}
@@ -1150,6 +1109,10 @@ public:
// The number of regions that are completely free.
uint num_free_regions() const { return _hrm.num_free_regions(); }
+ MemoryUsage get_auxiliary_data_memory_usage() const {
+ return _hrm.get_auxiliary_data_memory_usage();
+ }
+
// The number of regions that are not completely free.
uint num_used_regions() const { return num_regions() - num_free_regions(); }
@@ -1182,6 +1145,9 @@ public:
// appropriate error messages and crash.
void check_bitmaps(const char* caller) PRODUCT_RETURN;
+ // Do sanity check on the contents of the in-cset fast test table.
+ bool check_cset_fast_test() PRODUCT_RETURN_( return true; );
+
// verify_region_sets() performs verification over the region
// lists. It will be compiled in the product code to be used when
// necessary (i.e., during heap verification).
@@ -1277,53 +1243,15 @@ public:
inline bool is_in_cset_or_humongous(const oop obj);
- enum in_cset_state_t {
- InNeither, // neither in collection set nor humongous
- InCSet, // region is in collection set only
- IsHumongous // region is a humongous start region
- };
private:
- // Instances of this class are used for quick tests on whether a reference points
- // into the collection set or is a humongous object (points into a humongous
- // object).
- // Each of the array's elements denotes whether the corresponding region is in
- // the collection set or a humongous region.
- // We use this to quickly reclaim humongous objects: by making a humongous region
- // succeed this test, we sort-of add it to the collection set. During the reference
- // iteration closures, when we see a humongous region, we simply mark it as
- // referenced, i.e. live.
- class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<char> {
- protected:
- char default_value() const { return G1CollectedHeap::InNeither; }
- public:
- void set_humongous(uintptr_t index) {
- assert(get_by_index(index) != InCSet, "Should not overwrite InCSet values");
- set_by_index(index, G1CollectedHeap::IsHumongous);
- }
-
- void clear_humongous(uintptr_t index) {
- set_by_index(index, G1CollectedHeap::InNeither);
- }
-
- void set_in_cset(uintptr_t index) {
- assert(get_by_index(index) != G1CollectedHeap::IsHumongous, "Should not overwrite IsHumongous value");
- set_by_index(index, G1CollectedHeap::InCSet);
- }
-
- bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; }
- bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; }
- G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); }
- void clear() { G1BiasedMappedArray<char>::clear(); }
- };
-
// This array is used for a quick test on whether a reference points into
// the collection set or not. Each of the array's elements denotes whether the
// corresponding region is in the collection set or not.
- G1FastCSetBiasedMappedArray _in_cset_fast_test;
+ G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
public:
- inline in_cset_state_t in_cset_state(const oop obj);
+ inline InCSetState in_cset_state(const oop obj);
// Return "TRUE" iff the given object address is in the reserved
// region of g1.
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
index 32357bc77..0fd278699 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,41 @@
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"
+PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
+ switch (dest.value()) {
+ case InCSetState::Young:
+ return &_survivor_plab_stats;
+ case InCSetState::Old:
+ return &_old_plab_stats;
+ default:
+ ShouldNotReachHere();
+ return NULL; // Keep some compilers happy
+ }
+}
+
+size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
+ size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz();
+ // Prevent humongous PLAB sizes for two reasons:
+ // * PLABs are allocated using the same paths as oops, but should
+ // never be in a humongous region
+ // * Allowing humongous PLABs needlessly churns the region free lists
+ return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
+}
+
+HeapWord* G1CollectedHeap::par_allocate_during_gc(InCSetState dest,
+ size_t word_size,
+ AllocationContext_t context) {
+ switch (dest.value()) {
+ case InCSetState::Young:
+ return survivor_attempt_allocation(word_size, context);
+ case InCSetState::Old:
+ return old_attempt_allocation(word_size, context);
+ default:
+ ShouldNotReachHere();
+ return NULL; // Keep some compilers happy
+ }
+}
+
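(The InCSetState value now selects the GC-time allocation path directly, replacing the old GCAllocPurpose switches. A minimal caller sketch — hypothetical, not part of this patch; 'dest', 'word_sz' and 'context' are assumed to come from the copy path:

    // Hypothetical copy-path fragment: one call site serves both destinations,
    // keyed by the InCSetState computed for the object being evacuated.
    HeapWord* obj_ptr = g1h->par_allocate_during_gc(dest, word_sz, context);
    if (obj_ptr == NULL) {
      // No region available for this destination; the real copy path would
      // fall back (e.g. survivor -> old) or trigger evacuation failure.
    }
)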
// Inline functions for G1CollectedHeap
inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
@@ -96,8 +131,8 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
}
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
- unsigned int* gc_count_before_ret,
- int* gclocker_retry_count_ret) {
+ uint* gc_count_before_ret,
+ uint* gclocker_retry_count_ret) {
assert_heap_not_locked_and_not_at_safepoint();
assert(!isHumongous(word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
@@ -203,7 +238,7 @@ bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}
-G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) {
+InCSetState G1CollectedHeap::in_cset_state(const oop obj) {
return _in_cset_fast_test.at((HeapWord*)obj);
}
@@ -313,20 +348,30 @@ inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
return is_obj_ill(obj, heap_region_containing(obj));
}
+inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
+ assert(_hrm.at(region)->startsHumongous(), "Must start a humongous object");
+ _humongous_reclaim_candidates.set_candidate(region, value);
+}
+
+inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
+ assert(_hrm.at(region)->startsHumongous(), "Must start a humongous object");
+ return _humongous_reclaim_candidates.is_candidate(region);
+}
+
inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
uint region = addr_to_region((HeapWord*)obj);
- // We not only set the "live" flag in the humongous_is_live table, but also
+ // Clear the flag in the humongous_reclaim_candidates table. Also
// reset the entry in the _in_cset_fast_test table so that subsequent references
// to the same humongous object do not go into the slow path again.
// This is racy, as multiple threads may at the same time enter here, but this
// is benign.
- // During collection we only ever set the "live" flag, and only ever clear the
+ // During collection we only ever clear the "candidate" flag, and only ever clear the
// entry in the in_cset_fast_table.
// We only ever evaluate the contents of these tables (in the VM thread) after
// having synchronized the worker threads with the VM thread, or in the same
// thread (i.e. within the VM thread).
- if (!_humongous_is_live.is_live(region)) {
- _humongous_is_live.set_live(region);
+ if (is_humongous_reclaim_candidate(region)) {
+ set_humongous_reclaim_candidate(region, false);
_in_cset_fast_test.clear_humongous(region);
}
}
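(Taken together with set_humongous_reclaim_candidate() above, the eager-reclaim lifecycle is roughly the following sketch; 'region' and 'obj' are placeholders:

    // Pause start: each humongous start region is registered as a candidate
    // (see register_humongous_regions_with_in_cset_fast_test()).
    g1h->set_humongous_reclaim_candidate(region, true);

    // During evacuation: the first worker to find a reference into the object
    // demotes it; clearing the fast-test entry keeps later references to the
    // same object off the slow path.
    g1h->set_humongous_is_live(obj);

    // Pause end: regions still flagged as candidates were never referenced
    // from roots or the young generation and can be reclaimed eagerly.
)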
diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
index a9d9543ff..ea80c4492 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -1084,7 +1084,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
if (update_stats) {
double cost_per_card_ms = 0.0;
if (_pending_cards > 0) {
- cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
+ cost_per_card_ms = phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) / (double) _pending_cards;
_cost_per_card_ms_seq->add(cost_per_card_ms);
}
@@ -1092,7 +1092,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
double cost_per_entry_ms = 0.0;
if (cards_scanned > 10) {
- cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
+ cost_per_entry_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
if (_last_gc_was_young) {
_cost_per_entry_ms_seq->add(cost_per_entry_ms);
} else {
@@ -1134,7 +1134,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
double cost_per_byte_ms = 0.0;
if (copied_bytes > 0) {
- cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
+ cost_per_byte_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
if (_in_marking_window) {
_cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
} else {
@@ -1143,8 +1143,8 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
}
double all_other_time_ms = pause_time_ms -
- (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
- + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());
+ (phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) + phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) +
+ phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy) + phase_times()->average_time_ms(G1GCPhaseTimes::Termination));
double young_other_time_ms = 0.0;
if (young_cset_region_length() > 0) {
@@ -1185,8 +1185,8 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
// Note that _mmu_tracker->max_gc_time() returns the time in seconds.
double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
- adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
- phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);
+ adjust_concurrent_refinement(phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS),
+ phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), update_rs_time_goal_ms);
_collectionSetChooser->verify();
}
@@ -1437,18 +1437,6 @@ bool G1CollectorPolicy::can_expand_young_list() {
return young_list_length < young_list_max_length;
}
-uint G1CollectorPolicy::max_regions(int purpose) {
- switch (purpose) {
- case GCAllocForSurvived:
- return _max_survivor_regions;
- case GCAllocForTenured:
- return REGIONS_UNLIMITED;
- default:
- ShouldNotReachHere();
- return REGIONS_UNLIMITED;
- };
-}
-
void G1CollectorPolicy::update_max_gc_locker_expansion() {
uint expansion_region_num = 0;
if (GCLockerEdenExpansionPercent > 0) {
@@ -1683,7 +1671,7 @@ void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
hr->set_next_in_collection_set(_collection_set);
_collection_set = hr;
_collection_set_bytes_used_before += hr->used();
- _g1->register_region_with_in_cset_fast_test(hr);
+ _g1->register_old_region_with_in_cset_fast_test(hr);
size_t rs_length = hr->rem_set()->occupied();
_recorded_rs_lengths += rs_length;
_old_cset_region_length += 1;
@@ -1816,7 +1804,7 @@ void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
hr->set_in_collection_set(true);
assert( hr->next_in_collection_set() == NULL, "invariant");
- _g1->register_region_with_in_cset_fast_test(hr);
+ _g1->register_young_region_with_in_cset_fast_test(hr);
}
// Add the region at the RHS of the incremental cset
@@ -2189,19 +2177,19 @@ void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTim
_other.add(pause_time_ms - phase_times->accounted_time_ms());
_root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
_parallel.add(phase_times->cur_collection_par_time_ms());
- _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
- _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
- _update_rs.add(phase_times->average_last_update_rs_time());
- _scan_rs.add(phase_times->average_last_scan_rs_time());
- _obj_copy.add(phase_times->average_last_obj_copy_time());
- _termination.add(phase_times->average_last_termination_time());
-
- double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
- phase_times->average_last_satb_filtering_times_ms() +
- phase_times->average_last_update_rs_time() +
- phase_times->average_last_scan_rs_time() +
- phase_times->average_last_obj_copy_time() +
- + phase_times->average_last_termination_time();
+ _ext_root_scan.add(phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan));
+ _satb_filtering.add(phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering));
+ _update_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS));
+ _scan_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::ScanRS));
+ _obj_copy.add(phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy));
+ _termination.add(phase_times->average_time_ms(G1GCPhaseTimes::Termination));
+
+ double parallel_known_time = phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan) +
+ phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering) +
+ phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS) +
+ phase_times->average_time_ms(G1GCPhaseTimes::ScanRS) +
+ phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy) +
+ phase_times->average_time_ms(G1GCPhaseTimes::Termination);
double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
_parallel_other.add(parallel_other_time);
diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
index 6f9e2a1f5..96e4dc63d 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
@@ -877,28 +877,20 @@ private:
public:
uint tenuring_threshold() const { return _tenuring_threshold; }
- inline GCAllocPurpose
- evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
- if (age < _tenuring_threshold && src_region->is_young()) {
- return GCAllocForSurvived;
- } else {
- return GCAllocForTenured;
- }
- }
-
- inline bool track_object_age(GCAllocPurpose purpose) {
- return purpose == GCAllocForSurvived;
- }
-
static const uint REGIONS_UNLIMITED = (uint) -1;
- uint max_regions(int purpose);
-
- // The limit on regions for a particular purpose is reached.
- void note_alloc_region_limit_reached(int purpose) {
- if (purpose == GCAllocForSurvived) {
- _tenuring_threshold = 0;
+ uint max_regions(InCSetState dest) {
+ switch (dest.value()) {
+ case InCSetState::Young:
+ return _max_survivor_regions;
+ case InCSetState::Old:
+ return REGIONS_UNLIMITED;
+ default:
+ assert(false, err_msg("Unknown dest state: " CSETSTATE_FORMAT, dest.value()));
+ break;
}
+ // keep some compilers happy
+ return 0;
}
void note_start_adding_survivor_regions() {
diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
index a13351034..9891e1062 100644
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2014 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,12 +22,13 @@
*
*/
-
#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/os.hpp"
// Helper class for avoiding interleaved logging
class LineBuffer: public StackObj {
@@ -70,184 +71,258 @@ public:
va_end(ap);
}
+ void print_cr() {
+ gclog_or_tty->print_cr("%s", _buffer);
+ _cur = _indent_level * INDENT_CHARS;
+ }
+
void append_and_print_cr(const char* format, ...) ATTRIBUTE_PRINTF(2, 3) {
va_list ap;
va_start(ap, format);
vappend(format, ap);
va_end(ap);
- gclog_or_tty->print_cr("%s", _buffer);
- _cur = _indent_level * INDENT_CHARS;
+ print_cr();
}
};
-PRAGMA_DIAG_PUSH
-PRAGMA_FORMAT_NONLITERAL_IGNORED
template <class T>
-void WorkerDataArray<T>::print(int level, const char* title) {
- if (_length == 1) {
- // No need for min, max, average and sum for only one worker
- LineBuffer buf(level);
- buf.append("[%s: ", title);
- buf.append(_print_format, _data[0]);
- buf.append_and_print_cr("]");
- return;
+class WorkerDataArray : public CHeapObj<mtGC> {
+ friend class G1GCParPhasePrinter;
+ T* _data;
+ uint _length;
+ const char* _title;
+ bool _print_sum;
+ int _log_level;
+ uint _indent_level;
+ bool _enabled;
+
+ WorkerDataArray<size_t>* _thread_work_items;
+
+ NOT_PRODUCT(T uninitialized();)
+
+ // We are caching the sum and average to only have to calculate them once.
+ // This is not done in an MT-safe way. It is intended to allow single
+ // threaded code to call sum() and average() multiple times in any order
+ // without having to worry about the cost.
+ bool _has_new_data;
+ T _sum;
+ T _min;
+ T _max;
+ double _average;
+
+ public:
+ WorkerDataArray(uint length, const char* title, bool print_sum, int log_level, uint indent_level) :
+ _title(title), _length(0), _print_sum(print_sum), _log_level(log_level), _indent_level(indent_level),
+ _has_new_data(true), _thread_work_items(NULL), _enabled(true) {
+ assert(length > 0, "Must have some workers to store data for");
+ _length = length;
+ _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
}
- T min = _data[0];
- T max = _data[0];
- T sum = 0;
+ ~WorkerDataArray() {
+ FREE_C_HEAP_ARRAY(T, _data, mtGC);
+ }
- LineBuffer buf(level);
- buf.append("[%s:", title);
- for (uint i = 0; i < _length; ++i) {
- T val = _data[i];
- min = MIN2(val, min);
- max = MAX2(val, max);
- sum += val;
- if (G1Log::finest()) {
- buf.append(" ");
- buf.append(_print_format, val);
+ void link_thread_work_items(WorkerDataArray<size_t>* thread_work_items) {
+ _thread_work_items = thread_work_items;
+ }
+
+ WorkerDataArray<size_t>* thread_work_items() { return _thread_work_items; }
+
+ void set(uint worker_i, T value) {
+ assert(worker_i < _length, err_msg("Worker %u is greater than max: %u", worker_i, _length));
+ assert(_data[worker_i] == WorkerDataArray<T>::uninitialized(), err_msg("Overwriting data for worker %u in %s", worker_i, _title));
+ _data[worker_i] = value;
+ _has_new_data = true;
+ }
+
+ void set_thread_work_item(uint worker_i, size_t value) {
+ assert(_thread_work_items != NULL, "No sub count");
+ _thread_work_items->set(worker_i, value);
+ }
+
+ T get(uint worker_i) {
+ assert(worker_i < _length, err_msg("Worker %u is greater than max: %u", worker_i, _length));
+ assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data added for worker %u", worker_i));
+ return _data[worker_i];
+ }
+
+ void add(uint worker_i, T value) {
+ assert(worker_i < _length, err_msg("Worker %u is greater than max: %u", worker_i, _length));
+ assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data to add to for worker %u", worker_i));
+ _data[worker_i] += value;
+ _has_new_data = true;
+ }
+
+ double average(){
+ calculate_totals();
+ return _average;
+ }
+
+ T sum() {
+ calculate_totals();
+ return _sum;
+ }
+
+ T minimum() {
+ calculate_totals();
+ return _min;
+ }
+
+ T maximum() {
+ calculate_totals();
+ return _max;
+ }
+
+ void reset() PRODUCT_RETURN;
+ void verify() PRODUCT_RETURN;
+
+ void set_enabled(bool enabled) { _enabled = enabled; }
+
+ int log_level() { return _log_level; }
+
+ private:
+
+ void calculate_totals(){
+ if (!_has_new_data) {
+ return;
+ }
+
+ _sum = (T)0;
+ _min = _data[0];
+ _max = _min;
+ for (uint i = 0; i < _length; ++i) {
+ T val = _data[i];
+ _sum += val;
+ _min = MIN2(_min, val);
+ _max = MAX2(_max, val);
}
+ _average = (double)_sum / (double)_length;
+ _has_new_data = false;
}
+};
- if (G1Log::finest()) {
- buf.append_and_print_cr("%s", "");
- }
-
- double avg = (double)sum / (double)_length;
- buf.append(" Min: ");
- buf.append(_print_format, min);
- buf.append(", Avg: ");
- buf.append("%.1lf", avg); // Always print average as a double
- buf.append(", Max: ");
- buf.append(_print_format, max);
- buf.append(", Diff: ");
- buf.append(_print_format, max - min);
- if (_print_sum) {
- // for things like the start and end times the sum is not
- // that relevant
- buf.append(", Sum: ");
- buf.append(_print_format, sum);
- }
- buf.append_and_print_cr("]");
-}
-PRAGMA_DIAG_POP
#ifndef PRODUCT
-template <> const int WorkerDataArray<int>::_uninitialized = -1;
-template <> const double WorkerDataArray<double>::_uninitialized = -1.0;
-template <> const size_t WorkerDataArray<size_t>::_uninitialized = (size_t)-1;
+template <>
+size_t WorkerDataArray<size_t>::uninitialized() {
+ return (size_t)-1;
+}
+
+template <>
+double WorkerDataArray<double>::uninitialized() {
+ return -1.0;
+}
template <class T>
void WorkerDataArray<T>::reset() {
for (uint i = 0; i < _length; i++) {
- _data[i] = (T)_uninitialized;
+ _data[i] = WorkerDataArray<T>::uninitialized();
+ }
+ if (_thread_work_items != NULL) {
+ _thread_work_items->reset();
}
}
template <class T>
void WorkerDataArray<T>::verify() {
+ if (!_enabled) {
+ return;
+ }
+
for (uint i = 0; i < _length; i++) {
- assert(_data[i] != _uninitialized,
- err_msg("Invalid data for worker " UINT32_FORMAT ", data: %lf, uninitialized: %lf",
- i, (double)_data[i], (double)_uninitialized));
+ assert(_data[i] != WorkerDataArray<T>::uninitialized(),
+ err_msg("Invalid data for worker %u in '%s'", i, _title));
+ }
+ if (_thread_work_items != NULL) {
+ _thread_work_items->verify();
}
}
#endif
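(A minimal standalone usage sketch of the reworked WorkerDataArray — assumed; in this patch the arrays are created and driven by G1GCPhaseTimes:

    WorkerDataArray<double>* times =
        new WorkerDataArray<double>(4, "Example (ms)",
                                    true /* print_sum */, G1Log::LevelFiner, 2);
    times->reset();               // marks all slots uninitialized (debug builds)
    for (uint i = 0; i < 4; i++) {
      times->set(i, 0.5 * i);     // exactly one sample per worker; set() asserts
    }                             // against overwrites in debug builds
    double avg = times->average();  // sum/min/max/average are computed once and
    double max = times->maximum();  // cached until the next set()/add()
)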
G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
- _max_gc_threads(max_gc_threads),
- _last_gc_worker_start_times_ms(_max_gc_threads, "%.1lf", false),
- _last_ext_root_scan_times_ms(_max_gc_threads, "%.1lf"),
- _last_satb_filtering_times_ms(_max_gc_threads, "%.1lf"),
- _last_update_rs_times_ms(_max_gc_threads, "%.1lf"),
- _last_update_rs_processed_buffers(_max_gc_threads, "%d"),
- _last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
- _last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),
- _last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
- _last_termination_times_ms(_max_gc_threads, "%.1lf"),
- _last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
- _last_gc_worker_end_times_ms(_max_gc_threads, "%.1lf", false),
- _last_gc_worker_times_ms(_max_gc_threads, "%.1lf"),
- _last_gc_worker_other_times_ms(_max_gc_threads, "%.1lf"),
- _last_redirty_logged_cards_time_ms(_max_gc_threads, "%.1lf"),
- _last_redirty_logged_cards_processed_cards(_max_gc_threads, SIZE_FORMAT),
- _cur_string_dedup_queue_fixup_worker_times_ms(_max_gc_threads, "%.1lf"),
- _cur_string_dedup_table_fixup_worker_times_ms(_max_gc_threads, "%.1lf")
+ _max_gc_threads(max_gc_threads)
{
assert(max_gc_threads > 0, "Must have some GC threads");
+
+ _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms)", false, G1Log::LevelFiner, 2);
+ _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms)", true, G1Log::LevelFiner, 2);
+
+ // Root scanning phases
+ _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots (ms)", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[StringTableRoots] = new WorkerDataArray<double>(max_gc_threads, "StringTable Roots (ms)", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms)", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms)", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms)", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots (ms)", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots (ms)", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms)", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms)", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms)", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[CodeCacheRoots] = new WorkerDataArray<double>(max_gc_threads, "CodeCache Roots (ms)", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms)", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD (ms)", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots (ms)", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms)", true, G1Log::LevelFinest, 3);
+
+ _gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms)", true, G1Log::LevelFiner, 2);
+ _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms)", true, G1Log::LevelFiner, 2);
+ _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms)", true, G1Log::LevelFiner, 2);
+ _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms)", true, G1Log::LevelFiner, 2);
+ _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms)", true, G1Log::LevelFiner, 2);
+ _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms)", true, G1Log::LevelFiner, 2);
+ _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms)", false, G1Log::LevelFiner, 2);
+ _gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms)", true, G1Log::LevelFiner, 2);
+
+ _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers", true, G1Log::LevelFiner, 3);
+ _gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_processed_buffers);
+
+ _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);
+
+ _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms)", true, G1Log::LevelFiner, 2);
+ _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms)", true, G1Log::LevelFiner, 2);
+
+ _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty", true, G1Log::LevelFinest, 3);
+ _redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards", true, G1Log::LevelFinest, 3);
+ _gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
}
-void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
+void G1GCPhaseTimes::note_gc_start(uint active_gc_threads, bool mark_in_progress) {
assert(active_gc_threads > 0, "The number of threads must be > 0");
- assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max nubmer of threads");
+ assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");
_active_gc_threads = active_gc_threads;
- _last_gc_worker_start_times_ms.reset();
- _last_ext_root_scan_times_ms.reset();
- _last_satb_filtering_times_ms.reset();
- _last_update_rs_times_ms.reset();
- _last_update_rs_processed_buffers.reset();
- _last_scan_rs_times_ms.reset();
- _last_strong_code_root_scan_times_ms.reset();
- _last_obj_copy_times_ms.reset();
- _last_termination_times_ms.reset();
- _last_termination_attempts.reset();
- _last_gc_worker_end_times_ms.reset();
- _last_gc_worker_times_ms.reset();
- _last_gc_worker_other_times_ms.reset();
-
- _last_redirty_logged_cards_time_ms.reset();
- _last_redirty_logged_cards_processed_cards.reset();
+ for (int i = 0; i < GCParPhasesSentinel; i++) {
+ _gc_par_phases[i]->reset();
+ }
+ _gc_par_phases[StringDedupQueueFixup]->set_enabled(G1StringDedup::is_enabled());
+ _gc_par_phases[StringDedupTableFixup]->set_enabled(G1StringDedup::is_enabled());
}
void G1GCPhaseTimes::note_gc_end() {
- _last_gc_worker_start_times_ms.verify();
- _last_ext_root_scan_times_ms.verify();
- _last_satb_filtering_times_ms.verify();
- _last_update_rs_times_ms.verify();
- _last_update_rs_processed_buffers.verify();
- _last_scan_rs_times_ms.verify();
- _last_strong_code_root_scan_times_ms.verify();
- _last_obj_copy_times_ms.verify();
- _last_termination_times_ms.verify();
- _last_termination_attempts.verify();
- _last_gc_worker_end_times_ms.verify();
-
for (uint i = 0; i < _active_gc_threads; i++) {
- double worker_time = _last_gc_worker_end_times_ms.get(i) - _last_gc_worker_start_times_ms.get(i);
- _last_gc_worker_times_ms.set(i, worker_time);
-
- double worker_known_time = _last_ext_root_scan_times_ms.get(i) +
- _last_satb_filtering_times_ms.get(i) +
- _last_update_rs_times_ms.get(i) +
- _last_scan_rs_times_ms.get(i) +
- _last_strong_code_root_scan_times_ms.get(i) +
- _last_obj_copy_times_ms.get(i) +
- _last_termination_times_ms.get(i);
-
- double worker_other_time = worker_time - worker_known_time;
- _last_gc_worker_other_times_ms.set(i, worker_other_time);
+ double worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);
+ record_time_secs(GCWorkerTotal, i, worker_time);
+
+ double worker_known_time =
+ _gc_par_phases[ExtRootScan]->get(i) +
+ _gc_par_phases[SATBFiltering]->get(i) +
+ _gc_par_phases[UpdateRS]->get(i) +
+ _gc_par_phases[ScanRS]->get(i) +
+ _gc_par_phases[CodeRoots]->get(i) +
+ _gc_par_phases[ObjCopy]->get(i) +
+ _gc_par_phases[Termination]->get(i);
+
+ record_time_secs(Other, i, worker_time - worker_known_time);
}
- _last_gc_worker_times_ms.verify();
- _last_gc_worker_other_times_ms.verify();
-
- _last_redirty_logged_cards_time_ms.verify();
- _last_redirty_logged_cards_processed_cards.verify();
-}
-
-void G1GCPhaseTimes::note_string_dedup_fixup_start() {
- _cur_string_dedup_queue_fixup_worker_times_ms.reset();
- _cur_string_dedup_table_fixup_worker_times_ms.reset();
-}
-
-void G1GCPhaseTimes::note_string_dedup_fixup_end() {
- _cur_string_dedup_queue_fixup_worker_times_ms.verify();
- _cur_string_dedup_table_fixup_worker_times_ms.verify();
+ for (int i = 0; i < GCParPhasesSentinel; i++) {
+ _gc_par_phases[i]->verify();
+ }
}
void G1GCPhaseTimes::print_stats(int level, const char* str, double value) {
@@ -259,7 +334,7 @@ void G1GCPhaseTimes::print_stats(int level, const char* str, size_t value) {
}
void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) {
- LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: " UINT32_FORMAT "]", str, value, workers);
+ LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %u]", str, value, workers);
}
double G1GCPhaseTimes::accounted_time_ms() {
@@ -287,46 +362,172 @@ double G1GCPhaseTimes::accounted_time_ms() {
return misc_time_ms;
}
-void G1GCPhaseTimes::print(double pause_time_sec) {
- if (_root_region_scan_wait_time_ms > 0.0) {
- print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
+// record the time a phase took in seconds
+void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_i, double secs) {
+ _gc_par_phases[phase]->set(worker_i, secs);
+}
+
+// add a number of seconds to a phase
+void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_i, double secs) {
+ _gc_par_phases[phase]->add(worker_i, secs);
+}
+
+void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_i, size_t count) {
+ _gc_par_phases[phase]->set_thread_work_item(worker_i, count);
+}
+
+// return the average time for a phase in milliseconds
+double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
+ return _gc_par_phases[phase]->average() * 1000.0;
+}
+
+double G1GCPhaseTimes::get_time_ms(GCParPhases phase, uint worker_i) {
+ return _gc_par_phases[phase]->get(worker_i) * 1000.0;
+}
+
+double G1GCPhaseTimes::sum_time_ms(GCParPhases phase) {
+ return _gc_par_phases[phase]->sum() * 1000.0;
+}
+
+double G1GCPhaseTimes::min_time_ms(GCParPhases phase) {
+ return _gc_par_phases[phase]->minimum() * 1000.0;
+}
+
+double G1GCPhaseTimes::max_time_ms(GCParPhases phase) {
+ return _gc_par_phases[phase]->maximum() * 1000.0;
+}
+
+size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_i) {
+ assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
+ return _gc_par_phases[phase]->thread_work_items()->get(worker_i);
+}
+
+size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase) {
+ assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
+ return _gc_par_phases[phase]->thread_work_items()->sum();
+}
+
+double G1GCPhaseTimes::average_thread_work_items(GCParPhases phase) {
+ assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
+ return _gc_par_phases[phase]->thread_work_items()->average();
+}
+
+size_t G1GCPhaseTimes::min_thread_work_items(GCParPhases phase) {
+ assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
+ return _gc_par_phases[phase]->thread_work_items()->minimum();
+}
+
+size_t G1GCPhaseTimes::max_thread_work_items(GCParPhases phase) {
+ assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
+ return _gc_par_phases[phase]->thread_work_items()->maximum();
+}
+
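(A worker-side sketch of the new recording API — values are illustrative; UpdateRS is one of the phases with a linked thread-work-item array:

    // Record this worker's Update RS time and how many buffers it processed.
    phase_times->record_time_secs(G1GCPhaseTimes::UpdateRS, worker_id, rs_secs);
    phase_times->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_id,
                                         processed_buffers);

    // The policy later reads aggregates instead of dedicated per-phase getters:
    double avg_ms  = phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS);
    size_t buffers = phase_times->sum_thread_work_items(G1GCPhaseTimes::UpdateRS);
)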
+class G1GCParPhasePrinter : public StackObj {
+ G1GCPhaseTimes* _phase_times;
+ public:
+ G1GCParPhasePrinter(G1GCPhaseTimes* phase_times) : _phase_times(phase_times) {}
+
+ void print(G1GCPhaseTimes::GCParPhases phase_id) {
+ WorkerDataArray<double>* phase = _phase_times->_gc_par_phases[phase_id];
+
+ if (phase->_log_level > G1Log::level() || !phase->_enabled) {
+ return;
+ }
+
+ if (phase->_length == 1) {
+ print_single_length(phase_id, phase);
+ } else {
+ print_multi_length(phase_id, phase);
+ }
+ }
+
+ private:
+
+ void print_single_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
+ // No need for min, max, average and sum for only one worker
+ LineBuffer buf(phase->_indent_level);
+ buf.append_and_print_cr("[%s: %.1lf]", phase->_title, _phase_times->get_time_ms(phase_id, 0));
+
+ if (phase->_thread_work_items != NULL) {
+ LineBuffer buf2(phase->_thread_work_items->_indent_level);
+ buf2.append_and_print_cr("[%s: "SIZE_FORMAT"]", phase->_thread_work_items->_title, _phase_times->sum_thread_work_items(phase_id));
+ }
}
- if (G1CollectedHeap::use_parallel_gc_threads()) {
- print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);
- _last_gc_worker_start_times_ms.print(2, "GC Worker Start (ms)");
- _last_ext_root_scan_times_ms.print(2, "Ext Root Scanning (ms)");
- if (_last_satb_filtering_times_ms.sum() > 0.0) {
- _last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
+
+ void print_time_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
+ for (uint i = 0; i < phase->_length; ++i) {
+ buf.append(" %.1lf", _phase_times->get_time_ms(phase_id, i));
+ }
+ buf.print_cr();
+ }
+
+ void print_count_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
+ for (uint i = 0; i < thread_work_items->_length; ++i) {
+ buf.append(" " SIZE_FORMAT, _phase_times->get_thread_work_item(phase_id, i));
+ }
+ buf.print_cr();
+ }
+
+ void print_thread_work_items(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
+ LineBuffer buf(thread_work_items->_indent_level);
+ buf.append("[%s:", thread_work_items->_title);
+
+ if (G1Log::finest()) {
+ print_count_values(buf, phase_id, thread_work_items);
}
- _last_update_rs_times_ms.print(2, "Update RS (ms)");
- _last_update_rs_processed_buffers.print(3, "Processed Buffers");
- _last_scan_rs_times_ms.print(2, "Scan RS (ms)");
- _last_strong_code_root_scan_times_ms.print(2, "Code Root Scanning (ms)");
- _last_obj_copy_times_ms.print(2, "Object Copy (ms)");
- _last_termination_times_ms.print(2, "Termination (ms)");
+
+ assert(thread_work_items->_print_sum, err_msg("%s does not have print sum true even though it is a count", thread_work_items->_title));
+
+ buf.append_and_print_cr(" Min: " SIZE_FORMAT ", Avg: %.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT ", Sum: " SIZE_FORMAT "]",
+ _phase_times->min_thread_work_items(phase_id), _phase_times->average_thread_work_items(phase_id), _phase_times->max_thread_work_items(phase_id),
+ _phase_times->max_thread_work_items(phase_id) - _phase_times->min_thread_work_items(phase_id), _phase_times->sum_thread_work_items(phase_id));
+ }
+
+ void print_multi_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
+ LineBuffer buf(phase->_indent_level);
+ buf.append("[%s:", phase->_title);
+
if (G1Log::finest()) {
- _last_termination_attempts.print(3, "Termination Attempts");
+ print_time_values(buf, phase_id, phase);
}
- _last_gc_worker_other_times_ms.print(2, "GC Worker Other (ms)");
- _last_gc_worker_times_ms.print(2, "GC Worker Total (ms)");
- _last_gc_worker_end_times_ms.print(2, "GC Worker End (ms)");
- } else {
- _last_ext_root_scan_times_ms.print(1, "Ext Root Scanning (ms)");
- if (_last_satb_filtering_times_ms.sum() > 0.0) {
- _last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)");
+
+ buf.append(" Min: %.1lf, Avg: %.1lf, Max: %.1lf, Diff: %.1lf",
+ _phase_times->min_time_ms(phase_id), _phase_times->average_time_ms(phase_id), _phase_times->max_time_ms(phase_id),
+ _phase_times->max_time_ms(phase_id) - _phase_times->min_time_ms(phase_id));
+
+ if (phase->_print_sum) {
+ // for things like the start and end times the sum is not
+ // that relevant
+ buf.append(", Sum: %.1lf", _phase_times->sum_time_ms(phase_id));
+ }
+
+ buf.append_and_print_cr("]");
+
+ if (phase->_thread_work_items != NULL) {
+ print_thread_work_items(phase_id, phase->_thread_work_items);
}
- _last_update_rs_times_ms.print(1, "Update RS (ms)");
- _last_update_rs_processed_buffers.print(2, "Processed Buffers");
- _last_scan_rs_times_ms.print(1, "Scan RS (ms)");
- _last_strong_code_root_scan_times_ms.print(1, "Code Root Scanning (ms)");
- _last_obj_copy_times_ms.print(1, "Object Copy (ms)");
}
+};
+
+void G1GCPhaseTimes::print(double pause_time_sec) {
+ G1GCParPhasePrinter par_phase_printer(this);
+
+ if (_root_region_scan_wait_time_ms > 0.0) {
+ print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
+ }
+
+ print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);
+ for (int i = 0; i <= GCMainParPhasesLast; i++) {
+ par_phase_printer.print((GCParPhases) i);
+ }
+
print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
if (G1StringDedup::is_enabled()) {
print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
- _cur_string_dedup_queue_fixup_worker_times_ms.print(2, "Queue Fixup (ms)");
- _cur_string_dedup_table_fixup_worker_times_ms.print(2, "Table Fixup (ms)");
+ for (int i = StringDedupPhasesFirst; i <= StringDedupPhasesLast; i++) {
+ par_phase_printer.print((GCParPhases) i);
+ }
}
print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
@@ -350,15 +551,16 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
- if (G1Log::finest()) {
- _last_redirty_logged_cards_time_ms.print(3, "Parallel Redirty");
- _last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards");
- }
- if (G1ReclaimDeadHumongousObjectsAtYoungGC) {
- print_stats(2, "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
+ par_phase_printer.print(RedirtyCards);
+
+ if (G1EagerReclaimHumongousObjects) {
+ print_stats(2, "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
if (G1Log::finest()) {
print_stats(3, "Humongous Total", _cur_fast_reclaim_humongous_total);
print_stats(3, "Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
+ }
+ print_stats(2, "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
+ if (G1Log::finest()) {
print_stats(3, "Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
}
}
@@ -373,3 +575,17 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
print_stats(2, "Verify After", _cur_verify_after_time_ms);
}
}
+
+G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id) :
+ _phase_times(phase_times), _phase(phase), _worker_id(worker_id) {
+ if (_phase_times != NULL) {
+ _start_time = os::elapsedTime();
+ }
+}
+
+G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() {
+ if (_phase_times != NULL) {
+ _phase_times->record_time_secs(_phase, _worker_id, os::elapsedTime() - _start_time);
+ }
+}
+
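(The tracker is meant to be used scope-style, so a phase's elapsed time is recorded automatically when the scope exits — a sketch; the work inside the scope is hypothetical:

    {
      G1GCParPhaseTimesTracker t(phase_times, G1GCPhaseTimes::ScanRS, worker_id);
      // ... this worker's Scan RS work ...
    }   // destructor records os::elapsedTime() - _start_time for ScanRS
)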
diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
index 8421eb07b..83e7235c4 100644
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2014 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,106 +26,60 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP
#include "memory/allocation.hpp"
-#include "gc_interface/gcCause.hpp"
-template <class T>
-class WorkerDataArray : public CHeapObj<mtGC> {
- T* _data;
- uint _length;
- const char* _print_format;
- bool _print_sum;
+class LineBuffer;
- NOT_PRODUCT(static const T _uninitialized;)
-
- // We are caching the sum and average to only have to calculate them once.
- // This is not done in an MT-safe way. It is intended to allow single
- // threaded code to call sum() and average() multiple times in any order
- // without having to worry about the cost.
- bool _has_new_data;
- T _sum;
- double _average;
-
- public:
- WorkerDataArray(uint length, const char* print_format, bool print_sum = true) :
- _length(length), _print_format(print_format), _print_sum(print_sum), _has_new_data(true) {
- assert(length > 0, "Must have some workers to store data for");
- _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
- }
-
- ~WorkerDataArray() {
- FREE_C_HEAP_ARRAY(T, _data, mtGC);
- }
-
- void set(uint worker_i, T value) {
- assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
- assert(_data[worker_i] == (T)-1, err_msg("Overwriting data for worker %d", worker_i));
- _data[worker_i] = value;
- _has_new_data = true;
- }
-
- T get(uint worker_i) {
- assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
- assert(_data[worker_i] != (T)-1, err_msg("No data to add to for worker %d", worker_i));
- return _data[worker_i];
- }
-
- void add(uint worker_i, T value) {
- assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
- assert(_data[worker_i] != (T)-1, err_msg("No data to add to for worker %d", worker_i));
- _data[worker_i] += value;
- _has_new_data = true;
- }
-
- double average(){
- if (_has_new_data) {
- calculate_totals();
- }
- return _average;
- }
-
- T sum() {
- if (_has_new_data) {
- calculate_totals();
- }
- return _sum;
- }
-
- void print(int level, const char* title);
-
- void reset() PRODUCT_RETURN;
- void verify() PRODUCT_RETURN;
-
- private:
-
- void calculate_totals(){
- _sum = (T)0;
- for (uint i = 0; i < _length; ++i) {
- _sum += _data[i];
- }
- _average = (double)_sum / (double)_length;
- _has_new_data = false;
- }
-};
+template <class T> class WorkerDataArray;
class G1GCPhaseTimes : public CHeapObj<mtGC> {
+ friend class G1GCParPhasePrinter;
- private:
uint _active_gc_threads;
uint _max_gc_threads;
- WorkerDataArray<double> _last_gc_worker_start_times_ms;
- WorkerDataArray<double> _last_ext_root_scan_times_ms;
- WorkerDataArray<double> _last_satb_filtering_times_ms;
- WorkerDataArray<double> _last_update_rs_times_ms;
- WorkerDataArray<int> _last_update_rs_processed_buffers;
- WorkerDataArray<double> _last_scan_rs_times_ms;
- WorkerDataArray<double> _last_strong_code_root_scan_times_ms;
- WorkerDataArray<double> _last_obj_copy_times_ms;
- WorkerDataArray<double> _last_termination_times_ms;
- WorkerDataArray<size_t> _last_termination_attempts;
- WorkerDataArray<double> _last_gc_worker_end_times_ms;
- WorkerDataArray<double> _last_gc_worker_times_ms;
- WorkerDataArray<double> _last_gc_worker_other_times_ms;
+ public:
+ enum GCParPhases {
+ GCWorkerStart,
+ ExtRootScan,
+ ThreadRoots,
+ StringTableRoots,
+ UniverseRoots,
+ JNIRoots,
+ ObjectSynchronizerRoots,
+ FlatProfilerRoots,
+ ManagementRoots,
+ SystemDictionaryRoots,
+ CLDGRoots,
+ JVMTIRoots,
+ CodeCacheRoots,
+ CMRefRoots,
+ WaitForStrongCLD,
+ WeakCLDRoots,
+ SATBFiltering,
+ UpdateRS,
+ ScanRS,
+ CodeRoots,
+ ObjCopy,
+ Termination,
+ Other,
+ GCWorkerTotal,
+ GCWorkerEnd,
+ StringDedupQueueFixup,
+ StringDedupTableFixup,
+ RedirtyCards,
+ GCParPhasesSentinel
+ };
+
+ private:
+ // Markers for grouping the phases in the GCPhases enum above
+ static const int GCMainParPhasesLast = GCWorkerEnd;
+ static const int StringDedupPhasesFirst = StringDedupQueueFixup;
+ static const int StringDedupPhasesLast = StringDedupTableFixup;
+
+ WorkerDataArray<double>* _gc_par_phases[GCParPhasesSentinel];
+ WorkerDataArray<size_t>* _update_rs_processed_buffers;
+ WorkerDataArray<size_t>* _termination_attempts;
+ WorkerDataArray<size_t>* _redirtied_cards;
double _cur_collection_par_time_ms;
double _cur_collection_code_root_fixup_time_ms;
@@ -135,9 +89,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _cur_evac_fail_restore_remsets;
double _cur_evac_fail_remove_self_forwards;
- double _cur_string_dedup_fixup_time_ms;
- WorkerDataArray<double> _cur_string_dedup_queue_fixup_worker_times_ms;
- WorkerDataArray<double> _cur_string_dedup_table_fixup_worker_times_ms;
+ double _cur_string_dedup_fixup_time_ms;
double _cur_clear_ct_time_ms;
double _cur_ref_proc_time_ms;
@@ -149,14 +101,13 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _recorded_young_cset_choice_time_ms;
double _recorded_non_young_cset_choice_time_ms;
- WorkerDataArray<double> _last_redirty_logged_cards_time_ms;
- WorkerDataArray<size_t> _last_redirty_logged_cards_processed_cards;
double _recorded_redirty_logged_cards_time_ms;
double _recorded_young_free_cset_time_ms;
double _recorded_non_young_free_cset_time_ms;
double _cur_fast_reclaim_humongous_time_ms;
+ double _cur_fast_reclaim_humongous_register_time_ms;
size_t _cur_fast_reclaim_humongous_total;
size_t _cur_fast_reclaim_humongous_candidates;
size_t _cur_fast_reclaim_humongous_reclaimed;
@@ -171,54 +122,34 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
public:
G1GCPhaseTimes(uint max_gc_threads);
- void note_gc_start(uint active_gc_threads);
+ void note_gc_start(uint active_gc_threads, bool mark_in_progress);
void note_gc_end();
void print(double pause_time_sec);
- void record_gc_worker_start_time(uint worker_i, double ms) {
- _last_gc_worker_start_times_ms.set(worker_i, ms);
- }
-
- void record_ext_root_scan_time(uint worker_i, double ms) {
- _last_ext_root_scan_times_ms.set(worker_i, ms);
- }
-
- void record_satb_filtering_time(uint worker_i, double ms) {
- _last_satb_filtering_times_ms.set(worker_i, ms);
- }
-
- void record_update_rs_time(uint worker_i, double ms) {
- _last_update_rs_times_ms.set(worker_i, ms);
- }
-
- void record_update_rs_processed_buffers(uint worker_i, int processed_buffers) {
- _last_update_rs_processed_buffers.set(worker_i, processed_buffers);
- }
+ // record the time a phase took in seconds
+ void record_time_secs(GCParPhases phase, uint worker_i, double secs);
- void record_scan_rs_time(uint worker_i, double ms) {
- _last_scan_rs_times_ms.set(worker_i, ms);
- }
+ // add a number of seconds to a phase
+ void add_time_secs(GCParPhases phase, uint worker_i, double secs);
- void record_strong_code_root_scan_time(uint worker_i, double ms) {
- _last_strong_code_root_scan_times_ms.set(worker_i, ms);
- }
+ void record_thread_work_item(GCParPhases phase, uint worker_i, size_t count);
- void record_obj_copy_time(uint worker_i, double ms) {
- _last_obj_copy_times_ms.set(worker_i, ms);
- }
+ // return the average time for a phase in milliseconds
+ double average_time_ms(GCParPhases phase);
- void add_obj_copy_time(uint worker_i, double ms) {
- _last_obj_copy_times_ms.add(worker_i, ms);
- }
+ size_t sum_thread_work_items(GCParPhases phase);
- void record_termination(uint worker_i, double ms, size_t attempts) {
- _last_termination_times_ms.set(worker_i, ms);
- _last_termination_attempts.set(worker_i, attempts);
- }
+ private:
+ double get_time_ms(GCParPhases phase, uint worker_i);
+ double sum_time_ms(GCParPhases phase);
+ double min_time_ms(GCParPhases phase);
+ double max_time_ms(GCParPhases phase);
+ size_t get_thread_work_item(GCParPhases phase, uint worker_i);
+ double average_thread_work_items(GCParPhases phase);
+ size_t min_thread_work_items(GCParPhases phase);
+ size_t max_thread_work_items(GCParPhases phase);
- void record_gc_worker_end_time(uint worker_i, double ms) {
- _last_gc_worker_end_times_ms.set(worker_i, ms);
- }
+ public:
void record_clear_ct_time(double ms) {
_cur_clear_ct_time_ms = ms;
@@ -248,21 +179,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_cur_evac_fail_remove_self_forwards = ms;
}
- void note_string_dedup_fixup_start();
- void note_string_dedup_fixup_end();
-
void record_string_dedup_fixup_time(double ms) {
_cur_string_dedup_fixup_time_ms = ms;
}
- void record_string_dedup_queue_fixup_worker_time(uint worker_id, double ms) {
- _cur_string_dedup_queue_fixup_worker_times_ms.set(worker_id, ms);
- }
-
- void record_string_dedup_table_fixup_worker_time(uint worker_id, double ms) {
- _cur_string_dedup_table_fixup_worker_times_ms.set(worker_id, ms);
- }
-
void record_ref_proc_time(double ms) {
_cur_ref_proc_time_ms = ms;
}
@@ -283,7 +203,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_recorded_non_young_free_cset_time_ms = time_ms;
}
- void record_fast_reclaim_humongous_stats(size_t total, size_t candidates) {
+ void record_fast_reclaim_humongous_stats(double time_ms, size_t total, size_t candidates) {
+ _cur_fast_reclaim_humongous_register_time_ms = time_ms;
_cur_fast_reclaim_humongous_total = total;
_cur_fast_reclaim_humongous_candidates = candidates;
}
@@ -301,14 +222,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_recorded_non_young_cset_choice_time_ms = time_ms;
}
- void record_redirty_logged_cards_time_ms(uint worker_i, double time_ms) {
- _last_redirty_logged_cards_time_ms.set(worker_i, time_ms);
- }
-
- void record_redirty_logged_cards_processed_cards(uint worker_i, size_t processed_buffers) {
- _last_redirty_logged_cards_processed_cards.set(worker_i, processed_buffers);
- }
-
void record_redirty_logged_cards_time_ms(double time_ms) {
_recorded_redirty_logged_cards_time_ms = time_ms;
}
@@ -362,38 +275,16 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double fast_reclaim_humongous_time_ms() {
return _cur_fast_reclaim_humongous_time_ms;
}
+};
- double average_last_update_rs_time() {
- return _last_update_rs_times_ms.average();
- }
-
- int sum_last_update_rs_processed_buffers() {
- return _last_update_rs_processed_buffers.sum();
- }
-
- double average_last_scan_rs_time(){
- return _last_scan_rs_times_ms.average();
- }
-
- double average_last_strong_code_root_scan_time(){
- return _last_strong_code_root_scan_times_ms.average();
- }
-
- double average_last_obj_copy_time() {
- return _last_obj_copy_times_ms.average();
- }
-
- double average_last_termination_time() {
- return _last_termination_times_ms.average();
- }
-
- double average_last_ext_root_scan_time() {
- return _last_ext_root_scan_times_ms.average();
- }
-
- double average_last_satb_filtering_times_ms() {
- return _last_satb_filtering_times_ms.average();
- }
+class G1GCParPhaseTimesTracker : public StackObj {
+ double _start_time;
+ G1GCPhaseTimes::GCParPhases _phase;
+ G1GCPhaseTimes* _phase_times;
+ uint _worker_id;
+public:
+ G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id);
+ ~G1GCParPhaseTimesTracker();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP
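The tracker declared above is a stack-allocated (RAII) helper: a worker constructs it at the start of a phase and its destructor reports the elapsed time, so no explicit stop call can be forgotten. Its .cpp side is not part of this hunk; the following is a minimal standalone sketch of the presumed shape, with std::chrono standing in for os::elapsedTime() and a stub standing in for G1GCPhaseTimes::record_time_secs(). All names here are illustrative, not HotSpot's.

#include <chrono>
#include <cstdio>

struct PhaseTimesSketch {
  // Stand-in for G1GCPhaseTimes::record_time_secs(phase, worker_id, secs).
  void record_time_secs(int phase, unsigned worker_id, double secs) {
    std::printf("phase %d, worker %u: %.6f s\n", phase, worker_id, secs);
  }
};

class ParPhaseTimesTrackerSketch {
  std::chrono::steady_clock::time_point _start;
  PhaseTimesSketch* _phase_times;
  int _phase;
  unsigned _worker_id;
 public:
  ParPhaseTimesTrackerSketch(PhaseTimesSketch* pt, int phase, unsigned worker_id)
      : _start(std::chrono::steady_clock::now()),
        _phase_times(pt), _phase(phase), _worker_id(worker_id) {}
  ~ParPhaseTimesTrackerSketch() {
    // Report the wall time spent between construction and destruction.
    std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - _start;
    _phase_times->record_time_secs(_phase, _worker_id, elapsed.count());
  }
};

A worker would then simply scope a phase, e.g. { ParPhaseTimesTrackerSketch t(&times, phase, worker_id); do_phase_work(); }, and the timing is recorded on scope exit.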
diff --git a/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp b/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp
index 0c51c318f..f1f807c67 100644
--- a/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp
+++ b/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,16 +36,13 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
if (default_use_cache()) {
_use_cache = true;
- _hot_cache_size = (1 << G1ConcRSLogCacheSize);
+ _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
_hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
- _n_hot = 0;
- _hot_cache_idx = 0;
+ reset_hot_cache_internal();
// For refining the cards in the hot cache in parallel
- uint n_workers = (ParallelGCThreads > 0 ?
- _g1h->workers()->total_workers() : 1);
- _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
+ _hot_cache_par_chunk_size = (int)(ParallelGCThreads > 0 ? ClaimChunkSize : _hot_cache_size);
_hot_cache_par_claimed_idx = 0;
_card_counts.initialize(card_counts_storage);
@@ -66,26 +63,21 @@ jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
// return it for immediate refining.
return card_ptr;
}
-
// Otherwise, the card is hot.
- jbyte* res = NULL;
- MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
- if (_n_hot == _hot_cache_size) {
- res = _hot_cache[_hot_cache_idx];
- _n_hot--;
- }
-
- // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
- _hot_cache[_hot_cache_idx] = card_ptr;
- _hot_cache_idx++;
-
- if (_hot_cache_idx == _hot_cache_size) {
- // Wrap around
- _hot_cache_idx = 0;
- }
- _n_hot++;
-
- return res;
+ size_t index = Atomic::add_ptr((intptr_t)1, (volatile intptr_t*)&_hot_cache_idx) - 1;
+ size_t masked_index = index & (_hot_cache_size - 1);
+ jbyte* current_ptr = _hot_cache[masked_index];
+
+ // Try to store the new card pointer into the cache. Compare-and-swap guards
+ // against the unlikely event of a race in which another thread has already
+ // written a different card pointer to this slot. In that case we return
+ // card_ptr itself rather than starting over. This should be OK, since
+ // card_ptr will likely be the older card already when/if that ever happens.
+ jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
+ &_hot_cache[masked_index],
+ current_ptr);
+ return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
}
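The lock-free insert above replaces the previous mutex-protected ring buffer: each thread atomically claims a slot by bumping a global index, masks it into the power-of-two table, and swaps its card in with a compare-and-swap. A minimal standalone sketch of that pattern, using C++11 atomics in place of HotSpot's Atomic::add_ptr and Atomic::cmpxchg_ptr (type and field names here are illustrative):

#include <atomic>
#include <cstddef>

typedef signed char* Card;                // stands in for jbyte*

struct HotCacheSketch {
  static const size_t Size = 1024;        // 1 << log cache size; must be a power of two
  std::atomic<size_t> _idx;
  std::atomic<Card> _cache[Size];

  HotCacheSketch() : _idx(0) {
    for (size_t i = 0; i < Size; i++) {
      _cache[i].store(nullptr, std::memory_order_relaxed);
    }
  }

  // Returns the evicted card to refine now (nullptr if the slot was empty), or
  // the caller's own card if a racing thread already replaced the slot value
  // we read, in which case the caller refines its card immediately instead.
  Card insert(Card card) {
    size_t slot = _idx.fetch_add(1) & (Size - 1);
    Card current = _cache[slot].load();
    return _cache[slot].compare_exchange_strong(current, card) ? current : card;
  }
};

Losing the race is benign for correctness: the card is simply refined right away instead of being cached.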
void G1HotCardCache::drain(uint worker_i,
@@ -98,38 +90,37 @@ void G1HotCardCache::drain(uint worker_i,
assert(_hot_cache != NULL, "Logic");
assert(!use_cache(), "cache should be disabled");
- int start_idx;
-
- while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
- int end_idx = start_idx + _hot_cache_par_chunk_size;
-
- if (start_idx ==
- Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
- // The current worker has successfully claimed the chunk [start_idx..end_idx)
- end_idx = MIN2(end_idx, _n_hot);
- for (int i = start_idx; i < end_idx; i++) {
- jbyte* card_ptr = _hot_cache[i];
- if (card_ptr != NULL) {
- if (g1rs->refine_card(card_ptr, worker_i, true)) {
- // The part of the heap spanned by the card contains references
- // that point into the current collection set.
- // We need to record the card pointer in the DirtyCardQueueSet
- // that we use for such cards.
- //
- // The only time we care about recording cards that contain
- // references that point into the collection set is during
- // RSet updating while within an evacuation pause.
- // In this case worker_i should be the id of a GC worker thread
- assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
- assert(worker_i < (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
- err_msg("incorrect worker id: "UINT32_FORMAT, worker_i));
-
- into_cset_dcq->enqueue(card_ptr);
- }
+ while (_hot_cache_par_claimed_idx < _hot_cache_size) {
+ size_t end_idx = Atomic::add_ptr((intptr_t)_hot_cache_par_chunk_size,
+ (volatile intptr_t*)&_hot_cache_par_claimed_idx);
+ size_t start_idx = end_idx - _hot_cache_par_chunk_size;
+ // The current worker has successfully claimed the chunk [start_idx..end_idx)
+ end_idx = MIN2(end_idx, _hot_cache_size);
+ for (size_t i = start_idx; i < end_idx; i++) {
+ jbyte* card_ptr = _hot_cache[i];
+ if (card_ptr != NULL) {
+ if (g1rs->refine_card(card_ptr, worker_i, true)) {
+ // The part of the heap spanned by the card contains references
+ // that point into the current collection set.
+ // We need to record the card pointer in the DirtyCardQueueSet
+ // that we use for such cards.
+ //
+ // The only time we care about recording cards that contain
+ // references that point into the collection set is during
+ // RSet updating while within an evacuation pause.
+ // In this case worker_i should be the id of a GC worker thread
+ assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
+ assert(worker_i < ParallelGCThreads,
+ err_msg("incorrect worker id: %u", worker_i));
+
+ into_cset_dcq->enqueue(card_ptr);
}
+ } else {
+ break;
}
}
}
+
// The existing entries in the hot card cache, which were just refined
// above, are discarded prior to re-enabling the cache near the end of the GC.
}
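The drain loop above likewise drops the CAS-retry claiming of the old code in favor of a single atomic add per chunk: every worker reserves ClaimChunkSize consecutive slots at a time and clamps the claimed range to the table size. A standalone sketch of the claiming scheme, with std::atomic in place of Atomic::add_ptr and the per-card work reduced to a callback (assumed names, not HotSpot's):

#include <algorithm>
#include <atomic>
#include <cstddef>

void drain_sketch(std::atomic<size_t>& claimed, size_t cache_size,
                  void (*process)(size_t index)) {
  const size_t chunk = 32;                    // ClaimChunkSize
  while (claimed.load() < cache_size) {
    // fetch_add returns the old value; [start, end) is this worker's chunk.
    size_t start = claimed.fetch_add(chunk);
    size_t end = std::min(start + chunk, cache_size);
    for (size_t i = start; i < end; i++) {
      process(i);                             // refine_card() in the original
    }
  }
}

Overshooting the end with the add is harmless: the clamp keeps the processed range in bounds, and the while condition stops further claims.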
diff --git a/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp b/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp
index becd68327..b065e36ce 100644
--- a/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp
+++ b/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,21 +54,33 @@ class HeapRegion;
// code, increasing throughput.
class G1HotCardCache: public CHeapObj<mtGC> {
- G1CollectedHeap* _g1h;
+
+ G1CollectedHeap* _g1h;
+
+ bool _use_cache;
+
+ G1CardCounts _card_counts;
// The card cache table
- jbyte** _hot_cache;
+ jbyte** _hot_cache;
+
+ size_t _hot_cache_size;
+
+ int _hot_cache_par_chunk_size;
- int _hot_cache_size;
- int _n_hot;
- int _hot_cache_idx;
+ // Avoids false sharing when concurrently updating _hot_cache_idx or
+ // _hot_cache_par_claimed_idx. The two fields are never updated at the
+ // same time, so there is no need to pad between them as well.
+ char _pad_before[DEFAULT_CACHE_LINE_SIZE];
- int _hot_cache_par_chunk_size;
- volatile int _hot_cache_par_claimed_idx;
+ volatile size_t _hot_cache_idx;
- bool _use_cache;
+ volatile size_t _hot_cache_par_claimed_idx;
- G1CardCounts _card_counts;
+ char _pad_after[DEFAULT_CACHE_LINE_SIZE];
+
+ // The number of cached cards a thread claims when flushing the cache
+ static const int ClaimChunkSize = 32;
bool default_use_cache() const {
return (G1ConcRSLogCacheSize > 0);
@@ -110,16 +122,25 @@ class G1HotCardCache: public CHeapObj<mtGC> {
void reset_hot_cache() {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
assert(Thread::current()->is_VM_thread(), "Current thread should be the VMthread");
- _hot_cache_idx = 0; _n_hot = 0;
+ if (default_use_cache()) {
+ reset_hot_cache_internal();
+ }
}
- bool hot_cache_is_empty() { return _n_hot == 0; }
-
// Zeros the values in the card counts table for entire committed heap
void reset_card_counts();
// Zeros the values in the card counts table for the given region
void reset_card_counts(HeapRegion* hr);
+
+ private:
+ void reset_hot_cache_internal() {
+ assert(_hot_cache != NULL, "Logic");
+ _hot_cache_idx = 0;
+ for (size_t i = 0; i < _hot_cache_size; i++) {
+ _hot_cache[i] = NULL;
+ }
+ }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
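The two pad arrays above bracket the hot counters so that unrelated fields cannot share a cache line with them; the counters deliberately share a line with each other, since the mutator-side _hot_cache_idx and the GC-side _hot_cache_par_claimed_idx are never written in the same phase. A minimal layout sketch, assuming a 64-byte line where HotSpot uses the platform-defined DEFAULT_CACHE_LINE_SIZE:

#include <cstddef>

struct PaddedIndicesSketch {
  char _pad_before[64];                       // keeps preceding fields off this line
  volatile size_t _hot_cache_idx;             // bumped during mutator-time insertion
  volatile size_t _hot_cache_par_claimed_idx; // bumped during GC-time draining
  char _pad_after[64];                        // keeps following fields off this line
};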
diff --git a/src/share/vm/gc_implementation/g1/g1InCSetState.hpp b/src/share/vm/gc_implementation/g1/g1InCSetState.hpp
new file mode 100644
index 000000000..f13eaa0ae
--- /dev/null
+++ b/src/share/vm/gc_implementation/g1/g1InCSetState.hpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
+
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "memory/allocation.hpp"
+
+// Per-region state during garbage collection.
+struct InCSetState {
+ public:
+ // We use different types to represent the state value. In particular, SPARC puts
+ // values in structs from "left to right", i.e. MSB to LSB. This results in many
+ // unnecessary shift operations when loading and storing values of this type.
+ // This degrades performance significantly (>10%) on that platform.
+ // Other tested ABIs do not seem to have this problem, and actually tend to
+ // favor smaller types, so we use the smallest usable type there.
+#ifdef SPARC
+ #define CSETSTATE_FORMAT INTPTR_FORMAT
+ typedef intptr_t in_cset_state_t;
+#else
+ #define CSETSTATE_FORMAT "%d"
+ typedef int8_t in_cset_state_t;
+#endif
+ private:
+ in_cset_state_t _value;
+ public:
+ enum {
+ // The values were selected to micro-optimize the encoding and the
+ // frequency of the checks.
+ // The most common check is whether the region is in the collection set or not.
+ // This encoding allows us to use an != 0 check which in some architectures
+ // (x86*) can be encoded slightly more efficiently than a normal comparison
+ // against zero.
+ // The same situation occurs when checking whether the region is humongous
+ // or not, which is encoded by values < 0.
+ // The other values are simply encoded in increasing generation order, which
+ // makes getting the next generation fast by a simple increment.
+ Humongous = -1, // The region is humongous - note that actually any value < 0 would be possible here.
+ NotInCSet = 0, // The region is not in the collection set.
+ Young = 1, // The region is in the collection set and a young region.
+ Old = 2, // The region is in the collection set and an old region.
+ Num
+ };
+
+ InCSetState(in_cset_state_t value = NotInCSet) : _value(value) {
+ assert(is_valid(), err_msg("Invalid state %d", _value));
+ }
+
+ in_cset_state_t value() const { return _value; }
+
+ void set_old() { _value = Old; }
+
+ bool is_in_cset_or_humongous() const { return _value != NotInCSet; }
+ bool is_in_cset() const { return _value > NotInCSet; }
+ bool is_humongous() const { return _value < NotInCSet; }
+ bool is_young() const { return _value == Young; }
+ bool is_old() const { return _value == Old; }
+
+#ifdef ASSERT
+ bool is_default() const { return !is_in_cset_or_humongous(); }
+ bool is_valid() const { return (_value >= Humongous) && (_value < Num); }
+ bool is_valid_gen() const { return (_value >= Young && _value <= Old); }
+#endif
+};
+
+// Instances of this class are used for quick tests on whether a reference points
+// into the collection set (and, if so, into which generation) or at a humongous object.
+//
+// Each of the array's elements indicates whether the corresponding region is in
+// the collection set (and, if so, in which generation) or is a humongous region.
+//
+// We use this to speed up reference processing during young collection and to
+// quickly reclaim humongous objects. For the latter, by making a humongous region
+// succeed this test, we sort-of add it to the collection set. In the reference
+// iteration closures, when we encounter a humongous region, we then simply mark
+// it as referenced, i.e. live.
+class G1InCSetStateFastTestBiasedMappedArray : public G1BiasedMappedArray<InCSetState> {
+ protected:
+ InCSetState default_value() const { return InCSetState::NotInCSet; }
+ public:
+ void set_humongous(uintptr_t index) {
+ assert(get_by_index(index).is_default(),
+ err_msg("State at index " INTPTR_FORMAT" should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
+ set_by_index(index, InCSetState::Humongous);
+ }
+
+ void clear_humongous(uintptr_t index) {
+ set_by_index(index, InCSetState::NotInCSet);
+ }
+
+ void set_in_young(uintptr_t index) {
+ assert(get_by_index(index).is_default(),
+ err_msg("State at index " INTPTR_FORMAT" should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
+ set_by_index(index, InCSetState::Young);
+ }
+
+ void set_in_old(uintptr_t index) {
+ assert(get_by_index(index).is_default(),
+ err_msg("State at index " INTPTR_FORMAT" should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
+ set_by_index(index, InCSetState::Old);
+ }
+
+ bool is_in_cset_or_humongous(HeapWord* addr) const { return at(addr).is_in_cset_or_humongous(); }
+ bool is_in_cset(HeapWord* addr) const { return at(addr).is_in_cset(); }
+ InCSetState at(HeapWord* addr) const { return get_by_address(addr); }
+ void clear() { G1BiasedMappedArray<InCSetState>::clear(); }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
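The encoding comments above translate directly into cheap machine tests: membership in the collection set or a humongous region is a single compare against zero, humongous is a sign test, and the next generation is a plain increment. A small self-contained illustration of those invariants:

#include <cassert>
#include <cstdint>

enum : int8_t { Humongous = -1, NotInCSet = 0, Young = 1, Old = 2 };

int main() {
  int8_t state = Young;
  assert(state != NotInCSet);   // in cset or humongous: one != 0 check
  assert(state > NotInCSet);    // in the collection set proper
  assert(!(state < NotInCSet)); // not humongous (sign test)
  int8_t next = state + 1;      // next generation by simple increment
  assert(next == Old);
  return 0;
}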
diff --git a/src/share/vm/gc_implementation/g1/g1Log.hpp b/src/share/vm/gc_implementation/g1/g1Log.hpp
index b8da001cf..6f72c8fbc 100644
--- a/src/share/vm/gc_implementation/g1/g1Log.hpp
+++ b/src/share/vm/gc_implementation/g1/g1Log.hpp
@@ -28,6 +28,7 @@
#include "memory/allocation.hpp"
class G1Log : public AllStatic {
+ public:
typedef enum {
LevelNone,
LevelFine,
@@ -35,6 +36,7 @@ class G1Log : public AllStatic {
LevelFinest
} LogLevel;
+ private:
static LogLevel _level;
public:
@@ -50,6 +52,10 @@ class G1Log : public AllStatic {
return _level == LevelFinest;
}
+ static LogLevel level() {
+ return _level;
+ }
+
static void init();
};
diff --git a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
index 7e5d0873b..b1bc68275 100644
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
@@ -31,6 +31,7 @@
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
+#include "gc_implementation/g1/g1RootProcessor.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
@@ -126,21 +127,22 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
GenMarkSweep::trace(" 1");
- SharedHeap* sh = SharedHeap::heap();
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
// Need cleared claim bits for the roots processing
ClassLoaderDataGraph::clear_claimed_marks();
MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
- sh->process_strong_roots(true, // activate StrongRootsScope
- SharedHeap::SO_None,
- &GenMarkSweep::follow_root_closure,
- &GenMarkSweep::follow_cld_closure,
- &follow_code_closure);
+ {
+ G1RootProcessor root_processor(g1h);
+ root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
+ &GenMarkSweep::follow_cld_closure,
+ &follow_code_closure);
+ }
// Process reference objects found during marking
ReferenceProcessor* rp = GenMarkSweep::ref_processor();
- assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");
+ assert(rp == g1h->ref_processor_stw(), "Sanity");
rp->setup_policy(clear_all_softrefs);
const ReferenceProcessorStats& stats =
@@ -226,6 +228,12 @@ class G1AdjustPointersClosure: public HeapRegionClosure {
}
};
+class G1AlwaysTrueClosure: public BoolObjectClosure {
+public:
+ bool do_object_b(oop p) { return true; }
+};
+static G1AlwaysTrueClosure always_true;
+
void G1MarkSweep::mark_sweep_phase3() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -233,24 +241,23 @@ void G1MarkSweep::mark_sweep_phase3() {
GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
GenMarkSweep::trace("3");
- SharedHeap* sh = SharedHeap::heap();
-
// Need cleared claim bits for the roots processing
ClassLoaderDataGraph::clear_claimed_marks();
CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
- sh->process_all_roots(true, // activate StrongRootsScope
- SharedHeap::SO_AllCodeCache,
- &GenMarkSweep::adjust_pointer_closure,
- &GenMarkSweep::adjust_cld_closure,
- &adjust_code_closure);
+ {
+ G1RootProcessor root_processor(g1h);
+ root_processor.process_all_roots(&GenMarkSweep::adjust_pointer_closure,
+ &GenMarkSweep::adjust_cld_closure,
+ &adjust_code_closure);
+ }
assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
- sh->process_weak_roots(&GenMarkSweep::adjust_pointer_closure);
+ JNIHandles::weak_oops_do(&always_true, &GenMarkSweep::adjust_pointer_closure);
if (G1StringDedup::is_enabled()) {
G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
diff --git a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp
index 79f8b52f6..4f6e655b5 100644
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp
@@ -26,6 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
#include "memory/iterator.hpp"
+#include "oops/markOop.hpp"
class HeapRegion;
class G1CollectedHeap;
@@ -239,14 +240,14 @@ class G1UpdateRSOrPushRefOopClosure: public ExtendedOopClosure {
G1CollectedHeap* _g1;
G1RemSet* _g1_rem_set;
HeapRegion* _from;
- OopsInHeapRegionClosure* _push_ref_cl;
+ G1ParPushHeapRSClosure* _push_ref_cl;
bool _record_refs_into_cset;
uint _worker_i;
public:
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
G1RemSet* rs,
- OopsInHeapRegionClosure* push_ref_cl,
+ G1ParPushHeapRSClosure* push_ref_cl,
bool record_refs_into_cset,
uint worker_i = 0);
@@ -256,7 +257,8 @@ public:
}
bool self_forwarded(oop obj) {
- bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
+ markOop m = obj->mark();
+ bool result = (m->is_marked() && ((oop)m->decode_pointer() == obj));
return result;
}
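The rewritten self_forwarded() reads the mark word once and derives both facts (is the object marked, and where does the pointer lead) from that single snapshot, whereas the old is_forwarded()/forwardee() pair read the concurrently updated word twice. A standalone sketch of the read-once pattern; Mark and Object are illustrative stand-ins for markOop and oop, and the tag bits are made up for the example:

#include <atomic>
#include <cstdint>

struct Object;

struct Mark {
  uintptr_t _bits;
  bool is_marked() const { return (_bits & 3) == 3; }   // illustrative tag check
  Object* decode_pointer() const { return (Object*)(_bits & ~uintptr_t(3)); }
};

struct Object {
  std::atomic<uintptr_t> _mark;
  Object() : _mark(0) {}
};

bool self_forwarded_sketch(Object* obj) {
  Mark m = { obj->_mark.load() };   // single read of the mark word
  return m.is_marked() && m.decode_pointer() == obj;
}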
diff --git a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp
index bcf30b6ba..a00cd7a6c 100644
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp
@@ -67,8 +67,8 @@ inline void G1ParScanClosure::do_oop_nv(T* p) {
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
- if (state == G1CollectedHeap::InCSet) {
+ const InCSetState state = _g1->in_cset_state(obj);
+ if (state.is_in_cset()) {
// We're not going to even bother checking whether the object is
// already forwarded or not, as this usually causes an immediate
// stall. We'll try to prefetch the object (for write, given that
@@ -87,7 +87,7 @@ inline void G1ParScanClosure::do_oop_nv(T* p) {
_par_scan_state->push_on_queue(p);
} else {
- if (state == G1CollectedHeap::IsHumongous) {
+ if (state.is_humongous()) {
_g1->set_humongous_is_live(obj);
}
_par_scan_state->update_rs(_from, p, _worker_id);
diff --git a/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp b/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp
index 662d28aee..1a22af82a 100644
--- a/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp
+++ b/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,20 +44,29 @@
#endif
#include "utilities/bitMap.inline.hpp"
-G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL),
- _high_boundary(NULL), _committed(), _page_size(0), _special(false),
+G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) :
+ _low_boundary(NULL), _high_boundary(NULL), _committed(), _page_size(0), _special(false),
_dirty(), _executable(false) {
+ initialize_with_page_size(rs, used_size, page_size);
}
-bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
- if (!rs.is_reserved()) {
- return false; // Allocation failed.
- }
+void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
+ guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");
+
assert(_low_boundary == NULL, "VirtualSpace already initialized");
- assert(page_size > 0, "Granularity must be non-zero.");
+ assert(page_size > 0, "Page size must be non-zero.");
+
+ guarantee(is_ptr_aligned(rs.base(), page_size),
+ err_msg("Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size));
+ guarantee(is_size_aligned(used_size, os::vm_page_size()),
+ err_msg("Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size));
+ guarantee(used_size <= rs.size(),
+ err_msg("Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size()));
+ guarantee(is_size_aligned(rs.size(), page_size),
+ err_msg("Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size));
_low_boundary = rs.base();
- _high_boundary = _low_boundary + rs.size();
+ _high_boundary = _low_boundary + used_size;
_special = rs.special();
_executable = rs.executable();
@@ -65,16 +74,15 @@ bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size
_page_size = page_size;
assert(_committed.size() == 0, "virtual space initialized more than once");
- uintx size_in_bits = rs.size() / page_size;
- _committed.resize(size_in_bits, /* in_resource_area */ false);
+ BitMap::idx_t size_in_pages = rs.size() / page_size;
+ _committed.resize(size_in_pages, /* in_resource_area */ false);
if (_special) {
- _dirty.resize(size_in_bits, /* in_resource_area */ false);
+ _dirty.resize(size_in_pages, /* in_resource_area */ false);
}
- return true;
+ _tail_size = used_size % _page_size;
}
-
G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
release();
}
@@ -87,12 +95,18 @@ void G1PageBasedVirtualSpace::release() {
_special = false;
_executable = false;
_page_size = 0;
+ _tail_size = 0;
_committed.resize(0, false);
_dirty.resize(0, false);
}
size_t G1PageBasedVirtualSpace::committed_size() const {
- return _committed.count_one_bits() * _page_size;
+ size_t result = _committed.count_one_bits() * _page_size;
+ // The last page might only be partially used.
+ if (is_last_page_partial() && _committed.at(_committed.size() - 1)) {
+ result -= _page_size - _tail_size;
+ }
+ return result;
}
size_t G1PageBasedVirtualSpace::reserved_size() const {
@@ -103,62 +117,134 @@ size_t G1PageBasedVirtualSpace::uncommitted_size() const {
return reserved_size() - committed_size();
}
-uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
+size_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
return (addr - _low_boundary) / _page_size;
}
-bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const {
- uintptr_t end = start + size_in_pages;
- return _committed.get_next_zero_offset(start, end) >= end;
+bool G1PageBasedVirtualSpace::is_area_committed(size_t start_page, size_t size_in_pages) const {
+ size_t end_page = start_page + size_in_pages;
+ return _committed.get_next_zero_offset(start_page, end_page) >= end_page;
}
-bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const {
- uintptr_t end = start + size_in_pages;
- return _committed.get_next_one_offset(start, end) >= end;
+bool G1PageBasedVirtualSpace::is_area_uncommitted(size_t start_page, size_t size_in_pages) const {
+ size_t end_page = start_page + size_in_pages;
+ return _committed.get_next_one_offset(start_page, end_page) >= end_page;
}
-char* G1PageBasedVirtualSpace::page_start(uintptr_t index) {
+char* G1PageBasedVirtualSpace::page_start(size_t index) const {
return _low_boundary + index * _page_size;
}
-size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) {
- return num * _page_size;
+bool G1PageBasedVirtualSpace::is_after_last_page(size_t index) const {
+ guarantee(index <= _committed.size(),
+ err_msg("Given boundary page " SIZE_FORMAT " is beyond managed page count " SIZE_FORMAT, index, _committed.size()));
+ return index == _committed.size();
+}
+
+void G1PageBasedVirtualSpace::commit_preferred_pages(size_t start, size_t num_pages) {
+ assert(num_pages > 0, "No full pages to commit");
+ assert(start + num_pages <= _committed.size(),
+ err_msg("Tried to commit area from page " SIZE_FORMAT " to page " SIZE_FORMAT " "
+ "that is outside of managed space of " SIZE_FORMAT " pages",
+ start, start + num_pages, _committed.size()));
+
+ char* start_addr = page_start(start);
+ size_t size = num_pages * _page_size;
+
+ os::commit_memory_or_exit(start_addr, size, _page_size, _executable,
+ err_msg("Failed to commit area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
+ p2i(start_addr), p2i(start_addr + size), size));
+}
+
+void G1PageBasedVirtualSpace::commit_tail() {
+ assert(_tail_size > 0, "The size of the tail area must be > 0 when reaching here");
+
+ char* const aligned_end_address = (char*)align_ptr_down(_high_boundary, _page_size);
+ os::commit_memory_or_exit(aligned_end_address, _tail_size, os::vm_page_size(), _executable,
+ err_msg("Failed to commit tail area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
+ p2i(aligned_end_address), p2i(_high_boundary), _tail_size));
+}
+
+void G1PageBasedVirtualSpace::commit_internal(size_t start_page, size_t end_page) {
+ guarantee(start_page < end_page,
+ err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));
+ guarantee(end_page <= _committed.size(),
+ err_msg("Given end page " SIZE_FORMAT " is beyond end of managed page amount of " SIZE_FORMAT, end_page, _committed.size()));
+
+ size_t pages = end_page - start_page;
+ bool need_to_commit_tail = is_after_last_page(end_page) && is_last_page_partial();
+
+ // If we have to commit some (partial) tail area, decrease the amount of pages to avoid
+ // committing that in the full-page commit code.
+ if (need_to_commit_tail) {
+ pages--;
+ }
+
+ if (pages > 0) {
+ commit_preferred_pages(start_page, pages);
+ }
+
+ if (need_to_commit_tail) {
+ commit_tail();
+ }
+}
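The arithmetic behind the split in commit_internal() is easy to check by hand. A worked example with illustrative numbers: a 2 MB preferred page size over a space of which 5 MB is used leaves a 1 MB tail, so two full preferred pages go through commit_preferred_pages() and the tail through commit_tail():

#include <cassert>
#include <cstddef>

int main() {
  const size_t page_size = 2u * 1024 * 1024;       // preferred commit granularity
  const size_t used_size = 5u * 1024 * 1024;       // _high_boundary - _low_boundary
  const size_t tail_size = used_size % page_size;  // committed with OS small pages
  const size_t full_pages = used_size / page_size; // committed with preferred pages
  assert(tail_size == 1u * 1024 * 1024);
  assert(full_pages == 2);
  return 0;
}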
+
+char* G1PageBasedVirtualSpace::bounded_end_addr(size_t end_page) const {
+ return MIN2(_high_boundary, page_start(end_page));
}
-bool G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) {
+void G1PageBasedVirtualSpace::pretouch_internal(size_t start_page, size_t end_page) {
+ guarantee(start_page < end_page,
+ err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));
+
+ os::pretouch_memory(page_start(start_page), bounded_end_addr(end_page));
+}
+
+bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
// We need to make sure to commit all pages covered by the given area.
- guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted");
+ guarantee(is_area_uncommitted(start_page, size_in_pages), "Specified area is not uncommitted");
bool zero_filled = true;
- uintptr_t end = start + size_in_pages;
+ size_t end_page = start_page + size_in_pages;
if (_special) {
// Check for dirty pages and update zero_filled if any found.
- if (_dirty.get_next_one_offset(start,end) < end) {
+ if (_dirty.get_next_one_offset(start_page, end_page) < end_page) {
zero_filled = false;
- _dirty.clear_range(start, end);
+ _dirty.clear_range(start_page, end_page);
}
} else {
- os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable,
- err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages));
+ commit_internal(start_page, end_page);
}
- _committed.set_range(start, end);
+ _committed.set_range(start_page, end_page);
+ if (AlwaysPreTouch) {
+ pretouch_internal(start_page, end_page);
+ }
return zero_filled;
}
-void G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) {
- guarantee(is_area_committed(start, size_in_pages), "checking");
+void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_page) {
+ guarantee(start_page < end_page,
+ err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));
+
+ char* start_addr = page_start(start_page);
+ os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
+}
+
+void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) {
+ guarantee(is_area_committed(start_page, size_in_pages), "checking");
+ size_t end_page = start_page + size_in_pages;
if (_special) {
// Mark that memory is dirty. If committed again the memory might
// need to be cleared explicitly.
- _dirty.set_range(start, start + size_in_pages);
+ _dirty.set_range(start_page, end_page);
} else {
- os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages));
+ uncommit_internal(start_page, end_page);
}
- _committed.clear_range(start, start + size_in_pages);
+ _committed.clear_range(start_page, end_page);
}
bool G1PageBasedVirtualSpace::contains(const void* p) const {
@@ -172,7 +258,8 @@ void G1PageBasedVirtualSpace::print_on(outputStream* out) {
out->cr();
out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
- out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary));
+ out->print_cr(" - preferred page size: " SIZE_FORMAT, _page_size);
+ out->print_cr(" - [low_b, high_b]: [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary));
}
void G1PageBasedVirtualSpace::print() {
diff --git a/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp b/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp
index fb2c78415..4d0b7b21b 100644
--- a/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp
+++ b/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp
@@ -34,6 +34,12 @@
// granularity.
// (De-)Allocation requests are always OS page aligned by passing a page index
// and multiples of pages.
+// For systems that only commit memory in a given granularity (always greater
+// than the OS page size), the base address is required to be aligned to that
+// granularity. The actual size requested need not be aligned to it, but the
+// size of the reservation passed in may be rounded up to this granularity. Any
+// fragment (less than that granularity) of the actual size at the tail of the
+// request will be committed using OS small pages.
// The implementation gives an error when trying to commit or uncommit pages that
// have already been committed or uncommitted.
class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
@@ -43,7 +49,11 @@ class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
char* _low_boundary;
char* _high_boundary;
- // The commit/uncommit granularity in bytes.
+ // The size of the tail in bytes of the handled space that needs to be committed
+ // using small pages.
+ size_t _tail_size;
+
+ // The preferred page size used for commit/uncommit in bytes.
size_t _page_size;
// Bitmap used for verification of commit/uncommit operations.
@@ -62,30 +72,55 @@ class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
// Indicates whether the committed space should be executable.
bool _executable;
+ // Helper function for committing memory. Commit the given memory range by using
+ // _page_size pages as much as possible and the remainder with small sized pages.
+ void commit_internal(size_t start_page, size_t end_page);
+ // Commit num_pages pages of _page_size size starting from start_page. All
+ // argument checking has been performed.
+ void commit_preferred_pages(size_t start_page, size_t num_pages);
+ // Commit space at the high end of the space that needs to be committed with small
+ // sized pages.
+ void commit_tail();
+
+ // Uncommit the given memory range.
+ void uncommit_internal(size_t start_page, size_t end_page);
+
+ // Pretouch the given memory range.
+ void pretouch_internal(size_t start_page, size_t end_page);
+
// Returns the index of the page which contains the given address.
uintptr_t addr_to_page_index(char* addr) const;
// Returns the address of the given page index.
- char* page_start(uintptr_t index);
- // Returns the byte size of the given number of pages.
- size_t byte_size_for_pages(size_t num);
+ char* page_start(size_t index) const;
+
+ // Is the given page index the last page?
+ bool is_last_page(size_t index) const { return index == (_committed.size() - 1); }
+ // Is the given page index the first one after the last page?
+ bool is_after_last_page(size_t index) const;
+ // Is the last page only partially covered by this space?
+ bool is_last_page_partial() const { return !is_ptr_aligned(_high_boundary, _page_size); }
+ // Returns the end address of the given page bounded by the reserved space.
+ char* bounded_end_addr(size_t end_page) const;
// Returns true if the entire area is backed by committed memory.
- bool is_area_committed(uintptr_t start, size_t size_in_pages) const;
+ bool is_area_committed(size_t start_page, size_t size_in_pages) const;
// Returns true if the entire area is not backed by committed memory.
- bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const;
+ bool is_area_uncommitted(size_t start_page, size_t size_in_pages) const;
+ void initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size);
public:
// Commit the given area of pages starting at start being size_in_pages large.
// Returns true if the given area is zero filled upon completion.
- bool commit(uintptr_t start, size_t size_in_pages);
+ bool commit(size_t start_page, size_t size_in_pages);
// Uncommit the given area of pages starting at start being size_in_pages large.
- void uncommit(uintptr_t start, size_t size_in_pages);
+ void uncommit(size_t start_page, size_t size_in_pages);
- // Initialization
- G1PageBasedVirtualSpace();
- bool initialize_with_granularity(ReservedSpace rs, size_t page_size);
+ // Initialize the given reserved space with the given base address and the size
+ // actually used.
+ // Prefer to commit in page_size chunks.
+ G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size);
// Destruction
~G1PageBasedVirtualSpace();
diff --git a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp
index 311b9d008..406ac73d1 100644
--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp
@@ -38,6 +38,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_g1_rem(g1h->g1_rem_set()),
_hash_seed(17), _queue_num(queue_num),
_term_attempts(0),
+ _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
_age_table(false), _scanner(g1h, rp),
_strong_roots_time(0), _term_time(0) {
_scanner.set_par_scan_thread_state(this);
@@ -59,6 +60,12 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
+ _dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
+ // The dest for Young is used when the objects are aged enough to
+ // need to be moved to the next space.
+ _dest[InCSetState::Young] = InCSetState::Old;
+ _dest[InCSetState::Old] = InCSetState::Old;
+
_start = os::elapsedTime();
}
@@ -150,86 +157,126 @@ void G1ParScanThreadState::trim_queue() {
} while (!_refs->is_empty());
}
-oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
- size_t word_sz = old->size();
- HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
+HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
+ InCSetState* dest,
+ size_t word_sz,
+ AllocationContext_t const context) {
+ assert(state.is_in_cset_or_humongous(), err_msg("Unexpected state: " CSETSTATE_FORMAT, state.value()));
+ assert(dest->is_in_cset_or_humongous(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
+
+ // Right now we only have two types of regions (young / old) so
+ // let's keep the logic here simple. We can generalize it when necessary.
+ if (dest->is_young()) {
+ HeapWord* const obj_ptr = _g1_par_allocator->allocate(InCSetState::Old,
+ word_sz, context);
+ if (obj_ptr == NULL) {
+ return NULL;
+ }
+ // Make sure that we won't attempt to copy any other objects out
+ // of a survivor region (given that apparently we cannot allocate
+ // any new ones) to avoid coming into this slow path.
+ _tenuring_threshold = 0;
+ dest->set_old();
+ return obj_ptr;
+ } else {
+ assert(dest->is_old(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
+ // no other space to try.
+ return NULL;
+ }
+}
+
+InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
+ if (state.is_young()) {
+ age = !m->has_displaced_mark_helper() ? m->age()
+ : m->displaced_mark_helper()->age();
+ if (age < _tenuring_threshold) {
+ return state;
+ }
+ }
+ return dest(state);
+}
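next_state() above concentrates the tenuring decision in one place: a young object below the threshold keeps its state (and is copied to survivor space), everything else falls through to the per-state destination table set up in the constructor (Young maps to Old, Old to Old). A reduced sketch of that decision, with the displaced-mark-word age handling elided and names simplified:

#include <cstdint>

enum State : int8_t { NotInCSet = 0, Young = 1, Old = 2 };

State next_state_sketch(State state, unsigned age, unsigned tenuring_threshold,
                        const State dest[]) {
  if (state == Young && age < tenuring_threshold) {
    return state;        // keep copying to survivor space
  }
  return dest[state];    // otherwise promote via the destination table
}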
+
+oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
+ oop const old,
+ markOop const old_mark) {
+ const size_t word_sz = old->size();
+ HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
// +1 to make the -1 indexes valid...
- int young_index = from_region->young_index_in_cset()+1;
+ const int young_index = from_region->young_index_in_cset()+1;
assert( (from_region->is_young() && young_index > 0) ||
(!from_region->is_young() && young_index == 0), "invariant" );
- G1CollectorPolicy* g1p = _g1h->g1_policy();
- markOop m = old->mark();
- int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
- : m->age();
- GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
- word_sz);
- AllocationContext_t context = from_region->allocation_context();
- HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context);
-#ifndef PRODUCT
- // Should this evacuation fail?
- if (_g1h->evacuation_should_fail()) {
- if (obj_ptr != NULL) {
- _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
- obj_ptr = NULL;
+ const AllocationContext_t context = from_region->allocation_context();
+
+ uint age = 0;
+ InCSetState dest_state = next_state(state, old_mark, age);
+ HeapWord* obj_ptr = _g1_par_allocator->plab_allocate(dest_state, word_sz, context);
+
+ // PLAB allocations should succeed most of the time, so we'll
+ // normally check against NULL once and that's it.
+ if (obj_ptr == NULL) {
+ obj_ptr = _g1_par_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
+ if (obj_ptr == NULL) {
+ obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context);
+ if (obj_ptr == NULL) {
+ // This will either forward-to-self, or detect that someone else has
+ // installed a forwarding pointer.
+ return _g1h->handle_evacuation_failure_par(this, old);
+ }
}
}
-#endif // !PRODUCT
- if (obj_ptr == NULL) {
- // This will either forward-to-self, or detect that someone else has
- // installed a forwarding pointer.
+ assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
+#ifndef PRODUCT
+ // Should this evacuation fail?
+ if (_g1h->evacuation_should_fail()) {
+ // Doing this after all the allocation attempts also tests the
+ // undo_allocation() method too.
+ _g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
return _g1h->handle_evacuation_failure_par(this, old);
}
-
- oop obj = oop(obj_ptr);
+#endif // !PRODUCT
// We're going to allocate linearly, so might as well prefetch ahead.
Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
- oop forward_ptr = old->forward_to_atomic(obj);
+ const oop obj = oop(obj_ptr);
+ const oop forward_ptr = old->forward_to_atomic(obj);
if (forward_ptr == NULL) {
Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
- // alloc_purpose is just a hint to allocate() above, recheck the type of region
- // we actually allocated from and update alloc_purpose accordingly
- HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
- alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
-
- if (g1p->track_object_age(alloc_purpose)) {
- // We could simply do obj->incr_age(). However, this causes a
- // performance issue. obj->incr_age() will first check whether
- // the object has a displaced mark by checking its mark word;
- // getting the mark word from the new location of the object
- // stalls. So, given that we already have the mark word and we
- // are about to install it anyway, it's better to increase the
- // age on the mark word, when the object does not have a
- // displaced mark word. We're not expecting many objects to have
- // a displaced marked word, so that case is not optimized
- // further (it could be...) and we simply call obj->incr_age().
-
- if (m->has_displaced_mark_helper()) {
- // in this case, we have to install the mark word first,
+ if (dest_state.is_young()) {
+ if (age < markOopDesc::max_age) {
+ age++;
+ }
+ if (old_mark->has_displaced_mark_helper()) {
+ // In this case, we have to install the mark word first,
// otherwise obj looks to be forwarded (the old mark word,
// which contains the forward pointer, was copied)
- obj->set_mark(m);
- obj->incr_age();
+ obj->set_mark(old_mark);
+ markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
+ old_mark->set_displaced_mark_helper(new_mark);
} else {
- m = m->incr_age();
- obj->set_mark(m);
+ obj->set_mark(old_mark->set_age(age));
}
- age_table()->add(obj, word_sz);
+ age_table()->add(age, word_sz);
} else {
- obj->set_mark(m);
+ obj->set_mark(old_mark);
}
if (G1StringDedup::is_enabled()) {
- G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
- to_region->is_young(),
+ const bool is_from_young = state.is_young();
+ const bool is_to_young = dest_state.is_young();
+ assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
+ "sanity");
+ assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
+ "sanity");
+ G1StringDedup::enqueue_from_evacuation(is_from_young,
+ is_to_young,
queue_num(),
obj);
}
- size_t* surv_young_words = surviving_young_words();
+ size_t* const surv_young_words = surviving_young_words();
surv_young_words[young_index] += word_sz;
if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
@@ -240,14 +287,13 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
oop* old_p = set_partial_array_mask(old);
push_on_queue(old_p);
} else {
- // No point in using the slower heap_region_containing() method,
- // given that we know obj is in the heap.
- _scanner.set_region(_g1h->heap_region_containing_raw(obj));
+ HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
+ _scanner.set_region(to_region);
obj->oop_iterate_backwards(&_scanner);
}
+ return obj;
} else {
- _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
- obj = forward_ptr;
+ _g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
+ return forward_ptr;
}
- return obj;
}
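The rewritten copy_to_survivor_space() replaces the single allocate() call with a three-step fallback chain before giving up: the in-PLAB fast path, then a direct or new-PLAB allocation in the same destination, then a PLAB in the next "generation", and only then evacuation-failure handling. A sketch of that shape; the three allocator entry points are taken from the hunk above but reduced here to free functions with simplified, assumed signatures:

#include <cstddef>

// Illustrative stand-ins for G1ParGCAllocator's entry points.
void* plab_allocate(int dest_state, size_t word_sz);
void* allocate_direct_or_new_plab(int dest_state, size_t word_sz);
void* allocate_in_next_plab(int* dest_state, size_t word_sz);

void* evac_allocate_sketch(int& dest_state, size_t word_sz) {
  void* p = plab_allocate(dest_state, word_sz);            // common fast path
  if (p == nullptr) {
    p = allocate_direct_or_new_plab(dest_state, word_sz);  // refill or direct
    if (p == nullptr) {
      p = allocate_in_next_plab(&dest_state, word_sz);     // e.g. Young -> Old
      // nullptr here means evacuation failure: forward-to-self handling.
    }
  }
  return p;
}

Note that the evacuation-should-fail test moved after the whole chain, so that debugging mode now also exercises undo_allocation().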
diff --git a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp
index c1262a688..d5350310e 100644
--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp
@@ -46,14 +46,16 @@ class G1ParScanThreadState : public StackObj {
G1SATBCardTableModRefBS* _ct_bs;
G1RemSet* _g1_rem;
- G1ParGCAllocator* _g1_par_allocator;
+ G1ParGCAllocator* _g1_par_allocator;
- ageTable _age_table;
+ ageTable _age_table;
+ InCSetState _dest[InCSetState::Num];
+ // Local tenuring threshold.
+ uint _tenuring_threshold;
+ G1ParScanClosure _scanner;
- G1ParScanClosure _scanner;
-
- size_t _alloc_buffer_waste;
- size_t _undo_waste;
+ size_t _alloc_buffer_waste;
+ size_t _undo_waste;
OopsInHeapRegionClosure* _evac_failure_cl;
@@ -82,6 +84,14 @@ class G1ParScanThreadState : public StackObj {
DirtyCardQueue& dirty_card_queue() { return _dcq; }
G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
+ InCSetState dest(InCSetState original) const {
+ assert(original.is_valid(),
+ err_msg("Original state invalid: " CSETSTATE_FORMAT, original.value()));
+ assert(_dest[original.value()].is_valid_gen(),
+ err_msg("Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value()));
+ return _dest[original.value()];
+ }
+
public:
G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
~G1ParScanThreadState();
@@ -112,7 +122,6 @@ class G1ParScanThreadState : public StackObj {
}
}
}
- public:
void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
_evac_failure_cl = evac_failure_cl;
@@ -193,9 +202,20 @@ class G1ParScanThreadState : public StackObj {
template <class T> inline void deal_with_reference(T* ref_to_scan);
inline void dispatch_reference(StarTask ref);
+
+ // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
+ // allocate into dest. State is the original (source) cset state for the object
+ // that is allocated for.
+ // Returns a non-NULL pointer if successful, and updates dest if required.
+ HeapWord* allocate_in_next_plab(InCSetState const state,
+ InCSetState* dest,
+ size_t word_sz,
+ AllocationContext_t const context);
+
+ inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
public:
- oop copy_to_survivor_space(oop const obj);
+ oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);
void trim_queue();
diff --git a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp
index 3fb1829f2..1b03f8caa 100644
--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp
@@ -38,20 +38,21 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from
// set, due to (benign) races in the claim mechanism during RSet scanning more
// than one thread might claim the same card. So the same card may be
// processed multiple times. So redo this check.
- G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
- if (in_cset_state == G1CollectedHeap::InCSet) {
+ const InCSetState in_cset_state = _g1h->in_cset_state(obj);
+ if (in_cset_state.is_in_cset()) {
oop forwardee;
- if (obj->is_forwarded()) {
- forwardee = obj->forwardee();
+ markOop m = obj->mark();
+ if (m->is_marked()) {
+ forwardee = (oop) m->decode_pointer();
} else {
- forwardee = copy_to_survivor_space(obj);
+ forwardee = copy_to_survivor_space(in_cset_state, obj, m);
}
oopDesc::encode_store_heap_oop(p, forwardee);
- } else if (in_cset_state == G1CollectedHeap::IsHumongous) {
+ } else if (in_cset_state.is_humongous()) {
_g1h->set_humongous_is_live(obj);
} else {
- assert(in_cset_state == G1CollectedHeap::InNeither,
- err_msg("In_cset_state must be InNeither here, but is %d", in_cset_state));
+ assert(!in_cset_state.is_in_cset_or_humongous(),
+ err_msg("In_cset_state must be NotInCSet here, but is " CSETSTATE_FORMAT, in_cset_state.value()));
}
assert(obj != NULL, "Must be");
diff --git a/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp b/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp
index 68d967c76..0c26b783e 100644
--- a/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp
+++ b/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,17 +31,16 @@
#include "utilities/bitMap.inline.hpp"
G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
- size_t commit_granularity,
+ size_t used_size,
+ size_t page_size,
size_t region_granularity,
MemoryType type) :
- _storage(),
- _commit_granularity(commit_granularity),
+ _storage(rs, used_size, page_size),
_region_granularity(region_granularity),
_listener(NULL),
_commit_map() {
- guarantee(is_power_of_2(commit_granularity), "must be");
+ guarantee(is_power_of_2(page_size), "must be");
guarantee(is_power_of_2(region_granularity), "must be");
- _storage.initialize_with_granularity(rs, commit_granularity);
MemTracker::record_virtual_memory_type((address)rs.base(), type);
}
@@ -55,25 +54,26 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
public:
G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
- size_t os_commit_granularity,
+ size_t actual_size,
+ size_t page_size,
size_t alloc_granularity,
size_t commit_factor,
MemoryType type) :
- G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
- _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {
+ G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
+ _pages_per_region(alloc_granularity / (page_size * commit_factor)) {
- guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
+ guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
_commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
}
- virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
- bool zero_filled = _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+ virtual void commit_regions(uint start_idx, size_t num_regions) {
+ bool zero_filled = _storage.commit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
_commit_map.set_range(start_idx, start_idx + num_regions);
fire_on_commit(start_idx, num_regions, zero_filled);
}
- virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
- _storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+ virtual void uncommit_regions(uint start_idx, size_t num_regions) {
+ _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
_commit_map.clear_range(start_idx, start_idx + num_regions);
}
};
@@ -98,22 +98,23 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
public:
G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
- size_t os_commit_granularity,
+ size_t actual_size,
+ size_t page_size,
size_t alloc_granularity,
size_t commit_factor,
MemoryType type) :
- G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
- _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {
+ G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
+ _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {
- guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
- _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity);
+ guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
+ _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)), page_size);
_commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
}
- virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
- for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
- assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i));
- uintptr_t idx = region_idx_to_page_idx(i);
+ virtual void commit_regions(uint start_idx, size_t num_regions) {
+ for (uint i = start_idx; i < start_idx + num_regions; i++) {
+ assert(!_commit_map.at(i), err_msg("Trying to commit storage at region %u that is already committed", i));
+ size_t idx = region_idx_to_page_idx(i);
uint old_refcount = _refcounts.get_by_index(idx);
bool zero_filled = false;
if (old_refcount == 0) {
@@ -125,10 +126,10 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
}
}
- virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
- for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
- assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i));
- uintptr_t idx = region_idx_to_page_idx(i);
+ virtual void uncommit_regions(uint start_idx, size_t num_regions) {
+ for (uint i = start_idx; i < start_idx + num_regions; i++) {
+ assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region %u that is not committed", i));
+ size_t idx = region_idx_to_page_idx(i);
uint old_refcount = _refcounts.get_by_index(idx);
assert(old_refcount > 0, "must be");
if (old_refcount == 1) {
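
The small-regions mapper above keeps one refcount per OS page and commits only on the 0-to-1 transition, uncommitting on 1-to-0, so several regions can share a page. A standalone sketch of just that bookkeeping; commit_page/uncommit_page are hypothetical stand-ins for the virtual-space calls:

    #include <cstddef>
    #include <vector>

    // Sketch: refcounted page commit. The first user of a page commits it,
    // the last user uncommits it; everyone in between only adjusts the count.
    class RefcountedPages {
      std::vector<unsigned> _refcounts;
    public:
      explicit RefcountedPages(size_t pages) : _refcounts(pages, 0) {}

      bool add_user(size_t page) {    // true => page was newly committed
        if (_refcounts[page]++ == 0) {
          // commit_page(page);       // hypothetical OS-level commit
          return true;
        }
        return false;
      }

      bool remove_user(size_t page) { // true => page was uncommitted
        if (--_refcounts[page] == 0) {
          // uncommit_page(page);     // hypothetical OS-level uncommit
          return true;
        }
        return false;
      }
    };
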
@@ -147,14 +148,15 @@ void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, b
}
G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
- size_t os_commit_granularity,
+ size_t actual_size,
+ size_t page_size,
size_t region_granularity,
size_t commit_factor,
MemoryType type) {
- if (region_granularity >= (os_commit_granularity * commit_factor)) {
- return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+ if (region_granularity >= (page_size * commit_factor)) {
+ return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
} else {
- return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+ return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
}
}
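
create_mapper chooses the implementation by comparing the region size with one commit unit, page_size * commit_factor. The same decision reduced to a toy function:

    #include <cstddef>

    // Sketch of the factory decision above: regions spanning at least one
    // commit unit get per-region page commits; smaller regions share pages.
    enum class MapperKind { PagesPerRegion, RegionsPerPage };

    MapperKind choose_mapper(size_t region_granularity,
                             size_t page_size,
                             size_t commit_factor) {
      return (region_granularity >= page_size * commit_factor)
          ? MapperKind::PagesPerRegion   // G1RegionsLargerThanCommitSizeMapper
          : MapperKind::RegionsPerPage;  // G1RegionsSmallerThanCommitSizeMapper
    }
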
diff --git a/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp b/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp
index 6b3420649..6623a37f9 100644
--- a/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp
+++ b/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,17 +46,20 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
protected:
// Backing storage.
G1PageBasedVirtualSpace _storage;
- size_t _commit_granularity;
+
size_t _region_granularity;
// Mapping management
BitMap _commit_map;
- G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type);
+ G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, MemoryType type);
void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled);
public:
MemRegion reserved() { return _storage.reserved(); }
+ size_t reserved_size() { return _storage.reserved_size(); }
+ size_t committed_size() { return _storage.committed_size(); }
+
void set_mapping_changed_listener(G1MappingChangedListener* listener) { _listener = listener; }
virtual ~G1RegionToSpaceMapper() {
@@ -67,16 +70,20 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
return _commit_map.at(idx);
}
- virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
- virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
+ virtual void commit_regions(uint start_idx, size_t num_regions = 1) = 0;
+ virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;
// Creates an appropriate G1RegionToSpaceMapper for the given parameters.
+ // The actual space to be used within the given reservation is given by actual_size.
+ // This is because some OSes need to round up the reservation size to guarantee
+ // alignment to page_size.
// The byte_translation_factor defines how many bytes in a region correspond to
// a single byte in the data structure this mapper is for.
// E.g. in the card table, this value corresponds to the size a single card
- // table entry corresponds to.
+ // table entry corresponds to in the heap.
static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs,
- size_t os_commit_granularity,
+ size_t actual_size,
+ size_t page_size,
size_t region_granularity,
size_t byte_translation_factor,
MemoryType type);
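
The new actual_size parameter exists because some OSes round the reservation up to a page_size multiple; this is also why the refcount table earlier is sized with align_size_up(rs.size(), page_size). A small local illustration of the rounding (align_up is a sketch helper, not the HotSpot macro):

    #include <cassert>
    #include <cstddef>

    // Round sz up to the next multiple of a power-of-two alignment.
    static size_t align_up(size_t sz, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "power of two expected");
      return (sz + alignment - 1) & ~(alignment - 1);
    }

    // E.g. a 9 MB request backed by 2 MB large pages is rounded up to 10 MB
    // of reserved pages, while actual_size keeps recording the 9 MB that the
    // heap really intends to use.
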
diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
index b28fd2376..9d5bc57fb 100644
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
@@ -78,9 +78,8 @@ G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
_cards_scanned(NULL), _total_cards_scanned(0),
_prev_period_summary()
{
- _seq_task = new SubTasksDone(NumSeqTasks);
guarantee(n_workers() > 0, "There should be some workers");
- _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers(), mtGC);
+ _cset_rs_update_cl = NEW_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, n_workers(), mtGC);
for (uint i = 0; i < n_workers(); i++) {
_cset_rs_update_cl[i] = NULL;
}
@@ -90,11 +89,10 @@ G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
}
G1RemSet::~G1RemSet() {
- delete _seq_task;
for (uint i = 0; i < n_workers(); i++) {
assert(_cset_rs_update_cl[i] == NULL, "it should be");
}
- FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl, mtGC);
+ FREE_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, _cset_rs_update_cl, mtGC);
}
void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
@@ -108,7 +106,7 @@ class ScanRSClosure : public HeapRegionClosure {
size_t _cards_done, _cards;
G1CollectedHeap* _g1h;
- OopsInHeapRegionClosure* _oc;
+ G1ParPushHeapRSClosure* _oc;
CodeBlobClosure* _code_root_cl;
G1BlockOffsetSharedArray* _bot_shared;
@@ -120,7 +118,7 @@ class ScanRSClosure : public HeapRegionClosure {
bool _try_claimed;
public:
- ScanRSClosure(OopsInHeapRegionClosure* oc,
+ ScanRSClosure(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i) :
_oc(oc),
@@ -142,16 +140,13 @@ public:
void scanCard(size_t index, HeapRegion *r) {
// Stack allocate the DirtyCardToOopClosure instance
HeapRegionDCTOC cl(_g1h, r, _oc,
- CardTableModRefBS::Precise,
- HeapRegionDCTOC::IntoCSFilterKind);
+ CardTableModRefBS::Precise);
// Set the "from" region in the closure.
_oc->set_region(r);
- HeapWord* card_start = _bot_shared->address_for_index(index);
- HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
- Space *sp = SharedHeap::heap()->space_containing(card_start);
- MemRegion sm_region = sp->used_region_at_save_marks();
- MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
+ MemRegion card_region(_bot_shared->address_for_index(index), G1BlockOffsetSharedArray::N_words);
+ MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
+ MemRegion mr = pre_gc_allocated.intersection(card_region);
if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
// We mark the card as "claimed" lazily (so races are possible
// but they're benign), which reduces the number of duplicate
@@ -240,7 +235,7 @@ public:
size_t cards_looked_up() { return _cards;}
};
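
scanCard now clips the card's range against the scannable prefix [bottom, scan_top) of the region rather than going through Space::used_region_at_save_marks(). The clipping itself, sketched with a plain interval type standing in for MemRegion:

    #include <algorithm>
    #include <cstddef>

    // Word-indexed half-open interval; a stand-in for HotSpot's MemRegion.
    struct Interval { size_t start; size_t end; };

    // Clip the card's interval against the region prefix that is safe to
    // scan, as ScanRSClosure::scanCard does above.
    inline Interval intersect(Interval a, Interval b) {
      Interval r = { std::max(a.start, b.start), std::min(a.end, b.end) };
      if (r.start >= r.end) { r.start = r.end = 0; }  // empty intersection
      return r;
    }
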
-void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
+void G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i) {
double rs_time_start = os::elapsedTime();
@@ -258,9 +253,8 @@ void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
assert(_cards_scanned != NULL, "invariant");
_cards_scanned[worker_i] = scanRScl.cards_done();
- _g1p->phase_times()->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
- _g1p->phase_times()->record_strong_code_root_scan_time(worker_i,
- scanRScl.strong_code_root_scan_time_sec() * 1000.0);
+ _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, scan_rs_time_sec);
+ _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, scanRScl.strong_code_root_scan_time_sec());
}
// Closure used for updating RSets and recording references that
@@ -297,29 +291,18 @@ public:
};
void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
- double start = os::elapsedTime();
+ G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
// Apply the given closure to all remaining log entries.
RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
_g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
-
- // Now there should be no dirty cards.
- if (G1RSLogCheckCardTable) {
- CountNonCleanMemRegionClosure cl(_g1);
- _ct_bs->mod_card_iterate(&cl);
- // XXX This isn't true any more: keeping cards of young regions
- // marked dirty broke it. Need some reasonable fix.
- guarantee(cl.n() == 0, "Card table should be clean.");
- }
-
- _g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
}
void G1RemSet::cleanupHRRS() {
HeapRegionRemSet::cleanup();
}
-void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
+void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i) {
#if CARD_REPEAT_HISTO
@@ -344,23 +327,8 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");
- // The two flags below were introduced temporarily to serialize
- // the updating and scanning of remembered sets. There are some
- // race conditions when these two operations are done in parallel
- // and they are causing failures. When we resolve said race
- // conditions, we'll revert back to parallel remembered set
- // updating and scanning. See CRs 6677707 and 6677708.
- if (G1UseParallelRSetUpdating || (worker_i == 0)) {
- updateRS(&into_cset_dcq, worker_i);
- } else {
- _g1p->phase_times()->record_update_rs_processed_buffers(worker_i, 0);
- _g1p->phase_times()->record_update_rs_time(worker_i, 0.0);
- }
- if (G1UseParallelRSetScanning || (worker_i == 0)) {
- scanRS(oc, code_root_cl, worker_i);
- } else {
- _g1p->phase_times()->record_scan_rs_time(worker_i, 0.0);
- }
+ updateRS(&into_cset_dcq, worker_i);
+ scanRS(oc, code_root_cl, worker_i);
// We now clear the cached values of _cset_rs_update_cl for this worker
_cset_rs_update_cl[worker_i] = NULL;
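
Here and throughout the patch, manual start/stop timing is replaced by a scoped tracker (G1GCParPhaseTimesTracker) constructed at the top of the phase, so every exit path of updateRS is timed. A minimal RAII sketch of the idea; the recording callback is an assumption standing in for the phase-times bookkeeping:

    #include <chrono>
    #include <functional>

    // Sketch: construction starts the clock, destruction reports elapsed
    // seconds to whatever recorder was bound (e.g. phase id + worker id).
    class ScopedPhaseTimer {
      std::chrono::steady_clock::time_point _start;
      std::function<void(double)> _record;
    public:
      explicit ScopedPhaseTimer(std::function<void(double)> record)
          : _start(std::chrono::steady_clock::now()), _record(record) {}
      ~ScopedPhaseTimer() {
        std::chrono::duration<double> secs =
            std::chrono::steady_clock::now() - _start;
        _record(secs.count());
      }
    };
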
@@ -461,7 +429,7 @@ G1Mux2Closure::G1Mux2Closure(OopClosure *c1, OopClosure *c2) :
G1UpdateRSOrPushRefOopClosure::
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
G1RemSet* rs,
- OopsInHeapRegionClosure* push_ref_cl,
+ G1ParPushHeapRSClosure* push_ref_cl,
bool record_refs_into_cset,
uint worker_i) :
_g1(g1h), _g1_rem_set(rs), _from(NULL),
@@ -562,7 +530,7 @@ bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
ct_freq_note_card(_ct_bs->index_for(start));
#endif
- OopsInHeapRegionClosure* oops_in_heap_closure = NULL;
+ G1ParPushHeapRSClosure* oops_in_heap_closure = NULL;
if (check_for_refs_into_cset) {
// ConcurrentG1RefineThreads have worker numbers larger than what
// _cset_rs_update_cl[] is set up to handle. But those threads should
diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.hpp b/src/share/vm/gc_implementation/g1/g1RemSet.hpp
index 5a629fad2..9839e86c5 100644
--- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp
@@ -33,6 +33,7 @@
class G1CollectedHeap;
class CardTableModRefBarrierSet;
class ConcurrentG1Refine;
+class G1ParPushHeapRSClosure;
// A G1RemSet in which each heap region has a rem set that records the
// external heap references into it. Uses a mod ref bs to track updates,
@@ -58,7 +59,6 @@ protected:
};
CardTableModRefBS* _ct_bs;
- SubTasksDone* _seq_task;
G1CollectorPolicy* _g1p;
ConcurrentG1Refine* _cg1r;
@@ -68,7 +68,7 @@ protected:
// Used for caching the closure that is responsible for scanning
// references into the collection set.
- OopsInHeapRegionClosure** _cset_rs_update_cl;
+ G1ParPushHeapRSClosure** _cset_rs_update_cl;
// Print the given summary info
virtual void print_summary_info(G1RemSetSummary * summary, const char * header = NULL);
@@ -95,7 +95,7 @@ public:
// partitioning the work to be done. It should be the same as
// the "i" passed to the calling thread's work(i) function.
// In the sequential case this param will be ignored.
- void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
+ void oops_into_collection_set_do(G1ParPushHeapRSClosure* blk,
CodeBlobClosure* code_root_cl,
uint worker_i);
@@ -107,7 +107,7 @@ public:
void prepare_for_oops_into_collection_set_do();
void cleanup_after_oops_into_collection_set_do();
- void scanRS(OopsInHeapRegionClosure* oc,
+ void scanRS(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i);
diff --git a/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp b/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp
new file mode 100644
index 000000000..b27696d2d
--- /dev/null
+++ b/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp
@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/codeCache.hpp"
+#include "gc_implementation/g1/bufferingOopClosure.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
+#include "gc_implementation/g1/g1RemSet.inline.hpp"
+#include "gc_implementation/g1/g1RootProcessor.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/fprofiler.hpp"
+#include "runtime/mutex.hpp"
+#include "services/management.hpp"
+
+class G1CodeBlobClosure : public CodeBlobClosure {
+ class HeapRegionGatheringOopClosure : public OopClosure {
+ G1CollectedHeap* _g1h;
+ OopClosure* _work;
+ nmethod* _nm;
+
+ template <typename T>
+ void do_oop_work(T* p) {
+ _work->do_oop(p);
+ T oop_or_narrowoop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(oop_or_narrowoop)) {
+ oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
+ HeapRegion* hr = _g1h->heap_region_containing_raw(o);
+ assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
+ hr->add_strong_code_root(_nm);
+ }
+ }
+
+ public:
+ HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
+
+ void do_oop(oop* o) {
+ do_oop_work(o);
+ }
+
+ void do_oop(narrowOop* o) {
+ do_oop_work(o);
+ }
+
+ void set_nm(nmethod* nm) {
+ _nm = nm;
+ }
+ };
+
+ HeapRegionGatheringOopClosure _oc;
+public:
+ G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
+
+ void do_code_blob(CodeBlob* cb) {
+ nmethod* nm = cb->as_nmethod_or_null();
+ if (nm != NULL) {
+ if (!nm->test_set_oops_do_mark()) {
+ _oc.set_nm(nm);
+ nm->oops_do(&_oc);
+ nm->fix_oop_relocations();
+ }
+ }
+ }
+};
+
+
+void G1RootProcessor::worker_has_discovered_all_strong_classes() {
+ uint n_workers = _g1h->n_par_threads();
+ assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
+
+ if (n_workers > 0) {
+ uint new_value = (uint)Atomic::add(1, &_n_workers_discovered_strong_classes);
+ if (new_value == n_workers) {
+ // This thread is last. Notify the others.
+ MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
+ _lock.notify_all();
+ }
+ }
+}
+
+void G1RootProcessor::wait_until_all_strong_classes_discovered() {
+ uint n_workers = _g1h->n_par_threads();
+ assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
+
+ if (n_workers > 0 && (uint)_n_workers_discovered_strong_classes != n_workers) {
+ MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
+ while ((uint)_n_workers_discovered_strong_classes != n_workers) {
+ _lock.wait(Mutex::_no_safepoint_check_flag, 0, false);
+ }
+ }
+}
+
+G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h) :
+ _g1h(g1h),
+ _process_strong_tasks(G1RP_PS_NumElements),
+ _srs(g1h),
+ _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false),
+ _n_workers_discovered_strong_classes(0) {}
+
+void G1RootProcessor::evacuate_roots(OopClosure* scan_non_heap_roots,
+ OopClosure* scan_non_heap_weak_roots,
+ CLDClosure* scan_strong_clds,
+ CLDClosure* scan_weak_clds,
+ bool trace_metadata,
+ uint worker_i) {
+ // First scan the shared roots.
+ double ext_roots_start = os::elapsedTime();
+ G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
+
+ BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
+ BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
+
+ OopClosure* const weak_roots = &buf_scan_non_heap_weak_roots;
+ OopClosure* const strong_roots = &buf_scan_non_heap_roots;
+
+ // CodeBlobClosures are not interoperable with BufferingOopClosures
+ G1CodeBlobClosure root_code_blobs(scan_non_heap_roots);
+
+ process_java_roots(strong_roots,
+ trace_metadata ? scan_strong_clds : NULL,
+ scan_strong_clds,
+ trace_metadata ? NULL : scan_weak_clds,
+ &root_code_blobs,
+ phase_times,
+ worker_i);
+
+ // This is the point where this worker thread will not find more strong CLDs/nmethods.
+ // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
+ if (trace_metadata) {
+ worker_has_discovered_all_strong_classes();
+ }
+
+ process_vm_roots(strong_roots, weak_roots, phase_times, worker_i);
+
+ {
+ // Now the CM ref_processor roots.
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
+ if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
+ // We need to treat the discovered reference lists of the
+ // concurrent mark ref processor as roots and keep entries
+ // (which are added by the marking threads) on them live
+ // until they can be processed at the end of marking.
+ _g1h->ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
+ }
+ }
+
+ if (trace_metadata) {
+ {
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongCLD, worker_i);
+ // Barrier to make sure all workers passed
+ // the strong CLD and strong nmethods phases.
+ wait_until_all_strong_classes_discovered();
+ }
+
+ // Now take the complement of the strong CLDs.
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WeakCLDRoots, worker_i);
+ ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
+ } else {
+ phase_times->record_time_secs(G1GCPhaseTimes::WaitForStrongCLD, worker_i, 0.0);
+ phase_times->record_time_secs(G1GCPhaseTimes::WeakCLDRoots, worker_i, 0.0);
+ }
+
+ // Finish up any enqueued closure apps (attributed as object copy time).
+ buf_scan_non_heap_roots.done();
+ buf_scan_non_heap_weak_roots.done();
+
+ double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
+ + buf_scan_non_heap_weak_roots.closure_app_seconds();
+
+ phase_times->record_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec);
+
+ double ext_root_time_sec = os::elapsedTime() - ext_roots_start - obj_copy_time_sec;
+
+ phase_times->record_time_secs(G1GCPhaseTimes::ExtRootScan, worker_i, ext_root_time_sec);
+
+ // During conc marking we have to filter the per-thread SATB buffers
+ // to make sure we remove any oops into the CSet (which will show up
+ // as implicitly live).
+ {
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
+ if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->mark_in_progress()) {
+ JavaThread::satb_mark_queue_set().filter_thread_buffers();
+ }
+ }
+
+ _process_strong_tasks.all_tasks_completed();
+}
+
+void G1RootProcessor::process_strong_roots(OopClosure* oops,
+ CLDClosure* clds,
+ CodeBlobClosure* blobs) {
+
+ process_java_roots(oops, clds, clds, NULL, blobs, NULL, 0);
+ process_vm_roots(oops, NULL, NULL, 0);
+
+ _process_strong_tasks.all_tasks_completed();
+}
+
+void G1RootProcessor::process_all_roots(OopClosure* oops,
+ CLDClosure* clds,
+ CodeBlobClosure* blobs) {
+
+ process_java_roots(oops, NULL, clds, clds, NULL, NULL, 0);
+ process_vm_roots(oops, oops, NULL, 0);
+
+ if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
+ CodeCache::blobs_do(blobs);
+ }
+
+ _process_strong_tasks.all_tasks_completed();
+}
+
+void G1RootProcessor::process_java_roots(OopClosure* strong_roots,
+ CLDClosure* thread_stack_clds,
+ CLDClosure* strong_clds,
+ CLDClosure* weak_clds,
+ CodeBlobClosure* strong_code,
+ G1GCPhaseTimes* phase_times,
+ uint worker_i) {
+ assert(thread_stack_clds == NULL || weak_clds == NULL, "There is overlap between those, only one may be set");
+ // Iterating over the CLDG and the Threads is done early to allow us to
+ // first process the strong CLDs and nmethods and then, after a barrier,
+ // let the thread process the weak CLDs and nmethods.
+ {
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
+ if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
+ ClassLoaderDataGraph::roots_cld_do(strong_clds, weak_clds);
+ }
+ }
+
+ {
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
+ Threads::possibly_parallel_oops_do(strong_roots, thread_stack_clds, strong_code);
+ }
+}
+
+void G1RootProcessor::process_vm_roots(OopClosure* strong_roots,
+ OopClosure* weak_roots,
+ G1GCPhaseTimes* phase_times,
+ uint worker_i) {
+ {
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
+ if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Universe_oops_do)) {
+ Universe::oops_do(strong_roots);
+ }
+ }
+
+ {
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_i);
+ if (!_process_strong_tasks.is_task_claimed(G1RP_PS_JNIHandles_oops_do)) {
+ JNIHandles::oops_do(strong_roots);
+ }
+ }
+
+ {
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ObjectSynchronizerRoots, worker_i);
+ if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ObjectSynchronizer_oops_do)) {
+ ObjectSynchronizer::oops_do(strong_roots);
+ }
+ }
+
+ {
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::FlatProfilerRoots, worker_i);
+ if (!_process_strong_tasks.is_task_claimed(G1RP_PS_FlatProfiler_oops_do)) {
+ FlatProfiler::oops_do(strong_roots);
+ }
+ }
+
+ {
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
+ if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Management_oops_do)) {
+ Management::oops_do(strong_roots);
+ }
+ }
+
+ {
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_i);
+ if (!_process_strong_tasks.is_task_claimed(G1RP_PS_jvmti_oops_do)) {
+ JvmtiExport::oops_do(strong_roots);
+ }
+ }
+
+ {
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i);
+ if (!_process_strong_tasks.is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) {
+ SystemDictionary::roots_oops_do(strong_roots, weak_roots);
+ }
+ }
+
+ {
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::StringTableRoots, worker_i);
+ // All threads execute the following. A specific chunk of buckets
+ // from the StringTable constitutes an individual task.
+ if (weak_roots != NULL) {
+ StringTable::possibly_parallel_oops_do(weak_roots);
+ }
+ }
+}
+
+void G1RootProcessor::scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
+ OopClosure* scan_non_heap_weak_roots,
+ uint worker_i) {
+ G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CodeCacheRoots, worker_i);
+
+ // Now scan the complement of the collection set.
+ G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
+
+ _g1h->g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
+}
+
+void G1RootProcessor::set_num_workers(int active_workers) {
+ _process_strong_tasks.set_n_threads(active_workers);
+}
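
worker_has_discovered_all_strong_classes() and wait_until_all_strong_classes_discovered() implement a one-shot rendezvous: each worker bumps a shared counter with Atomic::add, the last arrival notifies, and waiters re-check the count under the monitor. A self-contained sketch with standard primitives; the original increments outside the lock, while this simplification holds it throughout:

    #include <condition_variable>
    #include <mutex>

    // Sketch of the strong-CLD discovery barrier: the last arriving worker
    // wakes all the workers still waiting at the barrier.
    class Rendezvous {
      std::mutex _lock;
      std::condition_variable _cv;
      unsigned _arrived;
      const unsigned _expected;
    public:
      explicit Rendezvous(unsigned expected)
          : _arrived(0), _expected(expected) {}

      void arrive() {   // cf. worker_has_discovered_all_strong_classes()
        std::unique_lock<std::mutex> ml(_lock);
        if (++_arrived == _expected) {
          _cv.notify_all();
        }
      }

      void await() {    // cf. wait_until_all_strong_classes_discovered()
        std::unique_lock<std::mutex> ml(_lock);
        while (_arrived != _expected) {
          _cv.wait(ml);
        }
      }
    };
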
diff --git a/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp b/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp
new file mode 100644
index 000000000..1cce9f357
--- /dev/null
+++ b/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_ROOTPROCESSOR_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_ROOTPROCESSOR_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/sharedHeap.hpp"
+#include "runtime/mutex.hpp"
+
+class CLDClosure;
+class CodeBlobClosure;
+class G1CollectedHeap;
+class G1GCPhaseTimes;
+class G1ParPushHeapRSClosure;
+class Monitor;
+class OopClosure;
+class SubTasksDone;
+
+// Scoped object to assist in applying oop, CLD and code blob closures to
+// root locations. Handles claiming of different root scanning tasks
+// and takes care of global state for root scanning via a StrongRootsScope.
+// In the parallel case there is a shared G1RootProcessor object where all
+// worker threads call the process_roots methods.
+class G1RootProcessor : public StackObj {
+ G1CollectedHeap* _g1h;
+ SubTasksDone _process_strong_tasks;
+ SharedHeap::StrongRootsScope _srs;
+
+ // Used to implement the Thread work barrier.
+ Monitor _lock;
+ volatile jint _n_workers_discovered_strong_classes;
+
+ enum G1H_process_roots_tasks {
+ G1RP_PS_Universe_oops_do,
+ G1RP_PS_JNIHandles_oops_do,
+ G1RP_PS_ObjectSynchronizer_oops_do,
+ G1RP_PS_FlatProfiler_oops_do,
+ G1RP_PS_Management_oops_do,
+ G1RP_PS_SystemDictionary_oops_do,
+ G1RP_PS_ClassLoaderDataGraph_oops_do,
+ G1RP_PS_jvmti_oops_do,
+ G1RP_PS_CodeCache_oops_do,
+ G1RP_PS_filter_satb_buffers,
+ G1RP_PS_refProcessor_oops_do,
+ // Leave this one last.
+ G1RP_PS_NumElements
+ };
+
+ void worker_has_discovered_all_strong_classes();
+ void wait_until_all_strong_classes_discovered();
+
+ void process_java_roots(OopClosure* scan_non_heap_roots,
+ CLDClosure* thread_stack_clds,
+ CLDClosure* scan_strong_clds,
+ CLDClosure* scan_weak_clds,
+ CodeBlobClosure* scan_strong_code,
+ G1GCPhaseTimes* phase_times,
+ uint worker_i);
+
+ void process_vm_roots(OopClosure* scan_non_heap_roots,
+ OopClosure* scan_non_heap_weak_roots,
+ G1GCPhaseTimes* phase_times,
+ uint worker_i);
+
+public:
+ G1RootProcessor(G1CollectedHeap* g1h);
+
+ // Apply closures to the strongly and weakly reachable roots in the system
+ // in a single pass.
+ // Record and report timing measurements for sub-phases using the given worker_i.
+ void evacuate_roots(OopClosure* scan_non_heap_roots,
+ OopClosure* scan_non_heap_weak_roots,
+ CLDClosure* scan_strong_clds,
+ CLDClosure* scan_weak_clds,
+ bool trace_metadata,
+ uint worker_i);
+
+ // Apply oops, clds and blobs to all strongly reachable roots in the system
+ void process_strong_roots(OopClosure* oops,
+ CLDClosure* clds,
+ CodeBlobClosure* blobs);
+
+ // Apply oops, clds and blobs to strongly and weakly reachable roots in the system
+ void process_all_roots(OopClosure* oops,
+ CLDClosure* clds,
+ CodeBlobClosure* blobs);
+
+ // Apply scan_rs to all locations in the union of the remembered sets for all
+ // regions in the collection set
+ // (having done "set_region" to indicate the region in which the root resides).
+ void scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
+ OopClosure* scan_non_heap_weak_roots,
+ uint worker_i);
+
+ // Inform the root processor about the number of worker threads
+ void set_num_workers(int active_workers);
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_ROOTPROCESSOR_HPP
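
Each G1RP_PS_* task in the enum above is guarded by _process_strong_tasks.is_task_claimed(...): exactly one worker wins the claim and performs the task, all others skip it. A sketch of that claim protocol with std::atomic; the real SubTasksDone also tracks overall completion via all_tasks_completed():

    #include <atomic>
    #include <cstddef>

    // Sketch: once-only task claiming across worker threads.
    class TaskClaims {
      static const size_t kMaxTasks = 16;   // arbitrary size for the sketch
      std::atomic<bool> _claimed[kMaxTasks];
    public:
      TaskClaims() {
        for (size_t i = 0; i < kMaxTasks; i++) {
          _claimed[i].store(false);
        }
      }

      // Returns true if some thread already claimed the task; the single
      // caller that sees false is responsible for doing the work.
      bool is_task_claimed(size_t task) {
        return _claimed[task].exchange(true);
      }
    };

Usage mirrors the pattern above: if (!claims.is_task_claimed(task_id)) { /* do the root scan */ }.
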
diff --git a/src/share/vm/gc_implementation/g1/g1StringDedup.cpp b/src/share/vm/gc_implementation/g1/g1StringDedup.cpp
index d353d7ebd..bb960ee3a 100644
--- a/src/share/vm/gc_implementation/g1/g1StringDedup.cpp
+++ b/src/share/vm/gc_implementation/g1/g1StringDedup.cpp
@@ -105,7 +105,7 @@ void G1StringDedup::deduplicate(oop java_string) {
void G1StringDedup::oops_do(OopClosure* keep_alive) {
assert(is_enabled(), "String deduplication not enabled");
- unlink_or_oops_do(NULL, keep_alive);
+ unlink_or_oops_do(NULL, keep_alive, true /* allow_resize_and_rehash */);
}
void G1StringDedup::unlink(BoolObjectClosure* is_alive) {
@@ -122,37 +122,35 @@ void G1StringDedup::unlink(BoolObjectClosure* is_alive) {
class G1StringDedupUnlinkOrOopsDoTask : public AbstractGangTask {
private:
G1StringDedupUnlinkOrOopsDoClosure _cl;
+ G1GCPhaseTimes* _phase_times;
public:
G1StringDedupUnlinkOrOopsDoTask(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
- bool allow_resize_and_rehash) :
+ bool allow_resize_and_rehash,
+ G1GCPhaseTimes* phase_times) :
AbstractGangTask("G1StringDedupUnlinkOrOopsDoTask"),
- _cl(is_alive, keep_alive, allow_resize_and_rehash) {
- }
+ _cl(is_alive, keep_alive, allow_resize_and_rehash), _phase_times(phase_times) { }
virtual void work(uint worker_id) {
- double queue_fixup_start = os::elapsedTime();
- G1StringDedupQueue::unlink_or_oops_do(&_cl);
-
- double table_fixup_start = os::elapsedTime();
- G1StringDedupTable::unlink_or_oops_do(&_cl, worker_id);
-
- double queue_fixup_time_ms = (table_fixup_start - queue_fixup_start) * 1000.0;
- double table_fixup_time_ms = (os::elapsedTime() - table_fixup_start) * 1000.0;
- G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
- g1p->phase_times()->record_string_dedup_queue_fixup_worker_time(worker_id, queue_fixup_time_ms);
- g1p->phase_times()->record_string_dedup_table_fixup_worker_time(worker_id, table_fixup_time_ms);
+ {
+ G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupQueueFixup, worker_id);
+ G1StringDedupQueue::unlink_or_oops_do(&_cl);
+ }
+ {
+ G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupTableFixup, worker_id);
+ G1StringDedupTable::unlink_or_oops_do(&_cl, worker_id);
+ }
}
};
-void G1StringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, bool allow_resize_and_rehash) {
+void G1StringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive,
+ OopClosure* keep_alive,
+ bool allow_resize_and_rehash,
+ G1GCPhaseTimes* phase_times) {
assert(is_enabled(), "String deduplication not enabled");
- G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
- g1p->phase_times()->note_string_dedup_fixup_start();
- double fixup_start = os::elapsedTime();
- G1StringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash);
+ G1StringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash, phase_times);
if (G1CollectedHeap::use_parallel_gc_threads()) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
g1h->set_par_threads();
@@ -161,10 +159,6 @@ void G1StringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* k
} else {
task.work(0);
}
-
- double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
- g1p->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
- g1p->phase_times()->note_string_dedup_fixup_end();
}
void G1StringDedup::threads_do(ThreadClosure* tc) {
diff --git a/src/share/vm/gc_implementation/g1/g1StringDedup.hpp b/src/share/vm/gc_implementation/g1/g1StringDedup.hpp
index 68f700f65..3792a667a 100644
--- a/src/share/vm/gc_implementation/g1/g1StringDedup.hpp
+++ b/src/share/vm/gc_implementation/g1/g1StringDedup.hpp
@@ -90,6 +90,7 @@ class BoolObjectClosure;
class ThreadClosure;
class outputStream;
class G1StringDedupTable;
+class G1GCPhaseTimes;
//
// Main interface for interacting with string deduplication.
@@ -130,7 +131,7 @@ public:
static void oops_do(OopClosure* keep_alive);
static void unlink(BoolObjectClosure* is_alive);
static void unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive,
- bool allow_resize_and_rehash = true);
+ bool allow_resize_and_rehash, G1GCPhaseTimes* phase_times = NULL);
static void threads_do(ThreadClosure* tc);
static void print_worker_threads_on(outputStream* st);
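
The fixup now runs as an AbstractGangTask: a single task object whose work(worker_id) is executed once by every worker in the gang, with task.work(0) as the serial fallback seen above. Reduced to its shape (a sketch, not the HotSpot class):

    // Sketch of the gang-task shape used by G1StringDedupUnlinkOrOopsDoTask.
    class GangTaskSketch {
    public:
      virtual ~GangTaskSketch() {}
      virtual void work(unsigned worker_id) = 0;  // run by each gang worker
    };

    struct DedupFixupSketch : public GangTaskSketch {
      virtual void work(unsigned worker_id) {
        // phase 1: fix up queue entries (timed per worker)
        // phase 2: fix up table entries (timed per worker)
        (void)worker_id;
      }
    };

    // Serial fallback, matching the '} else { task.work(0); }' branch above.
    void run_serially(GangTaskSketch& task) { task.work(0); }
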
diff --git a/src/share/vm/gc_implementation/g1/g1_globals.hpp b/src/share/vm/gc_implementation/g1/g1_globals.hpp
index 4478e93f8..e24cc9594 100644
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -217,14 +217,6 @@
product(uintx, G1HeapRegionSize, 0, \
"Size of the G1 regions.") \
\
- experimental(bool, G1UseParallelRSetUpdating, true, \
- "Enables the parallelization of remembered set updating " \
- "during evacuation pauses") \
- \
- experimental(bool, G1UseParallelRSetScanning, true, \
- "Enables the parallelization of remembered set scanning " \
- "during evacuation pauses") \
- \
product(uintx, G1ConcRefinementThreads, 0, \
"If non-0 is the number of parallel rem set update threads, " \
"otherwise the value is determined ergonomically.") \
@@ -282,10 +274,14 @@
product(uintx, G1MixedGCCountTarget, 8, \
"The target number of mixed GCs after a marking cycle.") \
\
- experimental(bool, G1ReclaimDeadHumongousObjectsAtYoungGC, true, \
+ experimental(bool, G1EagerReclaimHumongousObjects, true, \
"Try to reclaim dead large objects at every young GC.") \
\
- experimental(bool, G1TraceReclaimDeadHumongousObjectsAtYoungGC, false, \
+ experimental(bool, G1EagerReclaimHumongousObjectsWithStaleRefs, true, \
+ "Try to reclaim dead large objects that have a few stale " \
+ "references at every young GC.") \
+ \
+ experimental(bool, G1TraceEagerReclaimHumongousObjects, false, \
"Print some information about large object liveness " \
"at every young GC.") \
\
diff --git a/src/share/vm/gc_implementation/g1/heapRegion.cpp b/src/share/vm/gc_implementation/g1/heapRegion.cpp
index 3a7251b56..d5a4b2617 100644
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,93 +47,55 @@ size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
- HeapRegion* hr, ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
- FilterKind fk) :
+ HeapRegion* hr,
+ G1ParPushHeapRSClosure* cl,
+ CardTableModRefBS::PrecisionStyle precision) :
DirtyCardToOopClosure(hr, cl, precision, NULL),
- _hr(hr), _fk(fk), _g1(g1) { }
+ _hr(hr), _rs_scan(cl), _g1(g1) { }
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
OopClosure* oc) :
_r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
-template<class ClosureType>
-HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
- HeapRegion* hr,
- HeapWord* cur, HeapWord* top) {
- oop cur_oop = oop(cur);
- size_t oop_size = hr->block_size(cur);
- HeapWord* next_obj = cur + oop_size;
- while (next_obj < top) {
- // Keep filtering the remembered set.
- if (!g1h->is_obj_dead(cur_oop, hr)) {
- // Bottom lies entirely below top, so we can call the
- // non-memRegion version of oop_iterate below.
- cur_oop->oop_iterate(cl);
- }
- cur = next_obj;
- cur_oop = oop(cur);
- oop_size = hr->block_size(cur);
- next_obj = cur + oop_size;
- }
- return cur;
-}
-
void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
HeapWord* bottom,
HeapWord* top) {
G1CollectedHeap* g1h = _g1;
size_t oop_size;
- ExtendedOopClosure* cl2 = NULL;
-
- FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
- FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl);
-
- switch (_fk) {
- case NoFilterKind: cl2 = _cl; break;
- case IntoCSFilterKind: cl2 = &intoCSFilt; break;
- case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
- default: ShouldNotReachHere();
- }
+ HeapWord* cur = bottom;
// Start filtering what we add to the remembered set. If the object is
// not considered dead, either because it is marked (in the mark bitmap)
// or it was allocated after marking finished, then we add it. Otherwise
// we can safely ignore the object.
- if (!g1h->is_obj_dead(oop(bottom), _hr)) {
- oop_size = oop(bottom)->oop_iterate(cl2, mr);
+ if (!g1h->is_obj_dead(oop(cur), _hr)) {
+ oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
} else {
- oop_size = _hr->block_size(bottom);
+ oop_size = _hr->block_size(cur);
}
- bottom += oop_size;
-
- if (bottom < top) {
- // We replicate the loop below for several kinds of possible filters.
- switch (_fk) {
- case NoFilterKind:
- bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top);
- break;
-
- case IntoCSFilterKind: {
- FilterIntoCSClosure filt(this, g1h, _cl);
- bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
- break;
- }
-
- case OutOfRegionFilterKind: {
- FilterOutOfRegionClosure filt(_hr, _cl);
- bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
- break;
- }
-
- default:
- ShouldNotReachHere();
+ cur += oop_size;
+
+ if (cur < top) {
+ oop cur_oop = oop(cur);
+ oop_size = _hr->block_size(cur);
+ HeapWord* next_obj = cur + oop_size;
+ while (next_obj < top) {
+ // Keep filtering the remembered set.
+ if (!g1h->is_obj_dead(cur_oop, _hr)) {
+ // Bottom lies entirely below top, so we can call the
+ // non-memRegion version of oop_iterate below.
+ cur_oop->oop_iterate(_rs_scan);
+ }
+ cur = next_obj;
+ cur_oop = oop(cur);
+ oop_size = _hr->block_size(cur);
+ next_obj = cur + oop_size;
}
// Last object. Need to do dead-obj filtering here too.
- if (!g1h->is_obj_dead(oop(bottom), _hr)) {
- oop(bottom)->oop_iterate(cl2, mr);
+ if (!g1h->is_obj_dead(oop(cur), _hr)) {
+ oop(cur)->oop_iterate(_rs_scan, mr);
}
}
}
@@ -338,7 +300,7 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
_orig_end = mr.end();
hr_clear(false /*par*/, false /*clear_space*/);
set_top(bottom());
- record_top_and_timestamp();
+ record_timestamp();
}
CompactibleSpace* HeapRegion::next_compaction_space() const {
@@ -426,9 +388,9 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
// If we're within a stop-world GC, then we might look at a card in a
// GC alloc region that extends onto a GC LAB, which may not be
- // parseable. Stop such at the "saved_mark" of the region.
+ // parseable. Stop such at the "scan_top" of the region.
if (g1h->is_gc_active()) {
- mr = mr.intersection(used_region_at_save_marks());
+ mr = mr.intersection(MemRegion(bottom(), scan_top()));
} else {
mr = mr.intersection(used_region());
}
@@ -468,7 +430,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
oop obj;
HeapWord* next = cur;
- while (next <= start) {
+ do {
cur = next;
obj = oop(cur);
if (obj->klass_or_null() == NULL) {
@@ -477,45 +439,38 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
}
// Otherwise...
next = cur + block_size(cur);
- }
+ } while (next <= start);
// If we finish the above loop...We have a parseable object that
// begins on or before the start of the memory region, and ends
// inside or spans the entire region.
-
- assert(obj == oop(cur), "sanity");
assert(cur <= start, "Loop postcondition");
assert(obj->klass_or_null() != NULL, "Loop postcondition");
- assert((cur + block_size(cur)) > start, "Loop postcondition");
-
- if (!g1h->is_obj_dead(obj)) {
- obj->oop_iterate(cl, mr);
- }
- while (cur < end) {
+ do {
obj = oop(cur);
+ assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
if (obj->klass_or_null() == NULL) {
// Ran into an unparseable point.
return cur;
- };
+ }
- // Otherwise:
- next = cur + block_size(cur);
+ // Advance the current pointer. "obj" still points to the object to iterate.
+ cur = cur + block_size(cur);
if (!g1h->is_obj_dead(obj)) {
- if (next < end || !obj->is_objArray()) {
- // This object either does not span the MemRegion
- // boundary, or if it does it's not an array.
- // Apply closure to whole object.
+ // Non-objArrays are sometimes marked imprecise at the object start. We
+ // always need to iterate over them in full.
+ // We only iterate over object arrays in full if they are completely contained
+ // in the memory region.
+ if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
obj->oop_iterate(cl);
} else {
- // This obj is an array that spans the boundary.
- // Stop at the boundary.
obj->oop_iterate(cl, mr);
}
}
- cur = next;
- }
+ } while (cur < end);
+
return NULL;
}
@@ -980,7 +935,7 @@ void HeapRegion::verify() const {
void G1OffsetTableContigSpace::clear(bool mangle_space) {
set_top(bottom());
- set_saved_mark_word(bottom());
+ _scan_top = bottom();
CompactibleSpace::clear(mangle_space);
reset_bot();
}
@@ -1012,41 +967,42 @@ HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
return _offsets.threshold();
}
-HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
+HeapWord* G1OffsetTableContigSpace::scan_top() const {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
- assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
HeapWord* local_top = top();
OrderAccess::loadload();
- if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
+ const unsigned local_time_stamp = _gc_time_stamp;
+ assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
+ if (local_time_stamp < g1h->get_gc_time_stamp()) {
return local_top;
} else {
- return Space::saved_mark_word();
+ return _scan_top;
}
}
-void G1OffsetTableContigSpace::record_top_and_timestamp() {
+void G1OffsetTableContigSpace::record_timestamp() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
if (_gc_time_stamp < curr_gc_time_stamp) {
- // The order of these is important, as another thread might be
- // about to start scanning this region. If it does so after
- // set_saved_mark and before _gc_time_stamp = ..., then the latter
- // will be false, and it will pick up top() as the high water mark
- // of region. If it does so after _gc_time_stamp = ..., then it
- // will pick up the right saved_mark_word() as the high water mark
- // of the region. Either way, the behaviour will be correct.
- Space::set_saved_mark_word(top());
- OrderAccess::storestore();
+ // Setting the time stamp here tells concurrent readers to look at
+ // scan_top to know the maximum allowed address to look at.
+
+ // scan_top should be bottom for all regions except for the
+ // retained old alloc region which should have scan_top == top
+ HeapWord* st = _scan_top;
+ guarantee(st == _bottom || st == _top, "invariant");
+
_gc_time_stamp = curr_gc_time_stamp;
- // No need to do another barrier to flush the writes above. If
- // this is called in parallel with other threads trying to
- // allocate into the region, the caller should call this while
- // holding a lock and when the lock is released the writes will be
- // flushed.
}
}
+void G1OffsetTableContigSpace::record_retained_region() {
+ // scan_top is the maximum address where it's safe for the next gc to
+ // scan this region.
+ _scan_top = top();
+}
+
void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
object_iterate(blk);
}
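
scan_top() is the reader half of the time-stamp scheme: top is loaded first, then, after a loadload barrier, the stamp; a stale stamp proves the region was not a GC alloc region in the current pause, so the loaded top is a safe bound. A sketch of the pairing with C++11 atomics standing in for OrderAccess (a simplification, not the HotSpot memory model):

    #include <atomic>
    #include <cstddef>

    // Sketch: read top before the stamp so a stale stamp guarantees the
    // observed top predates any allocation in the current pause.
    struct RegionView {
      std::atomic<size_t>   top;            // allocation top, as an address
      std::atomic<unsigned> gc_time_stamp;  // stamp of the last pause using it
      size_t                scan_top_saved; // recorded for retained regions

      size_t scan_top(unsigned current_stamp) const {
        size_t local_top = top.load(std::memory_order_acquire);
        unsigned stamp = gc_time_stamp.load(std::memory_order_acquire);
        if (stamp < current_stamp) {
          return local_top;      // region untouched in this pause
        }
        return scan_top_saved;   // bottom, or top of a retained alloc region
      }
    };
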
@@ -1080,6 +1036,8 @@ G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
CompactibleSpace::initialize(mr, clear_space, mangle_space);
_top = bottom();
+ _scan_top = bottom();
+ set_saved_mark_word(NULL);
reset_bot();
}
diff --git a/src/share/vm/gc_implementation/g1/heapRegion.hpp b/src/share/vm/gc_implementation/g1/heapRegion.hpp
index 16c0d719e..80ba22313 100644
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp
@@ -67,17 +67,9 @@ class nmethod;
// sets.
class HeapRegionDCTOC : public DirtyCardToOopClosure {
-public:
- // Specification of possible DirtyCardToOopClosure filtering.
- enum FilterKind {
- NoFilterKind,
- IntoCSFilterKind,
- OutOfRegionFilterKind
- };
-
-protected:
+private:
HeapRegion* _hr;
- FilterKind _fk;
+ G1ParPushHeapRSClosure* _rs_scan;
G1CollectedHeap* _g1;
// Walk the given memory region from bottom to (actual) top
@@ -90,9 +82,9 @@ protected:
public:
HeapRegionDCTOC(G1CollectedHeap* g1,
- HeapRegion* hr, ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
- FilterKind fk);
+ HeapRegion* hr,
+ G1ParPushHeapRSClosure* cl,
+ CardTableModRefBS::PrecisionStyle precision);
};
// The complicating factor is that BlockOffsetTable diverged
@@ -101,28 +93,25 @@ public:
// OffsetTableContigSpace. If the two versions of BlockOffsetTable could
// be reconciled, then G1OffsetTableContigSpace could go away.
-// The idea behind time stamps is the following. Doing a save_marks on
-// all regions at every GC pause is time consuming (if I remember
-// well, 10ms or so). So, we would like to do that only for regions
-// that are GC alloc regions. To achieve this, we use time
-// stamps. For every evacuation pause, G1CollectedHeap generates a
-// unique time stamp (essentially a counter that gets
-// incremented). Every time we want to call save_marks on a region,
-// we set the saved_mark_word to top and also copy the current GC
-// time stamp to the time stamp field of the space. Reading the
-// saved_mark_word involves checking the time stamp of the
-// region. If it is the same as the current GC time stamp, then we
-// can safely read the saved_mark_word field, as it is valid. If the
-// time stamp of the region is not the same as the current GC time
-// stamp, then we instead read top, as the saved_mark_word field is
-// invalid. Time stamps (on the regions and also on the
-// G1CollectedHeap) are reset at every cleanup (we iterate over
-// the regions anyway) and at the end of a Full GC. The current scheme
-// that uses sequential unsigned ints will fail only if we have 4b
+// The idea behind time stamps is the following. We want to keep track of
+// the highest address where it's safe to scan objects for each region.
+// This is only relevant for current GC alloc regions so we keep a time stamp
+// per region to determine if the region has been allocated during the current
+// GC or not. If the time stamp is current we report a scan_top value which
+// was saved at the end of the previous GC for retained alloc regions and which is
+// equal to the bottom for all other regions.
+// There is a race between card scanners and allocating gc workers where we must ensure
+// that card scanners do not read the memory allocated by the gc workers.
+// In order to enforce that, we must not return a value of _top which is more recent than the
+// time stamp. This is due to the fact that a region may become a gc alloc region at
+// some point after we've read the timestamp value as being < the current time stamp.
+// The time stamps are re-initialized to zero at cleanup and at Full GCs.
+// The current scheme that uses sequential unsigned ints will fail only if we have 4b
// evacuation pauses between two cleanups, which is _highly_ unlikely.
class G1OffsetTableContigSpace: public CompactibleSpace {
friend class VMStructs;
HeapWord* _top;
+ HeapWord* volatile _scan_top;
protected:
G1BlockOffsetArrayContigSpace _offsets;
Mutex _par_alloc_lock;
@@ -166,10 +155,11 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
void set_bottom(HeapWord* value);
void set_end(HeapWord* value);
- virtual HeapWord* saved_mark_word() const;
- void record_top_and_timestamp();
+ HeapWord* scan_top() const;
+ void record_timestamp();
void reset_gc_time_stamp() { _gc_time_stamp = 0; }
unsigned get_gc_time_stamp() { return _gc_time_stamp; }
+ void record_retained_region();
// See the comment above in the declaration of _pre_dummy_top for an
// explanation of what it is.
@@ -193,6 +183,8 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
virtual HeapWord* allocate(size_t word_size);
HeapWord* par_allocate(size_t word_size);
+ HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
+
// MarkSweep support phase3
virtual HeapWord* initialize_threshold();
virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
diff --git a/src/share/vm/gc_implementation/g1/heapRegionManager.cpp b/src/share/vm/gc_implementation/g1/heapRegionManager.cpp
index f1ffc245d..020eb08df 100644
--- a/src/share/vm/gc_implementation/g1/heapRegionManager.cpp
+++ b/src/share/vm/gc_implementation/g1/heapRegionManager.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -145,6 +145,24 @@ void HeapRegionManager::make_regions_available(uint start, uint num_regions) {
}
}
+MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
+ size_t used_sz =
+ _prev_bitmap_mapper->committed_size() +
+ _next_bitmap_mapper->committed_size() +
+ _bot_mapper->committed_size() +
+ _cardtable_mapper->committed_size() +
+ _card_counts_mapper->committed_size();
+
+ size_t committed_sz =
+ _prev_bitmap_mapper->reserved_size() +
+ _next_bitmap_mapper->reserved_size() +
+ _bot_mapper->reserved_size() +
+ _cardtable_mapper->reserved_size() +
+ _card_counts_mapper->reserved_size();
+
+ return MemoryUsage(0, used_sz, committed_sz, committed_sz);
+}
+
uint HeapRegionManager::expand_by(uint num_regions) {
return expand_at(0, num_regions);
}
diff --git a/src/share/vm/gc_implementation/g1/heapRegionManager.hpp b/src/share/vm/gc_implementation/g1/heapRegionManager.hpp
index 666184e50..83996f71d 100644
--- a/src/share/vm/gc_implementation/g1/heapRegionManager.hpp
+++ b/src/share/vm/gc_implementation/g1/heapRegionManager.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
+#include "services/memoryUsage.hpp"
class HeapRegion;
class HeapRegionClosure;
@@ -197,6 +198,8 @@ public:
// Return the maximum number of regions in the heap.
uint max_length() const { return (uint)_regions.length(); }
+ MemoryUsage get_auxiliary_data_memory_usage() const;
+
MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
// Expand the sequence to reflect that the heap has grown. Either create new
diff --git a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
index ec0249ea4..2574c91b0 100644
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
@@ -694,6 +694,18 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
clear_fcc();
}
+bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
+ if (limit <= (size_t)G1RSetSparseRegionEntries) {
+ return occ_coarse() == 0 && _first_all_fine_prts == NULL && occ_sparse() <= limit;
+ } else {
+ // Current uses of this method may only use values less than G1RSetSparseRegionEntries
+ // for the limit. The solution, comparing against occupied(), would be too slow
+ // at this time.
+ Unimplemented();
+ return false;
+ }
+}
+
bool OtherRegionsTable::is_empty() const {
return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
}
diff --git a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
index 2d5c71b22..221651cf0 100644
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
@@ -181,6 +181,10 @@ public:
// sense.
void add_reference(OopOrNarrowOopStar from, int tid);
+ // Returns whether this remembered set (and all of its sub-sets) has an
+ // occupancy that is less than or equal to the given limit.
+ bool occupancy_less_or_equal_than(size_t limit) const;
+
// Removes any entries shown by the given bitmaps to contain only dead
// objects.
void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
@@ -276,6 +280,10 @@ public:
return (strong_code_roots_list_length() == 0) && _other_regions.is_empty();
}
+ bool occupancy_less_or_equal_than(size_t occ) const {
+ return (strong_code_roots_list_length() == 0) && _other_regions.occupancy_less_or_equal_than(occ);
+ }
+
size_t occupied() {
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
return occupied_locked();
diff --git a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp
index 8fde2455f..b6544308f 100644
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp
@@ -420,6 +420,7 @@ void FreeRegionList_test() {
ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size()));
G1RegionToSpaceMapper* bot_storage =
G1RegionToSpaceMapper::create_mapper(bot_rs,
+ bot_rs.size(),
os::vm_page_size(),
HeapRegion::GrainBytes,
G1BlockOffsetSharedArray::N_bytes,
diff --git a/src/share/vm/gc_implementation/g1/satbQueue.cpp b/src/share/vm/gc_implementation/g1/satbQueue.cpp
index fbd71a2dc..3544db550 100644
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,36 +29,74 @@
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
+#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
void ObjPtrQueue::flush() {
- // The buffer might contain refs into the CSet. We have to filter it
- // first before we flush it, otherwise we might end up with an
- // enqueued buffer with refs into the CSet which breaks our invariants.
+ // Filter now to possibly save work later. If filtering empties the
+ // buffer then flush_impl can deallocate the buffer.
filter();
flush_impl();
}
-// This method removes entries from an SATB buffer that will not be
-// useful to the concurrent marking threads. An entry is removed if it
-// satisfies one of the following conditions:
+// Return true if a SATB buffer entry refers to an object that
+// requires marking.
//
-// * it points to an object outside the G1 heap (G1's concurrent
-// marking only visits objects inside the G1 heap),
-// * it points to an object that has been allocated since marking
-// started (according to SATB those objects do not need to be
-// visited during marking), or
-// * it points to an object that has already been marked (no need to
-// process it again).
+// The entry must point into the G1 heap. In particular, it must not
+// be a NULL pointer. NULL pointers are pre-filtered and never
+// inserted into a SATB buffer.
//
-// The rest of the entries will be retained and are compacted towards
-// the top of the buffer. Note that, because we do not allow old
-// regions in the CSet during marking, all objects on the CSet regions
-// are young (eden or survivors) and therefore implicitly live. So any
-// references into the CSet will be removed during filtering.
+// An entry that is below the NTAMS pointer for the containing heap
+// region requires marking. Such an entry must point to a valid object.
+//
+// An entry that is at least the NTAMS pointer for the containing heap
+// region might be any of the following, none of which should be marked.
+//
+// * A reference to an object allocated since marking started.
+// According to SATB, such objects are implicitly kept live and do
+// not need to be dealt with via SATB buffer processing.
+//
+// * A reference to a young generation object. Young objects are
+// handled separately and are not marked by concurrent marking.
+//
+// * A stale reference to a young generation object. If a young
+// generation object reference is recorded and not filtered out
+// before being moved by a young collection, the reference becomes
+// stale.
+//
+// * A stale reference to an eagerly reclaimed humongous object. If a
+// humongous object is recorded and then reclaimed, the reference
+// becomes stale.
+//
+// The stale reference cases are implicitly handled by the NTAMS
+// comparison. Because of the possibility of stale references, buffer
+// processing must be somewhat circumspect and not assume entries
+// in an unfiltered buffer refer to valid objects.
+
+inline bool requires_marking(const void* entry, G1CollectedHeap* heap) {
+ // Includes rejection of NULL pointers.
+ assert(heap->is_in_reserved(entry),
+ err_msg("Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry)));
+
+ HeapRegion* region = heap->heap_region_containing_raw(entry);
+ assert(region != NULL, err_msg("No region for " PTR_FORMAT, p2i(entry)));
+ if (entry >= region->next_top_at_mark_start()) {
+ return false;
+ }
+
+ assert(((oop)entry)->is_oop(true /* ignore mark word */),
+ err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry)));
+
+ return true;
+}
+
+// This method removes entries from a SATB buffer that will not be
+// useful to the concurrent marking threads. Entries are retained if
+// they require marking and are not already marked. Retained entries
+// are compacted toward the top of the buffer.
void ObjPtrQueue::filter() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
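
For orientation before the next hunk: the retention test applied to each surviving entry reads as a single predicate. retain_entry is a hypothetical wrapper; the two calls are exactly the ones in the rewritten loop below.

// Hypothetical wrapper: an entry survives filtering iff it still requires
// marking (in-heap, below NTAMS) and is not already marked on the next
// bitmap.
static bool retain_entry(void* entry, G1CollectedHeap* g1h) {
  return requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry);
}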
@@ -80,26 +118,25 @@ void ObjPtrQueue::filter() {
assert(i > 0, "we should have at least one more entry to process");
i -= oopSize;
debug_only(entries += 1;)
- oop* p = (oop*) &buf[byte_index_to_index((int) i)];
- oop obj = *p;
+ void** p = &buf[byte_index_to_index((int) i)];
+ void* entry = *p;
// NULL the entry so that unused parts of the buffer contain NULLs
// at the end. If we are going to retain it we will copy it to its
// final place. If we have retained all entries we have visited so
// far, we'll just end up copying it to the same place.
*p = NULL;
- bool retain = g1h->is_obj_ill(obj);
- if (retain) {
+ if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) {
assert(new_index > 0, "we should not have already filled up the buffer");
new_index -= oopSize;
assert(new_index >= i,
"new_index should never be below i, as we alwaysr compact 'up'");
- oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
+ void** new_p = &buf[byte_index_to_index((int) new_index)];
assert(new_p >= p, "the destination location should never be below "
"the source as we always compact 'up'");
assert(*new_p == NULL,
"we should have already cleared the destination location");
- *new_p = obj;
+ *new_p = entry;
debug_only(retained += 1;)
}
}
@@ -126,10 +163,7 @@ bool ObjPtrQueue::should_enqueue_buffer() {
assert(_lock == NULL || _lock->owned_by_self(),
"we should have taken the lock before calling this");
- // Even if G1SATBBufferEnqueueingThresholdPercent == 0 we have to
- // filter the buffer given that this will remove any references into
- // the CSet as we currently assume that no such refs will appear in
- // enqueued buffers.
+ // If G1SATBBufferEnqueueingThresholdPercent == 0 we could skip filtering.
// This method should only be called if there is a non-NULL buffer
// that is full.
@@ -146,31 +180,19 @@ bool ObjPtrQueue::should_enqueue_buffer() {
return should_enqueue;
}
-void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
+void ObjPtrQueue::apply_closure_and_empty(SATBBufferClosure* cl) {
+ assert(SafepointSynchronize::is_at_safepoint(),
+ "SATB queues must only be processed at safepoints");
if (_buf != NULL) {
- apply_closure_to_buffer(cl, _buf, _index, _sz);
- }
-}
-
-void ObjPtrQueue::apply_closure_and_empty(ObjectClosure* cl) {
- if (_buf != NULL) {
- apply_closure_to_buffer(cl, _buf, _index, _sz);
+ assert(_index % sizeof(void*) == 0, "invariant");
+ assert(_sz % sizeof(void*) == 0, "invariant");
+ assert(_index <= _sz, "invariant");
+ cl->do_buffer(_buf + byte_index_to_index((int)_index),
+ byte_index_to_index((int)(_sz - _index)));
_index = _sz;
}
}
-void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl,
- void** buf, size_t index, size_t sz) {
- if (cl == NULL) return;
- for (size_t i = index; i < sz; i += oopSize) {
- oop obj = (oop)buf[byte_index_to_index((int)i)];
- // There can be NULL entries because of destructors.
- if (obj != NULL) {
- cl->do_object(obj);
- }
- }
-}
-
#ifndef PRODUCT
// Helpful for debugging
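
With the ObjectClosure plumbing gone, buffer consumers now implement SATBBufferClosure (declared in satbQueue.hpp later in this patch). A minimal sketch of such a closure; CountSATBEntries is hypothetical:

// Hypothetical closure: count SATB entries handed out by
// apply_closure_and_empty() or apply_closure_to_completed_buffer().
class CountSATBEntries : public SATBBufferClosure {
  size_t _count;
public:
  CountSATBEntries() : _count(0) {}
  virtual void do_buffer(void** buffer, size_t size) { _count += size; }
  size_t count() const { return _count; }
};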
@@ -186,23 +208,12 @@ void ObjPtrQueue::print(const char* name,
}
#endif // PRODUCT
-#ifdef ASSERT
-void ObjPtrQueue::verify_oops_in_buffer() {
- if (_buf == NULL) return;
- for (size_t i = _index; i < _sz; i += oopSize) {
- oop obj = (oop)_buf[byte_index_to_index((int)i)];
- assert(obj != NULL && obj->is_oop(true /* ignore mark word */),
- "Not an oop");
- }
-}
-#endif
-
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER
SATBMarkQueueSet::SATBMarkQueueSet() :
- PtrQueueSet(), _closure(NULL), _par_closures(NULL),
+ PtrQueueSet(),
_shared_satb_queue(this, true /*perm*/) { }
void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
@@ -210,13 +221,9 @@ void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
Mutex* lock) {
PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
_shared_satb_queue.set_lock(lock);
- if (ParallelGCThreads > 0) {
- _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads, mtGC);
- }
}
void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
- DEBUG_ONLY(t->satb_mark_queue().verify_oops_in_buffer();)
t->satb_mark_queue().handle_zero_index();
}
@@ -276,17 +283,7 @@ void SATBMarkQueueSet::filter_thread_buffers() {
shared_satb_queue()->filter();
}
-void SATBMarkQueueSet::set_closure(ObjectClosure* closure) {
- _closure = closure;
-}
-
-void SATBMarkQueueSet::set_par_closure(int i, ObjectClosure* par_closure) {
- assert(ParallelGCThreads > 0 && _par_closures != NULL, "Precondition");
- _par_closures[i] = par_closure;
-}
-
-bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
- uint worker) {
+bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl) {
BufferNode* nd = NULL;
{
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
@@ -298,10 +295,20 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
if (_n_completed_buffers == 0) _process_completed = false;
}
}
- ObjectClosure* cl = (par ? _par_closures[worker] : _closure);
if (nd != NULL) {
void **buf = BufferNode::make_buffer_from_node(nd);
- ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
+ // Skip over NULL entries at the beginning (i.e. the push end) of the buffer.
+ // Filtering can result in non-full completed buffers; see
+ // should_enqueue_buffer.
+ assert(_sz % sizeof(void*) == 0, "invariant");
+ size_t limit = ObjPtrQueue::byte_index_to_index((int)_sz);
+ for (size_t i = 0; i < limit; ++i) {
+ if (buf[i] != NULL) {
+ // Found the end of the block of NULLs; process the remainder.
+ cl->do_buffer(buf + i, limit - i);
+ break;
+ }
+ }
deallocate_buffer(buf);
return true;
} else {
@@ -309,28 +316,6 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
}
}
-void SATBMarkQueueSet::iterate_completed_buffers_read_only(ObjectClosure* cl) {
- assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
- assert(cl != NULL, "pre-condition");
-
- BufferNode* nd = _completed_buffers_head;
- while (nd != NULL) {
- void** buf = BufferNode::make_buffer_from_node(nd);
- ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
- nd = nd->next();
- }
-}
-
-void SATBMarkQueueSet::iterate_thread_buffers_read_only(ObjectClosure* cl) {
- assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
- assert(cl != NULL, "pre-condition");
-
- for (JavaThread* t = Threads::first(); t; t = t->next()) {
- t->satb_mark_queue().apply_closure(cl);
- }
- shared_satb_queue()->apply_closure(cl);
-}
-
#ifndef PRODUCT
// Helpful for debugging
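
A sketch of how the new pop-and-process API is meant to be driven at remark time; drain_completed_buffers is a hypothetical helper:

// Hypothetical remark-time driver: pop and process completed buffers until
// none remain; each call applies cl from the first non-NULL entry onward.
static void drain_completed_buffers(SATBMarkQueueSet& qset, SATBBufferClosure* cl) {
  while (qset.apply_closure_to_completed_buffer(cl)) {
    // loop until apply_closure_to_completed_buffer() returns false
  }
}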
diff --git a/src/share/vm/gc_implementation/g1/satbQueue.hpp b/src/share/vm/gc_implementation/g1/satbQueue.hpp
index 36af72e60..594895919 100644
--- a/src/share/vm/gc_implementation/g1/satbQueue.hpp
+++ b/src/share/vm/gc_implementation/g1/satbQueue.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,32 +25,30 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_SATBQUEUE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_SATBQUEUE_HPP
+#include "memory/allocation.hpp"
#include "gc_implementation/g1/ptrQueue.hpp"
-class ObjectClosure;
class JavaThread;
class SATBMarkQueueSet;
+// Base class for processing the contents of a SATB buffer.
+class SATBBufferClosure : public StackObj {
+protected:
+ ~SATBBufferClosure() { }
+
+public:
+ // Process the SATB entries in the designated buffer range.
+ virtual void do_buffer(void** buffer, size_t size) = 0;
+};
+
// A ptrQueue whose elements are "oops", pointers to object heads.
class ObjPtrQueue: public PtrQueue {
- friend class Threads;
friend class SATBMarkQueueSet;
- friend class G1RemarkThreadsClosure;
private:
// Filter out unwanted entries from the buffer.
void filter();
- // Apply the closure to all elements.
- void apply_closure(ObjectClosure* cl);
-
- // Apply the closure to all elements and empty the buffer;
- void apply_closure_and_empty(ObjectClosure* cl);
-
- // Apply the closure to all elements of "buf", down to "index" (inclusive.)
- static void apply_closure_to_buffer(ObjectClosure* cl,
- void** buf, size_t index, size_t sz);
-
public:
ObjPtrQueue(PtrQueueSet* qset, bool perm = false) :
// SATB queues are only active during marking cycles. We create
@@ -63,6 +61,10 @@ public:
// Process queue entries and free resources.
void flush();
+ // Apply cl to the active part of the buffer.
+ // Prerequisite: Must be at a safepoint.
+ void apply_closure_and_empty(SATBBufferClosure* cl);
+
// Overrides PtrQueue::should_enqueue_buffer(). See the method's
// definition for more information.
virtual bool should_enqueue_buffer();
@@ -72,21 +74,11 @@ public:
void print(const char* name);
static void print(const char* name, void** buf, size_t index, size_t sz);
#endif // PRODUCT
-
- void verify_oops_in_buffer() NOT_DEBUG_RETURN;
};
class SATBMarkQueueSet: public PtrQueueSet {
- ObjectClosure* _closure;
- ObjectClosure** _par_closures; // One per ParGCThread.
-
ObjPtrQueue _shared_satb_queue;
- // Utility function to support sequential and parallel versions. If
- // "par" is true, then "worker" is the par thread id; if "false", worker
- // is ignored.
- bool apply_closure_to_completed_buffer_work(bool par, uint worker);
-
#ifdef ASSERT
void dump_active_states(bool expected_active);
void verify_active_states(bool expected_active);
@@ -110,32 +102,12 @@ public:
// Filter all the currently-active SATB buffers.
void filter_thread_buffers();
- // Register "blk" as "the closure" for all queues. Only one such closure
- // is allowed. The "apply_closure_to_completed_buffer" method will apply
- // this closure to a completed buffer, and "iterate_closure_all_threads"
- // applies it to partially-filled buffers (the latter should only be done
- // with the world stopped).
- void set_closure(ObjectClosure* closure);
- // Set the parallel closures: pointer is an array of pointers to
- // closures, one for each parallel GC thread.
- void set_par_closure(int i, ObjectClosure* closure);
-
- // If there exists some completed buffer, pop it, then apply the
- // registered closure to all its elements, and return true. If no
- // completed buffers exist, return false.
- bool apply_closure_to_completed_buffer() {
- return apply_closure_to_completed_buffer_work(false, 0);
- }
- // Parallel version of the above.
- bool par_apply_closure_to_completed_buffer(uint worker) {
- return apply_closure_to_completed_buffer_work(true, worker);
- }
-
- // Apply the given closure on enqueued and currently-active buffers
- // respectively. Both methods are read-only, i.e., they do not
- // modify any of the buffers.
- void iterate_completed_buffers_read_only(ObjectClosure* cl);
- void iterate_thread_buffers_read_only(ObjectClosure* cl);
+ // If there exists some completed buffer, pop and process it, and
+ // return true. Otherwise return false. Processing a buffer
+ // consists of applying the closure to the buffer range starting
+ // with the first non-NULL entry to the end of the buffer; the
+ // leading entries may be NULL due to filtering.
+ bool apply_closure_to_completed_buffer(SATBBufferClosure* cl);
#ifndef PRODUCT
// Helpful for debugging
diff --git a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
index 80a0f7281..47267f21d 100644
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,12 +34,11 @@
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "runtime/interfaceSupport.hpp"
-VM_G1CollectForAllocation::VM_G1CollectForAllocation(
- unsigned int gc_count_before,
- size_t word_size)
+VM_G1CollectForAllocation::VM_G1CollectForAllocation(uint gc_count_before,
+ size_t word_size)
: VM_G1OperationWithAllocRequest(gc_count_before, word_size,
GCCause::_allocation_failure) {
- guarantee(word_size > 0, "an allocation should always be requested");
+ guarantee(word_size != 0, "An allocation should always be requested with this operation.");
}
void VM_G1CollectForAllocation::doit() {
@@ -57,12 +56,11 @@ void VM_G1CollectFull::doit() {
g1h->do_full_collection(false /* clear_all_soft_refs */);
}
-VM_G1IncCollectionPause::VM_G1IncCollectionPause(
- unsigned int gc_count_before,
- size_t word_size,
- bool should_initiate_conc_mark,
- double target_pause_time_ms,
- GCCause::Cause gc_cause)
+VM_G1IncCollectionPause::VM_G1IncCollectionPause(uint gc_count_before,
+ size_t word_size,
+ bool should_initiate_conc_mark,
+ double target_pause_time_ms,
+ GCCause::Cause gc_cause)
: VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
_should_initiate_conc_mark(should_initiate_conc_mark),
_target_pause_time_ms(target_pause_time_ms),
@@ -75,7 +73,7 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
}
bool VM_G1IncCollectionPause::doit_prologue() {
- bool res = VM_GC_Operation::doit_prologue();
+ bool res = VM_G1OperationWithAllocRequest::doit_prologue();
if (!res) {
if (_should_initiate_conc_mark) {
// The prologue can fail for a couple of reasons. The first is that another GC
@@ -92,12 +90,8 @@ bool VM_G1IncCollectionPause::doit_prologue() {
void VM_G1IncCollectionPause::doit() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
- assert(!_should_initiate_conc_mark ||
- ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
- (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
- _gc_cause == GCCause::_g1_humongous_allocation ||
- _gc_cause == GCCause::_update_allocation_context_stats_inc),
- "only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");
+ assert(!_should_initiate_conc_mark || g1h->should_do_concurrent_full_gc(_gc_cause),
+ "only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle");
if (_word_size > 0) {
// An allocation has been requested. So, try to do that first.
@@ -169,7 +163,7 @@ void VM_G1IncCollectionPause::doit() {
}
void VM_G1IncCollectionPause::doit_epilogue() {
- VM_GC_Operation::doit_epilogue();
+ VM_G1OperationWithAllocRequest::doit_epilogue();
// If the pause was initiated by a System.gc() and
// +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
@@ -230,7 +224,6 @@ void VM_CGC_Operation::release_and_notify_pending_list_lock() {
}
void VM_CGC_Operation::doit() {
- gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
SharedHeap* sh = SharedHeap::heap();
diff --git a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp
index c8014d415..265eb37d3 100644
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,20 +36,17 @@
// - VM_G1CollectForAllocation
// - VM_G1IncCollectionPause
-class VM_G1OperationWithAllocRequest: public VM_GC_Operation {
+class VM_G1OperationWithAllocRequest : public VM_CollectForAllocation {
protected:
- size_t _word_size;
- HeapWord* _result;
bool _pause_succeeded;
AllocationContext_t _allocation_context;
public:
- VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
- size_t word_size,
+ VM_G1OperationWithAllocRequest(uint gc_count_before,
+ size_t word_size,
GCCause::Cause gc_cause)
- : VM_GC_Operation(gc_count_before, gc_cause),
- _word_size(word_size), _result(NULL), _pause_succeeded(false) { }
- HeapWord* result() { return _result; }
+ : VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
+ _pause_succeeded(false) {}
bool pause_succeeded() { return _pause_succeeded; }
void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
AllocationContext_t allocation_context() { return _allocation_context; }
@@ -57,8 +54,8 @@ public:
class VM_G1CollectFull: public VM_GC_Operation {
public:
- VM_G1CollectFull(unsigned int gc_count_before,
- unsigned int full_gc_count_before,
+ VM_G1CollectFull(uint gc_count_before,
+ uint full_gc_count_before,
GCCause::Cause cause)
: VM_GC_Operation(gc_count_before, cause, full_gc_count_before, true) { }
virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
@@ -70,7 +67,7 @@ public:
class VM_G1CollectForAllocation: public VM_G1OperationWithAllocRequest {
public:
- VM_G1CollectForAllocation(unsigned int gc_count_before,
+ VM_G1CollectForAllocation(uint gc_count_before,
size_t word_size);
virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
virtual void doit();
@@ -84,9 +81,9 @@ private:
bool _should_initiate_conc_mark;
bool _should_retry_gc;
double _target_pause_time_ms;
- unsigned int _old_marking_cycles_completed_before;
+ uint _old_marking_cycles_completed_before;
public:
- VM_G1IncCollectionPause(unsigned int gc_count_before,
+ VM_G1IncCollectionPause(uint gc_count_before,
size_t word_size,
bool should_initiate_conc_mark,
double target_pause_time_ms,
diff --git a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
index 4d407dbfa..8ef1bd2cd 100644
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
@@ -622,7 +622,7 @@ void ParNewGenTask::work(uint worker_id) {
true, // Process younger gens, if any,
// as strong roots.
false, // no scope; this is parallel code
- SharedHeap::SO_ScavengeCodeCache,
+ GenCollectedHeap::SO_ScavengeCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&par_scan_state.to_space_root_closure(),
&par_scan_state.older_gen_closure(),
@@ -1197,8 +1197,10 @@ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
return real_forwardee(old);
}
- new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
- old, m, sz);
+ if (!_promotion_failed) {
+ new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
+ old, m, sz);
+ }
if (new_obj == NULL) {
// promotion failed, forward to self
diff --git a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp
index e417cd495..f4f736a64 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp
@@ -66,9 +66,10 @@ void GenerationSizer::initialize_flags() {
void GenerationSizer::initialize_size_info() {
trace_gen_sizes("ps heap raw");
- const size_t page_sz = os::page_size_for_region(_min_heap_byte_size,
- _max_heap_byte_size,
- 8);
+ const size_t max_page_sz = os::page_size_for_region_aligned(_max_heap_byte_size, 8);
+ const size_t min_pages = 4; // 1 for eden + 1 for each survivor + 1 for old
+ const size_t min_page_sz = os::page_size_for_region_aligned(_min_heap_byte_size, min_pages);
+ const size_t page_sz = MIN2(max_page_sz, min_page_sz);
// Can a page size be something other than a power of two?
assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
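
The page-size choice now honors both ends of the heap range: it must divide the maximum heap into at least 8 pages and the minimum heap into at least 4 (eden, two survivors, old). Restated as a standalone helper under those assumptions (choose_page_sz is hypothetical):

// Hypothetical restatement of the selection above: take the largest page
// size that satisfies both the max-heap and min-heap constraints.
static size_t choose_page_sz(size_t min_heap_bytes, size_t max_heap_bytes) {
  const size_t max_page_sz = os::page_size_for_region_aligned(max_heap_bytes, 8);
  const size_t min_page_sz = os::page_size_for_region_aligned(min_heap_bytes, 4);
  return MIN2(max_page_sz, min_page_sz);
}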
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp
index 19a055cc0..1dde10746 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp
@@ -55,7 +55,7 @@ ParMarkBitMap::initialize(MemRegion covered_region)
const size_t words = bits / BitsPerWord;
const size_t raw_bytes = words * sizeof(idx_t);
- const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
+ const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
const size_t granularity = os::vm_allocation_granularity();
_reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
index 7a7805379..f407f20b7 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -261,7 +261,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
uint loop_count = 0;
uint gc_count = 0;
- int gclocker_stalled_count = 0;
+ uint gclocker_stalled_count = 0;
while (result == NULL) {
// We don't want to have multiple collections for a single filled generation.
@@ -521,8 +521,8 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
assert(!Heap_lock->owned_by_self(),
"this thread should not own the Heap_lock");
- unsigned int gc_count = 0;
- unsigned int full_gc_count = 0;
+ uint gc_count = 0;
+ uint full_gc_count = 0;
{
MutexLocker ml(Heap_lock);
// This value is guarded by the Heap_lock
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
index 77ab2cb17..114d39dfe 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
@@ -167,7 +167,6 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
{
HandleMark hm;
- gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
TraceCollectorStats tcs(counters());
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index e97a041b9..8175ded1a 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -401,7 +401,7 @@ PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
const size_t raw_bytes = count * element_size;
- const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
+ const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
const size_t granularity = os::vm_allocation_granularity();
_reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
@@ -2054,7 +2054,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
gc_task_manager()->task_idle_workers();
heap->set_par_threads(gc_task_manager()->active_workers());
- gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
TraceCollectorStats tcs(counters());
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
index 4d835314c..881f380ce 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
@@ -348,7 +348,7 @@ public:
HeapWord* _partial_obj_addr;
region_sz_t _partial_obj_size;
region_sz_t volatile _dc_and_los;
- bool _blocks_filled;
+ bool volatile _blocks_filled;
#ifdef ASSERT
size_t _blocks_filled_count; // Number of block table fills.
@@ -499,7 +499,9 @@ ParallelCompactData::RegionData::destination_count() const
inline bool
ParallelCompactData::RegionData::blocks_filled() const
{
- return _blocks_filled;
+ bool result = _blocks_filled;
+ OrderAccess::acquire();
+ return result;
}
#ifdef ASSERT
@@ -513,6 +515,7 @@ ParallelCompactData::RegionData::blocks_filled_count() const
inline void
ParallelCompactData::RegionData::set_blocks_filled()
{
+ OrderAccess::release();
_blocks_filled = true;
// Debug builds count the number of times the table was filled.
DEBUG_ONLY(Atomic::inc_ptr(&_blocks_filled_count));
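
The volatile qualifier plus the release/acquire pair added above form a standard publish/consume protocol: the block-table stores are ordered before the _blocks_filled store, and the reader's loads of the table cannot move above its load of the flag. A generic sketch of the same pattern with hypothetical fields:

// Hypothetical fields illustrating the pairing used by set_blocks_filled()
// and blocks_filled() above.
volatile bool _published;
int           _data;

void publisher(int v) {
  _data = v;               // fill in the payload (cf. the block table)
  OrderAccess::release();  // no prior store may pass this point
  _published = true;       // publishing store (cf. _blocks_filled = true)
}

bool consumer(int* out) {
  bool seen = _published;  // consuming load (cf. blocks_filled())
  OrderAccess::acquire();  // no later load may move above this point
  if (seen) *out = _data;
  return seen;
}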
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
index 7eea946a5..6d728ccf5 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
@@ -329,7 +329,6 @@ bool PSScavenge::invoke_no_policy() {
ResourceMark rm;
HandleMark hm;
- gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
TraceCollectorStats tcs(counters());
diff --git a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
index 3a8f347bc..9148bb4ff 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,10 @@
#include "utilities/dtrace.hpp"
// The following methods are used by the parallel scavenge collector
-VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size,
- unsigned int gc_count) :
- VM_GC_Operation(gc_count, GCCause::_allocation_failure),
- _size(size),
- _result(NULL)
-{
+VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t word_size,
+ uint gc_count) :
+ VM_CollectForAllocation(word_size, gc_count, GCCause::_allocation_failure) {
+ assert(word_size != 0, "An allocation should always be requested with this operation.");
}
void VM_ParallelGCFailedAllocation::doit() {
@@ -47,7 +45,7 @@ void VM_ParallelGCFailedAllocation::doit() {
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
GCCauseSetter gccs(heap, _gc_cause);
- _result = heap->failed_mem_allocate(_size);
+ _result = heap->failed_mem_allocate(_word_size);
if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
set_gc_locked();
@@ -55,8 +53,8 @@ void VM_ParallelGCFailedAllocation::doit() {
}
// Only used for System.gc() calls
-VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(unsigned int gc_count,
- unsigned int full_gc_count,
+VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(uint gc_count,
+ uint full_gc_count,
GCCause::Cause gc_cause) :
VM_GC_Operation(gc_count, gc_cause, full_gc_count, true /* full */)
{
diff --git a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp
index 53ac77dc4..7ffe25161 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,26 +29,19 @@
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/gcCause.hpp"
-class VM_ParallelGCFailedAllocation: public VM_GC_Operation {
- private:
- size_t _size;
- HeapWord* _result;
-
+class VM_ParallelGCFailedAllocation : public VM_CollectForAllocation {
public:
- VM_ParallelGCFailedAllocation(size_t size, unsigned int gc_count);
+ VM_ParallelGCFailedAllocation(size_t word_size, uint gc_count);
virtual VMOp_Type type() const {
return VMOp_ParallelGCFailedAllocation;
}
virtual void doit();
-
- HeapWord* result() const { return _result; }
};
class VM_ParallelGCSystemGC: public VM_GC_Operation {
public:
- VM_ParallelGCSystemGC(unsigned int gc_count, unsigned int full_gc_count,
- GCCause::Cause gc_cause);
+ VM_ParallelGCSystemGC(uint gc_count, uint full_gc_count, GCCause::Cause gc_cause);
virtual VMOp_Type type() const { return VMOp_ParallelGCSystemGC; }
virtual void doit();
};
diff --git a/src/share/vm/gc_implementation/shared/ageTable.hpp b/src/share/vm/gc_implementation/shared/ageTable.hpp
index 9e2ee9999..44d8e0ace 100644
--- a/src/share/vm/gc_implementation/shared/ageTable.hpp
+++ b/src/share/vm/gc_implementation/shared/ageTable.hpp
@@ -55,7 +55,10 @@ class ageTable VALUE_OBJ_CLASS_SPEC {
// add entry
void add(oop p, size_t oop_size) {
- uint age = p->age();
+ add(p->age(), oop_size);
+ }
+
+ void add(uint age, size_t oop_size) {
assert(age > 0 && age < table_size, "invalid age of object");
sizes[age] += oop_size;
}
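
The new overload serves callers that already know the object's age and want to skip the header read. A hypothetical call site:

// Hypothetical call site: record a promoted object whose age was read
// earlier; the overload asserts 0 < age < table_size like the oop version.
static void record_aged(ageTable* table, uint age, size_t word_size) {
  table->add(age, word_size);
}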
diff --git a/src/share/vm/gc_implementation/shared/gcTraceTime.cpp b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp
index 83890611e..fff7eea8c 100644
--- a/src/share/vm/gc_implementation/shared/gcTraceTime.cpp
+++ b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp
@@ -49,10 +49,8 @@ GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* t
}
if (_doit) {
- if (PrintGCTimeStamps) {
- gclog_or_tty->stamp();
- gclog_or_tty->print(": ");
- }
+ gclog_or_tty->date_stamp(PrintGCDateStamps);
+ gclog_or_tty->stamp(PrintGCTimeStamps);
if (PrintGCID) {
gclog_or_tty->print("#%u: ", gc_id.id());
}
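
Note on the replacement: date_stamp(bool) and stamp(bool) are the guarded variants that print only when the flag is set, which is what allows the per-collector date_stamp calls elsewhere in this patch to be deleted. Spelled out, the time-stamp half is equivalent to the branch this hunk removed:

// Unguarded equivalent of gclog_or_tty->stamp(PrintGCTimeStamps) above,
// matching the removed code.
if (PrintGCTimeStamps) {
  gclog_or_tty->stamp();
  gclog_or_tty->print(": ");
}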
diff --git a/src/share/vm/gc_implementation/shared/mutableSpace.cpp b/src/share/vm/gc_implementation/shared/mutableSpace.cpp
index 17a3ecbc1..27a02b68d 100644
--- a/src/share/vm/gc_implementation/shared/mutableSpace.cpp
+++ b/src/share/vm/gc_implementation/shared/mutableSpace.cpp
@@ -62,9 +62,7 @@ void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
}
void MutableSpace::pretouch_pages(MemRegion mr) {
- for (volatile char *p = (char*)mr.start(); p < (char*)mr.end(); p += os::vm_page_size()) {
- char t = *p; *p = t;
- }
+ os::pretouch_memory((char*)mr.start(), (char*)mr.end());
}
void MutableSpace::initialize(MemRegion mr,
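
For reference, the semantics being delegated: the deleted loop touched one byte per page so the OS commits backing storage up front. A sketch of that equivalent, assuming os::pretouch_memory covers the same half-open range (pretouch_naive is hypothetical):

// What the deleted loop did: a read-modify-write touch per page faults
// the page in; os::pretouch_memory() lets platforms do the same faster.
static void pretouch_naive(char* start, char* end) {
  for (volatile char* p = start; p < end; p += os::vm_page_size()) {
    char t = *p;
    *p = t;
  }
}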
diff --git a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
index ea1760e53..972099b9c 100644
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
@@ -193,10 +193,10 @@ void VM_GenCollectForAllocation::doit() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCCauseSetter gccs(gch, _gc_cause);
- _res = gch->satisfy_failed_allocation(_size, _tlab);
- assert(gch->is_in_reserved_or_null(_res), "result not in heap");
+ _result = gch->satisfy_failed_allocation(_word_size, _tlab);
+ assert(gch->is_in_reserved_or_null(_result), "result not in heap");
- if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
+ if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
set_gc_locked();
}
}
@@ -209,6 +209,18 @@ void VM_GenCollectFull::doit() {
gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
}
+VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
+ size_t size,
+ Metaspace::MetadataType mdtype,
+ uint gc_count_before,
+ uint full_gc_count_before,
+ GCCause::Cause gc_cause)
+ : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
+ _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
+ assert(_size != 0, "An allocation should always be requested with this operation.");
+ AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
+}
+
// Returns true iff concurrent GCs unloads metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_ALL_GCS
@@ -313,3 +325,11 @@ void VM_CollectForMetadataAllocation::doit() {
set_gc_locked();
}
}
+
+VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
+ : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {
+ // Only report if operation was really caused by an allocation.
+ if (_word_size != 0) {
+ AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
+ }
+}
diff --git a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
index 8f60dcbdb..b8027a972 100644
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_VMGCOPERATIONS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_SHARED_VMGCOPERATIONS_HPP
+#include "gc_implementation/shared/gcId.hpp"
#include "memory/heapInspection.hpp"
#include "runtime/handles.hpp"
#include "runtime/jniHandles.hpp"
@@ -38,11 +39,12 @@
// VM_Operation
// VM_GC_Operation
// VM_GC_HeapInspection
-// VM_GenCollectForAllocation
// VM_GenCollectFull
// VM_GenCollectFullConcurrent
-// VM_ParallelGCFailedAllocation
// VM_ParallelGCSystemGC
+// VM_CollectForAllocation
+// VM_GenCollectForAllocation
+// VM_ParallelGCFailedAllocation
// VM_GC_Operation
// - implements methods common to all classes in the hierarchy:
// prevents multiple gc requests and manages lock on heap;
@@ -51,6 +53,7 @@
// - prints class histogram on SIGBREAK if PrintClassHistogram
// is specified; and also the attach "inspectheap" operation
//
+// VM_CollectForAllocation
// VM_GenCollectForAllocation
// VM_ParallelGCFailedAllocation
// - this operation is invoked when allocation is failed;
@@ -66,13 +69,13 @@
class VM_GC_Operation: public VM_Operation {
protected:
- BasicLock _pending_list_basic_lock; // for refs pending list notification (PLL)
- unsigned int _gc_count_before; // gc count before acquiring PLL
- unsigned int _full_gc_count_before; // full gc count before acquiring PLL
- bool _full; // whether a "full" collection
- bool _prologue_succeeded; // whether doit_prologue succeeded
+ BasicLock _pending_list_basic_lock; // for refs pending list notification (PLL)
+ uint _gc_count_before; // gc count before acquiring PLL
+ uint _full_gc_count_before; // full gc count before acquiring PLL
+ bool _full; // whether a "full" collection
+ bool _prologue_succeeded; // whether doit_prologue succeeded
GCCause::Cause _gc_cause; // the putative cause for this gc op
- bool _gc_locked; // will be set if gc was locked
+ bool _gc_locked; // will be set if gc was locked
virtual bool skip_operation() const;
@@ -81,9 +84,9 @@ class VM_GC_Operation: public VM_Operation {
void release_and_notify_pending_list_lock();
public:
- VM_GC_Operation(unsigned int gc_count_before,
+ VM_GC_Operation(uint gc_count_before,
GCCause::Cause _cause,
- unsigned int full_gc_count_before = 0,
+ uint full_gc_count_before = 0,
bool full = false) {
_full = full;
_prologue_succeeded = false;
@@ -160,38 +163,45 @@ class VM_GC_HeapInspection: public VM_GC_Operation {
bool collect();
};
+class VM_CollectForAllocation : public VM_GC_Operation {
+ protected:
+ size_t _word_size; // Size of object to be allocated (in number of words)
+ HeapWord* _result; // Allocation result (NULL if allocation failed)
+
+ public:
+ VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause);
+
+ HeapWord* result() const {
+ return _result;
+ }
+};
-class VM_GenCollectForAllocation: public VM_GC_Operation {
+class VM_GenCollectForAllocation : public VM_CollectForAllocation {
private:
- HeapWord* _res;
- size_t _size; // size of object to be allocated.
bool _tlab; // alloc is of a tlab.
public:
- VM_GenCollectForAllocation(size_t size,
+ VM_GenCollectForAllocation(size_t word_size,
bool tlab,
- unsigned int gc_count_before)
- : VM_GC_Operation(gc_count_before, GCCause::_allocation_failure),
- _size(size),
+ uint gc_count_before)
+ : VM_CollectForAllocation(word_size, gc_count_before, GCCause::_allocation_failure),
_tlab(tlab) {
- _res = NULL;
+ assert(word_size != 0, "An allocation should always be requested with this operation.");
}
~VM_GenCollectForAllocation() {}
virtual VMOp_Type type() const { return VMOp_GenCollectForAllocation; }
virtual void doit();
- HeapWord* result() const { return _res; }
};
-
// VM operation to invoke a collection of the heap as a
// GenCollectedHeap heap.
class VM_GenCollectFull: public VM_GC_Operation {
private:
int _max_level;
public:
- VM_GenCollectFull(unsigned int gc_count_before,
- unsigned int full_gc_count_before,
+ VM_GenCollectFull(uint gc_count_before,
+ uint full_gc_count_before,
GCCause::Cause gc_cause,
- int max_level)
+ int max_level)
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
_max_level(max_level) { }
~VM_GenCollectFull() {}
@@ -208,12 +218,9 @@ class VM_CollectForMetadataAllocation: public VM_GC_Operation {
public:
VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
size_t size, Metaspace::MetadataType mdtype,
- unsigned int gc_count_before,
- unsigned int full_gc_count_before,
- GCCause::Cause gc_cause)
- : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
- _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
- }
+ uint gc_count_before,
+ uint full_gc_count_before,
+ GCCause::Cause gc_cause);
virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
virtual void doit();
MetaWord* result() const { return _result; }
diff --git a/src/share/vm/gc_interface/allocTracer.cpp b/src/share/vm/gc_interface/allocTracer.cpp
index d3440cd85..777925945 100644
--- a/src/share/vm/gc_interface/allocTracer.cpp
+++ b/src/share/vm/gc_interface/allocTracer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "gc_implementation/shared/gcId.hpp"
#include "gc_interface/allocTracer.hpp"
#include "trace/tracing.hpp"
#include "runtime/handles.hpp"
@@ -46,3 +47,12 @@ void AllocTracer::send_allocation_in_new_tlab_event(KlassHandle klass, size_t tl
event.commit();
}
}
+
+void AllocTracer::send_allocation_requiring_gc_event(size_t size, const GCId& gcId) {
+ EventAllocationRequiringGC event;
+ if (event.should_commit()) {
+ event.set_gcId(gcId.id());
+ event.set_size(size);
+ event.commit();
+ }
+}
diff --git a/src/share/vm/gc_interface/allocTracer.hpp b/src/share/vm/gc_interface/allocTracer.hpp
index 33e6f19f3..ddd9877d2 100644
--- a/src/share/vm/gc_interface/allocTracer.hpp
+++ b/src/share/vm/gc_interface/allocTracer.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@ class AllocTracer : AllStatic {
public:
static void send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size);
static void send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size);
+ static void send_allocation_requiring_gc_event(size_t size, const GCId& gcId);
};
#endif /* SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP */
diff --git a/src/share/vm/gc_interface/gcCause.cpp b/src/share/vm/gc_interface/gcCause.cpp
index 4778d8aa4..a364214bd 100644
--- a/src/share/vm/gc_interface/gcCause.cpp
+++ b/src/share/vm/gc_interface/gcCause.cpp
@@ -54,6 +54,9 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _wb_young_gc:
return "WhiteBox Initiated Young GC";
+ case _wb_conc_mark:
+ return "WhiteBox Initiated Concurrent Mark";
+
case _update_allocation_context_stats_inc:
case _update_allocation_context_stats_full:
return "Update Allocation Context Stats";
diff --git a/src/share/vm/gc_interface/gcCause.hpp b/src/share/vm/gc_interface/gcCause.hpp
index cb8dddd5c..26ad48f73 100644
--- a/src/share/vm/gc_interface/gcCause.hpp
+++ b/src/share/vm/gc_interface/gcCause.hpp
@@ -47,6 +47,7 @@ class GCCause : public AllStatic {
_heap_inspection,
_heap_dump,
_wb_young_gc,
+ _wb_conc_mark,
_update_allocation_context_stats_inc,
_update_allocation_context_stats_full,
diff --git a/src/share/vm/interpreter/abstractInterpreter.hpp b/src/share/vm/interpreter/abstractInterpreter.hpp
index d5d1b1501..b94120530 100644
--- a/src/share/vm/interpreter/abstractInterpreter.hpp
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,22 +30,15 @@
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/top.hpp"
-#ifdef TARGET_ARCH_x86
+#if defined INTERP_MASM_MD_HPP
+# include INTERP_MASM_MD_HPP
+#elif defined TARGET_ARCH_x86
# include "interp_masm_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "interp_masm_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "interp_masm_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "interp_masm_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "interp_masm_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "interp_masm_ppc_64.hpp"
#endif
diff --git a/src/share/vm/interpreter/bytecodeInterpreter.cpp b/src/share/vm/interpreter/bytecodeInterpreter.cpp
index c846664ba..7c6f2e59b 100644
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp
@@ -3432,7 +3432,7 @@ BytecodeInterpreter::print() {
tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
- tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
+ tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) p2i(this->_oop_temp));
tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
diff --git a/src/share/vm/interpreter/bytecodes.hpp b/src/share/vm/interpreter/bytecodes.hpp
index a43033c07..bff8cdc3b 100644
--- a/src/share/vm/interpreter/bytecodes.hpp
+++ b/src/share/vm/interpreter/bytecodes.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -420,6 +420,7 @@ class Bytecodes: AllStatic {
static bool is_astore (Code code) { return (code == _astore || code == _astore_0 || code == _astore_1
|| code == _astore_2 || code == _astore_3); }
+ static bool is_store_into_local(Code code) { return (_istore <= code && code <= _astore_3); }
static bool is_const (Code code) { return (_aconst_null <= code && code <= _ldc2_w); }
static bool is_zero_const (Code code) { return (code == _aconst_null || code == _iconst_0
|| code == _fconst_0 || code == _dconst_0); }
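
The new predicate relies on JVM opcode numbering: the store-into-local opcodes are contiguous, from istore (54) through astore_3 (78). Two spot checks under that assumption:

// Sanity checks for the contiguous opcode range used above.
static void check_store_range_examples() {
  assert(Bytecodes::is_store_into_local(Bytecodes::_fstore_2), "69 is in [54, 78]");
  assert(!Bytecodes::is_store_into_local(Bytecodes::_iload), "loads are excluded");
}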
diff --git a/src/share/vm/interpreter/interpreterRuntime.cpp b/src/share/vm/interpreter/interpreterRuntime.cpp
index 9a89c7a09..7464f5888 100644
--- a/src/share/vm/interpreter/interpreterRuntime.cpp
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp
@@ -407,7 +407,11 @@ IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea
// during deoptimization so the interpreter needs to skip it when
// the frame is popped.
thread->set_do_not_unlock_if_synchronized(true);
+#ifdef CC_INTERP
+ return (address) -1;
+#else
return Interpreter::remove_activation_entry();
+#endif
}
// Need to do this check first since when _do_not_unlock_if_synchronized
@@ -1302,6 +1306,8 @@ IRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* thread,
member_name_oop = java_lang_invoke_DirectMethodHandle::member(member_name_oop);
}
thread->set_vm_result(member_name_oop);
+ } else {
+ thread->set_vm_result(NULL);
}
IRT_END
#endif // INCLUDE_JVMTI
diff --git a/src/share/vm/interpreter/linkResolver.cpp b/src/share/vm/interpreter/linkResolver.cpp
index 592b2a395..5a18a50c7 100644
--- a/src/share/vm/interpreter/linkResolver.cpp
+++ b/src/share/vm/interpreter/linkResolver.cpp
@@ -1410,10 +1410,11 @@ methodHandle LinkResolver::resolve_virtual_call_or_null(
KlassHandle resolved_klass,
Symbol* name,
Symbol* signature,
- KlassHandle current_klass) {
+ KlassHandle current_klass,
+ bool check_access) {
EXCEPTION_MARK;
CallInfo info;
- resolve_virtual_call(info, Handle(), receiver_klass, resolved_klass, name, signature, current_klass, true, false, THREAD);
+ resolve_virtual_call(info, Handle(), receiver_klass, resolved_klass, name, signature, current_klass, check_access, false, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
return methodHandle();
@@ -1426,10 +1427,11 @@ methodHandle LinkResolver::resolve_interface_call_or_null(
KlassHandle resolved_klass,
Symbol* name,
Symbol* signature,
- KlassHandle current_klass) {
+ KlassHandle current_klass,
+ bool check_access) {
EXCEPTION_MARK;
CallInfo info;
- resolve_interface_call(info, Handle(), receiver_klass, resolved_klass, name, signature, current_klass, true, false, THREAD);
+ resolve_interface_call(info, Handle(), receiver_klass, resolved_klass, name, signature, current_klass, check_access, false, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
return methodHandle();
@@ -1457,10 +1459,11 @@ methodHandle LinkResolver::resolve_static_call_or_null(
KlassHandle resolved_klass,
Symbol* name,
Symbol* signature,
- KlassHandle current_klass) {
+ KlassHandle current_klass,
+ bool check_access) {
EXCEPTION_MARK;
CallInfo info;
- resolve_static_call(info, resolved_klass, name, signature, current_klass, true, false, THREAD);
+ resolve_static_call(info, resolved_klass, name, signature, current_klass, check_access, false, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
return methodHandle();
@@ -1468,11 +1471,15 @@ methodHandle LinkResolver::resolve_static_call_or_null(
return info.selected_method();
}
-methodHandle LinkResolver::resolve_special_call_or_null(KlassHandle resolved_klass, Symbol* name, Symbol* signature,
- KlassHandle current_klass) {
+methodHandle LinkResolver::resolve_special_call_or_null(
+ KlassHandle resolved_klass,
+ Symbol* name,
+ Symbol* signature,
+ KlassHandle current_klass,
+ bool check_access) {
EXCEPTION_MARK;
CallInfo info;
- resolve_special_call(info, resolved_klass, name, signature, current_klass, true, THREAD);
+ resolve_special_call(info, resolved_klass, name, signature, current_klass, check_access, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
return methodHandle();
@@ -1585,6 +1592,26 @@ void LinkResolver::resolve_handle_call(CallInfo& result, KlassHandle resolved_kl
result.set_handle(resolved_method, resolved_appendix, resolved_method_type, CHECK);
}
+static void wrap_invokedynamic_exception(TRAPS) {
+ if (HAS_PENDING_EXCEPTION) {
+ if (TraceMethodHandles) {
+ tty->print_cr("invokedynamic throws BSME for " INTPTR_FORMAT, p2i((void *)PENDING_EXCEPTION));
+ PENDING_EXCEPTION->print();
+ }
+ if (PENDING_EXCEPTION->is_a(SystemDictionary::BootstrapMethodError_klass())) {
+ // throw these guys, since they are already wrapped
+ return;
+ }
+ if (!PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
+ // intercept only LinkageErrors which might have failed to wrap
+ return;
+ }
+ // See the "Linking Exceptions" section for the invokedynamic instruction in the JVMS.
+ Handle nested_exception(THREAD, PENDING_EXCEPTION);
+ CLEAR_PENDING_EXCEPTION;
+ THROW_CAUSE(vmSymbols::java_lang_BootstrapMethodError(), nested_exception)
+ }
+}
void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle pool, int index, TRAPS) {
assert(EnableInvokeDynamic, "");
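
The refactor leans on the TRAPS idiom: each resolution step is called with THREAD so a pending exception stays local, then wrap_invokedynamic_exception(CHECK) rewraps any unwrapped LinkageError as a BootstrapMethodError before propagating. A sketch of the calling pattern (resolve_step is a hypothetical condensation of the hunks below):

// Hypothetical condensation of the pattern used below: THREAD keeps the
// exception pending, the helper may rewrap it, CHECK then propagates.
static void resolve_step(constantPoolHandle pool, int pool_index, TRAPS) {
  oop bsm_info = pool->resolve_bootstrap_specifier_at(pool_index, THREAD);
  wrap_invokedynamic_exception(CHECK);
  assert(bsm_info != NULL, "");  // reached only with no pending exception
}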
@@ -1600,7 +1627,8 @@ void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle po
ConstantPoolCacheEntry* cpce = pool->invokedynamic_cp_cache_entry_at(index);
if (cpce->is_f1_null()) {
int pool_index = cpce->constant_pool_index();
- oop bsm_info = pool->resolve_bootstrap_specifier_at(pool_index, CHECK);
+ oop bsm_info = pool->resolve_bootstrap_specifier_at(pool_index, THREAD);
+ wrap_invokedynamic_exception(CHECK);
assert(bsm_info != NULL, "");
// FIXME: Cache this once per BootstrapMethods entry, not once per CONSTANT_InvokeDynamic.
bootstrap_specifier = Handle(THREAD, bsm_info);
@@ -1609,7 +1637,8 @@ void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle po
methodHandle method( THREAD, cpce->f1_as_method());
Handle appendix( THREAD, cpce->appendix_if_resolved(pool));
Handle method_type(THREAD, cpce->method_type_if_resolved(pool));
- result.set_handle(method, appendix, method_type, CHECK);
+ result.set_handle(method, appendix, method_type, THREAD);
+ wrap_invokedynamic_exception(CHECK);
return;
}
@@ -1640,25 +1669,9 @@ void LinkResolver::resolve_dynamic_call(CallInfo& result,
&resolved_appendix,
&resolved_method_type,
THREAD);
- if (HAS_PENDING_EXCEPTION) {
- if (TraceMethodHandles) {
- tty->print_cr("invokedynamic throws BSME for " INTPTR_FORMAT, p2i((void *)PENDING_EXCEPTION));
- PENDING_EXCEPTION->print();
- }
- if (PENDING_EXCEPTION->is_a(SystemDictionary::BootstrapMethodError_klass())) {
- // throw these guys, since they are already wrapped
- return;
- }
- if (!PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
- // intercept only LinkageErrors which might have failed to wrap
- return;
- }
- // See the "Linking Exceptions" section for the invokedynamic instruction in the JVMS.
- Handle nested_exception(THREAD, PENDING_EXCEPTION);
- CLEAR_PENDING_EXCEPTION;
- THROW_CAUSE(vmSymbols::java_lang_BootstrapMethodError(), nested_exception)
- }
- result.set_handle(resolved_method, resolved_appendix, resolved_method_type, CHECK);
+ wrap_invokedynamic_exception(CHECK);
+ result.set_handle(resolved_method, resolved_appendix, resolved_method_type, THREAD);
+ wrap_invokedynamic_exception(CHECK);
}
//------------------------------------------------------------------------------------------------------------------------
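
A note on the hunk above: the BootstrapMethodError handling that used to live inline in resolve_dynamic_call is hoisted into wrap_invokedynamic_exception, so the same JVMS "Linking Exceptions" wrapping now runs after every step of invokedynamic resolution that can fail (bootstrap-specifier resolution, the MethodHandles upcall, and CallInfo::set_handle). A minimal sketch of the wrapping pattern, using ordinary C++ exceptions in place of HotSpot's pending-exception machinery (LinkageError and BootstrapMethodError below are stand-in types, not the VM's klasses):

#include <stdexcept>

struct LinkageError : std::runtime_error { using std::runtime_error::runtime_error; };
struct BootstrapMethodError : LinkageError { using LinkageError::LinkageError; };

// Run one resolution step; wrap any unwrapped LinkageError in a
// BootstrapMethodError, as the JVMS requires for invokedynamic linking.
template <typename Step>
void run_wrapping_linkage_errors(Step step) {
  try {
    step();
  } catch (const BootstrapMethodError&) {
    throw;                                 // already wrapped: let it propagate
  } catch (const LinkageError& e) {
    throw BootstrapMethodError(e.what());  // wrap per the linking rules
  }
  // Anything that is not a LinkageError propagates untouched.
}
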
diff --git a/src/share/vm/interpreter/linkResolver.hpp b/src/share/vm/interpreter/linkResolver.hpp
index f59838059..5fa536491 100644
--- a/src/share/vm/interpreter/linkResolver.hpp
+++ b/src/share/vm/interpreter/linkResolver.hpp
@@ -181,10 +181,10 @@ class LinkResolver: AllStatic {
// same as above for compile-time resolution; but returns null handle instead of throwing an exception on error
// also, does not initialize klass (i.e., no side effects)
- static methodHandle resolve_virtual_call_or_null (KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
- static methodHandle resolve_interface_call_or_null(KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
- static methodHandle resolve_static_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
- static methodHandle resolve_special_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
+ static methodHandle resolve_virtual_call_or_null (KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access = true);
+ static methodHandle resolve_interface_call_or_null(KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access = true);
+ static methodHandle resolve_static_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access = true);
+ static methodHandle resolve_special_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access = true);
static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method);
  // same as above for compile-time resolution; returns vtable_index if current_klass is linked
diff --git a/src/share/vm/interpreter/oopMapCache.cpp b/src/share/vm/interpreter/oopMapCache.cpp
index e3d41a3c4..d3f188863 100644
--- a/src/share/vm/interpreter/oopMapCache.cpp
+++ b/src/share/vm/interpreter/oopMapCache.cpp
@@ -244,10 +244,8 @@ void InterpreterOopMap::print() const {
method()->print_value();
tty->print(" @ %d = [%d] { ", bci(), n);
for (int i = 0; i < n; i++) {
-#ifdef ENABLE_ZAP_DEAD_LOCALS
if (is_dead(i)) tty->print("%d+ ", i);
else
-#endif
if (is_oop(i)) tty->print("%d ", i);
}
tty->print_cr("}");
@@ -402,13 +400,11 @@ void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int s
value |= (mask << oop_bit_number );
}
- #ifdef ENABLE_ZAP_DEAD_LOCALS
// set dead bit
if (!cell->is_live()) {
value |= (mask << dead_bit_number);
assert(!cell->is_reference(), "dead value marked as oop");
}
- #endif
}
// make sure last word is stored
diff --git a/src/share/vm/interpreter/oopMapCache.hpp b/src/share/vm/interpreter/oopMapCache.hpp
index a56dcb8d4..dbdd4cb1a 100644
--- a/src/share/vm/interpreter/oopMapCache.hpp
+++ b/src/share/vm/interpreter/oopMapCache.hpp
@@ -66,19 +66,15 @@ class InterpreterOopMap: ResourceObj {
public:
enum {
- N = 2, // the number of words reserved
+ N = 4, // the number of words reserved
// for inlined mask storage
small_mask_limit = N * BitsPerWord, // the maximum number of bits
// available for small masks,
// small_mask_limit can be set to 0
// for testing bit_mask allocation
-#ifdef ENABLE_ZAP_DEAD_LOCALS
bits_per_entry = 2,
dead_bit_number = 1,
-#else
- bits_per_entry = 1,
-#endif
oop_bit_number = 0
};
@@ -119,10 +115,6 @@ class InterpreterOopMap: ResourceObj {
void set_expression_stack_size(int sz) { _expression_stack_size = sz; }
-#ifdef ENABLE_ZAP_DEAD_LOCALS
- bool is_dead(int offset) const { return (entry_at(offset) & (1 << dead_bit_number)) != 0; }
-#endif
-
// Lookup
bool match(methodHandle method, int bci) const { return _method == method() && _bci == bci; }
bool is_empty() const;
@@ -144,6 +136,7 @@ class InterpreterOopMap: ResourceObj {
void print() const;
int number_of_entries() const { return mask_size() / bits_per_entry; }
+ bool is_dead(int offset) const { return (entry_at(offset) & (1 << dead_bit_number)) != 0; }
bool is_oop (int offset) const { return (entry_at(offset) & (1 << oop_bit_number )) != 0; }
int expression_stack_size() const { return _expression_stack_size; }
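
Context for the N and bits_per_entry changes above: with the dead-local support now compiled in unconditionally, every oop-map entry costs bits_per_entry = 2 bits (oop bit plus dead bit) instead of 1, so doubling N from 2 to 4 words keeps the inline small-mask capacity at the same entry count. A rough sketch of the entry arithmetic (BitsPerWord fixed at 64 here for illustration; the real storage lives behind entry_at):

#include <cstdint>

static const int BitsPerWord     = 64;  // assumption: 64-bit words
static const int bits_per_entry  = 2;
static const int oop_bit_number  = 0;
static const int dead_bit_number = 1;

// Extract the 2-bit entry for local/stack slot `offset` from a packed mask.
inline int entry_at(const uint64_t* mask, int offset) {
  int bit = offset * bits_per_entry;
  return (int)((mask[bit / BitsPerWord] >> (bit % BitsPerWord)) &
               ((1 << bits_per_entry) - 1));
}
inline bool is_oop (const uint64_t* m, int i) { return (entry_at(m, i) >> oop_bit_number)  & 1; }
inline bool is_dead(const uint64_t* m, int i) { return (entry_at(m, i) >> dead_bit_number) & 1; }

// Inline capacity is N * BitsPerWord / bits_per_entry entries:
// old layout: N = 2, 1 bit/entry  -> 128 entries;
// new layout: N = 4, 2 bits/entry -> 128 entries, so no capacity regression.
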
diff --git a/src/share/vm/interpreter/templateTable.hpp b/src/share/vm/interpreter/templateTable.hpp
index c6ea51537..2832f13e5 100644
--- a/src/share/vm/interpreter/templateTable.hpp
+++ b/src/share/vm/interpreter/templateTable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,22 +28,15 @@
#include "interpreter/bytecodes.hpp"
#include "memory/allocation.hpp"
#include "runtime/frame.hpp"
-#ifdef TARGET_ARCH_x86
+#if defined INTERP_MASM_MD_HPP
+# include INTERP_MASM_MD_HPP
+#elif defined TARGET_ARCH_x86
# include "interp_masm_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "interp_masm_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "interp_masm_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "interp_masm_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "interp_masm_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "interp_masm_ppc_64.hpp"
#endif
@@ -358,25 +351,17 @@ class TemplateTable: AllStatic {
static Template* template_for_wide(Bytecodes::Code code) { Bytecodes::wide_check(code); return &_template_table_wide[code]; }
// Platform specifics
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined TEMPLATETABLE_MD_HPP
+# include TEMPLATETABLE_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "templateTable_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "templateTable_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "templateTable_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "templateTable_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "templateTable_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "templateTable_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "templateTable_ppc_64.hpp"
#endif
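
The rewritten include chains above give out-of-tree ports an escape hatch: if the build defines INTERP_MASM_MD_HPP or TEMPLATETABLE_MD_HPP, that header is used verbatim and the built-in TARGET_ARCH_* cases are skipped, which is presumably how the dropped arm and ppc_32 cases can now be supplied externally. A hypothetical compiler invocation showing how such a macro expands into an #include operand (the ARM file names are illustrative):

// Hypothetical build flags for an out-of-tree ARM port:
//   g++ ... -DINTERP_MASM_MD_HPP='"interp_masm_arm.hpp"' \
//           -DTEMPLATETABLE_MD_HPP='"templateTable_arm.hpp"'
//
// The macro's quoted value is a valid #include operand:
#if defined INTERP_MASM_MD_HPP
# include INTERP_MASM_MD_HPP   // expands to "interp_masm_arm.hpp"
#endif
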
diff --git a/src/share/vm/memory/allocation.inline.hpp b/src/share/vm/memory/allocation.inline.hpp
index d527237a8..1bd851552 100644
--- a/src/share/vm/memory/allocation.inline.hpp
+++ b/src/share/vm/memory/allocation.inline.hpp
@@ -62,11 +62,18 @@ inline char* AllocateHeap(size_t size, MEMFLAGS flags,
}
return p;
}
+
+#ifdef __GNUC__
+__attribute__((always_inline))
+#endif
inline char* AllocateHeap(size_t size, MEMFLAGS flags,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
}
+#ifdef __GNUC__
+__attribute__((always_inline))
+#endif
inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
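
The __GNUC__ guards above keep the attribute from tripping non-GCC compilers; on GCC, always_inline also turns a failed inline into a compile error instead of a silent out-of-line call, which plausibly matters here so that CURRENT_PC records the real allocation site rather than the wrapper. A common portable spelling of the same idea (FORCE_INLINE is a hypothetical macro name, not one this tree defines):

#include <cstddef>

// Sketch: portable force-inline macro.
#if defined(__GNUC__)
# define FORCE_INLINE inline __attribute__((always_inline))
#elif defined(_MSC_VER)
# define FORCE_INLINE __forceinline
#else
# define FORCE_INLINE inline   // best effort elsewhere
#endif

FORCE_INLINE char* wrapper_alloc(size_t size) {
  // Inlined into the caller, so a caller-PC probe here would record the
  // true allocation site rather than this wrapper.
  return static_cast<char*>(::operator new(size));
}
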
diff --git a/src/share/vm/memory/collectorPolicy.cpp b/src/share/vm/memory/collectorPolicy.cpp
index db6af88ee..727408d52 100644
--- a/src/share/vm/memory/collectorPolicy.cpp
+++ b/src/share/vm/memory/collectorPolicy.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -656,7 +656,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
  // Loop until the allocation is satisfied,
// or unsatisfied after GC.
- for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
+ for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
HandleMark hm; // discard any handles allocated in each iteration
// First allocation attempt is lock-free.
@@ -670,7 +670,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
return result;
}
}
- unsigned int gc_count_before; // read inside the Heap_lock locked region
+ uint gc_count_before; // read inside the Heap_lock locked region
{
MutexLocker ml(Heap_lock);
if (PrintGC && Verbose) {
diff --git a/src/share/vm/memory/defNewGeneration.cpp b/src/share/vm/memory/defNewGeneration.cpp
index ac41f33b4..273d8a313 100644
--- a/src/share/vm/memory/defNewGeneration.cpp
+++ b/src/share/vm/memory/defNewGeneration.cpp
@@ -629,7 +629,7 @@ void DefNewGeneration::collect(bool full,
true, // Process younger gens, if any,
// as strong roots.
true, // activate StrongRootsScope
- SharedHeap::SO_ScavengeCodeCache,
+ GenCollectedHeap::SO_ScavengeCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&fsc_with_no_gc_barrier,
&fsc_with_gc_barrier,
diff --git a/src/share/vm/memory/genCollectedHeap.cpp b/src/share/vm/memory/genCollectedHeap.cpp
index da49aa442..f7c9591d9 100644
--- a/src/share/vm/memory/genCollectedHeap.cpp
+++ b/src/share/vm/memory/genCollectedHeap.cpp
@@ -26,6 +26,7 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
+#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
@@ -49,6 +50,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
+#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
@@ -63,7 +65,15 @@ NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
// The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
- // We probably want to parallelize both of these internally, but for now...
+ GCH_PS_Universe_oops_do,
+ GCH_PS_JNIHandles_oops_do,
+ GCH_PS_ObjectSynchronizer_oops_do,
+ GCH_PS_FlatProfiler_oops_do,
+ GCH_PS_Management_oops_do,
+ GCH_PS_SystemDictionary_oops_do,
+ GCH_PS_ClassLoaderDataGraph_oops_do,
+ GCH_PS_jvmti_oops_do,
+ GCH_PS_CodeCache_oops_do,
GCH_PS_younger_gens,
// Leave this one last.
GCH_PS_NumElements
@@ -72,13 +82,9 @@ enum GCH_strong_roots_tasks {
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
SharedHeap(policy),
_gen_policy(policy),
- _gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
+ _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
_full_collections_completed(0)
{
- if (_gen_process_roots_tasks == NULL ||
- !_gen_process_roots_tasks->valid()) {
- vm_exit_during_initialization("Failed necessary allocation.");
- }
assert(policy != NULL, "Sanity check");
}
@@ -384,7 +390,6 @@ void GenCollectedHeap::do_collection(bool full,
bool complete = full && (max_level == (n_gens()-1));
const char* gc_cause_prefix = complete ? "Full GC" : "GC";
- gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
// The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
// so we can assume here that the next GC id is what we want.
@@ -590,29 +595,137 @@ HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab)
void GenCollectedHeap::set_par_threads(uint t) {
SharedHeap::set_par_threads(t);
- _gen_process_roots_tasks->set_n_threads(t);
+ set_n_termination(t);
+}
+
+void GenCollectedHeap::set_n_termination(uint t) {
+ _process_strong_tasks->set_n_threads(t);
}
-void GenCollectedHeap::
-gen_process_roots(int level,
- bool younger_gens_as_roots,
- bool activate_scope,
- SharedHeap::ScanningOption so,
- OopsInGenClosure* not_older_gens,
- OopsInGenClosure* weak_roots,
- OopsInGenClosure* older_gens,
- CLDClosure* cld_closure,
- CLDClosure* weak_cld_closure,
- CodeBlobClosure* code_closure) {
+#ifdef ASSERT
+class AssertNonScavengableClosure: public OopClosure {
+public:
+ virtual void do_oop(oop* p) {
+ assert(!Universe::heap()->is_in_partial_collection(*p),
+ "Referent should not be scavengable."); }
+ virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+static AssertNonScavengableClosure assert_is_non_scavengable_closure;
+#endif
+
+void GenCollectedHeap::process_roots(bool activate_scope,
+ ScanningOption so,
+ OopClosure* strong_roots,
+ OopClosure* weak_roots,
+ CLDClosure* strong_cld_closure,
+ CLDClosure* weak_cld_closure,
+ CodeBlobClosure* code_roots) {
+ StrongRootsScope srs(this, activate_scope);
// General roots.
- SharedHeap::process_roots(activate_scope, so,
- not_older_gens, weak_roots,
- cld_closure, weak_cld_closure,
- code_closure);
+ assert(_strong_roots_parity != 0, "must have called prologue code");
+ assert(code_roots != NULL, "code root closure should always be set");
+  // _n_termination for _process_strong_tasks should be set upstream
+ // in a method not running in a GC worker. Otherwise the GC worker
+ // could be trying to change the termination condition while the task
+ // is executing in another GC worker.
+
+ if (!_process_strong_tasks->is_task_claimed(GCH_PS_ClassLoaderDataGraph_oops_do)) {
+ ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
+ }
+
+ // Some CLDs contained in the thread frames should be considered strong.
+ // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
+ CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
+ // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
+ CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
+
+ Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
+
+ if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
+ Universe::oops_do(strong_roots);
+ }
+ // Global (strong) JNI handles
+ if (!_process_strong_tasks->is_task_claimed(GCH_PS_JNIHandles_oops_do)) {
+ JNIHandles::oops_do(strong_roots);
+ }
+
+ if (!_process_strong_tasks->is_task_claimed(GCH_PS_ObjectSynchronizer_oops_do)) {
+ ObjectSynchronizer::oops_do(strong_roots);
+ }
+ if (!_process_strong_tasks->is_task_claimed(GCH_PS_FlatProfiler_oops_do)) {
+ FlatProfiler::oops_do(strong_roots);
+ }
+ if (!_process_strong_tasks->is_task_claimed(GCH_PS_Management_oops_do)) {
+ Management::oops_do(strong_roots);
+ }
+ if (!_process_strong_tasks->is_task_claimed(GCH_PS_jvmti_oops_do)) {
+ JvmtiExport::oops_do(strong_roots);
+ }
+
+ if (!_process_strong_tasks->is_task_claimed(GCH_PS_SystemDictionary_oops_do)) {
+ SystemDictionary::roots_oops_do(strong_roots, weak_roots);
+ }
+
+  // All threads execute the following. Specific chunks of buckets
+  // from the StringTable form the individual tasks.
+ if (weak_roots != NULL) {
+ if (CollectedHeap::use_parallel_gc_threads()) {
+ StringTable::possibly_parallel_oops_do(weak_roots);
+ } else {
+ StringTable::oops_do(weak_roots);
+ }
+ }
+
+ if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
+ if (so & SO_ScavengeCodeCache) {
+ assert(code_roots != NULL, "must supply closure for code cache");
+
+ // We only visit parts of the CodeCache when scavenging.
+ CodeCache::scavenge_root_nmethods_do(code_roots);
+ }
+ if (so & SO_AllCodeCache) {
+ assert(code_roots != NULL, "must supply closure for code cache");
+
+ // CMSCollector uses this to do intermediate-strength collections.
+ // We scan the entire code cache, since CodeCache::do_unloading is not called.
+ CodeCache::blobs_do(code_roots);
+ }
+ // Verify that the code cache contents are not subject to
+ // movement by a scavenging collection.
+ DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
+ DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
+ }
+
+}
+
+void GenCollectedHeap::gen_process_roots(int level,
+ bool younger_gens_as_roots,
+ bool activate_scope,
+ ScanningOption so,
+ bool only_strong_roots,
+ OopsInGenClosure* not_older_gens,
+ OopsInGenClosure* older_gens,
+ CLDClosure* cld_closure) {
+ const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
+
+ bool is_moving_collection = false;
+ if (level == 0 || is_adjust_phase) {
+ // young collections are always moving
+ is_moving_collection = true;
+ }
+
+ MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
+ OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens;
+ CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
+
+ process_roots(activate_scope, so,
+ not_older_gens, weak_roots,
+ cld_closure, weak_cld_closure,
+ &mark_code_closure);
if (younger_gens_as_roots) {
- if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
+ if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
for (int i = 0; i < level; i++) {
not_older_gens->set_generation(_gens[i]);
_gens[i]->oop_iterate(not_older_gens);
@@ -628,43 +741,18 @@ gen_process_roots(int level,
older_gens->reset_generation();
}
- _gen_process_roots_tasks->all_tasks_completed();
+ _process_strong_tasks->all_tasks_completed();
}
-void GenCollectedHeap::
-gen_process_roots(int level,
- bool younger_gens_as_roots,
- bool activate_scope,
- SharedHeap::ScanningOption so,
- bool only_strong_roots,
- OopsInGenClosure* not_older_gens,
- OopsInGenClosure* older_gens,
- CLDClosure* cld_closure) {
-
- const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
-
- bool is_moving_collection = false;
- if (level == 0 || is_adjust_phase) {
- // young collections are always moving
- is_moving_collection = true;
- }
-
- MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
- CodeBlobClosure* code_closure = &mark_code_closure;
- gen_process_roots(level,
- younger_gens_as_roots,
- activate_scope, so,
- not_older_gens, only_strong_roots ? NULL : not_older_gens,
- older_gens,
- cld_closure, only_strong_roots ? NULL : cld_closure,
- code_closure);
-
-}
+class AlwaysTrueClosure: public BoolObjectClosure {
+public:
+ bool do_object_b(oop p) { return true; }
+};
+static AlwaysTrueClosure always_true;
void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
- SharedHeap::process_weak_roots(root_closure);
- // "Local" "weak" refs
+ JNIHandles::weak_oops_do(&always_true, root_closure);
for (int i = 0; i < _n_gens; i++) {
_gens[i]->ref_processor()->weak_oops_do(root_closure);
}
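
The GCH_PS_* task ids above feed SubTasksDone::is_task_claimed: when process_roots runs on several GC workers at once, exactly one worker wins each singleton root group (Universe, JNI handles, Management, ...) and the losers fall through, while inherently parallel work such as thread stacks and StringTable buckets is still done by every worker on its own chunk. A minimal sketch of that first-caller-wins claim idiom, with std::atomic standing in for SubTasksDone:

#include <atomic>

// Sketch: first-caller-wins task claiming, in the spirit of SubTasksDone.
class TaskClaims {
  std::atomic<bool> _claimed[16];
 public:
  TaskClaims() { for (auto& c : _claimed) c.store(false); }
  // Returns true iff some worker already claimed task `id`.
  bool is_task_claimed(int id) {
    return _claimed[id].exchange(true, std::memory_order_acq_rel);
  }
};

void worker_scan_roots(TaskClaims& tasks) {
  const int universe_oops_do = 0;          // illustrative task id
  if (!tasks.is_task_claimed(universe_oops_do)) {
    // Exactly one worker walks the Universe roots...
  }
  // ...but every worker still scans its share of thread stacks, etc.
}
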
diff --git a/src/share/vm/memory/genCollectedHeap.hpp b/src/share/vm/memory/genCollectedHeap.hpp
index 77ae3ad88..58786eded 100644
--- a/src/share/vm/memory/genCollectedHeap.hpp
+++ b/src/share/vm/memory/genCollectedHeap.hpp
@@ -79,8 +79,7 @@ public:
// Data structure for claiming the (potentially) parallel tasks in
// (gen-specific) roots processing.
- SubTasksDone* _gen_process_roots_tasks;
- SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; }
+ SubTasksDone* _process_strong_tasks;
// In block contents verification, the number of header words to skip
NOT_PRODUCT(static size_t _skip_header_HeapWords;)
@@ -390,6 +389,7 @@ public:
static GenCollectedHeap* heap();
void set_par_threads(uint t);
+ void set_n_termination(uint t);
// Invoke the "do_oop" method of one of the closures "not_older_gens"
// or "older_gens" on root locations for the generation at
@@ -403,11 +403,25 @@ public:
// The "so" argument determines which of the roots
// the closure is applied to:
// "SO_None" does none;
+ enum ScanningOption {
+ SO_None = 0x0,
+ SO_AllCodeCache = 0x8,
+ SO_ScavengeCodeCache = 0x10
+ };
+
private:
+ void process_roots(bool activate_scope,
+ ScanningOption so,
+ OopClosure* strong_roots,
+ OopClosure* weak_roots,
+ CLDClosure* strong_cld_closure,
+ CLDClosure* weak_cld_closure,
+ CodeBlobClosure* code_roots);
+
void gen_process_roots(int level,
bool younger_gens_as_roots,
bool activate_scope,
- SharedHeap::ScanningOption so,
+ ScanningOption so,
OopsInGenClosure* not_older_gens,
OopsInGenClosure* weak_roots,
OopsInGenClosure* older_gens,
@@ -422,7 +436,7 @@ public:
void gen_process_roots(int level,
bool younger_gens_as_roots,
bool activate_scope,
- SharedHeap::ScanningOption so,
+ ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* not_older_gens,
OopsInGenClosure* older_gens,
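
ScanningOption keeps the bit values the SharedHeap enum used (0x0, 0x8, 0x10), and call sites test flags with so & SO_ScavengeCodeCache. The operator| overload for SharedHeap::ScanningOption is deleted later in this change and not re-added for the relocated enum, so combining flags would need a cast or a matching overload; a sketch mirroring the removed one, should call sites ever need to pass more than one flag:

// Sketch: bitwise-or for the relocated flag enum, mirroring the
// SharedHeap::ScanningOption operator| this change removes.
inline GenCollectedHeap::ScanningOption operator|(GenCollectedHeap::ScanningOption so0,
                                                  GenCollectedHeap::ScanningOption so1) {
  return static_cast<GenCollectedHeap::ScanningOption>(
      static_cast<int>(so0) | static_cast<int>(so1));
}
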
diff --git a/src/share/vm/memory/genMarkSweep.cpp b/src/share/vm/memory/genMarkSweep.cpp
index 48911b1ae..c5dc4947c 100644
--- a/src/share/vm/memory/genMarkSweep.cpp
+++ b/src/share/vm/memory/genMarkSweep.cpp
@@ -210,8 +210,8 @@ void GenMarkSweep::mark_sweep_phase1(int level,
gch->gen_process_roots(level,
false, // Younger gens are not roots.
true, // activate StrongRootsScope
- SharedHeap::SO_None,
- GenCollectedHeap::StrongRootsOnly,
+ GenCollectedHeap::SO_None,
+ ClassUnloading,
&follow_root_closure,
&follow_root_closure,
&follow_cld_closure);
@@ -295,7 +295,7 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
gch->gen_process_roots(level,
false, // Younger gens are not roots.
true, // activate StrongRootsScope
- SharedHeap::SO_AllCodeCache,
+ GenCollectedHeap::SO_AllCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&adjust_pointer_closure,
&adjust_pointer_closure,
diff --git a/src/share/vm/memory/generation.hpp b/src/share/vm/memory/generation.hpp
index bacd50265..63dccb70f 100644
--- a/src/share/vm/memory/generation.hpp
+++ b/src/share/vm/memory/generation.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -145,7 +145,7 @@ class Generation: public CHeapObj<mtGC> {
// GenGrain.
// Note: on ARM we add 1 bit for card_table_base to be properly aligned
// (we expect its low byte to be zero - see implementation of post_barrier)
- LogOfGenGrain = 16 ARM_ONLY(+1),
+ LogOfGenGrain = 16 ARM32_ONLY(+1),
GenGrain = 1 << LogOfGenGrain
};
diff --git a/src/share/vm/memory/guardedMemory.hpp b/src/share/vm/memory/guardedMemory.hpp
index dada10d84..66b163c2a 100644
--- a/src/share/vm/memory/guardedMemory.hpp
+++ b/src/share/vm/memory/guardedMemory.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -235,7 +235,7 @@ protected:
* @return the size of the user data.
*/
size_t get_user_size() const {
- assert(_base_addr, "Not wrapping any memory");
+ assert(_base_addr != NULL, "Not wrapping any memory");
return get_head_guard()->get_user_size();
}
@@ -245,7 +245,7 @@ protected:
* @return the user data pointer.
*/
u_char* get_user_ptr() const {
- assert(_base_addr, "Not wrapping any memory");
+ assert(_base_addr != NULL, "Not wrapping any memory");
return _base_addr + sizeof(GuardHeader);
}
@@ -281,7 +281,7 @@ protected:
memset(get_user_ptr(), ch, get_user_size());
}
-public:
+ public:
/**
* Return the total size required for wrapping the given user size.
*
diff --git a/src/share/vm/memory/heap.cpp b/src/share/vm/memory/heap.cpp
index f00709684..a0fb60346 100644
--- a/src/share/vm/memory/heap.cpp
+++ b/src/share/vm/memory/heap.cpp
@@ -97,9 +97,11 @@ bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
_log2_segment_size = exact_log2(segment_size);
// Reserve and initialize space for _memory.
- const size_t page_size = os::can_execute_large_page_memory() ?
- os::page_size_for_region(committed_size, reserved_size, 8) :
- os::vm_page_size();
+ size_t page_size = os::vm_page_size();
+ if (os::can_execute_large_page_memory()) {
+ page_size = os::page_size_for_region_unaligned(reserved_size, 8);
+ }
+
const size_t granularity = os::vm_allocation_granularity();
const size_t r_align = MAX2(page_size, granularity);
const size_t r_size = align_size_up(reserved_size, r_align);
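
Here page_size_for_region_unaligned picks the largest supported page size that still divides the reservation into at least 8 pages (the final argument), and the reservation is then rounded up to the stricter of page size and allocation granularity. For reference, the usual power-of-two align-up arithmetic behind a helper like align_size_up:

#include <cstddef>

// Round `size` up to `alignment`, assuming alignment is a power of two.
inline size_t align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}
// e.g. align_up(1000, 4096) == 4096; align_up(8192, 4096) == 8192.
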
diff --git a/src/share/vm/memory/referenceProcessor.cpp b/src/share/vm/memory/referenceProcessor.cpp
index 12a99921d..3c776f4b8 100644
--- a/src/share/vm/memory/referenceProcessor.cpp
+++ b/src/share/vm/memory/referenceProcessor.cpp
@@ -252,7 +252,7 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
// Cleaner references to be temporary, and don't want to deal with
// possible incompatibilities arising from making it more visible.
phantom_count +=
- process_discovered_reflist(_discoveredCleanerRefs, NULL, false,
+ process_discovered_reflist(_discoveredCleanerRefs, NULL, true,
is_alive, keep_alive, complete_gc, task_executor);
}
diff --git a/src/share/vm/memory/sharedHeap.cpp b/src/share/vm/memory/sharedHeap.cpp
index bf7a4bb7c..ef22f01c5 100644
--- a/src/share/vm/memory/sharedHeap.cpp
+++ b/src/share/vm/memory/sharedHeap.cpp
@@ -32,7 +32,6 @@
#include "runtime/atomic.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/java.hpp"
-#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/workgroup.hpp"
@@ -40,33 +39,13 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
SharedHeap* SharedHeap::_sh;
-// The set of potentially parallel tasks in root scanning.
-enum SH_process_roots_tasks {
- SH_PS_Universe_oops_do,
- SH_PS_JNIHandles_oops_do,
- SH_PS_ObjectSynchronizer_oops_do,
- SH_PS_FlatProfiler_oops_do,
- SH_PS_Management_oops_do,
- SH_PS_SystemDictionary_oops_do,
- SH_PS_ClassLoaderDataGraph_oops_do,
- SH_PS_jvmti_oops_do,
- SH_PS_CodeCache_oops_do,
- // Leave this one last.
- SH_PS_NumElements
-};
-
SharedHeap::SharedHeap(CollectorPolicy* policy_) :
CollectedHeap(),
_collector_policy(policy_),
_rem_set(NULL),
- _strong_roots_scope(NULL),
_strong_roots_parity(0),
- _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
_workers(NULL)
{
- if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
- vm_exit_during_initialization("Failed necessary allocation.");
- }
  _sh = this;  // _sh is static, should be set only once.
if ((UseParNewGC ||
(UseConcMarkSweepGC && (CMSParallelInitialMarkEnabled ||
@@ -84,14 +63,6 @@ SharedHeap::SharedHeap(CollectorPolicy* policy_) :
}
}
-int SharedHeap::n_termination() {
- return _process_strong_tasks->n_threads();
-}
-
-void SharedHeap::set_n_termination(int t) {
- _process_strong_tasks->set_n_threads(t);
-}
-
bool SharedHeap::heap_lock_held_for_gc() {
Thread* t = Thread::current();
return Heap_lock->owned_by_self()
@@ -102,31 +73,6 @@ bool SharedHeap::heap_lock_held_for_gc() {
void SharedHeap::set_par_threads(uint t) {
assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
_n_par_threads = t;
- _process_strong_tasks->set_n_threads(t);
-}
-
-#ifdef ASSERT
-class AssertNonScavengableClosure: public OopClosure {
-public:
- virtual void do_oop(oop* p) {
- assert(!Universe::heap()->is_in_partial_collection(*p),
- "Referent should not be scavengable."); }
- virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-};
-static AssertNonScavengableClosure assert_is_non_scavengable_closure;
-#endif
-
-SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
- return _strong_roots_scope;
-}
-void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
- assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
- assert(scope != NULL, "Illegal argument");
- _strong_roots_scope = scope;
-}
-void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
- assert(_strong_roots_scope == scope, "Wrong scope unregistered");
- _strong_roots_scope = NULL;
}
void SharedHeap::change_strong_roots_parity() {
@@ -140,174 +86,15 @@ void SharedHeap::change_strong_roots_parity() {
}
SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
- : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
+ : MarkScope(activate), _sh(heap)
{
if (_active) {
- _sh->register_strong_roots_scope(this);
_sh->change_strong_roots_parity();
// Zero the claimed high water mark in the StringTable
StringTable::clear_parallel_claimed_index();
}
}
-SharedHeap::StrongRootsScope::~StrongRootsScope() {
- if (_active) {
- _sh->unregister_strong_roots_scope(this);
- }
-}
-
-Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
-
-void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
- // The Thread work barrier is only needed by G1 Class Unloading.
- // No need to use the barrier if this is single-threaded code.
- if (UseG1GC && ClassUnloadingWithConcurrentMark && n_workers > 0) {
- uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
- if (new_value == n_workers) {
- // This thread is last. Notify the others.
- MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
- _lock->notify_all();
- }
- }
-}
-
-void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
- assert(UseG1GC, "Currently only used by G1");
- assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
-
- // No need to use the barrier if this is single-threaded code.
- if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
- MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
- while ((uint)_n_workers_done_with_threads != n_workers) {
- _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
- }
- }
-}
-
-void SharedHeap::process_roots(bool activate_scope,
- ScanningOption so,
- OopClosure* strong_roots,
- OopClosure* weak_roots,
- CLDClosure* strong_cld_closure,
- CLDClosure* weak_cld_closure,
- CodeBlobClosure* code_roots) {
- StrongRootsScope srs(this, activate_scope);
-
- // General roots.
- assert(_strong_roots_parity != 0, "must have called prologue code");
- assert(code_roots != NULL, "code root closure should always be set");
- // _n_termination for _process_strong_tasks should be set up stream
- // in a method not running in a GC worker. Otherwise the GC worker
- // could be trying to change the termination condition while the task
- // is executing in another GC worker.
-
- // Iterating over the CLDG and the Threads are done early to allow G1 to
- // first process the strong CLDs and nmethods and then, after a barrier,
- // let the thread process the weak CLDs and nmethods.
-
- if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
- ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
- }
-
- // Some CLDs contained in the thread frames should be considered strong.
- // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
- CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
- // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
- CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
-
- Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
-
- // This is the point where this worker thread will not find more strong CLDs/nmethods.
- // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
- active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());
-
- if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
- Universe::oops_do(strong_roots);
- }
- // Global (strong) JNI handles
- if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
- JNIHandles::oops_do(strong_roots);
-
- if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
- ObjectSynchronizer::oops_do(strong_roots);
- if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
- FlatProfiler::oops_do(strong_roots);
- if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
- Management::oops_do(strong_roots);
- if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
- JvmtiExport::oops_do(strong_roots);
-
- if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
- SystemDictionary::roots_oops_do(strong_roots, weak_roots);
- }
-
- // All threads execute the following. A specific chunk of buckets
- // from the StringTable are the individual tasks.
- if (weak_roots != NULL) {
- if (CollectedHeap::use_parallel_gc_threads()) {
- StringTable::possibly_parallel_oops_do(weak_roots);
- } else {
- StringTable::oops_do(weak_roots);
- }
- }
-
- if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
- if (so & SO_ScavengeCodeCache) {
- assert(code_roots != NULL, "must supply closure for code cache");
-
- // We only visit parts of the CodeCache when scavenging.
- CodeCache::scavenge_root_nmethods_do(code_roots);
- }
- if (so & SO_AllCodeCache) {
- assert(code_roots != NULL, "must supply closure for code cache");
-
- // CMSCollector uses this to do intermediate-strength collections.
- // We scan the entire code cache, since CodeCache::do_unloading is not called.
- CodeCache::blobs_do(code_roots);
- }
- // Verify that the code cache contents are not subject to
- // movement by a scavenging collection.
- DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
- DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
- }
-
- _process_strong_tasks->all_tasks_completed();
-}
-
-void SharedHeap::process_all_roots(bool activate_scope,
- ScanningOption so,
- OopClosure* roots,
- CLDClosure* cld_closure,
- CodeBlobClosure* code_closure) {
- process_roots(activate_scope, so,
- roots, roots,
- cld_closure, cld_closure,
- code_closure);
-}
-
-void SharedHeap::process_strong_roots(bool activate_scope,
- ScanningOption so,
- OopClosure* roots,
- CLDClosure* cld_closure,
- CodeBlobClosure* code_closure) {
- process_roots(activate_scope, so,
- roots, NULL,
- cld_closure, NULL,
- code_closure);
-}
-
-
-class AlwaysTrueClosure: public BoolObjectClosure {
-public:
- bool do_object_b(oop p) { return true; }
-};
-static AlwaysTrueClosure always_true;
-
-void SharedHeap::process_weak_roots(OopClosure* root_closure) {
- // Global (weak) JNI handles
- JNIHandles::weak_oops_do(&always_true, root_closure);
-}
-
void SharedHeap::set_barrier_set(BarrierSet* bs) {
_barrier_set = bs;
// Cached barrier set for fast access in oops
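
With process_weak_roots gone, GenCollectedHeap::gen_process_weak_roots calls JNIHandles::weak_oops_do directly, passing AlwaysTrueClosure as the is_alive predicate: every weak global handle is treated as live and root_closure is simply applied to each referent. A reduced sketch of that predicate-plus-visitor shape (the oop typedef and the handle array are stand-ins for the real JNI handle blocks):

#include <cstddef>

typedef void* oop;                       // stand-in for HotSpot's oop

struct BoolObjectClosure { virtual bool do_object_b(oop p) = 0; };
struct OopClosure        { virtual void do_oop(oop* p)     = 0; };

struct AlwaysTrue : BoolObjectClosure {
  bool do_object_b(oop) { return true; } // treat every referent as live
};

// Visit weak handles whose referent the predicate keeps; clear the rest.
// Passing AlwaysTrue makes this a plain visit of all non-null handles.
void weak_oops_do(oop* handles, int n, BoolObjectClosure* is_alive, OopClosure* f) {
  for (int i = 0; i < n; i++) {
    if (handles[i] == NULL) continue;
    if (is_alive->do_object_b(handles[i])) {
      f->do_oop(&handles[i]);
    } else {
      handles[i] = NULL;                 // dead referent: clear the handle
    }
  }
}
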
diff --git a/src/share/vm/memory/sharedHeap.hpp b/src/share/vm/memory/sharedHeap.hpp
index 98278174f..610bf33ab 100644
--- a/src/share/vm/memory/sharedHeap.hpp
+++ b/src/share/vm/memory/sharedHeap.hpp
@@ -61,18 +61,18 @@ class KlassClosure;
// counts the number of tasks that have been done and then reset
// the SubTasksDone so that it can be used again. When the number of
// tasks is set to the number of GC workers, then _n_threads must
-// be set to the number of active GC workers. G1CollectedHeap,
-// HRInto_G1RemSet, GenCollectedHeap and SharedHeap have SubTasksDone.
-// This seems too many.
+// be set to the number of active GC workers. G1RootProcessor and
+// GenCollectedHeap have SubTasksDone.
// 3) SequentialSubTasksDone has an _n_threads that is used in
// a way similar to SubTasksDone and has the same dependency on the
// number of active GC workers. CompactibleFreeListSpace and Space
// have SequentialSubTasksDone's.
-// Example of using SubTasksDone and SequentialSubTasksDone
-// G1CollectedHeap::g1_process_roots()
-// to SharedHeap::process_roots() and uses
-// SubTasksDone* _process_strong_tasks to claim tasks.
-// process_roots() calls
+//
+// Examples of using SubTasksDone and SequentialSubTasksDone:
+// G1RootProcessor and GenCollectedHeap::process_roots() use
+// SubTasksDone* _process_strong_tasks to claim tasks for workers
+//
+// GenCollectedHeap::gen_process_roots() calls
// rem_set()->younger_refs_iterate()
// to scan the card table and which eventually calls down into
// CardTableModRefBS::par_non_clean_card_iterate_work(). This method
@@ -104,10 +104,6 @@ class SharedHeap : public CollectedHeap {
friend class VM_GC_Operation;
friend class VM_CGC_Operation;
-private:
- // For claiming strong_roots tasks.
- SubTasksDone* _process_strong_tasks;
-
protected:
// There should be only a single instance of "SharedHeap" in a program.
// This is enforced with the protected constructor below, which will also
@@ -144,7 +140,6 @@ public:
static SharedHeap* heap() { return _sh; }
void set_barrier_set(BarrierSet* bs);
- SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
// Does operations required after initialization has been done.
virtual void post_initialize();
@@ -201,69 +196,19 @@ public:
// strong_roots_prologue calls change_strong_roots_parity, if
// parallel tasks are enabled.
class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
- // Used to implement the Thread work barrier.
- static Monitor* _lock;
-
SharedHeap* _sh;
- volatile jint _n_workers_done_with_threads;
public:
StrongRootsScope(SharedHeap* heap, bool activate = true);
- ~StrongRootsScope();
-
- // Mark that this thread is done with the Threads work.
- void mark_worker_done_with_threads(uint n_workers);
- // Wait until all n_workers are done with the Threads work.
- void wait_until_all_workers_done_with_threads(uint n_workers);
};
friend class StrongRootsScope;
- // The current active StrongRootScope
- StrongRootsScope* _strong_roots_scope;
-
- StrongRootsScope* active_strong_roots_scope() const;
-
private:
- void register_strong_roots_scope(StrongRootsScope* scope);
- void unregister_strong_roots_scope(StrongRootsScope* scope);
void change_strong_roots_parity();
public:
- enum ScanningOption {
- SO_None = 0x0,
- SO_AllCodeCache = 0x8,
- SO_ScavengeCodeCache = 0x10
- };
-
FlexibleWorkGang* workers() const { return _workers; }
- // Invoke the "do_oop" method the closure "roots" on all root locations.
- // The "so" argument determines which roots the closure is applied to:
- // "SO_None" does none;
- // "SO_AllCodeCache" applies the closure to all elements of the CodeCache.
- // "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache.
- void process_roots(bool activate_scope,
- ScanningOption so,
- OopClosure* strong_roots,
- OopClosure* weak_roots,
- CLDClosure* strong_cld_closure,
- CLDClosure* weak_cld_closure,
- CodeBlobClosure* code_roots);
- void process_all_roots(bool activate_scope,
- ScanningOption so,
- OopClosure* roots,
- CLDClosure* cld_closure,
- CodeBlobClosure* code_roots);
- void process_strong_roots(bool activate_scope,
- ScanningOption so,
- OopClosure* roots,
- CLDClosure* cld_closure,
- CodeBlobClosure* code_roots);
-
-
- // Apply "root_closure" to the JNI weak roots..
- void process_weak_roots(OopClosure* root_closure);
-
// The functions below are helper functions that a subclass of
// "SharedHeap" can use in the implementation of its virtual
// functions.
@@ -278,9 +223,6 @@ public:
// (such as process roots) subsequently.
virtual void set_par_threads(uint t);
- int n_termination();
- void set_n_termination(int t);
-
//
// New methods from CollectedHeap
//
@@ -292,8 +234,4 @@ public:
size_t capacity);
};
-inline SharedHeap::ScanningOption operator|(SharedHeap::ScanningOption so0, SharedHeap::ScanningOption so1) {
- return static_cast<SharedHeap::ScanningOption>(static_cast<int>(so0) | static_cast<int>(so1));
-}
-
#endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP
diff --git a/src/share/vm/memory/tenuredGeneration.cpp b/src/share/vm/memory/tenuredGeneration.cpp
index a18d6813e..d7432ba29 100644
--- a/src/share/vm/memory/tenuredGeneration.cpp
+++ b/src/share/vm/memory/tenuredGeneration.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
-#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/generation.inline.hpp"
@@ -34,6 +33,9 @@
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#endif
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
size_t initial_byte_size, int level,
diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp
index 18021f791..a0febc5c0 100644
--- a/src/share/vm/memory/universe.cpp
+++ b/src/share/vm/memory/universe.cpp
@@ -114,6 +114,7 @@ oop Universe::_the_min_jint_string = NULL;
LatestMethodCache* Universe::_finalizer_register_cache = NULL;
LatestMethodCache* Universe::_loader_addClass_cache = NULL;
LatestMethodCache* Universe::_pd_implies_cache = NULL;
+LatestMethodCache* Universe::_throw_illegal_access_error_cache = NULL;
oop Universe::_out_of_memory_error_java_heap = NULL;
oop Universe::_out_of_memory_error_metaspace = NULL;
oop Universe::_out_of_memory_error_class_metaspace = NULL;
@@ -129,7 +130,6 @@ oop Universe::_virtual_machine_error_instance = NULL;
oop Universe::_vm_exception = NULL;
oop Universe::_allocation_context_notification_obj = NULL;
-Method* Universe::_throw_illegal_access_error = NULL;
Array<int>* Universe::_the_empty_int_array = NULL;
Array<u2>* Universe::_the_empty_short_array = NULL;
Array<Klass*>* Universe::_the_empty_klass_array = NULL;
@@ -235,6 +235,7 @@ void Universe::serialize(SerializeClosure* f, bool do_all) {
_finalizer_register_cache->serialize(f);
_loader_addClass_cache->serialize(f);
_pd_implies_cache->serialize(f);
+ _throw_illegal_access_error_cache->serialize(f);
}
void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
@@ -663,6 +664,7 @@ jint universe_init() {
Universe::_finalizer_register_cache = new LatestMethodCache();
Universe::_loader_addClass_cache = new LatestMethodCache();
Universe::_pd_implies_cache = new LatestMethodCache();
+ Universe::_throw_illegal_access_error_cache = new LatestMethodCache();
if (UseSharedSpaces) {
// Read the data structures supporting the shared spaces (shared
@@ -847,12 +849,6 @@ jint Universe::initialize_heap() {
// See needs_explicit_null_check.
// Only set the heap base for compressed oops because it indicates
// compressed oops for pstack code.
- bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
- if (verbose) {
- tty->cr();
- tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
- Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
- }
if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
// Can't reserve heap below 32Gb.
// keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
@@ -862,16 +858,8 @@ jint Universe::initialize_heap() {
// are decoded so that NULL is preserved, so this page will not be accessed.
Universe::set_narrow_oop_use_implicit_null_checks(false);
#endif
- if (verbose) {
- tty->print(", %s: "PTR_FORMAT,
- narrow_oop_mode_to_string(HeapBasedNarrowOop),
- Universe::narrow_oop_base());
- }
} else {
Universe::set_narrow_oop_base(0);
- if (verbose) {
- tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop));
- }
#ifdef _WIN64
if (!Universe::narrow_oop_use_implicit_null_checks()) {
// Don't need guard page for implicit checks in indexed addressing
@@ -884,17 +872,14 @@ jint Universe::initialize_heap() {
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
} else {
Universe::set_narrow_oop_shift(0);
- if (verbose) {
- tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop));
- }
}
}
- if (verbose) {
- tty->cr();
- tty->cr();
- }
Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
+
+ if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
+ Universe::print_compressed_oops_mode();
+ }
}
// Universe::narrow_oop_base() is one page below the heap.
assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
@@ -915,6 +900,24 @@ jint Universe::initialize_heap() {
return JNI_OK;
}
+void Universe::print_compressed_oops_mode() {
+ tty->cr();
+ tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
+ Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
+
+ tty->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode()));
+
+ if (Universe::narrow_oop_base() != 0) {
+ tty->print(":" PTR_FORMAT, Universe::narrow_oop_base());
+ }
+
+ if (Universe::narrow_oop_shift() != 0) {
+ tty->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
+ }
+
+ tty->cr();
+ tty->cr();
+}
// Reserve the Java heap, which is now the same for all GCs.
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
@@ -984,11 +987,11 @@ void Universe::update_heap_info_at_gc() {
const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
switch (mode) {
case UnscaledNarrowOop:
- return "32-bits Oops";
+ return "32-bit";
case ZeroBasedNarrowOop:
- return "zero based Compressed Oops";
+ return "Zero based";
case HeapBasedNarrowOop:
- return "Compressed Oops with base";
+ return "Non-zero based";
}
ShouldNotReachHere();
@@ -1134,7 +1137,8 @@ bool universe_post_init() {
tty->print_cr("Unable to link/verify Unsafe.throwIllegalAccessError method");
return false; // initialization failed (cannot throw exception yet)
}
- Universe::_throw_illegal_access_error = m;
+ Universe::_throw_illegal_access_error_cache->init(
+ SystemDictionary::misc_Unsafe_klass(), m);
// Setup method for registering loaded classes in class loader vector
InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
@@ -1160,7 +1164,7 @@ bool universe_post_init() {
return false; // initialization failed
}
Universe::_pd_implies_cache->init(
- SystemDictionary::ProtectionDomain_klass(), m);;
+ SystemDictionary::ProtectionDomain_klass(), m);
}
  // The following initializes converter functions for serialization in
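
For orientation, the consolidated print_compressed_oops_mode above emits a single block in the spirit of the following (values are illustrative; the base suffix appears only for a non-zero base and the shift field only for a non-zero shift):

heap address: 0x00000000c0000000, size: 1024 MB, Compressed Oops mode: Zero based, Oop shift amount: 3
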
diff --git a/src/share/vm/memory/universe.hpp b/src/share/vm/memory/universe.hpp
index 2c9e90b14..db806ef6d 100644
--- a/src/share/vm/memory/universe.hpp
+++ b/src/share/vm/memory/universe.hpp
@@ -148,8 +148,7 @@ class Universe: AllStatic {
static LatestMethodCache* _finalizer_register_cache; // static method for registering finalizable objects
static LatestMethodCache* _loader_addClass_cache; // method for registering loaded classes in class loader vector
static LatestMethodCache* _pd_implies_cache; // method for checking protection domain attributes
-
- static Method* _throw_illegal_access_error;
+ static LatestMethodCache* _throw_illegal_access_error_cache; // Unsafe.throwIllegalAccessError() method
// preallocated error objects (no backtrace)
static oop _out_of_memory_error_java_heap;
@@ -305,6 +304,7 @@ class Universe: AllStatic {
static Method* loader_addClass_method() { return _loader_addClass_cache->get_method(); }
static Method* protection_domain_implies_method() { return _pd_implies_cache->get_method(); }
+ static Method* throw_illegal_access_error() { return _throw_illegal_access_error_cache->get_method(); }
static oop null_ptr_exception_instance() { return _null_ptr_exception_instance; }
static oop arithmetic_exception_instance() { return _arithmetic_exception_instance; }
@@ -314,8 +314,6 @@ class Universe: AllStatic {
static inline oop allocation_context_notification_obj();
static inline void set_allocation_context_notification_obj(oop obj);
- static Method* throw_illegal_access_error() { return _throw_illegal_access_error; }
-
static Array<int>* the_empty_int_array() { return _the_empty_int_array; }
static Array<u2>* the_empty_short_array() { return _the_empty_short_array; }
static Array<Method*>* the_empty_method_array() { return _the_empty_method_array; }
@@ -376,6 +374,8 @@ class Universe: AllStatic {
static void set_narrow_ptrs_base(address a) { _narrow_ptrs_base = a; }
static address narrow_ptrs_base() { return _narrow_ptrs_base; }
+ static void print_compressed_oops_mode();
+
// this is set in vm_version on sparc (and then reset in universe afaict)
static void set_narrow_oop_shift(int shift) {
_narrow_oop._shift = shift;
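
The move from a raw Method* to a LatestMethodCache matters for class redefinition: a cached Method* can go stale when sun.misc.Unsafe is redefined, while the cache keys on the holder klass plus the method's idnum and re-resolves on each get_method(). A rough sketch of that idiom using accessors visible in this diff (field names are illustrative):

// Sketch: cache a method as (holder, idnum) instead of a raw Method*,
// so RedefineClasses cannot leave a dangling pointer behind.
class LatestMethodCacheSketch {
  Klass* _klass;
  int    _method_idnum;
 public:
  void init(Klass* k, Method* m) {
    _klass        = k;
    _method_idnum = m->method_idnum();
  }
  Method* get_method() {
    // Re-resolve through the holder: after redefinition the idnum maps
    // to the latest version of the method.
    return InstanceKlass::cast(_klass)->method_with_idnum(_method_idnum);
  }
};
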
diff --git a/src/share/vm/oops/constMethod.hpp b/src/share/vm/oops/constMethod.hpp
index 21df75bde..e10cb4c1f 100644
--- a/src/share/vm/oops/constMethod.hpp
+++ b/src/share/vm/oops/constMethod.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -222,6 +222,7 @@ private:
u2 _max_stack; // Maximum number of entries on the expression stack
u2 _max_locals; // Number of local variables used by this method
u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words
+ u2 _orig_method_idnum; // Original unique identification number for the method
// Constructor
ConstMethod(int byte_code_size,
@@ -475,6 +476,9 @@ public:
u2 method_idnum() const { return _method_idnum; }
void set_method_idnum(u2 idnum) { _method_idnum = idnum; }
+ u2 orig_method_idnum() const { return _orig_method_idnum; }
+ void set_orig_method_idnum(u2 idnum) { _orig_method_idnum = idnum; }
+
// max stack
int max_stack() const { return _max_stack; }
void set_max_stack(int size) { _max_stack = size; }
diff --git a/src/share/vm/oops/cpCache.cpp b/src/share/vm/oops/cpCache.cpp
index 761caec12..d0c77501e 100644
--- a/src/share/vm/oops/cpCache.cpp
+++ b/src/share/vm/oops/cpCache.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -449,7 +449,6 @@ bool ConstantPoolCacheEntry::adjust_method_entry(Method* old_method,
new_method->name()->as_C_string(),
new_method->signature()->as_C_string()));
}
-
return true;
}
@@ -477,7 +476,6 @@ bool ConstantPoolCacheEntry::adjust_method_entry(Method* old_method,
new_method->name()->as_C_string(),
new_method->signature()->as_C_string()));
}
-
return true;
}
@@ -498,41 +496,39 @@ bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() {
// _f1 == NULL || !_f1->is_method() are OK here
return true;
}
- // return false if _f1 refers to an old or an obsolete method
+ // return false if _f1 refers to a non-deleted old or obsolete method
return (NOT_PRODUCT(_f1->is_valid() &&) _f1->is_method() &&
- !((Method*)_f1)->is_old() && !((Method*)_f1)->is_obsolete());
+ (f1_as_method()->is_deleted() ||
+ (!f1_as_method()->is_old() && !f1_as_method()->is_obsolete())));
}
-bool ConstantPoolCacheEntry::is_interesting_method_entry(Klass* k) {
+Method* ConstantPoolCacheEntry::get_interesting_method_entry(Klass* k) {
if (!is_method_entry()) {
// not a method entry so not interesting by default
- return false;
+ return NULL;
}
-
Method* m = NULL;
if (is_vfinal()) {
// virtual and final so _f2 contains method ptr instead of vtable index
m = f2_as_vfinal_method();
} else if (is_f1_null()) {
// NULL _f1 means this is a virtual entry so also not interesting
- return false;
+ return NULL;
} else {
if (!(_f1->is_method())) {
// _f1 can also contain a Klass* for an interface
- return false;
+ return NULL;
}
m = f1_as_method();
}
-
assert(m != NULL && m->is_method(), "sanity check");
if (m == NULL || !m->is_method() || (k != NULL && m->method_holder() != k)) {
// robustness for above sanity checks or method is not in
// the interesting class
- return false;
+ return NULL;
}
-
// the method is in the interesting class so the entry is interesting
- return true;
+ return m;
}
#endif // INCLUDE_JVMTI
@@ -608,46 +604,31 @@ void ConstantPoolCache::initialize(const intArray& inverse_index_map,
// RedefineClasses() API support:
// If any entry of this ConstantPoolCache points to any of
// old_methods, replace it with the corresponding new_method.
-void ConstantPoolCache::adjust_method_entries(Method** old_methods, Method** new_methods,
- int methods_length, bool * trace_name_printed) {
-
- if (methods_length == 0) {
- // nothing to do if there are no methods
- return;
- }
-
- // get shorthand for the interesting class
- Klass* old_holder = old_methods[0]->method_holder();
-
+void ConstantPoolCache::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
for (int i = 0; i < length(); i++) {
- if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
- // skip uninteresting methods
+ ConstantPoolCacheEntry* entry = entry_at(i);
+ Method* old_method = entry->get_interesting_method_entry(holder);
+ if (old_method == NULL || !old_method->is_old()) {
+ continue; // skip uninteresting entries
+ }
+ if (old_method->is_deleted()) {
+ // clean up entries with deleted methods
+ entry->initialize_entry(entry->constant_pool_index());
continue;
}
+ Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
- // The ConstantPoolCache contains entries for several different
- // things, but we only care about methods. In fact, we only care
- // about methods in the same class as the one that contains the
- // old_methods. At this point, we have an interesting entry.
-
- for (int j = 0; j < methods_length; j++) {
- Method* old_method = old_methods[j];
- Method* new_method = new_methods[j];
+ assert(new_method != NULL, "method_with_idnum() should not be NULL");
+ assert(old_method != new_method, "sanity check");
- if (entry_at(i)->adjust_method_entry(old_method, new_method,
- trace_name_printed)) {
- // current old_method matched this entry and we updated it so
- // break out and get to the next interesting entry if there one
- break;
- }
- }
+ entry_at(i)->adjust_method_entry(old_method, new_method, trace_name_printed);
}
}
// the constant pool cache should never contain old or obsolete methods
bool ConstantPoolCache::check_no_old_or_obsolete_entries() {
for (int i = 1; i < length(); i++) {
- if (entry_at(i)->is_interesting_method_entry(NULL) &&
+ if (entry_at(i)->get_interesting_method_entry(NULL) != NULL &&
!entry_at(i)->check_no_old_or_obsolete_entries()) {
return false;
}
@@ -657,7 +638,7 @@ bool ConstantPoolCache::check_no_old_or_obsolete_entries() {
void ConstantPoolCache::dump_cache() {
for (int i = 1; i < length(); i++) {
- if (entry_at(i)->is_interesting_method_entry(NULL)) {
+ if (entry_at(i)->get_interesting_method_entry(NULL) != NULL) {
entry_at(i)->print(tty, i);
}
}
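
The rewritten adjust_method_entries() above (and its vtable/itable counterparts below) drops the paired old_methods[]/new_methods[] arrays in favor of a direct lookup keyed by the original method idnum. A minimal standalone sketch of that mapping, using stand-in types rather than the VM's Method/InstanceKlass:

  #include <cstddef>

  // Stand-ins only: orig_idnum is the number assigned before any
  // redefinition, and is_old is set once the declaring class is redefined.
  struct Method {
    int  orig_idnum;
    bool is_old;
  };

  struct Holder {
    Method** methods;  // current methods, indexed by idnum
    size_t   count;
    Method* method_with_idnum(int idnum) const {
      return (idnum >= 0 && (size_t)idnum < count) ? methods[idnum] : NULL;
    }
  };

  // Mirrors the new loop bodies: adjust an entry only if it still points at
  // an old method; the replacement is the holder's method with the same
  // original idnum.
  Method* find_replacement(const Holder* holder, Method* old_method) {
    if (old_method == NULL || !old_method->is_old) {
      return NULL;  // uninteresting entry, skip
    }
    return holder->method_with_idnum(old_method->orig_idnum);
  }
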
diff --git a/src/share/vm/oops/cpCache.hpp b/src/share/vm/oops/cpCache.hpp
index a597b28a3..1d5f8013e 100644
--- a/src/share/vm/oops/cpCache.hpp
+++ b/src/share/vm/oops/cpCache.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -378,9 +378,9 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
// printed the klass name so that other routines in the adjust_*
// group don't print the klass name.
bool adjust_method_entry(Method* old_method, Method* new_method,
- bool * trace_name_printed);
+ bool* trace_name_printed);
bool check_no_old_or_obsolete_entries();
- bool is_interesting_method_entry(Klass* k);
+ Method* get_interesting_method_entry(Klass* k);
#endif // INCLUDE_JVMTI
// Debugging & Printing
@@ -476,8 +476,7 @@ class ConstantPoolCache: public MetaspaceObj {
// trace_name_printed is set to true if the current call has
// printed the klass name so that other routines in the adjust_*
// group don't print the klass name.
- void adjust_method_entries(Method** old_methods, Method** new_methods,
- int methods_length, bool * trace_name_printed);
+ void adjust_method_entries(InstanceKlass* holder, bool* trace_name_printed);
bool check_no_old_or_obsolete_entries();
void dump_cache();
#endif // INCLUDE_JVMTI
diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp
index d4f497ed3..c6a5bddb0 100644
--- a/src/share/vm/oops/instanceKlass.cpp
+++ b/src/share/vm/oops/instanceKlass.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,6 +50,7 @@
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
+#include "prims/jvmtiThreadState.hpp"
#include "prims/methodComparator.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
@@ -110,7 +111,7 @@ HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
len = name->utf8_length(); \
} \
HS_DTRACE_PROBE4(hotspot, class__initialization__##type, \
- data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type); \
+ data, len, (void *)(clss)->class_loader(), thread_type); \
}
#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
@@ -123,7 +124,7 @@ HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
len = name->utf8_length(); \
} \
HS_DTRACE_PROBE5(hotspot, class__initialization__##type, \
- data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type, wait); \
+ data, len, (void *)(clss)->class_loader(), thread_type, wait); \
}
#else /* USDT2 */
@@ -438,6 +439,9 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
if (!constants()->is_shared()) {
MetadataFactory::free_metadata(loader_data, constants());
}
+ // Delete any cached resolution errors for the constant pool
+ SystemDictionary::delete_resolution_error(constants());
+
set_constants(NULL);
}
@@ -928,10 +932,16 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
// Step 10 and 11
Handle e(THREAD, PENDING_EXCEPTION);
CLEAR_PENDING_EXCEPTION;
+ // JVMTI has already reported the pending exception
+ // JVMTI internal flag reset is needed in order to report ExceptionInInitializerError
+ JvmtiExport::clear_detected_exception((JavaThread*)THREAD);
{
EXCEPTION_MARK;
this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, class initialization error is thrown below
+ // JVMTI has already reported the pending exception
+ // JVMTI internal flag reset is needed in order to report ExceptionInInitializerError
+ JvmtiExport::clear_detected_exception((JavaThread*)THREAD);
}
DTRACE_CLASSINIT_PROBE_WAIT(error, InstanceKlass::cast(this_oop()), -1,wait);
if (e->is_a(SystemDictionary::Error_klass())) {
@@ -1562,6 +1572,21 @@ Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature, M
return NULL;
}
+#ifdef ASSERT
+// search through class hierarchy and return true if this class or
+// one of the superclasses was redefined
+bool InstanceKlass::has_redefined_this_or_super() const {
+ const InstanceKlass* klass = this;
+ while (klass != NULL) {
+ if (klass->has_been_redefined()) {
+ return true;
+ }
+ klass = InstanceKlass::cast(klass->super());
+ }
+ return false;
+}
+#endif
+
// lookup a method in the default methods list then in all transitive interfaces
// Do NOT return private or static methods
Method* InstanceKlass::lookup_method_in_ordered_interfaces(Symbol* name,
@@ -1944,7 +1969,7 @@ void InstanceKlass::add_dependent_nmethod(nmethod* nm) {
// find a corresponding bucket; otherwise there's a bug in the
// recording of dependencies.
//
-void InstanceKlass::remove_dependent_nmethod(nmethod* nm) {
+void InstanceKlass::remove_dependent_nmethod(nmethod* nm, bool delete_immediately) {
assert_locked_or_safepoint(CodeCache_lock);
nmethodBucket* b = _dependencies;
nmethodBucket* last = NULL;
@@ -1953,7 +1978,17 @@ void InstanceKlass::remove_dependent_nmethod(nmethod* nm) {
int val = b->decrement();
guarantee(val >= 0, err_msg("Underflow: %d", val));
if (val == 0) {
- set_has_unloaded_dependent(true);
+ if (delete_immediately) {
+ if (last == NULL) {
+ _dependencies = b->next();
+ } else {
+ last->set_next(b->next());
+ }
+ delete b;
+ } else {
+ // The deletion of this entry is deferred until a later, potentially parallel GC phase.
+ set_has_unloaded_dependent(true);
+ }
}
return;
}
@@ -2293,6 +2328,13 @@ int InstanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
#endif // INCLUDE_ALL_GCS
+void InstanceKlass::clean_weak_instanceklass_links(BoolObjectClosure* is_alive) {
+ clean_implementors_list(is_alive);
+ clean_method_data(is_alive);
+
+ clean_dependent_nmethods();
+}
+
void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
assert(class_loader_data()->is_alive(is_alive), "this klass should be live");
if (is_interface()) {
@@ -2798,30 +2840,33 @@ Method* InstanceKlass::method_at_itable(Klass* holder, int index, TRAPS) {
// not yet in the vtable due to concurrent subclass define and superinterface
// redefinition
// Note: those in the vtable should have been updated via adjust_method_entries
-void InstanceKlass::adjust_default_methods(Method** old_methods, Method** new_methods,
- int methods_length, bool* trace_name_printed) {
+void InstanceKlass::adjust_default_methods(InstanceKlass* holder, bool* trace_name_printed) {
// search the default_methods for uses of either obsolete or EMCP methods
if (default_methods() != NULL) {
- for (int j = 0; j < methods_length; j++) {
- Method* old_method = old_methods[j];
- Method* new_method = new_methods[j];
-
- for (int index = 0; index < default_methods()->length(); index ++) {
- if (default_methods()->at(index) == old_method) {
- default_methods()->at_put(index, new_method);
- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
- if (!(*trace_name_printed)) {
- // RC_TRACE_MESG macro has an embedded ResourceMark
- RC_TRACE_MESG(("adjust: klassname=%s default methods from name=%s",
- external_name(),
- old_method->method_holder()->external_name()));
- *trace_name_printed = true;
- }
- RC_TRACE(0x00100000, ("default method update: %s(%s) ",
- new_method->name()->as_C_string(),
- new_method->signature()->as_C_string()));
- }
+ for (int index = 0; index < default_methods()->length(); index ++) {
+ Method* old_method = default_methods()->at(index);
+ if (old_method == NULL || old_method->method_holder() != holder || !old_method->is_old()) {
+ continue; // skip uninteresting entries
+ }
+ assert(!old_method->is_deleted(), "default methods may not be deleted");
+
+ Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
+
+ assert(new_method != NULL, "method_with_idnum() should not be NULL");
+ assert(old_method != new_method, "sanity check");
+
+ default_methods()->at_put(index, new_method);
+ if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
+ if (!(*trace_name_printed)) {
+ // RC_TRACE_MESG macro has an embedded ResourceMark
+ RC_TRACE_MESG(("adjust: klassname=%s default methods from name=%s",
+ external_name(),
+ old_method->method_holder()->external_name()));
+ *trace_name_printed = true;
}
+ RC_TRACE(0x00100000, ("default method update: %s(%s) ",
+ new_method->name()->as_C_string(),
+ new_method->signature()->as_C_string()));
}
}
}
@@ -3744,6 +3789,22 @@ bool InstanceKlass::has_previous_version() const {
} // end has_previous_version()
+InstanceKlass* InstanceKlass::get_klass_version(int version) {
+ if (constants()->version() == version) {
+ return this;
+ }
+ PreviousVersionWalker pvw(Thread::current(), (InstanceKlass*)this);
+ for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+ pv_node != NULL; pv_node = pvw.next_previous_version()) {
+ ConstantPool* prev_cp = pv_node->prev_constant_pool();
+ if (prev_cp->version() == version) {
+ return prev_cp->pool_holder();
+ }
+ }
+ return NULL; // None found
+}
+
+
Method* InstanceKlass::method_with_idnum(int idnum) {
Method* m = NULL;
if (idnum < methods()->length()) {
@@ -3762,6 +3823,37 @@ Method* InstanceKlass::method_with_idnum(int idnum) {
return m;
}
+
+Method* InstanceKlass::method_with_orig_idnum(int idnum) {
+ if (idnum >= methods()->length()) {
+ return NULL;
+ }
+ Method* m = methods()->at(idnum);
+ if (m != NULL && m->orig_method_idnum() == idnum) {
+ return m;
+ }
+ // Obsolete method idnum does not match the original idnum
+ for (int index = 0; index < methods()->length(); ++index) {
+ m = methods()->at(index);
+ if (m->orig_method_idnum() == idnum) {
+ return m;
+ }
+ }
+ // None found, return null for the caller to handle.
+ return NULL;
+}
+
+
+Method* InstanceKlass::method_with_orig_idnum(int idnum, int version) {
+ InstanceKlass* holder = get_klass_version(version);
+ if (holder == NULL) {
+ return NULL; // That version of the klass is gone; no method can be found
+ }
+ Method* method = holder->method_with_orig_idnum(idnum);
+ return method;
+}
+
+
jint InstanceKlass::get_cached_class_file_len() {
return VM_RedefineClasses::get_cached_class_file_len(_cached_class_file);
}
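
remove_dependent_nmethod() above gains a delete_immediately path beside the existing deferred cleanup. A standalone sketch of that unlink logic, with a toy bucket type standing in for nmethodBucket:

  #include <cstddef>

  struct Bucket {
    Bucket* next;
    int     count;
  };

  // Returns the possibly-updated list head. With delete_immediately the
  // zero-count bucket is unlinked and freed on the spot; otherwise it is
  // only flagged so a later, potentially parallel, GC phase purges it.
  Bucket* remove_bucket(Bucket* head, Bucket* b, bool delete_immediately,
                        bool* has_unloaded) {
    Bucket* last = NULL;
    for (Bucket* cur = head; cur != NULL; last = cur, cur = cur->next) {
      if (cur != b) continue;
      if (--cur->count == 0) {
        if (delete_immediately) {
          if (last == NULL) head = cur->next;   // b was the list head
          else              last->next = cur->next;
          delete cur;
        } else {
          *has_unloaded = true;                 // defer the unlink
        }
      }
      return head;
    }
    return head;  // not found; the VM code treats this as a bug
  }
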
diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp
index f19678ec4..dc4a5651d 100644
--- a/src/share/vm/oops/instanceKlass.hpp
+++ b/src/share/vm/oops/instanceKlass.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -358,6 +358,8 @@ class InstanceKlass: public Klass {
Array<Method*>* methods() const { return _methods; }
void set_methods(Array<Method*>* a) { _methods = a; }
Method* method_with_idnum(int idnum);
+ Method* method_with_orig_idnum(int idnum);
+ Method* method_with_orig_idnum(int idnum, int version);
// method ordering
Array<int>* method_ordering() const { return _method_ordering; }
@@ -658,6 +660,7 @@ class InstanceKlass: public Klass {
return _previous_versions;
}
+ InstanceKlass* get_klass_version(int version);
static void purge_previous_versions(InstanceKlass* ik);
// JVMTI: Support for caching a class file before it is modified by an agent that can do retransformation
@@ -782,7 +785,7 @@ class InstanceKlass: public Klass {
// maintenance of deoptimization dependencies
int mark_dependent_nmethods(DepChange& changes);
void add_dependent_nmethod(nmethod* nm);
- void remove_dependent_nmethod(nmethod* nm);
+ void remove_dependent_nmethod(nmethod* nm, bool delete_immediately);
// On-stack replacement support
nmethod* osr_nmethods_head() const { return _osr_nmethods_head; };
@@ -805,6 +808,11 @@ class InstanceKlass: public Klass {
bool implements_interface(Klass* k) const;
bool is_same_or_direct_interface(Klass* k) const;
+#ifdef ASSERT
+ // check whether this class or one of its superclasses was redefined
+ bool has_redefined_this_or_super() const;
+#endif
+
// Access to the implementor of an interface.
Klass* implementor() const
{
@@ -862,8 +870,8 @@ class InstanceKlass: public Klass {
// Casting from Klass*
static InstanceKlass* cast(Klass* k) {
- assert(k->is_klass(), "must be");
- assert(k->oop_is_instance(), "cast to InstanceKlass");
+ assert(k == NULL || k->is_klass(), "must be");
+ assert(k == NULL || k->oop_is_instance(), "cast to InstanceKlass");
return (InstanceKlass*) k;
}
@@ -959,14 +967,14 @@ class InstanceKlass: public Klass {
Method* method_at_itable(Klass* holder, int index, TRAPS);
#if INCLUDE_JVMTI
- void adjust_default_methods(Method** old_methods, Method** new_methods,
- int methods_length, bool* trace_name_printed);
+ void adjust_default_methods(InstanceKlass* holder, bool* trace_name_printed);
#endif // INCLUDE_JVMTI
// Garbage collection
void oop_follow_contents(oop obj);
int oop_adjust_pointers(oop obj);
+ void clean_weak_instanceklass_links(BoolObjectClosure* is_alive);
void clean_implementors_list(BoolObjectClosure* is_alive);
void clean_method_data(BoolObjectClosure* is_alive);
void clean_dependent_nmethods();
diff --git a/src/share/vm/oops/klass.cpp b/src/share/vm/oops/klass.cpp
index d8279dfd2..7b01be68a 100644
--- a/src/share/vm/oops/klass.cpp
+++ b/src/share/vm/oops/klass.cpp
@@ -454,8 +454,7 @@ void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive
// Clean the implementors list and method data.
if (clean_alive_klasses && current->oop_is_instance()) {
InstanceKlass* ik = InstanceKlass::cast(current);
- ik->clean_implementors_list(is_alive);
- ik->clean_method_data(is_alive);
+ ik->clean_weak_instanceklass_links(is_alive);
}
}
}
diff --git a/src/share/vm/oops/klassVtable.cpp b/src/share/vm/oops/klassVtable.cpp
index 7f8fee0f8..f7adf47b9 100644
--- a/src/share/vm/oops/klassVtable.cpp
+++ b/src/share/vm/oops/klassVtable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -401,13 +401,15 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
// get super_klass for method_holder for the found method
InstanceKlass* super_klass = super_method->method_holder();
- if (is_default
+ // private methods are also never overridden
+ if (!super_method->is_private() &&
+ (is_default
|| ((super_klass->is_override(super_method, target_loader, target_classname, THREAD))
|| ((klass->major_version() >= VTABLE_TRANSITIVE_OVERRIDE_VERSION)
&& ((super_klass = find_transitive_override(super_klass,
target_method, i, target_loader,
target_classname, THREAD))
- != (InstanceKlass*)NULL))))
+ != (InstanceKlass*)NULL)))))
{
// Package private methods always need a new entry to root their own
// overriding. They may also override other methods.
@@ -689,9 +691,15 @@ bool klassVtable::is_miranda_entry_at(int i) {
// check if a method is a miranda method, given a class's methods table,
// its default_method table and its super
// Miranda methods are calculated twice:
-// first: before vtable size calculation: including abstract and default
+// first: before vtable size calculation: including abstract and superinterface default
+// We include potential default methods to give them space in the vtable.
+// During the first run, the default_methods list is empty
// This is seen by default method creation
-// Second: recalculated during vtable initialization: only abstract
+// Second: recalculated during vtable initialization: only include abstract methods.
+// During the second run, default_methods is set up, so concrete methods from
+// superinterfaces with matching names/signatures to default_methods are already
+// in the default_methods list and do not need to be appended to the vtable
+// as mirandas
// This is seen by link resolution and selection.
// "miranda" means not static, not defined by this class.
// private methods in interfaces do not belong in the miranda list.
@@ -706,8 +714,9 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
}
Symbol* name = m->name();
Symbol* signature = m->signature();
+ Method* mo;
- if (InstanceKlass::find_instance_method(class_methods, name, signature) == NULL) {
+ if ((mo = InstanceKlass::find_instance_method(class_methods, name, signature)) == NULL) {
// did not find it in the method table of the current class
if ((default_methods == NULL) ||
InstanceKlass::find_method(default_methods, name, signature) == NULL) {
@@ -716,7 +725,7 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
return true;
}
- Method* mo = InstanceKlass::cast(super)->lookup_method(name, signature);
+ mo = InstanceKlass::cast(super)->lookup_method(name, signature);
while (mo != NULL && mo->access_flags().is_static()
&& mo->method_holder() != NULL
&& mo->method_holder()->super() != NULL)
@@ -728,6 +737,18 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
return true;
}
}
+ } else {
+ // if the local class has a private method, the miranda will not
+ // override it, so a vtable slot is needed
+ if (mo->access_flags().is_private()) {
+
+ // Second round, weed out any superinterface methods that turned
+ // into default methods, i.e. were concrete not abstract in the end
+ if ((default_methods == NULL) ||
+ InstanceKlass::find_method(default_methods, name, signature) == NULL) {
+ return true;
+ }
+ }
}
return false;
@@ -860,44 +881,43 @@ bool klassVtable::adjust_default_method(int vtable_index, Method* old_method, Me
}
return updated;
}
-void klassVtable::adjust_method_entries(Method** old_methods, Method** new_methods,
- int methods_length, bool * trace_name_printed) {
- // search the vtable for uses of either obsolete or EMCP methods
- for (int j = 0; j < methods_length; j++) {
- Method* old_method = old_methods[j];
- Method* new_method = new_methods[j];
-
- // In the vast majority of cases we could get the vtable index
- // by using: old_method->vtable_index()
- // However, there are rare cases, eg. sun.awt.X11.XDecoratedPeer.getX()
- // in sun.awt.X11.XFramePeer where methods occur more than once in the
- // vtable, so, alas, we must do an exhaustive search.
- for (int index = 0; index < length(); index++) {
- if (unchecked_method_at(index) == old_method) {
- put_method_at(new_method, index);
- // For default methods, need to update the _default_methods array
- // which can only have one method entry for a given signature
- bool updated_default = false;
- if (old_method->is_default_method()) {
- updated_default = adjust_default_method(index, old_method, new_method);
- }
- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
- if (!(*trace_name_printed)) {
- // RC_TRACE_MESG macro has an embedded ResourceMark
- RC_TRACE_MESG(("adjust: klassname=%s for methods from name=%s",
- klass()->external_name(),
- old_method->method_holder()->external_name()));
- *trace_name_printed = true;
- }
- // RC_TRACE macro has an embedded ResourceMark
- RC_TRACE(0x00100000, ("vtable method update: %s(%s), updated default = %s",
- new_method->name()->as_C_string(),
- new_method->signature()->as_C_string(),
- updated_default ? "true" : "false"));
- }
- // cannot 'break' here; see for-loop comment above.
+// search the vtable for uses of either obsolete or EMCP methods
+void klassVtable::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
+ int prn_enabled = 0;
+ for (int index = 0; index < length(); index++) {
+ Method* old_method = unchecked_method_at(index);
+ if (old_method == NULL || old_method->method_holder() != holder || !old_method->is_old()) {
+ continue; // skip uninteresting entries
+ }
+ assert(!old_method->is_deleted(), "vtable methods may not be deleted");
+
+ Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
+
+ assert(new_method != NULL, "method_with_idnum() should not be NULL");
+ assert(old_method != new_method, "sanity check");
+
+ put_method_at(new_method, index);
+ // For default methods, need to update the _default_methods array
+ // which can only have one method entry for a given signature
+ bool updated_default = false;
+ if (old_method->is_default_method()) {
+ updated_default = adjust_default_method(index, old_method, new_method);
+ }
+
+ if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
+ if (!(*trace_name_printed)) {
+ // RC_TRACE_MESG macro has an embedded ResourceMark
+ RC_TRACE_MESG(("adjust: klassname=%s for methods from name=%s",
+ klass()->external_name(),
+ old_method->method_holder()->external_name()));
+ *trace_name_printed = true;
}
+ // RC_TRACE macro has an embedded ResourceMark
+ RC_TRACE(0x00100000, ("vtable method update: %s(%s), updated default = %s",
+ new_method->name()->as_C_string(),
+ new_method->signature()->as_C_string(),
+ updated_default ? "true" : "false"));
}
}
}
@@ -1190,37 +1210,35 @@ void klassItable::initialize_with_method(Method* m) {
}
#if INCLUDE_JVMTI
-void klassItable::adjust_method_entries(Method** old_methods, Method** new_methods,
- int methods_length, bool * trace_name_printed) {
- // search the itable for uses of either obsolete or EMCP methods
- for (int j = 0; j < methods_length; j++) {
- Method* old_method = old_methods[j];
- Method* new_method = new_methods[j];
- itableMethodEntry* ime = method_entry(0);
-
- // The itable can describe more than one interface and the same
- // method signature can be specified by more than one interface.
- // This means we have to do an exhaustive search to find all the
- // old_method references.
- for (int i = 0; i < _size_method_table; i++) {
- if (ime->method() == old_method) {
- ime->initialize(new_method);
-
- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
- if (!(*trace_name_printed)) {
- // RC_TRACE_MESG macro has an embedded ResourceMark
- RC_TRACE_MESG(("adjust: name=%s",
- old_method->method_holder()->external_name()));
- *trace_name_printed = true;
- }
- // RC_TRACE macro has an embedded ResourceMark
- RC_TRACE(0x00200000, ("itable method update: %s(%s)",
- new_method->name()->as_C_string(),
- new_method->signature()->as_C_string()));
- }
- // cannot 'break' here; see for-loop comment above.
+// search the itable for uses of either obsolete or EMCP methods
+void klassItable::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
+
+ itableMethodEntry* ime = method_entry(0);
+ for (int i = 0; i < _size_method_table; i++, ime++) {
+ Method* old_method = ime->method();
+ if (old_method == NULL || old_method->method_holder() != holder || !old_method->is_old()) {
+ continue; // skip uninteresting entries
+ }
+ assert(!old_method->is_deleted(), "itable methods may not be deleted");
+
+ Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
+
+ assert(new_method != NULL, "method_with_idnum() should not be NULL");
+ assert(old_method != new_method, "sanity check");
+
+ ime->initialize(new_method);
+
+ if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
+ if (!(*trace_name_printed)) {
+ // RC_TRACE_MESG macro has an embedded ResourceMark
+ RC_TRACE_MESG(("adjust: name=%s",
+ old_method->method_holder()->external_name()));
+ *trace_name_printed = true;
}
- ime++;
+ // RC_TRACE macro has an embedded ResourceMark
+ RC_TRACE(0x00200000, ("itable method update: %s(%s)",
+ new_method->name()->as_C_string(),
+ new_method->signature()->as_C_string()));
}
}
}
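
The is_miranda() change above adds the local-private-method case. A toy restatement of the resulting decision, folding signatures into plain names and modeling the method tables as string sets (the real lookup also walks superclasses and skips static methods):

  #include <set>
  #include <string>

  bool is_miranda(const std::string& m,
                  const std::set<std::string>& class_methods,    // this class
                  const std::set<std::string>& private_methods,  // subset of class_methods
                  const std::set<std::string>& default_methods,
                  const std::set<std::string>& super_impls) {    // usable superclass impls
    bool covered_by_default = default_methods.count(m) != 0;
    if (class_methods.count(m) == 0) {
      // not declared locally: needs a slot unless a default method or a
      // superclass implementation already provides it
      return !covered_by_default && super_impls.count(m) == 0;
    }
    // declared locally, but a private method never overrides the interface
    // method, so a slot is still needed unless a default method covers it
    return private_methods.count(m) != 0 && !covered_by_default;
  }
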
diff --git a/src/share/vm/oops/klassVtable.hpp b/src/share/vm/oops/klassVtable.hpp
index 01ad63fcf..e4b4c9f51 100644
--- a/src/share/vm/oops/klassVtable.hpp
+++ b/src/share/vm/oops/klassVtable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -98,8 +98,7 @@ class klassVtable : public ResourceObj {
// printed the klass name so that other routines in the adjust_*
// group don't print the klass name.
bool adjust_default_method(int vtable_index, Method* old_method, Method* new_method);
- void adjust_method_entries(Method** old_methods, Method** new_methods,
- int methods_length, bool * trace_name_printed);
+ void adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed);
bool check_no_old_or_obsolete_entries();
void dump_vtable();
#endif // INCLUDE_JVMTI
@@ -288,8 +287,7 @@ class klassItable : public ResourceObj {
// trace_name_printed is set to true if the current call has
// printed the klass name so that other routines in the adjust_*
// group don't print the klass name.
- void adjust_method_entries(Method** old_methods, Method** new_methods,
- int methods_length, bool * trace_name_printed);
+ void adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed);
bool check_no_old_or_obsolete_entries();
void dump_itable();
#endif // INCLUDE_JVMTI
diff --git a/src/share/vm/oops/markOop.cpp b/src/share/vm/oops/markOop.cpp
index 85dd5a06a..1cc8a19f5 100644
--- a/src/share/vm/oops/markOop.cpp
+++ b/src/share/vm/oops/markOop.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,17 +25,40 @@
#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/objectMonitor.inline.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
void markOopDesc::print_on(outputStream* st) const {
- if (is_locked()) {
- st->print("locked(" INTPTR_FORMAT ")->", value());
- markOop(*(markOop*)value())->print_on(st);
+ if (is_marked()) {
+ st->print(" marked(" INTPTR_FORMAT ")", value());
+ } else if (is_locked()) {
+ st->print(" locked(" INTPTR_FORMAT ")->", value());
+ if (is_neutral()) {
+ st->print("is_neutral");
+ if (has_no_hash()) st->print(" no_hash");
+ else st->print(" hash=" INTPTR_FORMAT, hash());
+ st->print(" age=%d", age());
+ } else if (has_bias_pattern()) {
+ st->print("is_biased");
+ JavaThread* jt = biased_locker();
+ st->print(" biased_locker=" INTPTR_FORMAT, p2i(jt));
+ } else if (has_monitor()) {
+ ObjectMonitor* mon = monitor();
+ if (mon == NULL)
+ st->print("monitor=NULL");
+ else {
+ BasicLock * bl = (BasicLock *) mon->owner();
+ st->print("monitor={count="INTPTR_FORMAT",waiters="INTPTR_FORMAT",recursions="INTPTR_FORMAT",owner="INTPTR_FORMAT"}",
+ mon->count(), mon->waiters(), mon->recursions(), p2i(bl));
+ }
+ } else {
+ st->print("??");
+ }
} else {
assert(is_unlocked() || has_bias_pattern(), "just checking");
st->print("mark(");
- if (has_bias_pattern()) st->print("biased,");
+ if (has_bias_pattern()) st->print("biased,");
st->print("hash %#lx,", hash());
st->print("age %d)", age());
}
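
The expanded print_on() above distinguishes marked, stack-locked, inflated, biased and neutral marks. A simplified decode of those tag tests (constants follow the HotSpot mark-word encoding of two lock bits plus a bias bit; this is a model, not the VM's markOopDesc):

  #include <cstdint>

  // Low two bits: 01 = neutral (unlocked), 00 = stack-locked,
  // 10 = inflated monitor, 11 = marked (GC forwarding);
  // 101 in the low three bits = biased-lock pattern.
  const char* mark_kind(uintptr_t mark) {
    if ((mark & 3) == 3) return "marked";
    if ((mark & 7) == 5) return "biased";
    switch (mark & 3) {
      case 1:  return "neutral";
      case 0:  return "stack-locked";
      default: return "inflated-monitor";
    }
  }
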
diff --git a/src/share/vm/oops/method.cpp b/src/share/vm/oops/method.cpp
index 25fc780c0..abb67edd0 100644
--- a/src/share/vm/oops/method.cpp
+++ b/src/share/vm/oops/method.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -92,6 +92,7 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) {
set_force_inline(false);
set_hidden(false);
set_dont_inline(false);
+ set_has_injected_profile(false);
set_method_data(NULL);
clear_method_counters();
set_vtable_index(Method::garbage_vtable_index);
@@ -1419,6 +1420,7 @@ void Method::sort_methods(Array<Method*>* methods, bool idempotent, bool set_idn
for (int i = 0; i < length; i++) {
Method* m = methods->at(i);
m->set_method_idnum(i);
+ m->set_orig_method_idnum(i);
}
}
}
diff --git a/src/share/vm/oops/method.hpp b/src/share/vm/oops/method.hpp
index fb2af30ce..8cef629c5 100644
--- a/src/share/vm/oops/method.hpp
+++ b/src/share/vm/oops/method.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -110,12 +110,13 @@ class Method : public Metadata {
#endif
u2 _method_size; // size of this object
u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
- u1 _jfr_towrite : 1, // Flags
- _caller_sensitive : 1,
- _force_inline : 1,
- _hidden : 1,
- _dont_inline : 1,
- : 3;
+ u1 _jfr_towrite : 1, // Flags
+ _caller_sensitive : 1,
+ _force_inline : 1,
+ _hidden : 1,
+ _dont_inline : 1,
+ _has_injected_profile : 1,
+ : 2;
#ifndef PRODUCT
int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging)
@@ -265,6 +266,9 @@ class Method : public Metadata {
u2 method_idnum() const { return constMethod()->method_idnum(); }
void set_method_idnum(u2 idnum) { constMethod()->set_method_idnum(idnum); }
+ u2 orig_method_idnum() const { return constMethod()->orig_method_idnum(); }
+ void set_orig_method_idnum(u2 idnum) { constMethod()->set_orig_method_idnum(idnum); }
+
// code size
int code_size() const { return constMethod()->code_size(); }
@@ -714,6 +718,8 @@ class Method : public Metadata {
void set_is_old() { _access_flags.set_is_old(); }
bool is_obsolete() const { return access_flags().is_obsolete(); }
void set_is_obsolete() { _access_flags.set_is_obsolete(); }
+ bool is_deleted() const { return access_flags().is_deleted(); }
+ void set_is_deleted() { _access_flags.set_is_deleted(); }
bool on_stack() const { return access_flags().on_stack(); }
void set_on_stack(const bool value);
@@ -776,16 +782,19 @@ class Method : public Metadata {
void init_intrinsic_id(); // updates from _none if a match
static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);
- bool jfr_towrite() { return _jfr_towrite; }
- void set_jfr_towrite(bool x) { _jfr_towrite = x; }
- bool caller_sensitive() { return _caller_sensitive; }
- void set_caller_sensitive(bool x) { _caller_sensitive = x; }
- bool force_inline() { return _force_inline; }
- void set_force_inline(bool x) { _force_inline = x; }
- bool dont_inline() { return _dont_inline; }
- void set_dont_inline(bool x) { _dont_inline = x; }
- bool is_hidden() { return _hidden; }
- void set_hidden(bool x) { _hidden = x; }
+ bool jfr_towrite() { return _jfr_towrite; }
+ void set_jfr_towrite(bool x) { _jfr_towrite = x; }
+ bool caller_sensitive() { return _caller_sensitive; }
+ void set_caller_sensitive(bool x) { _caller_sensitive = x; }
+ bool force_inline() { return _force_inline; }
+ void set_force_inline(bool x) { _force_inline = x; }
+ bool dont_inline() { return _dont_inline; }
+ void set_dont_inline(bool x) { _dont_inline = x; }
+ bool is_hidden() { return _hidden; }
+ void set_hidden(bool x) { _hidden = x; }
+ bool has_injected_profile() { return _has_injected_profile; }
+ void set_has_injected_profile(bool x) { _has_injected_profile = x; }
+
ConstMethod::MethodType method_type() const {
return _constMethod->method_type();
}
diff --git a/src/share/vm/opto/block.cpp b/src/share/vm/opto/block.cpp
index f9fec2935..f0599a19c 100644
--- a/src/share/vm/opto/block.cpp
+++ b/src/share/vm/opto/block.cpp
@@ -393,7 +393,7 @@ uint PhaseCFG::build_cfg() {
VectorSet visited(a);
// Allocate stack with enough space to avoid frequent realloc
- Node_Stack nstack(a, C->unique() >> 1);
+ Node_Stack nstack(a, C->live_nodes() >> 1);
nstack.push(_root, 0);
uint sum = 0; // Counter for blocks
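
This is the first of several unique() -> live_nodes() swaps in this change (cfgnode.cpp, compile.cpp, domgraph.cpp and gcm.cpp below get the same treatment): unique() counts every node id ever handed out and never shrinks, while live_nodes() tracks nodes still in the graph, so it is the tighter pre-sizing hint once IGVN has killed many nodes. A toy model of the two counters:

  // Ids are never reused, so 'uniq' only grows; 'live' drops as nodes die.
  // Sizing worklists and stacks by 'live' avoids large over-allocation late
  // in compilation (cf. the RenumberLiveNodes phase added in compile.cpp).
  struct NodeCounts {
    int uniq;   // cf. Compile::unique()
    int live;   // cf. Compile::live_nodes()
    NodeCounts() : uniq(0), live(0) {}
    void alloc()  { ++uniq; ++live; }
    void remove() { --live; }
  };
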
diff --git a/src/share/vm/opto/bytecodeInfo.cpp b/src/share/vm/opto/bytecodeInfo.cpp
index ae8da0fcd..d16b5631b 100644
--- a/src/share/vm/opto/bytecodeInfo.cpp
+++ b/src/share/vm/opto/bytecodeInfo.cpp
@@ -608,11 +608,11 @@ InlineTree *InlineTree::build_inline_tree_for_callee( ciMethod* callee_method, J
}
int max_inline_level_adjust = 0;
if (caller_jvms->method() != NULL) {
- if (caller_jvms->method()->is_compiled_lambda_form())
+ if (caller_jvms->method()->is_compiled_lambda_form()) {
max_inline_level_adjust += 1; // don't count actions in MH or indy adapter frames
- else if (callee_method->is_method_handle_intrinsic() ||
- callee_method->is_compiled_lambda_form()) {
- max_inline_level_adjust += 1; // don't count method handle calls from java.lang.invoke implem
+ } else if (callee_method->is_method_handle_intrinsic() ||
+ callee_method->is_compiled_lambda_form()) {
+ max_inline_level_adjust += 1; // don't count method handle calls from java.lang.invoke implementation
}
if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) {
CompileTask::print_inline_indent(inline_level());
diff --git a/src/share/vm/opto/c2_globals.hpp b/src/share/vm/opto/c2_globals.hpp
index adeee7a83..fe1834bcd 100644
--- a/src/share/vm/opto/c2_globals.hpp
+++ b/src/share/vm/opto/c2_globals.hpp
@@ -205,6 +205,9 @@
notproduct(bool, TraceProfileTripCount, false, \
"Trace profile loop trip count information") \
\
+ product(bool, UseCountedLoopSafepoints, false, \
+ "Force counted loops to keep a safepoint") \
+ \
product(bool, UseLoopPredicate, true, \
"Generate a predicate to select fast/slow loop versions") \
\
@@ -669,6 +672,9 @@
product_pd(bool, TrapBasedRangeChecks, \
"Generate code for range checks that uses a cmp and trap " \
"instruction raising SIGTRAP. Used on PPC64.") \
+ \
+ develop(bool, RenumberLiveNodes, true, \
+ "Renumber live nodes") \
C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
diff --git a/src/share/vm/opto/c2compiler.cpp b/src/share/vm/opto/c2compiler.cpp
index 977fa878f..87932d268 100644
--- a/src/share/vm/opto/c2compiler.cpp
+++ b/src/share/vm/opto/c2compiler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,25 +25,17 @@
#include "precompiled.hpp"
#include "opto/c2compiler.hpp"
#include "opto/runtime.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined AD_MD_HPP
+# include AD_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "adfiles/ad_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/ad_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif
diff --git a/src/share/vm/opto/callGenerator.cpp b/src/share/vm/opto/callGenerator.cpp
index 96025cb60..f715fd2a8 100644
--- a/src/share/vm/opto/callGenerator.cpp
+++ b/src/share/vm/opto/callGenerator.cpp
@@ -859,7 +859,8 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
// Parse::do_call())
target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
target, receiver_type, is_virtual,
- call_does_dispatch, vtable_index); // out-parameters
+ call_does_dispatch, vtable_index, // out-parameters
+ /*check_access=*/false);
// We lack profiling at this call but type speculation may
// provide us with a type
speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
diff --git a/src/share/vm/opto/callnode.cpp b/src/share/vm/opto/callnode.cpp
index 365b19d2a..a99af8089 100644
--- a/src/share/vm/opto/callnode.cpp
+++ b/src/share/vm/opto/callnode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "compiler/compileLog.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/oopMap.hpp"
#include "opto/callGenerator.hpp"
@@ -1670,6 +1671,9 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// The lock could be marked eliminated by lock coarsening
// code during first IGVN before EA. Replace coarsened flag
// to eliminate all associated locks/unlocks.
+#ifdef ASSERT
+ this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1");
+#endif
this->set_non_esc_obj();
return result;
}
@@ -1731,6 +1735,9 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
AbstractLockNode* lock = lock_ops.at(i);
// Mark it eliminated by coarsening and update any counters
+#ifdef ASSERT
+ lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened");
+#endif
lock->set_coarsened();
}
} else if (ctrl->is_Region() &&
@@ -1749,16 +1756,33 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
//=============================================================================
bool LockNode::is_nested_lock_region() {
+ return is_nested_lock_region(NULL);
+}
+
+// c is used for access to the compilation log; no logging if NULL
+bool LockNode::is_nested_lock_region(Compile * c) {
BoxLockNode* box = box_node()->as_BoxLock();
int stk_slot = box->stack_slot();
- if (stk_slot <= 0)
+ if (stk_slot <= 0) {
+#ifdef ASSERT
+ this->log_lock_optimization(c, "eliminate_lock_INLR_1");
+#endif
return false; // External lock or it is not Box (Phi node).
+ }
// Ignore complex cases: merged locks or multiple locks.
Node* obj = obj_node();
LockNode* unique_lock = NULL;
- if (!box->is_simple_lock_region(&unique_lock, obj) ||
- (unique_lock != this)) {
+ if (!box->is_simple_lock_region(&unique_lock, obj)) {
+#ifdef ASSERT
+ this->log_lock_optimization(c, "eliminate_lock_INLR_2a");
+#endif
+ return false;
+ }
+ if (unique_lock != this) {
+#ifdef ASSERT
+ this->log_lock_optimization(c, "eliminate_lock_INLR_2b");
+#endif
return false;
}
@@ -1778,6 +1802,9 @@ bool LockNode::is_nested_lock_region() {
}
}
}
+#ifdef ASSERT
+ this->log_lock_optimization(c, "eliminate_lock_INLR_3");
+#endif
return false;
}
@@ -1809,8 +1836,40 @@ Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// The lock could be marked eliminated by lock coarsening
// code during first IGVN before EA. Replace coarsened flag
// to eliminate all associated locks/unlocks.
+#ifdef ASSERT
+ this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
+#endif
this->set_non_esc_obj();
}
}
return result;
}
+
+const char * AbstractLockNode::kind_as_string() const {
+ return is_coarsened() ? "coarsened" :
+ is_nested() ? "nested" :
+ is_non_esc_obj() ? "non_escaping" :
+ "?";
+}
+
+void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag) const {
+ if (C == NULL) {
+ return;
+ }
+ CompileLog* log = C->log();
+ if (log != NULL) {
+ log->begin_head("%s lock='%d' compile_id='%d' class_id='%s' kind='%s'",
+ tag, is_Lock(), C->compile_id(),
+ is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
+ kind_as_string());
+ log->stamp();
+ log->end_head();
+ JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
+ while (p != NULL) {
+ log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
+ p = p->caller();
+ }
+ log->tail(tag);
+ }
+}
+
diff --git a/src/share/vm/opto/callnode.hpp b/src/share/vm/opto/callnode.hpp
index 6eb78b0cb..26f5c6ba7 100644
--- a/src/share/vm/opto/callnode.hpp
+++ b/src/share/vm/opto/callnode.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -985,6 +985,9 @@ public:
bool is_coarsened() const { return (_kind == Coarsened); }
bool is_nested() const { return (_kind == Nested); }
+ const char * kind_as_string() const;
+ void log_lock_optimization(Compile* c, const char * tag) const;
+
void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
void set_coarsened() { _kind = Coarsened; set_eliminated_lock_counter(); }
void set_nested() { _kind = Nested; set_eliminated_lock_counter(); }
@@ -1045,15 +1048,24 @@ public:
}
bool is_nested_lock_region(); // Is this Lock nested?
+ bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};
//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
+private:
+#ifdef ASSERT
+ JVMState* const _dbg_jvms; // Pointer to list of JVM State objects
+#endif
public:
virtual int Opcode() const;
virtual uint size_of() const; // Size is bigger
- UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
+ UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
+#ifdef ASSERT
+ , _dbg_jvms(NULL)
+#endif
+ {
init_class_id(Class_Unlock);
init_flags(Flag_is_macro);
C->add_macro_node(this);
@@ -1061,6 +1073,14 @@ public:
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
// unlock is never a safepoint
virtual bool guaranteed_safepoint() { return false; }
+#ifdef ASSERT
+ void set_dbg_jvms(JVMState* s) {
+ *(JVMState**)&_dbg_jvms = s; // override const attribute in the accessor
+ }
+ JVMState* dbg_jvms() const { return _dbg_jvms; }
+#else
+ JVMState* dbg_jvms() const { return NULL; }
+#endif
};
#endif // SHARE_VM_OPTO_CALLNODE_HPP
diff --git a/src/share/vm/opto/cfgnode.cpp b/src/share/vm/opto/cfgnode.cpp
index 25223035f..85c6d5198 100644
--- a/src/share/vm/opto/cfgnode.cpp
+++ b/src/share/vm/opto/cfgnode.cpp
@@ -791,7 +791,7 @@ PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) cons
Compile *C = igvn->C;
Arena *a = Thread::current()->resource_area();
Node_Array node_map = new Node_Array(a);
- Node_Stack stack(a, C->unique() >> 4);
+ Node_Stack stack(a, C->live_nodes() >> 4);
PhiNode *nphi = slice_memory(at);
igvn->register_new_node_with_optimizer( nphi );
node_map.map(_idx, nphi);
diff --git a/src/share/vm/opto/chaitin.cpp b/src/share/vm/opto/chaitin.cpp
index e54d5b674..59306506b 100644
--- a/src/share/vm/opto/chaitin.cpp
+++ b/src/share/vm/opto/chaitin.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -575,6 +575,9 @@ void PhaseChaitin::Register_Allocate() {
// Peephole remove copies
post_allocate_copy_removal();
+ // Merge multidefs if multiple defs representing the same value are used in a single block.
+ merge_multidefs();
+
#ifdef ASSERT
// Verify the graph after RA.
verify(&live_arena);
@@ -837,7 +840,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
case Op_RegD:
lrg.set_num_regs(2);
// Define platform specific register pressure
-#if defined(SPARC) || defined(ARM)
+#if defined(SPARC) || defined(ARM32)
lrg.set_reg_pressure(2);
#elif defined(IA32)
if( ireg == Op_RegL ) {
diff --git a/src/share/vm/opto/chaitin.hpp b/src/share/vm/opto/chaitin.hpp
index b188cc9bb..de6d443cd 100644
--- a/src/share/vm/opto/chaitin.hpp
+++ b/src/share/vm/opto/chaitin.hpp
@@ -578,6 +578,32 @@ private:
// Extend the node to LRG mapping
void add_reference( const Node *node, const Node *old_node);
+ // Record the first use of a def in the block for a register.
+ class RegDefUse {
+ Node* _def;
+ Node* _first_use;
+ public:
+ RegDefUse() : _def(NULL), _first_use(NULL) { }
+ Node* def() const { return _def; }
+ Node* first_use() const { return _first_use; }
+
+ void update(Node* def, Node* use) {
+ if (_def != def) {
+ _def = def;
+ _first_use = use;
+ }
+ }
+ void clear() {
+ _def = NULL;
+ _first_use = NULL;
+ }
+ };
+ typedef GrowableArray<RegDefUse> RegToDefUseMap;
+ int possibly_merge_multidef(Node *n, uint k, Block *block, RegToDefUseMap& reg2defuse);
+
+ // Merge nodes that are a part of a multidef lrg and produce the same value within a block.
+ void merge_multidefs();
+
private:
static int _final_loads, _final_stores, _final_copies, _final_memoves;
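
A sketch of how the new RegToDefUseMap is presumably consumed by merge_multidefs(): while scanning a block, each register remembers the def currently live in it, and a repeated def of the same value marks the later instruction as a merge candidate (toy int ids stand in for Node*):

  #include <vector>

  struct RegDefUse {
    int def, first_use;
    RegDefUse() : def(-1), first_use(-1) {}
    void update(int d, int u) {
      if (def != d) { def = d; first_use = u; }
    }
  };

  struct Instr { int reg; int def_id; };  // writes value def_id into reg

  // Returns indices of instructions whose value is already live in their
  // target register, i.e. candidates for merging.
  std::vector<size_t> merge_candidates(const std::vector<Instr>& block,
                                       size_t num_regs) {
    std::vector<RegDefUse> reg2defuse(num_regs);
    std::vector<size_t> out;
    for (size_t i = 0; i < block.size(); i++) {
      RegDefUse& r = reg2defuse[block[i].reg];
      if (r.def == block[i].def_id) {
        out.push_back(i);  // same value redefined in the same register
      } else {
        r.update(block[i].def_id, (int)i);
      }
    }
    return out;
  }
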
diff --git a/src/share/vm/opto/classes.hpp b/src/share/vm/opto/classes.hpp
index 54a63db1a..b4fbbacca 100644
--- a/src/share/vm/opto/classes.hpp
+++ b/src/share/vm/opto/classes.hpp
@@ -199,6 +199,7 @@ macro(NeverBranch)
macro(Opaque1)
macro(Opaque2)
macro(Opaque3)
+macro(ProfileBoolean)
macro(OrI)
macro(OrL)
macro(OverflowAddI)
diff --git a/src/share/vm/opto/compile.cpp b/src/share/vm/opto/compile.cpp
index 4d2050c5a..9473a62a3 100644
--- a/src/share/vm/opto/compile.cpp
+++ b/src/share/vm/opto/compile.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -67,25 +67,17 @@
#include "runtime/timer.hpp"
#include "trace/tracing.hpp"
#include "utilities/copy.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined AD_MD_HPP
+# include AD_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "adfiles/ad_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/ad_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif
@@ -335,7 +327,7 @@ static inline bool not_a_node(const Node* n) {
// Use breadth-first pass that records state in a Unique_Node_List,
// recursive traversal is slower.
void Compile::identify_useful_nodes(Unique_Node_List &useful) {
- int estimated_worklist_size = unique();
+ int estimated_worklist_size = live_nodes();
useful.map( estimated_worklist_size, NULL ); // preallocate space
// Initialize worklist
@@ -2101,6 +2093,20 @@ void Compile::Optimize() {
// so keep only the actual candidates for optimizations.
cleanup_expensive_nodes(igvn);
+ if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
+ NOT_PRODUCT(Compile::TracePhase t2("", &_t_renumberLive, TimeCompiler);)
+ initial_gvn()->replace_with(&igvn);
+ for_igvn()->clear();
+ Unique_Node_List new_worklist(C->comp_arena());
+ {
+ ResourceMark rm;
+ PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
+ }
+ set_for_igvn(&new_worklist);
+ igvn = PhaseIterGVN(initial_gvn());
+ igvn.optimize();
+ }
+
// Perform escape analysis
if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
if (has_loops()) {
@@ -3083,6 +3089,7 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
default:
assert( !n->is_Call(), "" );
assert( !n->is_Mem(), "" );
+ assert( nop != Op_ProfileBoolean, "should be eliminated during IGVN");
break;
}
@@ -3219,8 +3226,8 @@ bool Compile::final_graph_reshaping() {
Final_Reshape_Counts frc;
// Visit everybody reachable!
- // Allocate stack of size C->unique()/2 to avoid frequent realloc
- Node_Stack nstack(unique() >> 1);
+ // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
+ Node_Stack nstack(live_nodes() >> 1);
final_graph_reshaping_walk(nstack, root(), frc);
// Check for unreachable (from below) code (i.e., infinite loops).
diff --git a/src/share/vm/opto/compile.hpp b/src/share/vm/opto/compile.hpp
index 2cc3c811c..570c99ec0 100644
--- a/src/share/vm/opto/compile.hpp
+++ b/src/share/vm/opto/compile.hpp
@@ -861,9 +861,11 @@ class Compile : public Phase {
ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
ciKlass* holder, ciMethod* callee,
const TypeOopPtr* receiver_type, bool is_virtual,
- bool &call_does_dispatch, int &vtable_index);
+ bool &call_does_dispatch, int &vtable_index,
+ bool check_access = true);
ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
- ciMethod* callee, const TypeOopPtr* receiver_type);
+ ciMethod* callee, const TypeOopPtr* receiver_type,
+ bool check_access = true);
// Report if there were too many traps at a current method and bci.
// Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
diff --git a/src/share/vm/opto/connode.cpp b/src/share/vm/opto/connode.cpp
index fe12b4f49..bfebd1c5b 100644
--- a/src/share/vm/opto/connode.cpp
+++ b/src/share/vm/opto/connode.cpp
@@ -1338,6 +1338,30 @@ uint Opaque2Node::cmp( const Node &n ) const {
return (&n == this); // Always fail except on self
}
+//=============================================================================
+
+uint ProfileBooleanNode::hash() const { return NO_HASH; }
+uint ProfileBooleanNode::cmp( const Node &n ) const {
+ return (&n == this);
+}
+
+Node *ProfileBooleanNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+ if (can_reshape && _delay_removal) {
+ _delay_removal = false;
+ return this;
+ } else {
+ return NULL;
+ }
+}
+
+Node *ProfileBooleanNode::Identity( PhaseTransform *phase ) {
+ if (_delay_removal) {
+ return this;
+ } else {
+ assert(_consumed, "profile should be consumed before elimination");
+ return in(1);
+ }
+}
//------------------------------Value------------------------------------------
const Type *MoveL2DNode::Value( PhaseTransform *phase ) const {
diff --git a/src/share/vm/opto/connode.hpp b/src/share/vm/opto/connode.hpp
index 30692763d..555802835 100644
--- a/src/share/vm/opto/connode.hpp
+++ b/src/share/vm/opto/connode.hpp
@@ -669,6 +669,31 @@ public:
bool rtm_opt() const { return (_opt == RTM_OPT); }
};
+//------------------------------ProfileBooleanNode-------------------------------
+// A node that represents the value profile of a boolean during parsing.
+// Once parsing is over, the node goes away (during IGVN).
+// It is used to override branch frequencies from MDO (see has_injected_profile in parse2.cpp).
+class ProfileBooleanNode : public Node {
+ uint _false_cnt;
+ uint _true_cnt;
+ bool _consumed;
+ bool _delay_removal;
+ virtual uint hash() const ; // { return NO_HASH; }
+ virtual uint cmp( const Node &n ) const;
+ public:
+ ProfileBooleanNode(Node *n, uint false_cnt, uint true_cnt) : Node(0, n),
+ _false_cnt(false_cnt), _true_cnt(true_cnt), _delay_removal(true), _consumed(false) {}
+
+ uint false_count() const { return _false_cnt; }
+ uint true_count() const { return _true_cnt; }
+
+ void consume() { _consumed = true; }
+
+ virtual int Opcode() const;
+ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+ virtual Node *Identity(PhaseTransform *phase);
+ virtual const Type *bottom_type() const { return TypeInt::BOOL; }
+};
//----------------------PartialSubtypeCheckNode--------------------------------
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
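
ProfileBooleanNode above removes itself in two steps: the first Ideal() call during IGVN only clears _delay_removal (returning this reports progress, so the node is revisited), after which Identity() collapses the node to its input. A minimal model of that protocol with stand-in names:

  #include <cstddef>

  struct ToyNode {
    ToyNode* input;
    bool delay_removal;
    explicit ToyNode(ToyNode* in) : input(in), delay_removal(true) {}

    // First pass: consume the delay and report progress.
    ToyNode* Ideal() {
      if (delay_removal) { delay_removal = false; return this; }
      return NULL;  // nothing further to transform
    }
    // Afterwards the node is transparent.
    ToyNode* Identity() { return delay_removal ? this : input; }
  };
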
diff --git a/src/share/vm/opto/doCall.cpp b/src/share/vm/opto/doCall.cpp
index 960e61aaa..f4ed1aed1 100644
--- a/src/share/vm/opto/doCall.cpp
+++ b/src/share/vm/opto/doCall.cpp
@@ -956,13 +956,15 @@ void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
ciKlass* holder, ciMethod* callee,
const TypeOopPtr* receiver_type, bool is_virtual,
- bool& call_does_dispatch, int& vtable_index) {
+ bool& call_does_dispatch, int& vtable_index,
+ bool check_access) {
// Set default values for out-parameters.
call_does_dispatch = true;
vtable_index = Method::invalid_vtable_index;
// Choose call strategy.
- ciMethod* optimized_virtual_method = optimize_inlining(caller, bci, klass, callee, receiver_type);
+ ciMethod* optimized_virtual_method = optimize_inlining(caller, bci, klass, callee,
+ receiver_type, check_access);
// Has the call been sufficiently improved such that it is no longer virtual?
if (optimized_virtual_method != NULL) {
@@ -977,7 +979,8 @@ ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKl
// Identify possible target method and inlining style
ciMethod* Compile::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
- ciMethod* callee, const TypeOopPtr* receiver_type) {
+ ciMethod* callee, const TypeOopPtr* receiver_type,
+ bool check_access) {
// only use for virtual or interface calls
// If it is obviously final, do not bother to call find_monomorphic_target,
@@ -1017,7 +1020,7 @@ ciMethod* Compile::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass*
}
ciInstanceKlass* calling_klass = caller->holder();
- ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver);
+ ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver, check_access);
if (cha_monomorphic_target != NULL) {
assert(!cha_monomorphic_target->is_abstract(), "");
// Look at the method-receiver type. Does it add "too much information"?
diff --git a/src/share/vm/opto/domgraph.cpp b/src/share/vm/opto/domgraph.cpp
index a7fa05327..576506e1d 100644
--- a/src/share/vm/opto/domgraph.cpp
+++ b/src/share/vm/opto/domgraph.cpp
@@ -505,8 +505,8 @@ void PhaseIdealLoop::Dominators() {
// Perform DFS search. Setup 'vertex' as DFS to vertex mapping. Setup
// 'semi' as vertex to DFS mapping. Set 'parent' to DFS parent.
int NTarjan::DFS( NTarjan *ntarjan, VectorSet &visited, PhaseIdealLoop *pil, uint *dfsorder) {
- // Allocate stack of size C->unique()/8 to avoid frequent realloc
- GrowableArray <Node *> dfstack(pil->C->unique() >> 3);
+ // Allocate stack of size C->live_nodes()/8 to avoid frequent realloc
+ GrowableArray <Node *> dfstack(pil->C->live_nodes() >> 3);
Node *b = pil->C->root();
int dfsnum = 1;
dfsorder[b->_idx] = dfsnum; // Cache parent's dfsnum for a later use
diff --git a/src/share/vm/opto/escape.cpp b/src/share/vm/opto/escape.cpp
index 384fe0810..b5e1f6b72 100644
--- a/src/share/vm/opto/escape.cpp
+++ b/src/share/vm/opto/escape.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -205,6 +205,11 @@ bool ConnectionGraph::compute_escape() {
_verify = false;
}
#endif
+ // Bytecode analyzer BCEscapeAnalyzer, used for Call nodes
+ // processing, calls to CI to resolve symbols (types, fields, methods)
+ // referenced in bytecode. During symbol resolution VM may throw
+ // an exception which CI cleans and converts to compilation failure.
+ if (C->failing()) return false;
// 2. Finish Graph construction by propagating references to all
// java objects through graph.
@@ -1789,6 +1794,9 @@ void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklis
// The lock could be marked eliminated by lock coarsening
// code during first IGVN before EA. Replace coarsened flag
// to eliminate all associated locks/unlocks.
+#ifdef ASSERT
+ alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
+#endif
alock->set_non_esc_obj();
}
}
@@ -3175,7 +3183,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
// Note 2: MergeMem may already contains instance memory slices added
// during find_inst_mem() call when memory nodes were processed above.
igvn->hash_delete(nmm);
- uint nslices = nmm->req();
+ uint nslices = MIN2(nmm->req(), new_index_start);
for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
Node* mem = nmm->in(i);
Node* cur = NULL;
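The C->failing() check added above follows a common C2 pattern: any phase that may record a compilation failure must be followed by an explicit check before work continues. A minimal standalone sketch of that pattern (illustrative names, not HotSpot's types):

    #include <cstdio>

    struct Compilation {
        const char* failure_reason = nullptr;
        bool failing() const { return failure_reason != nullptr; }
    };

    // Stand-in for BCEscapeAnalyzer: symbol resolution may fail, and the
    // framework records the cleaned-up exception as a compilation failure.
    static void analyze_bytecode(Compilation* C) {
        C->failure_reason = "unresolved symbol during bytecode analysis";
    }

    static bool compute_escape(Compilation* C) {
        analyze_bytecode(C);
        if (C->failing()) return false;  // bail out before graph construction
        return true;
    }

    int main() {
        Compilation C;
        std::printf("compute_escape: %s\n",
                    compute_escape(&C) ? "ok" : "bailed out");
        return 0;
    }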
diff --git a/src/share/vm/opto/gcm.cpp b/src/share/vm/opto/gcm.cpp
index 8c55b6520..c574998e1 100644
--- a/src/share/vm/opto/gcm.cpp
+++ b/src/share/vm/opto/gcm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,25 +35,17 @@
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined AD_MD_HPP
+# include AD_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "adfiles/ad_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/ad_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif
@@ -126,8 +118,8 @@ void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
- // Allocate node stack of size C->unique()+8 to avoid frequent realloc
- GrowableArray <Node *> spstack(C->unique() + 8);
+ // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
+ GrowableArray <Node *> spstack(C->live_nodes() + 8);
spstack.push(_root);
while (spstack.is_nonempty()) {
Node* node = spstack.pop();
@@ -1293,7 +1285,7 @@ void PhaseCFG::global_code_motion() {
visited.Clear();
Node_List stack(arena);
// Pre-grow the list
- stack.map((C->unique() >> 1) + 16, NULL);
+ stack.map((C->live_nodes() >> 1) + 16, NULL);
if (!schedule_early(visited, stack)) {
// Bailout without retry
C->record_method_not_compilable("early schedule failed");
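The include rewrite above (repeated below in lcm.cpp, locknode.hpp, and matcher.cpp) replaces a flat list of per-model #ifdef blocks with an #if/#elif chain that honors AD_MD_HPP first. As far as this patch shows, the intent is to let a port inject its generated ad-file without being enumerated in every consumer; a sketch of the mechanism, with a hypothetical compiler flag:

    // Hypothetical build flag (the real wiring lives in the makefiles this
    // patch also touches): -DAD_MD_HPP='"adfiles/ad_arm.hpp"'
    #if defined AD_MD_HPP
    # include AD_MD_HPP                  // port-supplied header wins
    #elif defined TARGET_ARCH_MODEL_x86_64
    # include "adfiles/ad_x86_64.hpp"    // fall back to the built-in model list
    #endif

This is presumably also why the arm and ppc_32 entries disappear from the chain: those ports can now come in through AD_MD_HPP.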
diff --git a/src/share/vm/opto/graphKit.cpp b/src/share/vm/opto/graphKit.cpp
index ca157ec91..4300aae00 100644
--- a/src/share/vm/opto/graphKit.cpp
+++ b/src/share/vm/opto/graphKit.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1452,16 +1452,18 @@ void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
// factory methods in "int adr_idx"
Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
int adr_idx,
- MemNode::MemOrd mo, bool require_atomic_access) {
+ MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency, bool require_atomic_access) {
assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
const TypePtr* adr_type = NULL; // debug-mode-only argument
debug_only(adr_type = C->get_adr_type(adr_idx));
Node* mem = memory(adr_idx);
Node* ld;
if (require_atomic_access && bt == T_LONG) {
- ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo);
+ ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo, control_dependency);
+ } else if (require_atomic_access && bt == T_DOUBLE) {
+ ld = LoadDNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo, control_dependency);
} else {
- ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo);
+ ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency);
}
ld = _gvn.transform(ld);
if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) {
@@ -1482,6 +1484,8 @@ Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
Node* st;
if (require_atomic_access && bt == T_LONG) {
st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
+ } else if (require_atomic_access && bt == T_DOUBLE) {
+ st = StoreDNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
} else {
st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
}
@@ -1980,6 +1984,11 @@ void GraphKit::uncommon_trap(int trap_request,
Deoptimization::trap_request_index(trap_request) < 0 &&
too_many_recompiles(reason)) {
// This BCI is causing too many recompilations.
+ if (C->log() != NULL) {
+ C->log()->elem("observe that='trap_action_change' reason='%s' from='%s' to='none'",
+ Deoptimization::trap_reason_name(reason),
+ Deoptimization::trap_action_name(action));
+ }
action = Deoptimization::Action_none;
trap_request = Deoptimization::make_trap_request(reason, action);
} else {
@@ -2742,7 +2751,7 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
Deoptimization::DeoptReason reason = spec_klass == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check;
// Make sure we haven't already deoptimized from this tactic.
- if (too_many_traps(reason))
+ if (too_many_traps(reason) || too_many_recompiles(reason))
return NULL;
// (No, this isn't a call, but it's enough like a virtual call
@@ -2764,8 +2773,7 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
&exact_obj);
{ PreserveJVMState pjvms(this);
set_control(slow_ctl);
- uncommon_trap(reason,
- Deoptimization::Action_maybe_recompile);
+ uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
}
if (safe_for_replace) {
replace_in_map(not_null_obj, exact_obj);
@@ -2793,8 +2801,8 @@ Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
if (type != NULL) {
Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
Deoptimization::DeoptReason null_reason = Deoptimization::Reason_null_check;
- if (!too_many_traps(null_reason) &&
- !too_many_traps(class_reason)) {
+ if (!too_many_traps(null_reason) && !too_many_recompiles(null_reason) &&
+ !too_many_traps(class_reason) && !too_many_recompiles(class_reason)) {
Node* not_null_obj = NULL;
// not_null is true if we know the object is not null and
// there's no need for a null check
@@ -2813,14 +2821,14 @@ Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
{
PreserveJVMState pjvms(this);
set_control(slow_ctl);
- uncommon_trap(class_reason,
- Deoptimization::Action_maybe_recompile);
+ uncommon_trap_exact(class_reason, Deoptimization::Action_maybe_recompile);
}
replace_in_map(not_null_obj, exact_obj);
obj = exact_obj;
}
} else {
- if (!too_many_traps(Deoptimization::Reason_null_assert)) {
+ if (!too_many_traps(Deoptimization::Reason_null_assert) &&
+ !too_many_recompiles(Deoptimization::Reason_null_assert)) {
Node* exact_obj = null_assert(obj);
replace_in_map(obj, exact_obj);
obj = exact_obj;
@@ -3211,6 +3219,9 @@ void GraphKit::shared_unlock(Node* box, Node* obj) {
const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
UnlockNode *unlock = new (C) UnlockNode(C, tf);
+#ifdef ASSERT
+ unlock->set_dbg_jvms(sync_jvms());
+#endif
uint raw_idx = Compile::AliasIdxRaw;
unlock->init_req( TypeFunc::Control, control() );
unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
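The make_load changes thread a new LoadNode::ControlDependency argument between the memory ordering and the atomic-access flag. The call-site shape, echoing the unsafe-access site later in this patch (argument values here illustrative):

    // A load that must not float above its guarding control:
    Node* p = make_load(control(), adr, value_type, type, adr_type,
                        MemNode::acquire,   // memory ordering
                        LoadNode::Pinned,   // does not depend only on its test
                        true);              // require_atomic_access

Callers that don't care keep the old behavior, since the parameter defaults to LoadNode::DependsOnlyOnTest.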
diff --git a/src/share/vm/opto/graphKit.hpp b/src/share/vm/opto/graphKit.hpp
index 2fcd0d2f4..a5437e780 100644
--- a/src/share/vm/opto/graphKit.hpp
+++ b/src/share/vm/opto/graphKit.hpp
@@ -516,21 +516,24 @@ class GraphKit : public Phase {
// adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
// of volatile fields.
Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
- MemNode::MemOrd mo, bool require_atomic_access = false) {
+ MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
+ bool require_atomic_access = false) {
// This version computes alias_index from bottom_type
return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
- mo, require_atomic_access);
+ mo, control_dependency, require_atomic_access);
}
Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
- MemNode::MemOrd mo, bool require_atomic_access = false) {
+ MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
+ bool require_atomic_access = false) {
// This version computes alias_index from an address type
assert(adr_type != NULL, "use other make_load factory");
return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
- mo, require_atomic_access);
+ mo, control_dependency, require_atomic_access);
}
// This is the base version which is given an alias index.
Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
- MemNode::MemOrd mo, bool require_atomic_access = false);
+ MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
+ bool require_atomic_access = false);
// Create & transform a StoreNode and store the effect into the
// parser's memory state.
@@ -708,6 +711,15 @@ class GraphKit : public Phase {
klass, reason_string, must_throw, keep_exact_action);
}
+ // Bail out to the interpreter and keep exact action (avoid switching to Action_none).
+ void uncommon_trap_exact(Deoptimization::DeoptReason reason,
+ Deoptimization::DeoptAction action,
+ ciKlass* klass = NULL, const char* reason_string = NULL,
+ bool must_throw = false) {
+ uncommon_trap(Deoptimization::make_trap_request(reason, action),
+ klass, reason_string, must_throw, /*keep_exact_action=*/true);
+ }
+
// SP when bytecode needs to be reexecuted.
virtual int reexecute_sp() { return sp(); }
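uncommon_trap_exact() differs from uncommon_trap() in exactly one way: it pins keep_exact_action to true, so the requested action survives the downgrade to Action_none that uncommon_trap() applies when a BCI has recompiled too often (see the graphKit.cpp hunk above). Side by side:

    // May be downgraded to Action_none for a too-often-recompiled BCI:
    uncommon_trap(reason, Deoptimization::Action_maybe_recompile);

    // Always deoptimizes with the requested action:
    uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);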
diff --git a/src/share/vm/opto/lcm.cpp b/src/share/vm/opto/lcm.cpp
index 8398bb3d2..56cde6f61 100644
--- a/src/share/vm/opto/lcm.cpp
+++ b/src/share/vm/opto/lcm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,25 +30,17 @@
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/runtime.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined AD_MD_HPP
+# include AD_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "adfiles/ad_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/ad_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif
@@ -437,8 +429,15 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
old_tst->last_out(i2)->set_req(0, nul_chk);
// Clean-up any dead code
- for (uint i3 = 0; i3 < old_tst->req(); i3++)
+ for (uint i3 = 0; i3 < old_tst->req(); i3++) {
+ Node* in = old_tst->in(i3);
old_tst->set_req(i3, NULL);
+ if (in->outcnt() == 0) {
+ // Remove dead input node
+ in->disconnect_inputs(NULL, C);
+ block->find_remove(in);
+ }
+ }
latency_from_uses(nul_chk);
latency_from_uses(best);
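The loop above no longer just detaches the old test's inputs; an input whose last use went away is disconnected and removed from its block. A standalone analogy of the use-count bookkeeping (illustrative structure, not HotSpot's Node):

    #include <cstdio>
    #include <vector>

    struct Node { int id; int outcnt; };

    int main() {
        std::vector<Node*> block;
        Node cmp = {42, 1};        // the old test was its only user
        block.push_back(&cmp);
        cmp.outcnt--;              // old_tst->set_req(i3, NULL) drops the edge
        if (cmp.outcnt == 0) {     // mirrors disconnect_inputs + find_remove
            block.pop_back();
            std::printf("removed dead input %d from its block\n", cmp.id);
        }
        return 0;
    }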
diff --git a/src/share/vm/opto/library_call.cpp b/src/share/vm/opto/library_call.cpp
index 9278eb7ee..b234d8bb6 100644
--- a/src/share/vm/opto/library_call.cpp
+++ b/src/share/vm/opto/library_call.cpp
@@ -31,6 +31,7 @@
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/cfgnode.hpp"
+#include "opto/connode.hpp"
#include "opto/idealKit.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/mulnode.hpp"
@@ -323,6 +324,8 @@ class LibraryCallKit : public GraphKit {
bool inline_updateBytesCRC32();
bool inline_updateByteBufferCRC32();
bool inline_multiplyToLen();
+
+ bool inline_profileBoolean();
};
@@ -934,6 +937,9 @@ bool LibraryCallKit::try_to_inline(int predicate) {
case vmIntrinsics::_updateByteBufferCRC32:
return inline_updateByteBufferCRC32();
+ case vmIntrinsics::_profileBoolean:
+ return inline_profileBoolean();
+
default:
// If you get here, it may be that someone has added a new intrinsic
// to the list in vmSymbols.hpp without implementing it here.
@@ -2663,7 +2669,9 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
if (!is_store) {
MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
- Node* p = make_load(control(), adr, value_type, type, adr_type, mo, is_volatile);
+ // Unsafe loads may depend on conditions other than the one that
+ // guards them, so pin the Load node to keep it valid
+ Node* p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile);
// load value
switch (type) {
case T_BOOLEAN:
@@ -6032,7 +6040,7 @@ Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * field
}
// Build the load.
MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
- Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, is_vol);
+ Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
// If reference is volatile, prevent following memory ops from
// floating up past the volatile read. Also prevents commoning
// another volatile read.
@@ -6544,3 +6552,79 @@ Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate)
return instof_false; // even if it is NULL
}
+
+bool LibraryCallKit::inline_profileBoolean() {
+ Node* counts = argument(1);
+ const TypeAryPtr* ary = NULL;
+ ciArray* aobj = NULL;
+ if (counts->is_Con()
+ && (ary = counts->bottom_type()->isa_aryptr()) != NULL
+ && (aobj = ary->const_oop()->as_array()) != NULL
+ && (aobj->length() == 2)) {
+ // Profile is int[2] where [0] and [1] correspond to false and true value occurrences respectively.
+ jint false_cnt = aobj->element_value(0).as_int();
+ jint true_cnt = aobj->element_value(1).as_int();
+
+ if (C->log() != NULL) {
+ C->log()->elem("observe source='profileBoolean' false='%d' true='%d'",
+ false_cnt, true_cnt);
+ }
+
+ if (false_cnt + true_cnt == 0) {
+ // According to the profile, never executed.
+ uncommon_trap_exact(Deoptimization::Reason_intrinsic,
+ Deoptimization::Action_reinterpret);
+ return true;
+ }
+
+ // result is a boolean (0 or 1) and its profile (false_cnt & true_cnt)
+ // records the number of occurrences of each value.
+ Node* result = argument(0);
+ if (false_cnt == 0 || true_cnt == 0) {
+ // According to the profile, one value has never been seen.
+ int expected_val = (false_cnt == 0) ? 1 : 0;
+
+ Node* cmp = _gvn.transform(new (C) CmpINode(result, intcon(expected_val)));
+ Node* test = _gvn.transform(new (C) BoolNode(cmp, BoolTest::eq));
+
+ IfNode* check = create_and_map_if(control(), test, PROB_ALWAYS, COUNT_UNKNOWN);
+ Node* fast_path = _gvn.transform(new (C) IfTrueNode(check));
+ Node* slow_path = _gvn.transform(new (C) IfFalseNode(check));
+
+ { // Slow path: uncommon trap for the never-seen value, then reexecute
+ // MethodHandleImpl::profileBoolean() to bump the count, so the JIT knows
+ // the value has been seen at least once.
+ PreserveJVMState pjvms(this);
+ PreserveReexecuteState preexecs(this);
+ jvms()->set_should_reexecute(true);
+
+ set_control(slow_path);
+ set_i_o(i_o());
+
+ uncommon_trap_exact(Deoptimization::Reason_intrinsic,
+ Deoptimization::Action_reinterpret);
+ }
+ // The guard for the never-seen value enables sharpening the result to
+ // a constant, which lets branches on the same value be eliminated
+ // later on.
+ set_control(fast_path);
+ result = intcon(expected_val);
+ }
+ // Stop profiling.
+ // MethodHandleImpl::profileBoolean() has profiling logic in its bytecode.
+ // By replacing the method body with profile data (represented as a
+ // ProfileBooleanNode at the IR level) we effectively disable profiling.
+ // This enables full-speed execution once optimized code is generated.
+ Node* profile = _gvn.transform(new (C) ProfileBooleanNode(result, false_cnt, true_cnt));
+ C->record_for_igvn(profile);
+ set_result(profile);
+ return true;
+ } else {
+ // Continue profiling.
+ // Profile data isn't available at the moment, so execute the method's bytecode version.
+ // Usually, when GWT LambdaForms are profiled it means that a stand-alone nmethod
+ // is being compiled and counters aren't available, since the corresponding MethodHandle
+ // isn't a compile-time constant.
+ return false;
+ }
+}
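The new intrinsic folds MethodHandleImpl.profileBoolean() based on the two counters. A standalone sketch of the decision table (illustrative; mirrors the branches above):

    #include <cstdio>

    enum Action {
        TrapNeverExecuted,     // false_cnt + true_cnt == 0: trap and reexecute
        GuardAndConstantFold,  // one count is 0: guard, then sharpen to a constant
        StopProfiling          // both values seen: emit ProfileBooleanNode
    };

    static Action profile_boolean(int false_cnt, int true_cnt) {
        if (false_cnt + true_cnt == 0) return TrapNeverExecuted;
        if (false_cnt == 0 || true_cnt == 0) return GuardAndConstantFold;
        return StopProfiling;
    }

    int main() {
        std::printf("%d %d %d\n",
                    profile_boolean(0, 0),   // never reached
                    profile_boolean(0, 7),   // always true so far
                    profile_boolean(3, 4));  // both seen
        return 0;
    }

The remaining case — counts not constant because the MethodHandle isn't a compile-time constant — returns false and leaves the bytecode version in place.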
diff --git a/src/share/vm/opto/locknode.hpp b/src/share/vm/opto/locknode.hpp
index 8bd6f35af..6a5276801 100644
--- a/src/share/vm/opto/locknode.hpp
+++ b/src/share/vm/opto/locknode.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,25 +28,17 @@
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/subnode.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined AD_MD_HPP
+# include AD_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "adfiles/ad_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/ad_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif
diff --git a/src/share/vm/opto/loopPredicate.cpp b/src/share/vm/opto/loopPredicate.cpp
index c9245b06a..97df6ea6d 100644
--- a/src/share/vm/opto/loopPredicate.cpp
+++ b/src/share/vm/opto/loopPredicate.cpp
@@ -438,7 +438,13 @@ class Invariance : public StackObj {
}
}
if (all_inputs_invariant) {
- _invariant.set(n->_idx); // I am a invariant too
+ // If n's control is a predicate that was moved out of the
+ // loop, it was marked invariant but n is only invariant if
+ // it depends only on that test. Otherwise, unless that test
+ // is out of the loop, it's not invariant.
+ if (n->is_CFG() || n->depends_only_on_test() || n->in(0) == NULL || !_phase->is_member(_lpt, n->in(0))) {
+ _invariant.set(n->_idx); // I am an invariant too
+ }
}
} else { // process next input
_stack.set_index(idx + 1);
diff --git a/src/share/vm/opto/loopTransform.cpp b/src/share/vm/opto/loopTransform.cpp
index 140873b01..a6a7ab088 100644
--- a/src/share/vm/opto/loopTransform.cpp
+++ b/src/share/vm/opto/loopTransform.cpp
@@ -1739,6 +1739,12 @@ bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale,
}
return true;
}
+ if (is_scaled_iv(exp->in(2), iv, p_scale)) {
+ if (p_offset != NULL) {
+ *p_offset = exp->in(1);
+ }
+ return true;
+ }
if (exp->in(2)->is_Con()) {
Node* offset2 = NULL;
if (depth < 2 &&
@@ -1821,7 +1827,10 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
// Find the pre-loop limit; we will expand its iterations to
// not ever trip low tests.
Node *p_f = iffm->in(0);
- assert(p_f->Opcode() == Op_IfFalse, "");
+ // The pre-loop may have been optimized out
+ if (p_f->Opcode() != Op_IfFalse) {
+ return;
+ }
CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
assert(pre_end->loopnode()->is_pre_loop(), "");
Node *pre_opaq1 = pre_end->limit();
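is_scaled_iv_plus_offset() previously recognized scale*iv in exp->in(1) with the offset in exp->in(2); the new branch handles the mirrored operand order. A standalone arithmetic analogy (integers standing in for nodes):

    #include <cstdio>

    static bool is_scaled_iv(long e, long iv, long* scale) {
        if (iv != 0 && e % iv == 0) { *scale = e / iv; return true; }
        return false;
    }

    static bool is_scaled_iv_plus_offset(long in1, long in2, long iv,
                                         long* scale, long* offset) {
        if (is_scaled_iv(in1, iv, scale)) { *offset = in2; return true; }
        if (is_scaled_iv(in2, iv, scale)) { *offset = in1; return true; } // new
        return false;
    }

    int main() {
        long s = 0, o = 0;
        // exp = 5 + 3*iv with iv = 4: the IV term sits in in(2)
        bool ok = is_scaled_iv_plus_offset(5, 12, 4, &s, &o);
        std::printf("matched=%d scale=%ld offset=%ld\n", ok, s, o);
        return 0;
    }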
diff --git a/src/share/vm/opto/loopnode.cpp b/src/share/vm/opto/loopnode.cpp
index d858012d2..1e44b80b5 100644
--- a/src/share/vm/opto/loopnode.cpp
+++ b/src/share/vm/opto/loopnode.cpp
@@ -436,6 +436,19 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
return false; // cyclic loop or this loop trips only once
}
+ if (phi_incr != NULL) {
+ // Check if there is a possibility of the IV overflowing after the first increment
+ if (stride_con > 0) {
+ if (init_t->_hi > max_jint - stride_con) {
+ return false;
+ }
+ } else {
+ if (init_t->_lo < min_jint - stride_con) {
+ return false;
+ }
+ }
+ }
+
// =================================================
// ---- SUCCESS! Found A Trip-Counted Loop! -----
//
@@ -672,14 +685,16 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
} // LoopLimitCheck
- // Check for SafePoint on backedge and remove
- Node *sfpt = x->in(LoopNode::LoopBackControl);
- if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) {
- lazy_replace( sfpt, iftrue );
- if (loop->_safepts != NULL) {
- loop->_safepts->yank(sfpt);
+ if (!UseCountedLoopSafepoints) {
+ // Check for SafePoint on backedge and remove
+ Node *sfpt = x->in(LoopNode::LoopBackControl);
+ if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) {
+ lazy_replace( sfpt, iftrue );
+ if (loop->_safepts != NULL) {
+ loop->_safepts->yank(sfpt);
+ }
+ loop->_tail = iftrue;
}
- loop->_tail = iftrue;
}
// Build a canonical trip test.
@@ -768,12 +783,14 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
lazy_replace( x, l );
set_idom(l, init_control, dom_depth(x));
- // Check for immediately preceding SafePoint and remove
- Node *sfpt2 = le->in(0);
- if (sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2)) {
- lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control));
- if (loop->_safepts != NULL) {
- loop->_safepts->yank(sfpt2);
+ if (!UseCountedLoopSafepoints) {
+ // Check for immediately preceding SafePoint and remove
+ Node *sfpt2 = le->in(0);
+ if (sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2)) {
+ lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control));
+ if (loop->_safepts != NULL) {
+ loop->_safepts->yank(sfpt2);
+ }
}
}
@@ -1793,6 +1810,37 @@ void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) {
}
}
+void IdealLoopTree::remove_safepoints(PhaseIdealLoop* phase, bool keep_one) {
+ Node* keep = NULL;
+ if (keep_one) {
+ // Look for a safepoint on the idom-path.
+ for (Node* i = tail(); i != _head; i = phase->idom(i)) {
+ if (i->Opcode() == Op_SafePoint && phase->get_loop(i) == this) {
+ keep = i;
+ break; // Found one
+ }
+ }
+ }
+
+ // Don't remove any safepoints if it is requested to keep a single safepoint and
+ // no safepoint was found on the idom-path. It is not safe to remove any safepoint
+ // in this case since there is no safepoint dominating all paths in the loop body.
+ bool prune = !keep_one || keep != NULL;
+
+ // Delete other safepoints in this loop.
+ Node_List* sfpts = _safepts;
+ if (prune && sfpts != NULL) {
+ assert(keep == NULL || keep->Opcode() == Op_SafePoint, "not safepoint");
+ for (uint i = 0; i < sfpts->size(); i++) {
+ Node* n = sfpts->at(i);
+ assert(phase->get_loop(n) == this, "");
+ if (n != keep && phase->is_deleteable_safept(n)) {
+ phase->lazy_replace(n, n->in(TypeFunc::Control));
+ }
+ }
+ }
+}
+
//------------------------------counted_loop-----------------------------------
// Convert to counted loops where possible
void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) {
@@ -1804,42 +1852,23 @@ void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) {
if (_head->is_CountedLoop() ||
phase->is_counted_loop(_head, this)) {
- _has_sfpt = 1; // Indicate we do not need a safepoint here
- // Look for safepoints to remove.
- Node_List* sfpts = _safepts;
- if (sfpts != NULL) {
- for (uint i = 0; i < sfpts->size(); i++) {
- Node* n = sfpts->at(i);
- assert(phase->get_loop(n) == this, "");
- if (phase->is_deleteable_safept(n)) {
- phase->lazy_replace(n, n->in(TypeFunc::Control));
- }
- }
+ if (!UseCountedLoopSafepoints) {
+ // Indicate we do not need a safepoint here
+ _has_sfpt = 1;
}
+ // Remove safepoints
+ bool keep_one_sfpt = !(_has_call || _has_sfpt);
+ remove_safepoints(phase, keep_one_sfpt);
+
// Look for induction variables
phase->replace_parallel_iv(this);
} else if (_parent != NULL && !_irreducible) {
- // Not a counted loop.
- // Look for a safepoint on the idom-path.
- Node* sfpt = tail();
- for (; sfpt != _head; sfpt = phase->idom(sfpt)) {
- if (sfpt->Opcode() == Op_SafePoint && phase->get_loop(sfpt) == this)
- break; // Found one
- }
- // Delete other safepoints in this loop.
- Node_List* sfpts = _safepts;
- if (sfpts != NULL && sfpt != _head && sfpt->Opcode() == Op_SafePoint) {
- for (uint i = 0; i < sfpts->size(); i++) {
- Node* n = sfpts->at(i);
- assert(phase->get_loop(n) == this, "");
- if (n != sfpt && phase->is_deleteable_safept(n)) {
- phase->lazy_replace(n, n->in(TypeFunc::Control));
- }
- }
- }
+ // Not a counted loop. Keep one safepoint.
+ bool keep_one_sfpt = true;
+ remove_safepoints(phase, keep_one_sfpt);
}
// Recursively
@@ -1893,6 +1922,15 @@ void IdealLoopTree::dump_head( ) const {
if (cl->is_main_loop()) tty->print(" main");
if (cl->is_post_loop()) tty->print(" post");
}
+ if (_has_call) tty->print(" has_call");
+ if (_has_sfpt) tty->print(" has_sfpt");
+ if (_rce_candidate) tty->print(" rce");
+ if (_safepts != NULL && _safepts->size() > 0) {
+ tty->print(" sfpts={"); _safepts->dump_simple(); tty->print(" }");
+ }
+ if (_required_safept != NULL && _required_safept->size() > 0) {
+ tty->print(" req={"); _required_safept->dump_simple(); tty->print(" }");
+ }
tty->cr();
}
@@ -2217,7 +2255,7 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts)
// _nodes array holds the earliest legal controlling CFG node.
// Allocate stack with enough space to avoid frequent realloc
- int stack_size = (C->unique() >> 1) + 16; // (unique>>1)+16 from Java2D stats
+ int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats
Node_Stack nstack( a, stack_size );
visited.Clear();
@@ -2290,6 +2328,11 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts)
#endif
if (skip_loop_opts) {
+ // restore major progress flag
+ for (int i = 0; i < old_progress; i++) {
+ C->set_major_progress();
+ }
+
// Cleanup any modified bits
_igvn.optimize();
@@ -2673,7 +2716,7 @@ void PhaseIdealLoop::recompute_dom_depth() {
}
}
if (_dom_stk == NULL) {
- uint init_size = C->unique() / 100; // Guess that 1/100 is a reasonable initial size.
+ uint init_size = C->live_nodes() / 100; // Guess that 1/100 is a reasonable initial size.
if (init_size < 10) init_size = 10;
_dom_stk = new GrowableArray<uint>(init_size);
}
@@ -2763,8 +2806,8 @@ IdealLoopTree *PhaseIdealLoop::sort( IdealLoopTree *loop, IdealLoopTree *innermo
// The sort is of size number-of-control-children, which generally limits
// it to size 2 (i.e., I just choose between my 2 target loops).
void PhaseIdealLoop::build_loop_tree() {
- // Allocate stack of size C->unique()/2 to avoid frequent realloc
- GrowableArray <Node *> bltstack(C->unique() >> 1);
+ // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
+ GrowableArray <Node *> bltstack(C->live_nodes() >> 1);
Node *n = C->root();
bltstack.push(n);
int pre_order = 1;
@@ -3653,7 +3696,7 @@ void PhaseIdealLoop::dump_bad_graph(const char* msg, Node* n, Node* early, Node*
void PhaseIdealLoop::dump( ) const {
ResourceMark rm;
Arena* arena = Thread::current()->resource_area();
- Node_Stack stack(arena, C->unique() >> 2);
+ Node_Stack stack(arena, C->live_nodes() >> 2);
Node_List rpo_list;
VectorSet visited(arena);
visited.set(C->top()->_idx);
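remove_safepoints() centralizes the pruning logic counted_loop() used to duplicate: optionally find one safepoint on the idom path to keep, and only prune the rest if such a dominating safepoint exists (or none needs to be kept). A standalone sketch of the policy (illustrative; node ids standing in for Nodes):

    #include <cstdio>
    #include <vector>

    // on_idom_path is the id of a safepoint dominating the loop, or -1.
    static int prune_safepoints(const std::vector<int>& sfpts, bool keep_one,
                                int on_idom_path) {
        int keep = keep_one ? on_idom_path : -1;
        bool prune = !keep_one || keep != -1;  // never strip all paths bare
        if (!prune) return 0;
        int removed = 0;
        for (size_t i = 0; i < sfpts.size(); i++)
            if (sfpts[i] != keep) removed++;   // lazy_replace in the real code
        return removed;
    }

    int main() {
        std::vector<int> sfpts = {10, 17, 23};
        std::printf("counted loop, keep none: %d removed\n",
                    prune_safepoints(sfpts, false, -1));
        std::printf("no dominating safepoint: %d removed\n",
                    prune_safepoints(sfpts, true, -1));
        std::printf("keep dominating 17:      %d removed\n",
                    prune_safepoints(sfpts, true, 17));
        return 0;
    }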
diff --git a/src/share/vm/opto/loopnode.hpp b/src/share/vm/opto/loopnode.hpp
index 0cb27311d..93536c832 100644
--- a/src/share/vm/opto/loopnode.hpp
+++ b/src/share/vm/opto/loopnode.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -403,6 +403,9 @@ public:
// encountered.
void allpaths_check_safepts(VectorSet &visited, Node_List &stack);
+ // Remove safepoints from loop. Optionally keeping one.
+ void remove_safepoints(PhaseIdealLoop* phase, bool keep_one);
+
// Convert to counted loops where possible
void counted_loop( PhaseIdealLoop *phase );
diff --git a/src/share/vm/opto/machnode.hpp b/src/share/vm/opto/machnode.hpp
index 8c55a8d23..5f8757c9a 100644
--- a/src/share/vm/opto/machnode.hpp
+++ b/src/share/vm/opto/machnode.hpp
@@ -558,6 +558,29 @@ public:
#endif
};
+// MachMergeNode is similar to a PhiNode in the sense that it merges multiple
+// values, however it doesn't have a control input and is more like a MergeMem.
+// It is inserted after register allocation to ensure that nodes use a single
+// definition of a multidef lrg in a block.
+class MachMergeNode : public MachIdealNode {
+public:
+ MachMergeNode(Node *n1) {
+ init_class_id(Class_MachMerge);
+ add_req(NULL);
+ add_req(n1);
+ }
+ virtual const RegMask &out_RegMask() const { return in(1)->out_RegMask(); }
+ virtual const RegMask &in_RegMask(uint idx) const { return in(1)->in_RegMask(idx); }
+ virtual const class Type *bottom_type() const { return in(1)->bottom_type(); }
+ virtual uint ideal_reg() const { return bottom_type()->ideal_reg(); }
+ virtual uint oper_input_base() const { return 1; }
+ virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { }
+ virtual uint size(PhaseRegAlloc *ra_) const { return 0; }
+#ifndef PRODUCT
+ virtual const char *Name() const { return "MachMerge"; }
+#endif
+};
+
//------------------------------MachBranchNode--------------------------------
// Abstract machine branch Node
class MachBranchNode : public MachIdealNode {
diff --git a/src/share/vm/opto/macro.cpp b/src/share/vm/opto/macro.cpp
index 94fdbf86a..2d555c280 100644
--- a/src/share/vm/opto/macro.cpp
+++ b/src/share/vm/opto/macro.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1890,7 +1890,7 @@ void PhaseMacroExpand::mark_eliminated_box(Node* oldbox, Node* obj) {
// Box is used only in one lock region. Mark this box as eliminated.
_igvn.hash_delete(oldbox);
oldbox->as_BoxLock()->set_eliminated(); // This changes box's hash value
- _igvn.hash_insert(oldbox);
+ _igvn.hash_insert(oldbox);
for (uint i = 0; i < oldbox->outcnt(); i++) {
Node* u = oldbox->raw_out(i);
@@ -1899,6 +1899,9 @@ void PhaseMacroExpand::mark_eliminated_box(Node* oldbox, Node* obj) {
// Check lock's box since box could be referenced by Lock's debug info.
if (alock->box_node() == oldbox) {
// Mark eliminated all related locks and unlocks.
+#ifdef ASSERT
+ alock->log_lock_optimization(C, "eliminate_lock_set_non_esc4");
+#endif
alock->set_non_esc_obj();
}
}
@@ -1925,6 +1928,9 @@ void PhaseMacroExpand::mark_eliminated_box(Node* oldbox, Node* obj) {
AbstractLockNode* alock = u->as_AbstractLock();
if (alock->box_node() == oldbox && alock->obj_node()->eqv_uncast(obj)) {
// Replace Box and mark eliminated all related locks and unlocks.
+#ifdef ASSERT
+ alock->log_lock_optimization(C, "eliminate_lock_set_non_esc5");
+#endif
alock->set_non_esc_obj();
_igvn.rehash_node_delayed(alock);
alock->set_box_node(newbox);
@@ -1971,26 +1977,38 @@ void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode *alock) {
return;
} else if (!alock->is_non_esc_obj()) { // Not eliminated or coarsened
// Only Lock node has JVMState needed here.
- if (alock->jvms() != NULL && alock->as_Lock()->is_nested_lock_region()) {
- // Mark eliminated related nested locks and unlocks.
- Node* obj = alock->obj_node();
- BoxLockNode* box_node = alock->box_node()->as_BoxLock();
- assert(!box_node->is_eliminated(), "should not be marked yet");
- // Note: BoxLock node is marked eliminated only here
- // and it is used to indicate that all associated lock
- // and unlock nodes are marked for elimination.
- box_node->set_eliminated(); // Box's hash is always NO_HASH here
- for (uint i = 0; i < box_node->outcnt(); i++) {
- Node* u = box_node->raw_out(i);
- if (u->is_AbstractLock()) {
- alock = u->as_AbstractLock();
- if (alock->box_node() == box_node) {
- // Verify that this Box is referenced only by related locks.
- assert(alock->obj_node()->eqv_uncast(obj), "");
- // Mark all related locks and unlocks.
- alock->set_nested();
+ // Not that the preceding claim is documented anywhere else.
+ if (alock->jvms() != NULL) {
+ if (alock->as_Lock()->is_nested_lock_region()) {
+ // Mark eliminated related nested locks and unlocks.
+ Node* obj = alock->obj_node();
+ BoxLockNode* box_node = alock->box_node()->as_BoxLock();
+ assert(!box_node->is_eliminated(), "should not be marked yet");
+ // Note: BoxLock node is marked eliminated only here
+ // and it is used to indicate that all associated lock
+ // and unlock nodes are marked for elimination.
+ box_node->set_eliminated(); // Box's hash is always NO_HASH here
+ for (uint i = 0; i < box_node->outcnt(); i++) {
+ Node* u = box_node->raw_out(i);
+ if (u->is_AbstractLock()) {
+ alock = u->as_AbstractLock();
+ if (alock->box_node() == box_node) {
+ // Verify that this Box is referenced only by related locks.
+ assert(alock->obj_node()->eqv_uncast(obj), "");
+ // Mark all related locks and unlocks.
+#ifdef ASSERT
+ alock->log_lock_optimization(C, "eliminate_lock_set_nested");
+#endif
+ alock->set_nested();
+ }
}
}
+ } else {
+#ifdef ASSERT
+ alock->log_lock_optimization(C, "eliminate_lock_NOT_nested_lock_region");
+ if (C->log() != NULL)
+ alock->as_Lock()->is_nested_lock_region(C); // rerun for debugging output
+#endif
}
}
return;
@@ -2035,19 +2053,10 @@ bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
assert(oldbox->is_eliminated(), "should be done already");
}
#endif
- CompileLog* log = C->log();
- if (log != NULL) {
- log->head("eliminate_lock lock='%d'",
- alock->is_Lock());
- JVMState* p = alock->jvms();
- while (p != NULL) {
- log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
- p = p->caller();
- }
- log->tail("eliminate_lock");
- }
- #ifndef PRODUCT
+ alock->log_lock_optimization(C, "eliminate_lock");
+
+#ifndef PRODUCT
if (PrintEliminateLocks) {
if (alock->is_Lock()) {
tty->print_cr("++++ Eliminated: %d Lock", alock->_idx);
@@ -2055,7 +2064,7 @@ bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
tty->print_cr("++++ Eliminated: %d Unlock", alock->_idx);
}
}
- #endif
+#endif
Node* mem = alock->in(TypeFunc::Memory);
Node* ctrl = alock->in(TypeFunc::Control);
diff --git a/src/share/vm/opto/matcher.cpp b/src/share/vm/opto/matcher.cpp
index c7fa72c94..f4097b35e 100644
--- a/src/share/vm/opto/matcher.cpp
+++ b/src/share/vm/opto/matcher.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,25 +38,17 @@
#include "opto/vectornode.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined AD_MD_HPP
+# include AD_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "adfiles/ad_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/ad_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif
@@ -343,14 +335,14 @@ void Matcher::match( ) {
grow_new_node_array(C->unique());
// Reset node counter so MachNodes start with _idx at 0
- int nodes = C->unique(); // save value
+ int live_nodes = C->live_nodes();
C->set_unique(0);
C->reset_dead_node_list();
// Recursively match trees from old space into new space.
// Correct leaves of new-space Nodes; they point to old-space.
_visited.Clear(); // Clear visit bits for xform call
- C->set_cached_top_node(xform( C->top(), nodes ));
+ C->set_cached_top_node(xform( C->top(), live_nodes));
if (!C->failing()) {
Node* xroot = xform( C->root(), 1 );
if (xroot == NULL) {
@@ -848,7 +840,7 @@ void Matcher::init_spill_mask( Node *ret ) {
MachNode *spillCP = match_tree(new (C) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
#endif
MachNode *spillI = match_tree(new (C) LoadINode(NULL,mem,fp,atp,TypeInt::INT,MemNode::unordered));
- MachNode *spillL = match_tree(new (C) LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered,false));
+ MachNode *spillL = match_tree(new (C) LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered, LoadNode::DependsOnlyOnTest,false));
MachNode *spillF = match_tree(new (C) LoadFNode(NULL,mem,fp,atp,Type::FLOAT,MemNode::unordered));
MachNode *spillD = match_tree(new (C) LoadDNode(NULL,mem,fp,atp,Type::DOUBLE,MemNode::unordered));
MachNode *spillP = match_tree(new (C) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
@@ -1003,7 +995,7 @@ class MStack: public Node_Stack {
Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
Node *Matcher::xform( Node *n, int max_stack ) {
// Use one stack to keep both: child's node/state and parent's node/index
- MStack mstack(max_stack * 2 * 2); // C->unique() * 2 * 2
+ MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
mstack.push(n, Visit, NULL, -1); // set NULL as parent to indicate root
while (mstack.is_nonempty()) {
@@ -2029,8 +2021,8 @@ bool Matcher::is_bmi_pattern(Node *n, Node *m) {
//------------------------------find_shared------------------------------------
// Set bits if Node is shared or otherwise a root
void Matcher::find_shared( Node *n ) {
- // Allocate stack of size C->unique() * 2 to avoid frequent realloc
- MStack mstack(C->unique() * 2);
+ // Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc
+ MStack mstack(C->live_nodes() * 2);
// Mark nodes as address_visited if they are inputs to an address expression
VectorSet address_visited(Thread::current()->resource_area());
mstack.push(n, Visit); // Don't need to pre-visit root node
diff --git a/src/share/vm/opto/memnode.cpp b/src/share/vm/opto/memnode.cpp
index 46c2d4765..361f24a22 100644
--- a/src/share/vm/opto/memnode.cpp
+++ b/src/share/vm/opto/memnode.cpp
@@ -878,6 +878,9 @@ void LoadNode::dump_spec(outputStream *st) const {
// standard dump does this in Verbose and WizardMode
st->print(" #"); _type->dump_on(st);
}
+ if (!_depends_only_on_test) {
+ st->print(" (does not depend only on test)");
+ }
}
#endif
@@ -894,7 +897,7 @@ bool LoadNode::is_immutable_value(Node* adr) {
//----------------------------LoadNode::make-----------------------------------
// Polymorphic factory method:
-Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo) {
+Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo, ControlDependency control_dependency) {
Compile* C = gvn.C;
// sanity check the alias category against the created node type
@@ -910,36 +913,40 @@ Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypeP
rt->isa_oopptr() || is_immutable_value(adr),
"raw memory operations should have control edge");
switch (bt) {
- case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(), mo);
- case T_BYTE: return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int(), mo);
- case T_INT: return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int(), mo);
- case T_CHAR: return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(), mo);
- case T_SHORT: return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int(), mo);
- case T_LONG: return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo);
- case T_FLOAT: return new (C) LoadFNode (ctl, mem, adr, adr_type, rt, mo);
- case T_DOUBLE: return new (C) LoadDNode (ctl, mem, adr, adr_type, rt, mo);
- case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(), mo);
+ case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
+ case T_BYTE: return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
+ case T_INT: return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
+ case T_CHAR: return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
+ case T_SHORT: return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
+ case T_LONG: return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency);
+ case T_FLOAT: return new (C) LoadFNode (ctl, mem, adr, adr_type, rt, mo, control_dependency);
+ case T_DOUBLE: return new (C) LoadDNode (ctl, mem, adr, adr_type, rt, mo, control_dependency);
+ case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency);
case T_OBJECT:
#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
- Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo));
+ Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency));
return new (C) DecodeNNode(load, load->bottom_type()->make_ptr());
} else
#endif
{
assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
- return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo);
+ return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo, control_dependency);
}
}
ShouldNotReachHere();
return (LoadNode*)NULL;
}
-LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
+LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo, ControlDependency control_dependency) {
bool require_atomic = true;
- return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, require_atomic);
+ return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic);
}
+LoadDNode* LoadDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo, ControlDependency control_dependency) {
+ bool require_atomic = true;
+ return new (C) LoadDNode(ctl, mem, adr, adr_type, rt, mo, control_dependency, require_atomic);
+}
@@ -2398,6 +2405,11 @@ StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr,
return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
}
+StoreDNode* StoreDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
+ bool require_atomic = true;
+ return new (C) StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
+}
+
//--------------------------bottom_type----------------------------------------
const Type *StoreNode::bottom_type() const {
diff --git a/src/share/vm/opto/memnode.hpp b/src/share/vm/opto/memnode.hpp
index 79ff5c029..f2ed639f9 100644
--- a/src/share/vm/opto/memnode.hpp
+++ b/src/share/vm/opto/memnode.hpp
@@ -138,7 +138,33 @@ public:
//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
+public:
+ // Some loads (from unsafe) should be pinned: they don't depend only
+ // on the dominating test. The boolean field _depends_only_on_test
+ // below records whether that node depends only on the dominating
+ // test.
+ // Methods used to build LoadNodes pass an argument of type enum
+ // ControlDependency instead of a boolean because those methods
+ // typically have multiple boolean parameters with default values:
+ // passing the wrong boolean to one of these parameters by mistake
+ // easily goes unnoticed. Using an enum, the compiler can check that
+ // the type of a value and the type of the parameter match.
+ enum ControlDependency {
+ Pinned,
+ DependsOnlyOnTest
+ };
private:
+ // LoadNode::hash() doesn't take the _depends_only_on_test field
+ // into account: If the graph already has a non-pinned LoadNode and
+ // we add a pinned LoadNode with the same inputs, it's safe for GVN
+ // to replace the pinned LoadNode with the non-pinned LoadNode,
+ // otherwise it wouldn't be safe to have a non-pinned LoadNode with
+ // those inputs in the first place. If the graph already has a
+ // pinned LoadNode and we add a non-pinned LoadNode with the same
+ // inputs, it's safe (but suboptimal) for GVN to replace the
+ // non-pinned LoadNode with the pinned LoadNode.
+ bool _depends_only_on_test;
+
// On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
// loads that can be reordered from those requiring acquire semantics to
// adhere to the Java specification. The required behaviour is stored in
@@ -153,8 +179,8 @@ protected:
const Type* const _type; // What kind of value is loaded?
public:
- LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo)
- : MemNode(c,mem,adr,at), _type(rt), _mo(mo) {
+ LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
+ : MemNode(c,mem,adr,at), _type(rt), _mo(mo), _depends_only_on_test(control_dependency == DependsOnlyOnTest) {
init_class_id(Class_Load);
}
inline bool is_unordered() const { return !is_acquire(); }
@@ -165,7 +191,8 @@ public:
// Polymorphic factory method:
static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
- const TypePtr* at, const Type *rt, BasicType bt, MemOrd mo);
+ const TypePtr* at, const Type *rt, BasicType bt,
+ MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
virtual uint hash() const; // Check the type
@@ -233,16 +260,15 @@ protected:
// which produce results (new raw memory state) inside of loops preventing all
// manner of other optimizations). Basically, it's ugly but so is the alternative.
// See comment in macro.cpp, around line 125 expand_allocate_common().
- virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
-
+ virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
};
//------------------------------LoadBNode--------------------------------------
// Load a byte (8bits signed) from memory
class LoadBNode : public LoadNode {
public:
- LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
- : LoadNode(c, mem, adr, at, ti, mo) {}
+ LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+ : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@@ -255,8 +281,8 @@ public:
// Load a unsigned byte (8bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
- LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo)
- : LoadNode(c, mem, adr, at, ti, mo) {}
+ LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+ : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
@@ -269,8 +295,8 @@ public:
// Load an unsigned short/char (16bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
- LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
- : LoadNode(c, mem, adr, at, ti, mo) {}
+ LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+ : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@@ -283,8 +309,8 @@ public:
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
- LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
- : LoadNode(c, mem, adr, at, ti, mo) {}
+ LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+ : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@@ -297,8 +323,8 @@ public:
// Load an integer from memory
class LoadINode : public LoadNode {
public:
- LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
- : LoadNode(c, mem, adr, at, ti, mo) {}
+ LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+ : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual int store_Opcode() const { return Op_StoreI; }
@@ -330,15 +356,15 @@ class LoadLNode : public LoadNode {
public:
LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
- MemOrd mo, bool require_atomic_access = false)
- : LoadNode(c, mem, adr, at, tl, mo), _require_atomic_access(require_atomic_access) {}
+ MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
+ : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegL; }
virtual int store_Opcode() const { return Op_StoreL; }
virtual BasicType memory_type() const { return T_LONG; }
- bool require_atomic_access() { return _require_atomic_access; }
+ bool require_atomic_access() const { return _require_atomic_access; }
static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
- const Type* rt, MemOrd mo);
+ const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const {
LoadNode::dump_spec(st);
@@ -351,8 +377,8 @@ public:
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
- LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo)
- : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo) {}
+ LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+ : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
virtual int Opcode() const;
};
@@ -360,8 +386,8 @@ public:
// Load a float (64 bits) from memory
class LoadFNode : public LoadNode {
public:
- LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo)
- : LoadNode(c, mem, adr, at, t, mo) {}
+ LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+ : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegF; }
virtual int store_Opcode() const { return Op_StoreF; }
@@ -371,21 +397,39 @@ public:
//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
+ virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
+ virtual uint cmp( const Node &n ) const {
+ return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
+ && LoadNode::cmp(n);
+ }
+ virtual uint size_of() const { return sizeof(*this); }
+ const bool _require_atomic_access; // is piecewise load forbidden?
+
public:
- LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo)
- : LoadNode(c, mem, adr, at, t, mo) {}
+ LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
+ MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
+ : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegD; }
virtual int store_Opcode() const { return Op_StoreD; }
virtual BasicType memory_type() const { return T_DOUBLE; }
+ bool require_atomic_access() const { return _require_atomic_access; }
+ static LoadDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
+ const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
+#ifndef PRODUCT
+ virtual void dump_spec(outputStream *st) const {
+ LoadNode::dump_spec(st);
+ if (_require_atomic_access) st->print(" Atomic!");
+ }
+#endif
};
//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
- LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo)
- : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo) {}
+ LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+ : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
virtual int Opcode() const;
};
@@ -393,8 +437,8 @@ public:
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
- LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo)
- : LoadNode(c, mem, adr, at, t, mo) {}
+ LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+ : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegP; }
virtual int store_Opcode() const { return Op_StoreP; }
@@ -406,8 +450,8 @@ public:
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
- LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo)
- : LoadNode(c, mem, adr, at, t, mo) {}
+ LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
+ : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegN; }
virtual int store_Opcode() const { return Op_StoreN; }
@@ -582,7 +626,7 @@ public:
: StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
virtual int Opcode() const;
virtual BasicType memory_type() const { return T_LONG; }
- bool require_atomic_access() { return _require_atomic_access; }
+ bool require_atomic_access() const { return _require_atomic_access; }
static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const {
@@ -605,11 +649,28 @@ public:
//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
+ virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
+ virtual uint cmp( const Node &n ) const {
+ return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
+ && StoreNode::cmp(n);
+ }
+ virtual uint size_of() const { return sizeof(*this); }
+ const bool _require_atomic_access; // is piecewise store forbidden?
public:
- StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
- : StoreNode(c, mem, adr, at, val, mo) {}
+ StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
+ MemOrd mo, bool require_atomic_access = false)
+ : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
virtual int Opcode() const;
virtual BasicType memory_type() const { return T_DOUBLE; }
+ bool require_atomic_access() const { return _require_atomic_access; }
+ static StoreDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
+#ifndef PRODUCT
+ virtual void dump_spec(outputStream *st) const {
+ StoreNode::dump_spec(st);
+ if (_require_atomic_access) st->print(" Atomic!");
+ }
+#endif
+
};
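
An aside on what the new LoadDNode/StoreDNode factories provide: as with the
existing StoreLNode support, a 64-bit double access marked atomic must never be
split into two 32-bit halves on a 32-bit VM. A minimal, hedged sketch follows;
ctl/mem/adr/adr_type/val are stand-in GraphKit inputs, not taken from this patch:

    // Volatile jdouble: request an atomic store so it is never done piecewise.
    Node* st = StoreDNode::make_atomic(C, ctl, mem, adr, adr_type, val,
                                       MemNode::release);
    // The matching load side uses the LoadDNode factory added above:
    Node* ld = LoadDNode::make_atomic(C, ctl, mem, adr, adr_type,
                                      Type::DOUBLE, MemNode::acquire);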
//------------------------------StorePNode-------------------------------------
diff --git a/src/share/vm/opto/node.cpp b/src/share/vm/opto/node.cpp
index dd3c07915..26435b9e1 100644
--- a/src/share/vm/opto/node.cpp
+++ b/src/share/vm/opto/node.cpp
@@ -325,6 +325,9 @@ inline int Node::Init(int req, Compile* C) {
// Create a Node, with a given number of required edges.
Node::Node(uint req)
: _idx(IDX_INIT(req))
+#ifdef ASSERT
+ , _parse_idx(_idx)
+#endif
{
assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
debug_only( verify_construction() );
@@ -344,6 +347,9 @@ Node::Node(uint req)
//------------------------------Node-------------------------------------------
Node::Node(Node *n0)
: _idx(IDX_INIT(1))
+#ifdef ASSERT
+ , _parse_idx(_idx)
+#endif
{
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
@@ -356,6 +362,9 @@ Node::Node(Node *n0)
//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1)
: _idx(IDX_INIT(2))
+#ifdef ASSERT
+ , _parse_idx(_idx)
+#endif
{
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
@@ -370,6 +379,9 @@ Node::Node(Node *n0, Node *n1)
//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2)
: _idx(IDX_INIT(3))
+#ifdef ASSERT
+ , _parse_idx(_idx)
+#endif
{
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
@@ -386,6 +398,9 @@ Node::Node(Node *n0, Node *n1, Node *n2)
//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
: _idx(IDX_INIT(4))
+#ifdef ASSERT
+ , _parse_idx(_idx)
+#endif
{
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
@@ -404,6 +419,9 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
: _idx(IDX_INIT(5))
+#ifdef ASSERT
+ , _parse_idx(_idx)
+#endif
{
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
@@ -425,6 +443,9 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
Node *n4, Node *n5)
: _idx(IDX_INIT(6))
+#ifdef ASSERT
+ , _parse_idx(_idx)
+#endif
{
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
@@ -448,6 +469,9 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
Node *n4, Node *n5, Node *n6)
: _idx(IDX_INIT(7))
+#ifdef ASSERT
+ , _parse_idx(_idx)
+#endif
{
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
@@ -1749,7 +1773,7 @@ static void dump_nodes(const Node* start, int d, bool only_ctrl) {
uint depth = (uint)ABS(d);
int direction = d;
Compile* C = Compile::current();
- GrowableArray <Node *> nstack(C->unique());
+ GrowableArray <Node *> nstack(C->live_nodes());
nstack.append(s);
int begin = 0;
@@ -2083,6 +2107,17 @@ void Node_List::dump() const {
#endif
}
+void Node_List::dump_simple() const {
+#ifndef PRODUCT
+ for( uint i = 0; i < _cnt; i++ )
+ if( _nodes[i] ) {
+ tty->print(" %d", _nodes[i]->_idx);
+ } else {
+ tty->print(" NULL");
+ }
+#endif
+}
+
//=============================================================================
//------------------------------remove-----------------------------------------
void Unique_Node_List::remove( Node *n ) {
diff --git a/src/share/vm/opto/node.hpp b/src/share/vm/opto/node.hpp
index 0b51d0487..ffb096e9c 100644
--- a/src/share/vm/opto/node.hpp
+++ b/src/share/vm/opto/node.hpp
@@ -98,6 +98,7 @@ class MachReturnNode;
class MachSafePointNode;
class MachSpillCopyNode;
class MachTempNode;
+class MachMergeNode;
class Matcher;
class MemBarNode;
class MemBarStoreStoreNode;
@@ -293,10 +294,16 @@ protected:
public:
// Each Node is assigned a unique small/dense number. This number is used
- // to index into auxiliary arrays of data and bitvectors.
- // It is declared const to defend against inadvertant assignment,
- // since it is used by clients as a naked field.
+ // to index into auxiliary arrays of data and bit vectors.
+ // The field _idx is declared constant to defend against inadvertent assignments,
+ // since it is used by clients as a naked field. However, the field's value can be
+ // changed using the set_idx() method.
+ //
+ // The PhaseRenumberLive phase renumbers nodes based on liveness information.
+ // Therefore, it updates the value of the _idx field. The parse-time _idx is
+ // preserved in _parse_idx.
const node_idx_t _idx;
+ DEBUG_ONLY(const node_idx_t _parse_idx;)
// Get the (read-only) number of input edges
uint req() const { return _cnt; }
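
A hedged sketch of what _parse_idx enables: after PhaseRenumberLive rewrites
_idx, debug-only code can still correlate a node with the IDs that appeared in
parse-time dumps. The helper below is illustrative, not part of the patch:

    #ifdef ASSERT
    // Print both the current (possibly renumbered) ID and the stable parse-time ID.
    static void print_node_ids(const Node* n) {
      tty->print_cr("idx=%d parse_idx=%d", n->_idx, n->_parse_idx);
    }
    #endif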
@@ -591,6 +598,7 @@ public:
DEFINE_CLASS_ID(MachTemp, Mach, 3)
DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
DEFINE_CLASS_ID(MachConstant, Mach, 5)
+ DEFINE_CLASS_ID(MachMerge, Mach, 6)
DEFINE_CLASS_ID(Type, Node, 2)
DEFINE_CLASS_ID(Phi, Type, 0)
@@ -761,6 +769,7 @@ public:
DEFINE_CLASS_QUERY(MachSafePoint)
DEFINE_CLASS_QUERY(MachSpillCopy)
DEFINE_CLASS_QUERY(MachTemp)
+ DEFINE_CLASS_QUERY(MachMerge)
DEFINE_CLASS_QUERY(Mem)
DEFINE_CLASS_QUERY(MemBar)
DEFINE_CLASS_QUERY(MemBarStoreStore)
@@ -1365,6 +1374,7 @@ public:
void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
uint size() const { return _cnt; }
void dump() const;
+ void dump_simple() const;
};
//------------------------------Unique_Node_List-------------------------------
diff --git a/src/share/vm/opto/output.cpp b/src/share/vm/opto/output.cpp
index 5a32e38ea..0563ebba5 100644
--- a/src/share/vm/opto/output.cpp
+++ b/src/share/vm/opto/output.cpp
@@ -2473,7 +2473,7 @@ void Scheduling::DoScheduling() {
if( iop == Op_Con ) continue; // Do not schedule Top
if( iop == Op_Node && // Do not schedule PhiNodes, ProjNodes
mach->pipeline() == MachNode::pipeline_class() &&
- !n->is_SpillCopy() ) // Breakpoints, Prolog, etc
+ !n->is_SpillCopy() && !n->is_MachMerge() ) // Breakpoints, Prolog, etc
continue;
break; // Funny loop structure to be sure...
}
diff --git a/src/share/vm/opto/output.hpp b/src/share/vm/opto/output.hpp
index 298de0fec..cdde2359a 100644
--- a/src/share/vm/opto/output.hpp
+++ b/src/share/vm/opto/output.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,25 +27,17 @@
#include "opto/block.hpp"
#include "opto/node.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined AD_MD_HPP
+# include AD_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "adfiles/ad_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/ad_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif
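
The AD_MD_HPP hook lets a port outside the hard-coded chain (for example the
arm and ppc_32 models dropped above) inject its generated adfile without
patching every consumer. Presumably the port's makefile defines the macro; the
exact flag spelling below is an assumption, not taken from this patch:

    // Hypothetical compiler flag supplied by a port's build:
    //   -DAD_MD_HPP='"adfiles/ad_arm.hpp"'
    // with which the #if chain above reduces to:
    //   # include AD_MD_HPP   // i.e. "adfiles/ad_arm.hpp"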
diff --git a/src/share/vm/opto/parse.hpp b/src/share/vm/opto/parse.hpp
index 75cc76229..da8707490 100644
--- a/src/share/vm/opto/parse.hpp
+++ b/src/share/vm/opto/parse.hpp
@@ -549,8 +549,8 @@ class Parse : public GraphKit {
void do_jsr();
void do_ret();
- float dynamic_branch_prediction(float &cnt);
- float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci);
+ float dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
+ float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
bool seems_never_taken(float prob) const;
bool path_is_suitable_for_uncommon_trap(float prob) const;
bool seems_stable_comparison() const;
diff --git a/src/share/vm/opto/parse2.cpp b/src/share/vm/opto/parse2.cpp
index 453ee34d2..a45e491fb 100644
--- a/src/share/vm/opto/parse2.cpp
+++ b/src/share/vm/opto/parse2.cpp
@@ -761,35 +761,64 @@ void Parse::do_ret() {
merge_common(target, pnum);
}
+static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
+ if (btest != BoolTest::eq && btest != BoolTest::ne) {
+ // Only ::eq and ::ne are supported for profile injection.
+ return false;
+ }
+ if (test->is_Cmp() &&
+ test->in(1)->Opcode() == Op_ProfileBoolean) {
+ ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
+ int false_cnt = profile->false_count();
+ int true_cnt = profile->true_count();
+
+ // Which count maps to taken/not_taken depends on the actual test operation (::eq or ::ne).
+ // No need to scale the counts because profile injection was designed
+ // to feed exact counts into the VM.
+ taken = (btest == BoolTest::eq) ? false_cnt : true_cnt;
+ not_taken = (btest == BoolTest::eq) ? true_cnt : false_cnt;
+
+ profile->consume();
+ return true;
+ }
+ return false;
+}
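
A worked example of the mapping (hedged; the counts are made up): suppose the
ProfileBoolean recorded true_cnt = 990 and false_cnt = 10 for the boolean
feeding 'if (b == 0)', i.e. btest == BoolTest::eq:

    // taken     = false_cnt = 10   (branch taken when the boolean was false)
    // not_taken = true_cnt  = 990
    // dynamic_branch_prediction() then sees prob(taken) ~= 10 / 1000 = 1%.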
//--------------------------dynamic_branch_prediction--------------------------
// Try to gather dynamic branch prediction behavior. Return a probability
// of the branch being taken and set the "cnt" field. Returns a -1.0
// if we need to use static prediction for some reason.
-float Parse::dynamic_branch_prediction(float &cnt) {
+float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
ResourceMark rm;
cnt = COUNT_UNKNOWN;
- // Use MethodData information if it is available
- // FIXME: free the ProfileData structure
- ciMethodData* methodData = method()->method_data();
- if (!methodData->is_mature()) return PROB_UNKNOWN;
- ciProfileData* data = methodData->bci_to_data(bci());
- if (!data->is_JumpData()) return PROB_UNKNOWN;
-
- // get taken and not taken values
- int taken = data->as_JumpData()->taken();
+ int taken = 0;
int not_taken = 0;
- if (data->is_BranchData()) {
- not_taken = data->as_BranchData()->not_taken();
- }
- // scale the counts to be commensurate with invocation counts:
- taken = method()->scale_count(taken);
- not_taken = method()->scale_count(not_taken);
+ bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);
+
+ if (use_mdo) {
+ // Use MethodData information if it is available
+ // FIXME: free the ProfileData structure
+ ciMethodData* methodData = method()->method_data();
+ if (!methodData->is_mature()) return PROB_UNKNOWN;
+ ciProfileData* data = methodData->bci_to_data(bci());
+ if (!data->is_JumpData()) return PROB_UNKNOWN;
+
+ // get taken and not taken values
+ taken = data->as_JumpData()->taken();
+ not_taken = 0;
+ if (data->is_BranchData()) {
+ not_taken = data->as_BranchData()->not_taken();
+ }
+
+ // scale the counts to be commensurate with invocation counts:
+ taken = method()->scale_count(taken);
+ not_taken = method()->scale_count(not_taken);
+ }
// Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
- // We also check that individual counters are positive first, overwise the sum can become positive.
+ // We also check that individual counters are positive first, otherwise the sum can become positive.
if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
if (C->log() != NULL) {
C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
@@ -839,8 +868,9 @@ float Parse::dynamic_branch_prediction(float &cnt) {
//-----------------------------branch_prediction-------------------------------
float Parse::branch_prediction(float& cnt,
BoolTest::mask btest,
- int target_bci) {
- float prob = dynamic_branch_prediction(cnt);
+ int target_bci,
+ Node* test) {
+ float prob = dynamic_branch_prediction(cnt, btest, test);
// If prob is unknown, switch to static prediction
if (prob != PROB_UNKNOWN) return prob;
@@ -930,7 +960,7 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
Block* next_block = successor_for_bci(iter().next_bci());
float cnt;
- float prob = branch_prediction(cnt, btest, target_bci);
+ float prob = branch_prediction(cnt, btest, target_bci, c);
if (prob == PROB_UNKNOWN) {
// (An earlier version of do_ifnull omitted this trap for OSR methods.)
#ifndef PRODUCT
@@ -1011,7 +1041,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
Block* next_block = successor_for_bci(iter().next_bci());
float cnt;
- float prob = branch_prediction(cnt, btest, target_bci);
+ float prob = branch_prediction(cnt, btest, target_bci, c);
float untaken_prob = 1.0 - prob;
if (prob == PROB_UNKNOWN) {
diff --git a/src/share/vm/opto/parse3.cpp b/src/share/vm/opto/parse3.cpp
index 466e1ca3b..391bef6d3 100644
--- a/src/share/vm/opto/parse3.cpp
+++ b/src/share/vm/opto/parse3.cpp
@@ -233,7 +233,7 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
// Build the load.
//
MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
- Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, is_vol);
+ Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
// Adjust Java stack
if (type2size[bt] == 1)
diff --git a/src/share/vm/opto/phase.cpp b/src/share/vm/opto/phase.cpp
index a80840d7f..bb3ca8029 100644
--- a/src/share/vm/opto/phase.cpp
+++ b/src/share/vm/opto/phase.cpp
@@ -67,6 +67,8 @@ elapsedTimer Phase::_t_idealLoopVerify;
elapsedTimer Phase::_t_iterGVN;
elapsedTimer Phase::_t_iterGVN2;
elapsedTimer Phase::_t_incrInline;
+elapsedTimer Phase::_t_renumberLive;
+
// Subtimers for _t_registerAllocation
elapsedTimer Phase::_t_ctorChaitin;
@@ -74,6 +76,7 @@ elapsedTimer Phase::_t_buildIFGphysical;
elapsedTimer Phase::_t_computeLive;
elapsedTimer Phase::_t_regAllocSplit;
elapsedTimer Phase::_t_postAllocCopyRemoval;
+elapsedTimer Phase::_t_mergeMultidefs;
elapsedTimer Phase::_t_fixupSpills;
// Subtimers for _t_output
@@ -114,13 +117,14 @@ void Phase::print_timers() {
}
tty->print_cr (" iterGVN : %3.3f sec", Phase::_t_iterGVN.seconds());
tty->print_cr (" incrInline : %3.3f sec", Phase::_t_incrInline.seconds());
+ tty->print_cr (" renumberLive : %3.3f sec", Phase::_t_renumberLive.seconds());
tty->print_cr (" idealLoop : %3.3f sec", Phase::_t_idealLoop.seconds());
tty->print_cr (" idealLoopVerify: %3.3f sec", Phase::_t_idealLoopVerify.seconds());
tty->print_cr (" ccp : %3.3f sec", Phase::_t_ccp.seconds());
tty->print_cr (" iterGVN2 : %3.3f sec", Phase::_t_iterGVN2.seconds());
tty->print_cr (" macroExpand : %3.3f sec", Phase::_t_macroExpand.seconds());
tty->print_cr (" graphReshape : %3.3f sec", Phase::_t_graphReshaping.seconds());
- double optimizer_subtotal = Phase::_t_iterGVN.seconds() + Phase::_t_iterGVN2.seconds() +
+ double optimizer_subtotal = Phase::_t_iterGVN.seconds() + Phase::_t_iterGVN2.seconds() + Phase::_t_renumberLive.seconds() +
Phase::_t_escapeAnalysis.seconds() + Phase::_t_macroEliminate.seconds() +
Phase::_t_idealLoop.seconds() + Phase::_t_ccp.seconds() +
Phase::_t_macroExpand.seconds() + Phase::_t_graphReshaping.seconds();
@@ -136,11 +140,12 @@ void Phase::print_timers() {
tty->print_cr (" computeLive : %3.3f sec", Phase::_t_computeLive.seconds());
tty->print_cr (" regAllocSplit : %3.3f sec", Phase::_t_regAllocSplit.seconds());
tty->print_cr (" postAllocCopyRemoval: %3.3f sec", Phase::_t_postAllocCopyRemoval.seconds());
+ tty->print_cr (" mergeMultidefs: %3.3f sec", Phase::_t_mergeMultidefs.seconds());
tty->print_cr (" fixupSpills : %3.3f sec", Phase::_t_fixupSpills.seconds());
double regalloc_subtotal = Phase::_t_ctorChaitin.seconds() +
Phase::_t_buildIFGphysical.seconds() + Phase::_t_computeLive.seconds() +
Phase::_t_regAllocSplit.seconds() + Phase::_t_fixupSpills.seconds() +
- Phase::_t_postAllocCopyRemoval.seconds();
+ Phase::_t_postAllocCopyRemoval.seconds() + Phase::_t_mergeMultidefs.seconds();
double percent_of_regalloc = ((regalloc_subtotal == 0.0) ? 0.0 : (regalloc_subtotal / Phase::_t_registerAllocation.seconds() * 100.0));
tty->print_cr (" subtotal : %3.3f sec, %3.2f %%", regalloc_subtotal, percent_of_regalloc);
}
diff --git a/src/share/vm/opto/phase.hpp b/src/share/vm/opto/phase.hpp
index 08e957564..29350f539 100644
--- a/src/share/vm/opto/phase.hpp
+++ b/src/share/vm/opto/phase.hpp
@@ -40,22 +40,23 @@ class Compile;
class Phase : public StackObj {
public:
enum PhaseNumber {
- Compiler, // Top-level compiler phase
- Parser, // Parse bytecodes
- Remove_Useless, // Remove useless nodes
- Optimistic, // Optimistic analysis phase
- GVN, // Pessimistic global value numbering phase
- Ins_Select, // Instruction selection phase
- CFG, // Build a CFG
- BlockLayout, // Linear ordering of blocks
- Register_Allocation, // Register allocation, duh
- LIVE, // Dragon-book LIVE range problem
- StringOpts, // StringBuilder related optimizations
- Interference_Graph, // Building the IFG
- Coalesce, // Coalescing copies
- Ideal_Loop, // Find idealized trip-counted loops
- Macro_Expand, // Expand macro nodes
- Peephole, // Apply peephole optimizations
+ Compiler, // Top-level compiler phase
+ Parser, // Parse bytecodes
+ Remove_Useless, // Remove useless nodes
+ Remove_Useless_And_Renumber_Live, // First, remove useless nodes from the graph. Then, renumber live nodes.
+ Optimistic, // Optimistic analysis phase
+ GVN, // Pessimistic global value numbering phase
+ Ins_Select, // Instruction selection phase
+ CFG, // Build a CFG
+ BlockLayout, // Linear ordering of blocks
+ Register_Allocation, // Register allocation, duh
+ LIVE, // Dragon-book LIVE range problem
+ StringOpts, // StringBuilder related optimizations
+ Interference_Graph, // Building the IFG
+ Coalesce, // Coalescing copies
+ Ideal_Loop, // Find idealized trip-counted loops
+ Macro_Expand, // Expand macro nodes
+ Peephole, // Apply peephole optimizations
last_phase
};
protected:
@@ -102,6 +103,7 @@ protected:
static elapsedTimer _t_iterGVN;
static elapsedTimer _t_iterGVN2;
static elapsedTimer _t_incrInline;
+ static elapsedTimer _t_renumberLive;
// Subtimers for _t_registerAllocation
static elapsedTimer _t_ctorChaitin;
@@ -109,6 +111,7 @@ protected:
static elapsedTimer _t_computeLive;
static elapsedTimer _t_regAllocSplit;
static elapsedTimer _t_postAllocCopyRemoval;
+ static elapsedTimer _t_mergeMultidefs;
static elapsedTimer _t_fixupSpills;
// Subtimers for _t_output
diff --git a/src/share/vm/opto/phaseX.cpp b/src/share/vm/opto/phaseX.cpp
index 758359d7c..2c42f9a76 100644
--- a/src/share/vm/opto/phaseX.cpp
+++ b/src/share/vm/opto/phaseX.cpp
@@ -398,7 +398,7 @@ void NodeHash::operator=(const NodeHash& nh) {
//=============================================================================
//------------------------------PhaseRemoveUseless-----------------------------
// 1) Use a breadthfirst walk to collect useful nodes reachable from root.
-PhaseRemoveUseless::PhaseRemoveUseless( PhaseGVN *gvn, Unique_Node_List *worklist ) : Phase(Remove_Useless),
+PhaseRemoveUseless::PhaseRemoveUseless(PhaseGVN *gvn, Unique_Node_List *worklist, PhaseNumber phase_num) : Phase(phase_num),
_useful(Thread::current()->resource_area()) {
// Implementation requires 'UseLoopSafepoints == true' and an edge from root
@@ -435,6 +435,82 @@ PhaseRemoveUseless::PhaseRemoveUseless( PhaseGVN *gvn, Unique_Node_List *worklis
}
}
+//=============================================================================
+//------------------------------PhaseRenumberLive------------------------------
+// First, remove useless nodes (equivalent to identifying live nodes).
+// Then, renumber live nodes.
+//
+// The set of live nodes is returned by PhaseRemoveUseless in the _useful structure.
+// If the number of live nodes is 'x' (where 'x' == _useful.size()), then the
+// PhaseRenumberLive phase updates the node ID of each node (the _idx field) with a
+// unique value in the range [0, x).
+//
+// At the end of the PhaseRenumberLive phase, the compiler's count of unique nodes is
+// updated to 'x' and the list of dead nodes is reset (as there are no dead nodes).
+//
+// The PhaseRenumberLive phase updates two data structures with the new node IDs.
+// (1) The worklist is used by the PhaseIterGVN phase to identify nodes that must be
+// processed. A new worklist (with the updated node IDs) is returned in 'new_worklist'.
+// (2) Type information (the field PhaseGVN::_types) maps type information to each
+// node ID. The mapping is updated to use the new node IDs as well. Updated type
+// information is returned in PhaseGVN::_types.
+//
+// The PhaseRenumberLive phase does not preserve the order of elements in the worklist.
+//
+// Other data structures used by the compiler are not updated. The hash table for value
+// numbering (the field PhaseGVN::_table) is not updated because computing the hash
+// values is not based on node IDs. The field PhaseGVN::_nodes is not updated either
+// because it is empty wherever PhaseRenumberLive is used.
+PhaseRenumberLive::PhaseRenumberLive(PhaseGVN* gvn,
+ Unique_Node_List* worklist, Unique_Node_List* new_worklist,
+ PhaseNumber phase_num) :
+ PhaseRemoveUseless(gvn, worklist, Remove_Useless_And_Renumber_Live) {
+
+ assert(RenumberLiveNodes, "RenumberLiveNodes must be set to true for node renumbering to take place");
+ assert(C->live_nodes() == _useful.size(), "the number of live nodes must match the number of useful nodes");
+ assert(gvn->nodes_size() == 0, "GVN must not contain any nodes at this point");
+
+ uint old_unique_count = C->unique();
+ uint live_node_count = C->live_nodes();
+ uint worklist_size = worklist->size();
+
+ // Storage for the updated type information.
+ Type_Array new_type_array(C->comp_arena());
+
+ // Iterate over the set of live nodes.
+ uint current_idx = 0; // The current new node ID. Incremented after every assignment.
+ for (uint i = 0; i < _useful.size(); i++) {
+ Node* n = _useful.at(i);
+ const Type* type = gvn->type_or_null(n);
+ new_type_array.map(current_idx, type);
+
+ bool in_worklist = false;
+ if (worklist->member(n)) {
+ in_worklist = true;
+ }
+
+ n->set_idx(current_idx); // Update node ID.
+
+ if (in_worklist) {
+ new_worklist->push(n);
+ }
+
+ current_idx++;
+ }
+
+ assert(worklist_size == new_worklist->size(), "the new worklist must have the same size as the original worklist");
+ assert(live_node_count == current_idx, "all live nodes must be processed");
+
+ // Replace the compiler's type information with the updated type information.
+ gvn->replace_types(new_type_array);
+
+ // Update the unique node count of the compilation to the number of currently live nodes.
+ C->set_unique(live_node_count);
+
+ // Set the dead node count to 0 and reset dead node list.
+ C->reset_dead_node_list();
+}
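
A hedged sketch of the invariant established here: after the phase runs, node
IDs are dense, so index-keyed side structures can be sized by the live count
instead of the historical unique() high-water mark:

    // After PhaseRenumberLive (illustrative, not part of the patch):
    assert(C->unique() == C->live_nodes(), "IDs are dense in [0, live_nodes)");
    GrowableArray <Node *> per_node_info(C->live_nodes()); // sized by live count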
+
//=============================================================================
//------------------------------PhaseTransform---------------------------------
@@ -783,7 +859,7 @@ void PhaseGVN::dead_loop_check( Node *n ) {
//------------------------------PhaseIterGVN-----------------------------------
// Initialize hash table to fresh and clean for +VerifyOpto
PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ) : PhaseGVN(igvn,dummy), _worklist( ),
- _stack(C->unique() >> 1),
+ _stack(C->live_nodes() >> 1),
_delay_transform(false) {
}
@@ -800,7 +876,11 @@ PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn ) : PhaseGVN(igvn),
// Initialize with previous PhaseGVN info from Parser
PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
_worklist(*C->for_igvn()),
- _stack(C->unique() >> 1),
+// TODO: Before incremental inlining it was allocated only once and it was fine. Now that
+// the constructor is used in incremental inlining, this consumes too much memory:
+// _stack(C->live_nodes() >> 1),
+// So, as a band-aid, we replace this by:
+ _stack(C->comp_arena(), 32),
_delay_transform(false)
{
uint max;
@@ -1521,11 +1601,12 @@ void PhaseCCP::analyze() {
set_type(n, t);
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* m = n->fast_out(i); // Get user
- if( m->is_Region() ) { // New path to Region? Must recheck Phis too
+ if (m->is_Region()) { // New path to Region? Must recheck Phis too
for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
Node* p = m->fast_out(i2); // Propagate changes to uses
- if( p->bottom_type() != type(p) ) // If not already bottomed out
+ if (p->bottom_type() != type(p)) { // If not already bottomed out
worklist.push(p); // Propagate change to user
+ }
}
}
// If we changed the receiver type to a call, we need to revisit
@@ -1535,12 +1616,31 @@ void PhaseCCP::analyze() {
if (m->is_Call()) {
for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
Node* p = m->fast_out(i2); // Propagate changes to uses
- if (p->is_Proj() && p->as_Proj()->_con == TypeFunc::Control && p->outcnt() == 1)
+ if (p->is_Proj() && p->as_Proj()->_con == TypeFunc::Control && p->outcnt() == 1) {
worklist.push(p->unique_out());
+ }
}
}
- if( m->bottom_type() != type(m) ) // If not already bottomed out
+ if (m->bottom_type() != type(m)) { // If not already bottomed out
worklist.push(m); // Propagate change to user
+ }
+
+ // CmpU nodes can get their type information from two nodes up in the
+ // graph (instead of from the nodes immediately above). Make sure they
+ // are added to the worklist if nodes they depend on are updated, since
+ // they could be missed and get wrong types otherwise.
+ uint m_op = m->Opcode();
+ if (m_op == Op_AddI || m_op == Op_SubI) {
+ for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
+ Node* p = m->fast_out(i2); // Propagate changes to uses
+ if (p->Opcode() == Op_CmpU) {
+ // Got a CmpU which might need the new type information from node n.
+ if(p->bottom_type() != type(p)) { // If not already bottomed out
+ worklist.push(p); // Propagate change to user
+ }
+ }
+ }
+ }
}
}
}
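
An illustrative case for the CmpU re-push (not from the patch): CmpU can
compute its type from the inputs of an AddI/SubI feeding it, so improving the
type of a grandparent must re-trigger the CmpU:

    // Source shape:  if (i + 1 <u limit) { ... }    (unsigned-compare idiom)
    // Ideal graph:   CmpU(AddI(i, ConI(1)), limit)
    // When CCP narrows the type of 'i', only AddI is a direct user; without
    // the extra loop above, CmpU could keep a stale type derived from the
    // old range of 'i' two levels up.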
@@ -1566,7 +1666,7 @@ Node *PhaseCCP::transform( Node *n ) {
_nodes.map( n->_idx, new_node ); // Flag as having been cloned
// Allocate stack of size _nodes.Size()/2 to avoid frequent realloc
- GrowableArray <Node *> trstack(C->unique() >> 1);
+ GrowableArray <Node *> trstack(C->live_nodes() >> 1);
trstack.push(new_node); // Process children of cloned node
while ( trstack.is_nonempty() ) {
diff --git a/src/share/vm/opto/phaseX.hpp b/src/share/vm/opto/phaseX.hpp
index fcdd47ee8..ef55c0ec7 100644
--- a/src/share/vm/opto/phaseX.hpp
+++ b/src/share/vm/opto/phaseX.hpp
@@ -148,11 +148,21 @@ protected:
Unique_Node_List _useful; // Nodes reachable from root
// list is allocated from current resource area
public:
- PhaseRemoveUseless( PhaseGVN *gvn, Unique_Node_List *worklist );
+ PhaseRemoveUseless(PhaseGVN *gvn, Unique_Node_List *worklist, PhaseNumber phase_num = Remove_Useless);
Unique_Node_List *get_useful() { return &_useful; }
};
+//------------------------------PhaseRenumber----------------------------------
+// Phase that first performs a PhaseRemoveUseless, then it renumbers compiler
+// structures accordingly.
+class PhaseRenumberLive : public PhaseRemoveUseless {
+public:
+ PhaseRenumberLive(PhaseGVN* gvn,
+ Unique_Node_List* worklist, Unique_Node_List* new_worklist,
+ PhaseNumber phase_num = Remove_Useless_And_Renumber_Live);
+};
+
//------------------------------PhaseTransform---------------------------------
// Phases that analyze, then transform. Constructing the Phase object does any
@@ -162,7 +172,7 @@ public:
class PhaseTransform : public Phase {
protected:
Arena* _arena;
- Node_Array _nodes; // Map old node indices to new nodes.
+ Node_List _nodes; // Map old node indices to new nodes.
Type_Array _types; // Map old node indices to Types.
// ConNode caches:
@@ -187,7 +197,13 @@ public:
Arena* arena() { return _arena; }
Type_Array& types() { return _types; }
+ void replace_types(Type_Array new_types) {
+ _types = new_types;
+ }
// _nodes is used in varying ways by subclasses, which define local accessors
+ uint nodes_size() {
+ return _nodes.size();
+ }
public:
// Get a previously recorded type for the node n.
diff --git a/src/share/vm/opto/postaloc.cpp b/src/share/vm/opto/postaloc.cpp
index 86078979a..cec4c5dfb 100644
--- a/src/share/vm/opto/postaloc.cpp
+++ b/src/share/vm/opto/postaloc.cpp
@@ -263,20 +263,6 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v
// intermediate copies might be illegal, i.e., value is stored down to stack
// then reloaded BUT survives in a register the whole way.
Node *val = skip_copies(n->in(k));
-
- if (val == x && nk_idx != 0 &&
- regnd[nk_reg] != NULL && regnd[nk_reg] != x &&
- _lrg_map.live_range_id(x) == _lrg_map.live_range_id(regnd[nk_reg])) {
- // When rematerialzing nodes and stretching lifetimes, the
- // allocator will reuse the original def for multidef LRG instead
- // of the current reaching def because it can't know it's safe to
- // do so. After allocation completes if they are in the same LRG
- // then it should use the current reaching def instead.
- n->set_req(k, regnd[nk_reg]);
- blk_adjust += yank_if_dead(val, current_block, &value, &regnd);
- val = skip_copies(n->in(k));
- }
-
if (val == x) return blk_adjust; // No progress?
int n_regs = RegMask::num_registers(val->ideal_reg());
@@ -382,6 +368,95 @@ bool PhaseChaitin::eliminate_copy_of_constant(Node* val, Node* n,
return false;
}
+// The algorithm works as follows:
+// We traverse the block top to bottom. possibly_merge_multidef() is invoked for every input edge k
+// of the instruction n. We check whether the input is a multidef lrg. If it is, we record the fact that we've
+// seen a definition (coming in as an input) and add that fact to the reg2defuse array. The array maps registers to their
+// current reaching definitions (we track only multidefs though). With each definition we also associate the first
+// instruction we saw use it. If we later observe a def (an input) that is part of the
+// same lrg but differs from the previously seen def, we merge the two with a MachMerge node and rewrite
+// all the uses we've seen so far to use the merge. After that we keep replacing further defs in the same lrg
+// with the merge node as they are encountered, and keep adding these defs to the merge's inputs.
+void PhaseChaitin::merge_multidefs() {
+ NOT_PRODUCT( Compile::TracePhase t3("mergeMultidefs", &_t_mergeMultidefs, TimeCompiler); )
+ ResourceMark rm;
+ // Keep track of the defs seen in registers and collect their uses in the block.
+ RegToDefUseMap reg2defuse(_max_reg, _max_reg, RegDefUse());
+ for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
+ Block* block = _cfg.get_block(i);
+ for (uint j = 1; j < block->number_of_nodes(); j++) {
+ Node* n = block->get_node(j);
+ if (n->is_Phi()) continue;
+ for (uint k = 1; k < n->req(); k++) {
+ j += possibly_merge_multidef(n, k, block, reg2defuse);
+ }
+ // Null out the value produced by the instruction itself, since we're only interested in defs
+ // implicitly defined by the uses. We are actually interested in tracking only redefinitions
+ // of the multidef lrgs in the same register. For that purpose it's enough to track changes in
+ // the base register only and ignore other effects of multi-register lrgs and fat projections.
+ // It is also ok to ignore defs coming from singledefs. After an implicit overwrite by one of
+ // those our register is guaranteed to be used by another lrg and we won't attempt to merge it.
+ uint lrg = _lrg_map.live_range_id(n);
+ if (lrg > 0 && lrgs(lrg).is_multidef()) {
+ OptoReg::Name reg = lrgs(lrg).reg();
+ reg2defuse.at(reg).clear();
+ }
+ }
+ // Clear reg->def->use tracking for the next block
+ for (int j = 0; j < reg2defuse.length(); j++) {
+ reg2defuse.at(j).clear();
+ }
+ }
+}
+
+int PhaseChaitin::possibly_merge_multidef(Node *n, uint k, Block *block, RegToDefUseMap& reg2defuse) {
+ int blk_adjust = 0;
+
+ uint lrg = _lrg_map.live_range_id(n->in(k));
+ if (lrg > 0 && lrgs(lrg).is_multidef()) {
+ OptoReg::Name reg = lrgs(lrg).reg();
+
+ Node* def = reg2defuse.at(reg).def();
+ if (def != NULL && lrg == _lrg_map.live_range_id(def) && def != n->in(k)) {
+ // Same lrg but different node, we have to merge.
+ MachMergeNode* merge;
+ if (def->is_MachMerge()) { // is it already a merge?
+ merge = def->as_MachMerge();
+ } else {
+ merge = new (C) MachMergeNode(def);
+
+ // Insert the merge node into the block before the first use.
+ uint use_index = block->find_node(reg2defuse.at(reg).first_use());
+ block->insert_node(merge, use_index++);
+ _cfg.map_node_to_block(merge, block);
+
+ // Let the allocator know about the new node, use the same lrg
+ _lrg_map.extend(merge->_idx, lrg);
+ blk_adjust++;
+
+ // Fixup all the uses (there is at least one) that happened between the first
+ // use and before the current one.
+ for (; use_index < block->number_of_nodes(); use_index++) {
+ Node* use = block->get_node(use_index);
+ if (use == n) {
+ break;
+ }
+ use->replace_edge(def, merge);
+ }
+ }
+ if (merge->find_edge(n->in(k)) == -1) {
+ merge->add_req(n->in(k));
+ }
+ n->set_req(k, merge);
+ }
+
+ // update the uses
+ reg2defuse.at(reg).update(n->in(k), n);
+ }
+
+ return blk_adjust;
+}
+
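
A worked example of the merge (hedged, not part of the patch): let a multidef
live range L be assigned RAX, with defs d1 and d2 reaching uses in one block:

    // Before the pass:            After possibly_merge_multidef():
    //   d1 = ...      (RAX)         d1 = ...           (RAX)
    //   u1(d1)                      m  = MachMerge(d1)  // same lrg, inserted
    //   u2(d1)                      u1(m)               // before first use
    //   d2 = ...      (RAX)         u2(m)
    //   u3(d2)                      d2 = ...           (RAX)
    //                               m->add_req(d2);  u3(m)
    // All uses of L in the block now see a single reaching def per register.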
//------------------------------post_allocate_copy_removal---------------------
// Post-Allocation peephole copy removal. We do this in 1 pass over the
diff --git a/src/share/vm/opto/regmask.cpp b/src/share/vm/opto/regmask.cpp
index 4b367117e..3625d79d6 100644
--- a/src/share/vm/opto/regmask.cpp
+++ b/src/share/vm/opto/regmask.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,25 +25,17 @@
#include "precompiled.hpp"
#include "opto/compile.hpp"
#include "opto/regmask.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined AD_MD_HPP
+# include AD_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "adfiles/ad_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/ad_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif
diff --git a/src/share/vm/opto/regmask.hpp b/src/share/vm/opto/regmask.hpp
index 2ea6dfffd..9b2608a39 100644
--- a/src/share/vm/opto/regmask.hpp
+++ b/src/share/vm/opto/regmask.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,25 +28,17 @@
#include "code/vmreg.hpp"
#include "libadt/port.hpp"
#include "opto/optoreg.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined ADGLOBALS_MD_HPP
+# include ADGLOBALS_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/adGlobals_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/adGlobals_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/adGlobals_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/adGlobals_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "adfiles/adGlobals_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/adGlobals_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/adGlobals_ppc_64.hpp"
#endif
diff --git a/src/share/vm/opto/runtime.cpp b/src/share/vm/opto/runtime.cpp
index 89b9997e2..2b62a9961 100644
--- a/src/share/vm/opto/runtime.cpp
+++ b/src/share/vm/opto/runtime.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -68,25 +68,17 @@
#include "runtime/vframe_hp.hpp"
#include "utilities/copy.hpp"
#include "utilities/preserveException.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined AD_MD_HPP
+# include AD_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "adfiles/ad_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/ad_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif
diff --git a/src/share/vm/opto/stringopts.cpp b/src/share/vm/opto/stringopts.cpp
index b00bb881f..90655d806 100644
--- a/src/share/vm/opto/stringopts.cpp
+++ b/src/share/vm/opto/stringopts.cpp
@@ -1507,10 +1507,12 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
}
case StringConcat::StringMode: {
const Type* type = kit.gvn().type(arg);
+ Node* count = NULL;
if (type == TypePtr::NULL_PTR) {
// replace the argument with the null checked version
arg = null_string;
sc->set_argument(argi, arg);
+ count = kit.load_String_length(kit.control(), arg);
} else if (!type->higher_equal(TypeInstPtr::NOTNULL)) {
// s = s != null ? s : "null";
// length = length + (s.count - s.offset);
@@ -1533,10 +1535,13 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
// replace the argument with the null checked version
arg = phi;
sc->set_argument(argi, arg);
+ count = kit.load_String_length(kit.control(), arg);
+ } else {
+ // A corresponding null check will be connected during IGVN in MemNode::Ideal_common_DU_postCCP.
+ // kit.control() might be a different test that was hoisted above the actual null check;
+ // if the control input is not null, Ideal_common_DU_postCCP will not look for a null check.
+ count = kit.load_String_length(NULL, arg);
}
-
- Node* count = kit.load_String_length(kit.control(), arg);
-
length = __ AddI(length, count);
string_sizes->init_req(argi, NULL);
break;
diff --git a/src/share/vm/opto/superword.cpp b/src/share/vm/opto/superword.cpp
index 297e4d369..785b8bbd5 100644
--- a/src/share/vm/opto/superword.cpp
+++ b/src/share/vm/opto/superword.cpp
@@ -232,6 +232,13 @@ void SuperWord::find_adjacent_refs() {
// if unaligned memory access is not allowed because number of
// iterations in pre-loop will be not enough to align it.
create_pack = false;
+ } else {
+ SWPointer p2(best_align_to_mem_ref, this);
+ if (align_to_ref_p.invar() != p2.invar()) {
+ // Do not vectorize memory accesses with different invariants
+ // if unaligned memory accesses are not allowed.
+ create_pack = false;
+ }
}
}
} else {
@@ -445,29 +452,57 @@ bool SuperWord::ref_is_alignable(SWPointer& p) {
int preloop_stride = pre_end->stride_con();
int span = preloop_stride * p.scale_in_bytes();
-
- // Stride one accesses are alignable.
- if (ABS(span) == p.memory_size())
+ int mem_size = p.memory_size();
+ int offset = p.offset_in_bytes();
+ // Stride one accesses are alignable if the offset is aligned to the memory operation size.
+ // Offset can be unaligned when UseUnalignedAccesses is used.
+ if (ABS(span) == mem_size && (ABS(offset) % mem_size) == 0) {
return true;
-
- // If initial offset from start of object is computable,
- // compute alignment within the vector.
+ }
+ // If the initial offset from start of the object is computable,
+ // check if the pre-loop can align the final offset accordingly.
+ //
+ // In other words: Can we find an i such that the offset
+ // after i pre-loop iterations is aligned to vw?
+ // (init_offset + pre_loop) % vw == 0 (1)
+ // where
+ // pre_loop = i * span
+ // is the number of bytes added to the offset by i pre-loop iterations.
+ //
+ // For this to hold we need pre_loop to increase init_offset by
+ // pre_loop = vw - (init_offset % vw)
+ //
+ // This is only possible if pre_loop is divisible by span because each
+ // pre-loop iteration increases the initial offset by 'span' bytes:
+ // (vw - (init_offset % vw)) % span == 0
+ //
int vw = vector_width_in_bytes(p.mem());
assert(vw > 1, "sanity");
- if (vw % span == 0) {
- Node* init_nd = pre_end->init_trip();
- if (init_nd->is_Con() && p.invar() == NULL) {
- int init = init_nd->bottom_type()->is_int()->get_con();
-
- int init_offset = init * p.scale_in_bytes() + p.offset_in_bytes();
- assert(init_offset >= 0, "positive offset from object start");
-
+ Node* init_nd = pre_end->init_trip();
+ if (init_nd->is_Con() && p.invar() == NULL) {
+ int init = init_nd->bottom_type()->is_int()->get_con();
+ int init_offset = init * p.scale_in_bytes() + offset;
+ assert(init_offset >= 0, "positive offset from object start");
+ if (vw % span == 0) {
+ // If vw is a multiple of span, we use formula (1).
if (span > 0) {
return (vw - (init_offset % vw)) % span == 0;
} else {
assert(span < 0, "nonzero stride * scale");
return (init_offset % vw) % -span == 0;
}
+ } else if (span % vw == 0) {
+ // If span is a multiple of vw, we can simplify formula (1) to:
+ // (init_offset + i * span) % vw == 0
+ // =>
+ // (init_offset % vw) + ((i * span) % vw) == 0
+ // =>
+ // init_offset % vw == 0
+ //
+ // Because we add a multiple of vw to the initial offset, the final
+ // offset is a multiple of vw if and only if init_offset is a multiple.
+ //
+ return (init_offset % vw) == 0;
}
}
return false;
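
A numeric check of formula (1) (hedged; the values are made up): take
vw = 16 bytes, span = 4 bytes per pre-loop iteration, init_offset = 8:

    // vw % span == 0 and span > 0, so the access is alignable iff
    //   (vw - (init_offset % vw)) % span == 0
    //   (16 - (8 % 16)) % 4  ==  8 % 4  ==  0        -> alignable
    // i.e. two pre-loop iterations (2 * 4 = 8 bytes) reach a 16-byte boundary.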
@@ -479,17 +514,23 @@ int SuperWord::get_iv_adjustment(MemNode* mem_ref) {
SWPointer align_to_ref_p(mem_ref, this);
int offset = align_to_ref_p.offset_in_bytes();
int scale = align_to_ref_p.scale_in_bytes();
+ int elt_size = align_to_ref_p.memory_size();
int vw = vector_width_in_bytes(mem_ref);
assert(vw > 1, "sanity");
- int stride_sign = (scale * iv_stride()) > 0 ? 1 : -1;
- // At least one iteration is executed in pre-loop by default. As result
- // several iterations are needed to align memory operations in main-loop even
- // if offset is 0.
- int iv_adjustment_in_bytes = (stride_sign * vw - (offset % vw));
- int elt_size = align_to_ref_p.memory_size();
- assert(((ABS(iv_adjustment_in_bytes) % elt_size) == 0),
- err_msg_res("(%d) should be divisible by (%d)", iv_adjustment_in_bytes, elt_size));
- int iv_adjustment = iv_adjustment_in_bytes/elt_size;
+ int iv_adjustment;
+ if (scale != 0) {
+ int stride_sign = (scale * iv_stride()) > 0 ? 1 : -1;
+ // At least one iteration is executed in pre-loop by default. As result
+ // several iterations are needed to align memory operations in main-loop even
+ // if offset is 0.
+ int iv_adjustment_in_bytes = (stride_sign * vw - (offset % vw));
+ assert(((ABS(iv_adjustment_in_bytes) % elt_size) == 0),
+ err_msg_res("(%d) should be divisible by (%d)", iv_adjustment_in_bytes, elt_size));
+ iv_adjustment = iv_adjustment_in_bytes/elt_size;
+ } else {
+ // This memory op is not dependent on iv (scale == 0)
+ iv_adjustment = 0;
+ }
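
For intuition (hedged example values): with scale = 4 (an int array indexed
by iv), iv_stride() = 1, offset = 0 and vw = 16:

    // iv_adjustment = (stride_sign * vw - (offset % vw)) / elt_size
    //               = (1 * 16 - 0) / 4 = 4 pre-loop iterations
    // scale == 0: the address does not move with iv, so the new else
    // branch correctly uses iv_adjustment = 0.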
#ifndef PRODUCT
if (TraceSuperWord)
@@ -1390,7 +1431,7 @@ void SuperWord::output() {
}
Node* adr = low_adr->in(MemNode::Address);
const TypePtr* atyp = n->adr_type();
- vn = LoadVectorNode::make(C, opc, ctl, mem, adr, atyp, vlen, velt_basic_type(n));
+ vn = LoadVectorNode::make(C, opc, ctl, mem, adr, atyp, vlen, velt_basic_type(n), control_dependency(p));
vlen_in_bytes = vn->as_LoadVector()->memory_size();
} else if (n->is_Store()) {
// Promote value to be stored to vector
@@ -1988,6 +2029,19 @@ Node* SuperWord::executed_last(Node_List* p) {
return n;
}
+LoadNode::ControlDependency SuperWord::control_dependency(Node_List* p) {
+ LoadNode::ControlDependency dep = LoadNode::DependsOnlyOnTest;
+ for (uint i = 0; i < p->size(); i++) {
+ Node* n = p->at(i);
+ assert(n->is_Load(), "only meaningful for loads");
+ if (!n->depends_only_on_test()) {
+ dep = LoadNode::Pinned;
+ }
+ }
+ return dep;
+}
+
+
//----------------------------align_initial_loop_index---------------------------
// Adjust pre-loop limit so that in main loop, a load/store reference
// to align_to_ref will be a position zero in the vector.
@@ -2247,6 +2301,11 @@ SWPointer::SWPointer(MemNode* mem, SuperWord* slp) :
}
// Match AddP(base, AddP(ptr, k*iv [+ invariant]), constant)
Node* base = adr->in(AddPNode::Base);
+ // The base address should be loop invariant
+ if (!invariant(base)) {
+ assert(!valid(), "base address is loop variant");
+ return;
+ }
//unsafe reference could not be aligned appropriately without runtime checking
if (base == NULL || base->bottom_type() == Type::TOP) {
assert(!valid(), "unsafe access");
diff --git a/src/share/vm/opto/superword.hpp b/src/share/vm/opto/superword.hpp
index 508158647..a5b52d961 100644
--- a/src/share/vm/opto/superword.hpp
+++ b/src/share/vm/opto/superword.hpp
@@ -41,7 +41,7 @@
// Exploiting SuperWord Level Parallelism with
// Multimedia Instruction Sets
// by
-// Samuel Larsen and Saman Amarasighe
+// Samuel Larsen and Saman Amarasinghe
// MIT Laboratory for Computer Science
// date
// May 2000
@@ -399,6 +399,7 @@ class SuperWord : public ResourceObj {
Node* executed_first(Node_List* p);
// Return the node executed last in pack p.
Node* executed_last(Node_List* p);
+ static LoadNode::ControlDependency control_dependency(Node_List* p);
// Alignment within a vector memory reference
int memory_alignment(MemNode* s, int iv_adjust);
// (Start, end] half-open range defining which operands are vector
@@ -432,7 +433,7 @@ class SWPointer VALUE_OBJ_CLASS_SPEC {
Node* _base; // NULL if unsafe nonheap reference
Node* _adr; // address pointer
- jint _scale; // multipler for iv (in bytes), 0 if no loop iv
+ jint _scale; // multiplier for iv (in bytes), 0 if no loop iv
jint _offset; // constant offset (in bytes)
Node* _invar; // invariant offset (in bytes), NULL if none
bool _negate_invar; // if true then use: (0 - _invar)
diff --git a/src/share/vm/opto/type.cpp b/src/share/vm/opto/type.cpp
index e877ae953..e57e23b69 100644
--- a/src/share/vm/opto/type.cpp
+++ b/src/share/vm/opto/type.cpp
@@ -1180,11 +1180,11 @@ static int normalize_int_widen( jint lo, jint hi, int w ) {
// Certain normalizations keep us sane when comparing types.
// The 'SMALLINT' covers constants and also CC and its relatives.
if (lo <= hi) {
- if ((juint)(hi - lo) <= SMALLINT) w = Type::WidenMin;
- if ((juint)(hi - lo) >= max_juint) w = Type::WidenMax; // TypeInt::INT
+ if (((juint)hi - lo) <= SMALLINT) w = Type::WidenMin;
+ if (((juint)hi - lo) >= max_juint) w = Type::WidenMax; // TypeInt::INT
} else {
- if ((juint)(lo - hi) <= SMALLINT) w = Type::WidenMin;
- if ((juint)(lo - hi) >= max_juint) w = Type::WidenMin; // dual TypeInt::INT
+ if (((juint)lo - hi) <= SMALLINT) w = Type::WidenMin;
+ if (((juint)lo - hi) >= max_juint) w = Type::WidenMin; // dual TypeInt::INT
}
return w;
}
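
The cast moved inside the subtraction because 'hi - lo' can overflow a signed
int, which is undefined behavior in C++, while the unsigned form wraps as
intended. A quick check (hedged):

    // lo = min_jint, hi = max_jint:
    //   (juint)(hi - lo)  -> signed subtraction overflows first (UB)
    //   ((juint)hi - lo)  -> 'lo' is converted to unsigned, the subtraction
    //                        wraps modulo 2^32 and yields max_juint, so the
    //                        widen becomes Type::WidenMax as intended.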
@@ -1438,11 +1438,11 @@ static int normalize_long_widen( jlong lo, jlong hi, int w ) {
// Certain normalizations keep us sane when comparing types.
// The 'SMALLINT' covers constants.
if (lo <= hi) {
- if ((julong)(hi - lo) <= SMALLINT) w = Type::WidenMin;
- if ((julong)(hi - lo) >= max_julong) w = Type::WidenMax; // TypeLong::LONG
+ if (((julong)hi - lo) <= SMALLINT) w = Type::WidenMin;
+ if (((julong)hi - lo) >= max_julong) w = Type::WidenMax; // TypeLong::LONG
} else {
- if ((julong)(lo - hi) <= SMALLINT) w = Type::WidenMin;
- if ((julong)(lo - hi) >= max_julong) w = Type::WidenMin; // dual TypeLong::LONG
+ if (((julong)lo - hi) <= SMALLINT) w = Type::WidenMin;
+ if (((julong)lo - hi) >= max_julong) w = Type::WidenMin; // dual TypeLong::LONG
}
return w;
}
@@ -3950,7 +3950,9 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const {
(tap->_klass_is_exact && !tap->klass()->is_subtype_of(klass())) ||
// 'this' is exact and super or unrelated:
(this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) {
- tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
+ if (above_centerline(ptr)) {
+ tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
+ }
return make(NotNull, NULL, tary, lazy_klass, false, off, InstanceBot);
}
diff --git a/src/share/vm/opto/vectornode.cpp b/src/share/vm/opto/vectornode.cpp
index 9660d4ed8..d8b197175 100644
--- a/src/share/vm/opto/vectornode.cpp
+++ b/src/share/vm/opto/vectornode.cpp
@@ -403,9 +403,10 @@ PackNode* PackNode::binary_tree_pack(Compile* C, int lo, int hi) {
// Return the vector version of a scalar load node.
LoadVectorNode* LoadVectorNode::make(Compile* C, int opc, Node* ctl, Node* mem,
- Node* adr, const TypePtr* atyp, uint vlen, BasicType bt) {
+ Node* adr, const TypePtr* atyp, uint vlen, BasicType bt,
+ ControlDependency control_dependency) {
const TypeVect* vt = TypeVect::make(bt, vlen);
- return new (C) LoadVectorNode(ctl, mem, adr, atyp, vt);
+ return new (C) LoadVectorNode(ctl, mem, adr, atyp, vt, control_dependency);
}
// Return the vector version of a scalar store node.
diff --git a/src/share/vm/opto/vectornode.hpp b/src/share/vm/opto/vectornode.hpp
index 0b95a10d8..4f4703a49 100644
--- a/src/share/vm/opto/vectornode.hpp
+++ b/src/share/vm/opto/vectornode.hpp
@@ -355,8 +355,8 @@ class XorVNode : public VectorNode {
// Load Vector from memory
class LoadVectorNode : public LoadNode {
public:
- LoadVectorNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeVect* vt)
- : LoadNode(c, mem, adr, at, vt, MemNode::unordered) {
+ LoadVectorNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeVect* vt, ControlDependency control_dependency = LoadNode::DependsOnlyOnTest)
+ : LoadNode(c, mem, adr, at, vt, MemNode::unordered, control_dependency) {
init_class_id(Class_LoadVector);
}
@@ -372,7 +372,8 @@ class LoadVectorNode : public LoadNode {
virtual int store_Opcode() const { return Op_StoreVector; }
static LoadVectorNode* make(Compile* C, int opc, Node* ctl, Node* mem,
- Node* adr, const TypePtr* atyp, uint vlen, BasicType bt);
+ Node* adr, const TypePtr* atyp, uint vlen, BasicType bt,
+ ControlDependency control_dependency = LoadNode::DependsOnlyOnTest);
};
//------------------------------StoreVectorNode--------------------------------
diff --git a/src/share/vm/prims/forte.cpp b/src/share/vm/prims/forte.cpp
index 3d12b02c1..586ae6f61 100644
--- a/src/share/vm/prims/forte.cpp
+++ b/src/share/vm/prims/forte.cpp
@@ -172,8 +172,27 @@ static bool is_decipherable_compiled_frame(JavaThread* thread, frame* fr, nmetho
// Now do we have a useful PcDesc?
if (pc_desc == NULL ||
pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) {
- // No debug information available for this pc
- // vframeStream would explode if we try and walk the frames.
+ // No debug information is available for this PC.
+ //
+ // vframeStreamCommon::fill_from_frame() will decode the frame depending
+ // on the state of the thread.
+ //
+ // Case #1: If the thread is in Java (state == _thread_in_Java), then
+ // the vframeStreamCommon object will be filled as if the frame were a native
+ // compiled frame. Therefore, no debug information is needed.
+ //
+ // Case #2: If the thread is in any other state, then two steps will be performed:
+ // - if asserts are enabled, found_bad_method_frame() will be called and
+ // the assert in found_bad_method_frame() will be triggered;
+ // - if asserts are disabled, the vframeStreamCommon object will be filled
+ // as if it were a native compiled frame.
+ //
+ // Case (2) is similar to the way interpreter frames are processed in
+ // vframeStreamCommon::fill_from_interpreter_frame in case no valid BCI
+ // was found for an interpreted frame. If asserts are enabled, the assert
+ // in found_bad_method_frame() will be triggered. If asserts are disabled,
+ // the vframeStreamCommon object will be filled afterwards as if the
+ // interpreter were at the point of entering into the method.
return false;
}
@@ -230,9 +249,10 @@ static bool is_decipherable_interpreted_frame(JavaThread* thread,
// a valid method. Then again we may have caught an interpreter
// frame in the middle of construction and the bci field is
// not yet valid.
-
- *method_p = method;
if (!method->is_valid_method()) return false;
+ *method_p = method; // If the Method* found is invalid, it is
+ // ignored by forte_fill_call_trace_given_top().
+ // So set method_p only if the Method is valid.
intptr_t bcx = fr->interpreter_frame_bcx();
@@ -247,18 +267,33 @@ static bool is_decipherable_interpreted_frame(JavaThread* thread,
}
-// Determine if 'fr' can be used to find an initial Java frame.
-// Return false if it can not find a fully decipherable Java frame
-// (in other words a frame that isn't safe to use in a vframe stream).
-// Obviously if it can't even find a Java frame false will also be returned.
+// Determine if a Java frame can be found starting with the frame 'fr'.
+//
+// Check the return value of find_initial_Java_frame and the value of
+// 'method_p' to decide how to use the results returned by this method.
+//
+// If 'method_p' is not NULL, an initial Java frame has been found and
+// the stack can be walked starting from that initial frame. In this case,
+// 'method_p' points to the Method that the initial frame belongs to and
+// the initial Java frame is returned in initial_frame_p.
+//
+// find_initial_Java_frame() returns true if a Method has been found (i.e.,
+// 'method_p' is not NULL) and the initial frame that belongs to that Method
+// is decipherable.
//
-// If we find a Java frame decipherable or not then by definition we have
-// identified a method and that will be returned to the caller via method_p.
-// If we can determine a bci that is returned also. (Hmm is it possible
-// to return a method and bci and still return false? )
+// A frame is considered to be decipherable:
//
-// The initial Java frame we find (if any) is return via initial_frame_p.
+// - if the frame is a compiled frame and a PCDesc is available;
//
+// - if the frame is an interpreter frame that is valid, or if the thread's
+//   state is one of (state == _thread_in_native || state == _thread_in_vm ||
+//   state == _thread_blocked).
+//
+// Note that find_initial_Java_frame() can return false even if an initial
+// Java method was found (e.g., there is no PCDesc available for the method).
+//
+// If 'method_p' is NULL, it was not possible to find a Java frame when
+// walking the stack starting from 'fr'. In this case find_initial_Java_frame
+// returns false.
static bool find_initial_Java_frame(JavaThread* thread,
frame* fr,
@@ -278,8 +313,6 @@ static bool find_initial_Java_frame(JavaThread* thread,
// recognizable to us. This should only happen if we are in a JRT_LEAF
// or something called by a JRT_LEAF method.
-
-
frame candidate = *fr;
// If the starting frame we were given has no codeBlob associated with
@@ -334,9 +367,11 @@ static bool find_initial_Java_frame(JavaThread* thread,
nmethod* nm = (nmethod*) candidate.cb();
*method_p = nm->method();
- // If the frame isn't fully decipherable then the default
- // value for the bci is a signal that we don't have a bci.
- // If we have a decipherable frame this bci value will
+ // If the frame is not decipherable, then the value of -1
+ // for the BCI is used to signal that no BCI is available.
+ // Furthermore, the method returns false in this case.
+ //
+ // If a decipherable frame is available, the BCI value will
// not be used.
*bci_p = -1;
@@ -347,9 +382,9 @@ static bool find_initial_Java_frame(JavaThread* thread,
if (nm->is_native_method()) return true;
- // If it isn't decipherable then we have found a pc that doesn't
- // have a PCDesc that can get us a bci however we did find
- // a method
+ // If the frame is not decipherable, then a PC was found
+ // that does not have a PCDesc from which a BCI can be obtained.
+ // Nevertheless, a Method was found.
if (!is_decipherable_compiled_frame(thread, &candidate, nm)) {
return false;
@@ -358,7 +393,7 @@ static bool find_initial_Java_frame(JavaThread* thread,
// is_decipherable_compiled_frame may modify candidate's pc
*initial_frame_p = candidate;
- assert(nm->pc_desc_at(candidate.pc()) != NULL, "if it's decipherable then pc must be valid");
+ assert(nm->pc_desc_at(candidate.pc()) != NULL, "debug information must be available if the frame is decipherable");
return true;
}
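
To make the revised contract concrete, here is a hedged caller sketch in the spirit of forte_fill_call_trace_given_top() below ('thd' and 'top_frame' are assumed to come from the sampling context; the comments only restate the contract documented above):

```cpp
frame initial_Java_frame;
Method* method = NULL;
int bci = -1;  // stays -1 if no PCDesc/BCI can be recovered

bool decipherable =
    find_initial_Java_frame(thd, &top_frame, &initial_Java_frame, &method, &bci);

if (method == NULL) {
  return;  // no Java frame could be found; nothing to report
}
// A Method was found: initial_Java_frame can seed the stack walk.
// 'decipherable' additionally tells whether a PCDesc/BCI was recovered
// for that initial frame.
```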
@@ -388,17 +423,17 @@ static void forte_fill_call_trace_given_top(JavaThread* thd,
frame initial_Java_frame;
Method* method;
- int bci;
+ int bci = -1; // assume no BCI is available for the method;
+ // updated below if the correct value can be recovered
int count;
count = 0;
assert(trace->frames != NULL, "trace->frames must be non-NULL");
- bool fully_decipherable = find_initial_Java_frame(thd, &top_frame, &initial_Java_frame, &method, &bci);
-
- // The frame might not be walkable but still recovered a method
- // (e.g. an nmethod with no scope info for the pc)
+ // Walk the stack starting from 'top_frame' and search for an initial Java frame.
+ find_initial_Java_frame(thd, &top_frame, &initial_Java_frame, &method, &bci);
+ // Check if a Java Method has been found.
if (method == NULL) return;
if (!method->is_valid_method()) {
@@ -406,29 +441,6 @@ static void forte_fill_call_trace_given_top(JavaThread* thd,
return;
}
- // We got a Java frame however it isn't fully decipherable
- // so it won't necessarily be safe to use it for the
- // initial frame in the vframe stream.
-
- if (!fully_decipherable) {
- // Take whatever method the top-frame decoder managed to scrape up.
- // We look further at the top frame only if non-safepoint
- // debugging information is available.
- count++;
- trace->num_frames = count;
- trace->frames[0].method_id = method->find_jmethod_id_or_null();
- if (!method->is_native()) {
- trace->frames[0].lineno = bci;
- } else {
- trace->frames[0].lineno = -3;
- }
-
- if (!initial_Java_frame.safe_for_sender(thd)) return;
-
- RegisterMap map(thd, false);
- initial_Java_frame = initial_Java_frame.sender(&map);
- }
-
vframeStreamForte st(thd, initial_Java_frame, false);
for (; !st.at_end() && count < depth; st.forte_next(), count++) {
diff --git a/src/share/vm/prims/jni.cpp b/src/share/vm/prims/jni.cpp
index f20e32a85..44cea8bd8 100644
--- a/src/share/vm/prims/jni.cpp
+++ b/src/share/vm/prims/jni.cpp
@@ -1325,39 +1325,32 @@ static void jni_invoke_nonstatic(JNIEnv *env, JavaValue* result, jobject receive
Method* m = Method::resolve_jmethod_id(method_id);
number_of_parameters = m->size_of_parameters();
Klass* holder = m->method_holder();
- if (!(holder)->is_interface()) {
+ if (call_type != JNI_VIRTUAL) {
+ selected_method = m;
+ } else if (!m->has_itable_index()) {
// non-interface call -- for that little speed boost, don't handlize
debug_only(No_Safepoint_Verifier nosafepoint;)
- if (call_type == JNI_VIRTUAL) {
- // jni_GetMethodID makes sure class is linked and initialized
- // so m should have a valid vtable index.
- assert(!m->has_itable_index(), "");
- int vtbl_index = m->vtable_index();
- if (vtbl_index != Method::nonvirtual_vtable_index) {
- Klass* k = h_recv->klass();
- // k might be an arrayKlassOop but all vtables start at
- // the same place. The cast is to avoid virtual call and assertion.
- InstanceKlass *ik = (InstanceKlass*)k;
- selected_method = ik->method_at_vtable(vtbl_index);
- } else {
- // final method
- selected_method = m;
- }
+ // jni_GetMethodID makes sure class is linked and initialized
+ // so m should have a valid vtable index.
+ assert(m->valid_vtable_index(), "no valid vtable index");
+ int vtbl_index = m->vtable_index();
+ if (vtbl_index != Method::nonvirtual_vtable_index) {
+ Klass* k = h_recv->klass();
+ // k might be an arrayKlassOop but all vtables start at
+ // the same place. The cast is to avoid virtual call and assertion.
+ InstanceKlass *ik = (InstanceKlass*)k;
+ selected_method = ik->method_at_vtable(vtbl_index);
} else {
- // JNI_NONVIRTUAL call
+ // final method
selected_method = m;
}
} else {
// interface call
KlassHandle h_holder(THREAD, holder);
- if (call_type == JNI_VIRTUAL) {
- int itbl_index = m->itable_index();
- Klass* k = h_recv->klass();
- selected_method = InstanceKlass::cast(k)->method_at_itable(h_holder(), itbl_index, CHECK);
- } else {
- selected_method = m;
- }
+ int itbl_index = m->itable_index();
+ Klass* k = h_recv->klass();
+ selected_method = InstanceKlass::cast(k)->method_at_itable(h_holder(), itbl_index, CHECK);
}
}
@@ -1803,34 +1796,34 @@ JNI_END
// the runtime type of subword integral basic types is integer
DEFINE_CALLMETHODV(jboolean, Boolean, T_BOOLEAN
- , HOTSPOT_JNI_CALLBOOLEANMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLBOOLEANMETHOD_RETURN(_ret_ref))
+ , HOTSPOT_JNI_CALLBOOLEANMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLBOOLEANMETHODV_RETURN(_ret_ref))
DEFINE_CALLMETHODV(jbyte, Byte, T_BYTE
- , HOTSPOT_JNI_CALLBYTEMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLBYTEMETHOD_RETURN(_ret_ref))
+ , HOTSPOT_JNI_CALLBYTEMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLBYTEMETHODV_RETURN(_ret_ref))
DEFINE_CALLMETHODV(jchar, Char, T_CHAR
- , HOTSPOT_JNI_CALLCHARMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLCHARMETHOD_RETURN(_ret_ref))
+ , HOTSPOT_JNI_CALLCHARMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLCHARMETHODV_RETURN(_ret_ref))
DEFINE_CALLMETHODV(jshort, Short, T_SHORT
- , HOTSPOT_JNI_CALLSHORTMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLSHORTMETHOD_RETURN(_ret_ref))
+ , HOTSPOT_JNI_CALLSHORTMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLSHORTMETHODV_RETURN(_ret_ref))
DEFINE_CALLMETHODV(jobject, Object, T_OBJECT
- , HOTSPOT_JNI_CALLOBJECTMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLOBJECTMETHOD_RETURN(_ret_ref))
+ , HOTSPOT_JNI_CALLOBJECTMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLOBJECTMETHODV_RETURN(_ret_ref))
DEFINE_CALLMETHODV(jint, Int, T_INT,
- HOTSPOT_JNI_CALLINTMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLINTMETHOD_RETURN(_ret_ref))
+ HOTSPOT_JNI_CALLINTMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLINTMETHODV_RETURN(_ret_ref))
DEFINE_CALLMETHODV(jlong, Long, T_LONG
- , HOTSPOT_JNI_CALLLONGMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLLONGMETHOD_RETURN(_ret_ref))
+ , HOTSPOT_JNI_CALLLONGMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLLONGMETHODV_RETURN(_ret_ref))
// Float and double probes don't return value because dtrace doesn't currently support it
DEFINE_CALLMETHODV(jfloat, Float, T_FLOAT
- , HOTSPOT_JNI_CALLFLOATMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLFLOATMETHOD_RETURN())
+ , HOTSPOT_JNI_CALLFLOATMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLFLOATMETHODV_RETURN())
DEFINE_CALLMETHODV(jdouble, Double, T_DOUBLE
- , HOTSPOT_JNI_CALLDOUBLEMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLDOUBLEMETHOD_RETURN())
+ , HOTSPOT_JNI_CALLDOUBLEMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLDOUBLEMETHODV_RETURN())
#define DEFINE_CALLMETHODA(ResultType, Result, Tag \
, EntryProbe, ReturnProbe) \
@@ -1855,34 +1848,34 @@ JNI_END
// the runtime type of subword integral basic types is integer
DEFINE_CALLMETHODA(jboolean, Boolean, T_BOOLEAN
- , HOTSPOT_JNI_CALLBOOLEANMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLBOOLEANMETHOD_RETURN(_ret_ref))
+ , HOTSPOT_JNI_CALLBOOLEANMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLBOOLEANMETHODA_RETURN(_ret_ref))
DEFINE_CALLMETHODA(jbyte, Byte, T_BYTE
- , HOTSPOT_JNI_CALLBYTEMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLBYTEMETHOD_RETURN(_ret_ref))
+ , HOTSPOT_JNI_CALLBYTEMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLBYTEMETHODA_RETURN(_ret_ref))
DEFINE_CALLMETHODA(jchar, Char, T_CHAR
- , HOTSPOT_JNI_CALLCHARMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLCHARMETHOD_RETURN(_ret_ref))
+ , HOTSPOT_JNI_CALLCHARMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLCHARMETHODA_RETURN(_ret_ref))
DEFINE_CALLMETHODA(jshort, Short, T_SHORT
- , HOTSPOT_JNI_CALLSHORTMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLSHORTMETHOD_RETURN(_ret_ref))
+ , HOTSPOT_JNI_CALLSHORTMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLSHORTMETHODA_RETURN(_ret_ref))
DEFINE_CALLMETHODA(jobject, Object, T_OBJECT
- , HOTSPOT_JNI_CALLOBJECTMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLOBJECTMETHOD_RETURN(_ret_ref))
+ , HOTSPOT_JNI_CALLOBJECTMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLOBJECTMETHODA_RETURN(_ret_ref))
DEFINE_CALLMETHODA(jint, Int, T_INT,
- HOTSPOT_JNI_CALLINTMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLINTMETHOD_RETURN(_ret_ref))
+ HOTSPOT_JNI_CALLINTMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLINTMETHODA_RETURN(_ret_ref))
DEFINE_CALLMETHODA(jlong, Long, T_LONG
- , HOTSPOT_JNI_CALLLONGMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLLONGMETHOD_RETURN(_ret_ref))
+ , HOTSPOT_JNI_CALLLONGMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLLONGMETHODA_RETURN(_ret_ref))
// Float and double probes don't return value because dtrace doesn't currently support it
DEFINE_CALLMETHODA(jfloat, Float, T_FLOAT
- , HOTSPOT_JNI_CALLFLOATMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLFLOATMETHOD_RETURN())
+ , HOTSPOT_JNI_CALLFLOATMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLFLOATMETHODA_RETURN())
DEFINE_CALLMETHODA(jdouble, Double, T_DOUBLE
- , HOTSPOT_JNI_CALLDOUBLEMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
- HOTSPOT_JNI_CALLDOUBLEMETHOD_RETURN())
+ , HOTSPOT_JNI_CALLDOUBLEMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+ HOTSPOT_JNI_CALLDOUBLEMETHODA_RETURN())
DT_VOID_RETURN_MARK_DECL(CallVoidMethod, HOTSPOT_JNI_CALLVOIDMETHOD_RETURN());
DT_VOID_RETURN_MARK_DECL(CallVoidMethodV, HOTSPOT_JNI_CALLVOIDMETHODV_RETURN());
@@ -3144,7 +3137,7 @@ JNI_ENTRY(void, jni_SetStatic##Result##Field(JNIEnv *env, jclass clazz, jfieldID
JNI_END
DEFINE_SETSTATICFIELD(jboolean, bool, Boolean, 'Z', z
- , HOTSPOT_JNI_SETBOOLEANFIELD_ENTRY(env, clazz, (uintptr_t)fieldID, value),
+ , HOTSPOT_JNI_SETSTATICBOOLEANFIELD_ENTRY(env, clazz, (uintptr_t)fieldID, value),
HOTSPOT_JNI_SETBOOLEANFIELD_RETURN())
DEFINE_SETSTATICFIELD(jbyte, byte, Byte, 'B', b
, HOTSPOT_JNI_SETSTATICBYTEFIELD_ENTRY(env, clazz, (uintptr_t) fieldID, value),
@@ -5071,6 +5064,7 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void *args_) {
unit_test_function_call
// Forward declaration
+void TestOS_test();
void TestReservedSpace_test();
void TestReserveMemorySpecial_test();
void TestVirtualSpace_test();
@@ -5092,6 +5086,7 @@ void FreeRegionList_test();
void execute_internal_vm_tests() {
if (ExecuteInternalVMTests) {
tty->print_cr("Running internal VM tests");
+ run_unit_test(TestOS_test());
run_unit_test(TestReservedSpace_test());
run_unit_test(TestReserveMemorySpecial_test());
run_unit_test(TestVirtualSpace_test());
diff --git a/src/share/vm/prims/jniCheck.cpp b/src/share/vm/prims/jniCheck.cpp
index 403813282..e95937ae9 100644
--- a/src/share/vm/prims/jniCheck.cpp
+++ b/src/share/vm/prims/jniCheck.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,8 @@
# include "jniTypes_ppc.hpp"
#endif
+// Complain once for each additional batch of this many unplanned local refs
+#define CHECK_JNI_LOCAL_REF_CAP_WARN_THRESHOLD 32
// Heap objects are allowed to be directly referenced only in VM code,
// not in native code.
@@ -168,12 +170,55 @@ static void NativeReportJNIWarning(JavaThread* thr, const char *msg) {
* SUPPORT FUNCTIONS
*/
+/**
+ * Check whether or not a programmer has actually checked for exceptions. According
+ * to the JNI Specification ("jni/spec/design.html#java_exceptions"):
+ *
+ * There are two cases where the programmer needs to check for exceptions without
+ * being able to first check an error code:
+ *
+ * - The JNI functions that invoke a Java method return the result of the Java method.
+ * The programmer must call ExceptionOccurred() to check for possible exceptions
+ * that occurred during the execution of the Java method.
+ *
+ * - Some of the JNI array access functions do not return an error code, but may
+ * throw an ArrayIndexOutOfBoundsException or ArrayStoreException.
+ *
+ * In all other cases, a non-error return value guarantees that no exceptions have been thrown.
+ *
+ * Programmers often defend against ArrayIndexOutOfBoundsException, so warning
+ * for these functions would be pedantic.
+ */
static inline void
-functionEnterCritical(JavaThread* thr)
-{
+check_pending_exception(JavaThread* thr) {
if (thr->has_pending_exception()) {
NativeReportJNIWarning(thr, "JNI call made with exception pending");
}
+ if (thr->is_pending_jni_exception_check()) {
+ IN_VM(
+ tty->print_cr("WARNING in native method: JNI call made without checking exceptions when required to from %s",
+ thr->get_pending_jni_exception_check());
+ thr->print_stack();
+ )
+ thr->clear_pending_jni_exception_check(); // Just complain once
+ }
+}
+
+/**
+ * Add to the planned number of handles: the requested capacity plus the
+ * current number of live handles and the warning threshold.
+ */
+static inline void
+add_planned_handle_capacity(JNIHandleBlock* handles, size_t capacity) {
+ handles->set_planned_capacity(capacity +
+ handles->get_number_of_live_handles() +
+ CHECK_JNI_LOCAL_REF_CAP_WARN_THRESHOLD);
+}
+
+
+static inline void
+functionEnterCritical(JavaThread* thr)
+{
+ check_pending_exception(thr);
}
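
As a hedged illustration of the pattern this new check enforces (a hypothetical native method; 'obj' and 'mid' are assumed valid): after a JNI call that invokes Java code, the return value alone says nothing about exceptions, so checked JNI now warns unless an exception-query function is called before the next JNI call.

```cpp
jobject result = env->CallObjectMethod(obj, mid);
if (env->ExceptionCheck()) {  // clears the pending-check flag set above
  env->ExceptionDescribe();
  env->ExceptionClear();
  return NULL;
}
```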
static inline void
@@ -187,9 +232,7 @@ functionEnter(JavaThread* thr)
if (thr->in_critical()) {
tty->print_cr("%s", warn_other_function_in_critical);
}
- if (thr->has_pending_exception()) {
- NativeReportJNIWarning(thr, "JNI call made with exception pending");
- }
+ check_pending_exception(thr);
}
static inline void
@@ -201,9 +244,20 @@ functionEnterExceptionAllowed(JavaThread* thr)
}
static inline void
-functionExit(JNIEnv *env)
+functionExit(JavaThread* thr)
{
- /* nothing to do at this time */
+ JNIHandleBlock* handles = thr->active_handles();
+ size_t planned_capacity = handles->get_planned_capacity();
+ size_t live_handles = handles->get_number_of_live_handles();
+ if (live_handles > planned_capacity) {
+ IN_VM(
+ tty->print_cr("WARNING: JNI local refs: %zu, exceeds capacity: %zu",
+ live_handles, planned_capacity);
+ thr->print_stack();
+ )
+ // Complain just the once, reset to current + warn threshold
+ add_planned_handle_capacity(handles, 0);
+ }
}
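
A hedged illustration of native code that now draws this warning: each iteration creates a local reference that is never deleted, so the live-handle count eventually exceeds the planned capacity ('arr' is a hypothetical object array).

```cpp
for (jsize i = 0; i < env->GetArrayLength(arr); i++) {
  jobject o = env->GetObjectArrayElement(arr, i);
  // ... use o ...
  // a missing env->DeleteLocalRef(o) here lets live handles accumulate
}
```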
static inline void
@@ -508,7 +562,7 @@ JNI_ENTRY_CHECKED(jclass,
jniCheck::validate_object(thr, loader);
)
jclass result = UNCHECKED()->DefineClass(env, name, loader, buf, len);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -520,7 +574,7 @@ JNI_ENTRY_CHECKED(jclass,
jniCheck::validate_class_descriptor(thr, name);
)
jclass result = UNCHECKED()->FindClass(env, name);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -532,7 +586,7 @@ JNI_ENTRY_CHECKED(jmethodID,
jniCheck::validate_object(thr, method);
)
jmethodID result = UNCHECKED()->FromReflectedMethod(env, method);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -544,7 +598,7 @@ JNI_ENTRY_CHECKED(jfieldID,
jniCheck::validate_object(thr, field);
)
jfieldID result = UNCHECKED()->FromReflectedField(env, field);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -560,7 +614,7 @@ JNI_ENTRY_CHECKED(jobject,
)
jobject result = UNCHECKED()->ToReflectedMethod(env, cls, methodID,
isStatic);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -572,7 +626,7 @@ JNI_ENTRY_CHECKED(jclass,
jniCheck::validate_class(thr, sub, true);
)
jclass result = UNCHECKED()->GetSuperclass(env, sub);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -586,7 +640,7 @@ JNI_ENTRY_CHECKED(jboolean,
jniCheck::validate_class(thr, sup, true);
)
jboolean result = UNCHECKED()->IsAssignableFrom(env, sub, sup);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -601,7 +655,7 @@ JNI_ENTRY_CHECKED(jobject,
)
jobject result = UNCHECKED()->ToReflectedField(env, cls, fieldID,
isStatic);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -619,7 +673,7 @@ JNI_ENTRY_CHECKED(jint,
}
)
jint result = UNCHECKED()->Throw(env, obj);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -634,15 +688,16 @@ JNI_ENTRY_CHECKED(jint,
jniCheck::validate_throwable_klass(thr, k);
)
jint result = UNCHECKED()->ThrowNew(env, clazz, msg);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
JNI_ENTRY_CHECKED(jthrowable,
checked_jni_ExceptionOccurred(JNIEnv *env))
+ thr->clear_pending_jni_exception_check();
functionEnterExceptionAllowed(thr);
jthrowable result = UNCHECKED()->ExceptionOccurred(env);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -650,22 +705,24 @@ JNI_ENTRY_CHECKED(void,
checked_jni_ExceptionDescribe(JNIEnv *env))
functionEnterExceptionAllowed(thr);
UNCHECKED()->ExceptionDescribe(env);
- functionExit(env);
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(void,
checked_jni_ExceptionClear(JNIEnv *env))
+ thr->clear_pending_jni_exception_check();
functionEnterExceptionAllowed(thr);
UNCHECKED()->ExceptionClear(env);
- functionExit(env);
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(void,
checked_jni_FatalError(JNIEnv *env,
const char *msg))
+ thr->clear_pending_jni_exception_check();
functionEnter(thr);
UNCHECKED()->FatalError(env, msg);
- functionExit(env);
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(jint,
@@ -675,7 +732,10 @@ JNI_ENTRY_CHECKED(jint,
if (capacity < 0)
NativeReportJNIFatalError(thr, "negative capacity");
jint result = UNCHECKED()->PushLocalFrame(env, capacity);
- functionExit(env);
+ if (result == JNI_OK) {
+ add_planned_handle_capacity(thr->active_handles(), capacity);
+ }
+ functionExit(thr);
return result;
JNI_END
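
A hedged usage sketch of how declaring capacity up front feeds the planned-capacity bookkeeping added above, so a burst of local references stays below the warning threshold:

```cpp
if (env->PushLocalFrame(64) == JNI_OK) {  // plans for 64 extra local refs
  // ... create up to 64 local references without triggering the warning ...
  env->PopLocalFrame(NULL);               // drops the frame's local refs
}
```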
@@ -684,7 +744,7 @@ JNI_ENTRY_CHECKED(jobject,
jobject result))
functionEnterExceptionAllowed(thr);
jobject res = UNCHECKED()->PopLocalFrame(env, result);
- functionExit(env);
+ functionExit(thr);
return res;
JNI_END
@@ -698,7 +758,7 @@ JNI_ENTRY_CHECKED(jobject,
}
)
jobject result = UNCHECKED()->NewGlobalRef(env,lobj);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -714,7 +774,7 @@ JNI_ENTRY_CHECKED(void,
}
)
UNCHECKED()->DeleteGlobalRef(env,gref);
- functionExit(env);
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(void,
@@ -729,7 +789,7 @@ JNI_ENTRY_CHECKED(void,
"Invalid local JNI handle passed to DeleteLocalRef");
)
UNCHECKED()->DeleteLocalRef(env, obj);
- functionExit(env);
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(jboolean,
@@ -750,7 +810,7 @@ JNI_ENTRY_CHECKED(jboolean,
}
)
jboolean result = UNCHECKED()->IsSameObject(env,obj1,obj2);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -764,7 +824,7 @@ JNI_ENTRY_CHECKED(jobject,
}
)
jobject result = UNCHECKED()->NewLocalRef(env, ref);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -776,7 +836,10 @@ JNI_ENTRY_CHECKED(jint,
NativeReportJNIFatalError(thr, "negative capacity");
}
jint result = UNCHECKED()->EnsureLocalCapacity(env, capacity);
- functionExit(env);
+ if (result == JNI_OK) {
+ add_planned_handle_capacity(thr->active_handles(), capacity);
+ }
+ functionExit(thr);
return result;
JNI_END
@@ -788,7 +851,7 @@ JNI_ENTRY_CHECKED(jobject,
jniCheck::validate_class(thr, clazz, false);
)
jobject result = UNCHECKED()->AllocObject(env,clazz);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -806,7 +869,7 @@ JNI_ENTRY_CHECKED(jobject,
va_start(args, methodID);
jobject result = UNCHECKED()->NewObjectV(env,clazz,methodID,args);
va_end(args);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -821,7 +884,7 @@ JNI_ENTRY_CHECKED(jobject,
jniCheck::validate_jmethod_id(thr, methodID);
)
jobject result = UNCHECKED()->NewObjectV(env,clazz,methodID,args);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -836,7 +899,7 @@ JNI_ENTRY_CHECKED(jobject,
jniCheck::validate_jmethod_id(thr, methodID);
)
jobject result = UNCHECKED()->NewObjectA(env,clazz,methodID,args);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -848,7 +911,7 @@ JNI_ENTRY_CHECKED(jclass,
jniCheck::validate_object(thr, obj);
)
jclass result = UNCHECKED()->GetObjectClass(env,obj);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -862,7 +925,7 @@ JNI_ENTRY_CHECKED(jboolean,
jniCheck::validate_class(thr, clazz, true);
)
jboolean result = UNCHECKED()->IsInstanceOf(env,obj,clazz);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -876,7 +939,7 @@ JNI_ENTRY_CHECKED(jmethodID,
jniCheck::validate_class(thr, clazz, false);
)
jmethodID result = UNCHECKED()->GetMethodID(env,clazz,name,sig);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -895,7 +958,8 @@ JNI_ENTRY_CHECKED(ResultType, \
ResultType result =UNCHECKED()->Call##Result##MethodV(env, obj, methodID, \
args); \
va_end(args); \
- functionExit(env); \
+ thr->set_pending_jni_exception_check("Call"#Result"Method"); \
+ functionExit(thr); \
return result; \
JNI_END \
\
@@ -910,7 +974,8 @@ JNI_ENTRY_CHECKED(ResultType, \
) \
ResultType result = UNCHECKED()->Call##Result##MethodV(env, obj, methodID,\
args); \
- functionExit(env); \
+ thr->set_pending_jni_exception_check("Call"#Result"MethodV"); \
+ functionExit(thr); \
return result; \
JNI_END \
\
@@ -925,7 +990,8 @@ JNI_ENTRY_CHECKED(ResultType, \
) \
ResultType result = UNCHECKED()->Call##Result##MethodA(env, obj, methodID,\
args); \
- functionExit(env); \
+ thr->set_pending_jni_exception_check("Call"#Result"MethodA"); \
+ functionExit(thr); \
return result; \
JNI_END
@@ -952,7 +1018,8 @@ JNI_ENTRY_CHECKED(void,
va_start(args,methodID);
UNCHECKED()->CallVoidMethodV(env,obj,methodID,args);
va_end(args);
- functionExit(env);
+ thr->set_pending_jni_exception_check("CallVoidMethod");
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(void,
@@ -965,7 +1032,8 @@ JNI_ENTRY_CHECKED(void,
jniCheck::validate_call_object(thr, obj, methodID);
)
UNCHECKED()->CallVoidMethodV(env,obj,methodID,args);
- functionExit(env);
+ thr->set_pending_jni_exception_check("CallVoidMethodV");
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(void,
@@ -978,7 +1046,8 @@ JNI_ENTRY_CHECKED(void,
jniCheck::validate_call_object(thr, obj, methodID);
)
UNCHECKED()->CallVoidMethodA(env,obj,methodID,args);
- functionExit(env);
+ thr->set_pending_jni_exception_check("CallVoidMethodA");
+ functionExit(thr);
JNI_END
#define WRAPPER_CallNonvirtualMethod(ResultType, Result) \
@@ -1001,7 +1070,8 @@ JNI_ENTRY_CHECKED(ResultType, \
methodID,\
args); \
va_end(args); \
- functionExit(env); \
+ thr->set_pending_jni_exception_check("CallNonvirtual"#Result"Method"); \
+ functionExit(thr); \
return result; \
JNI_END \
\
@@ -1021,7 +1091,8 @@ JNI_ENTRY_CHECKED(ResultType, \
clazz, \
methodID,\
args); \
- functionExit(env); \
+ thr->set_pending_jni_exception_check("CallNonvirtual"#Result"MethodV"); \
+ functionExit(thr); \
return result; \
JNI_END \
\
@@ -1041,7 +1112,8 @@ JNI_ENTRY_CHECKED(ResultType, \
clazz, \
methodID,\
args); \
- functionExit(env); \
+ thr->set_pending_jni_exception_check("CallNonvirtual"#Result"MethodA"); \
+ functionExit(thr); \
return result; \
JNI_END
@@ -1070,7 +1142,8 @@ JNI_ENTRY_CHECKED(void,
va_start(args,methodID);
UNCHECKED()->CallNonvirtualVoidMethodV(env,obj,clazz,methodID,args);
va_end(args);
- functionExit(env);
+ thr->set_pending_jni_exception_check("CallNonvirtualVoidMethod");
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(void,
@@ -1085,7 +1158,8 @@ JNI_ENTRY_CHECKED(void,
jniCheck::validate_call_class(thr, clazz, methodID);
)
UNCHECKED()->CallNonvirtualVoidMethodV(env,obj,clazz,methodID,args);
- functionExit(env);
+ thr->set_pending_jni_exception_check("CallNonvirtualVoidMethodV");
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(void,
@@ -1100,7 +1174,8 @@ JNI_ENTRY_CHECKED(void,
jniCheck::validate_call_class(thr, clazz, methodID);
)
UNCHECKED()->CallNonvirtualVoidMethodA(env,obj,clazz,methodID,args);
- functionExit(env);
+ thr->set_pending_jni_exception_check("CallNonvirtualVoidMethodA");
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(jfieldID,
@@ -1113,7 +1188,7 @@ JNI_ENTRY_CHECKED(jfieldID,
jniCheck::validate_class(thr, clazz, false);
)
jfieldID result = UNCHECKED()->GetFieldID(env,clazz,name,sig);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1127,7 +1202,7 @@ JNI_ENTRY_CHECKED(ReturnType, \
checkInstanceFieldID(thr, fieldID, obj, FieldType); \
) \
ReturnType result = UNCHECKED()->Get##Result##Field(env,obj,fieldID); \
- functionExit(env); \
+ functionExit(thr); \
return result; \
JNI_END
@@ -1152,7 +1227,7 @@ JNI_ENTRY_CHECKED(void, \
checkInstanceFieldID(thr, fieldID, obj, FieldType); \
) \
UNCHECKED()->Set##Result##Field(env,obj,fieldID,val); \
- functionExit(env); \
+ functionExit(thr); \
JNI_END
WRAPPER_SetField(jobject, Object, T_OBJECT)
@@ -1176,7 +1251,7 @@ JNI_ENTRY_CHECKED(jmethodID,
jniCheck::validate_class(thr, clazz, false);
)
jmethodID result = UNCHECKED()->GetStaticMethodID(env,clazz,name,sig);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1198,7 +1273,8 @@ JNI_ENTRY_CHECKED(ReturnType, \
methodID, \
args); \
va_end(args); \
- functionExit(env); \
+ thr->set_pending_jni_exception_check("CallStatic"#Result"Method"); \
+ functionExit(thr); \
return result; \
JNI_END \
\
@@ -1216,7 +1292,8 @@ JNI_ENTRY_CHECKED(ReturnType, \
clazz, \
methodID, \
args); \
- functionExit(env); \
+ thr->set_pending_jni_exception_check("CallStatic"#Result"MethodV"); \
+ functionExit(thr); \
return result; \
JNI_END \
\
@@ -1234,7 +1311,8 @@ JNI_ENTRY_CHECKED(ReturnType, \
clazz, \
methodID, \
args); \
- functionExit(env); \
+ thr->set_pending_jni_exception_check("CallStatic"#Result"MethodA"); \
+ functionExit(thr); \
return result; \
JNI_END
@@ -1262,7 +1340,8 @@ JNI_ENTRY_CHECKED(void,
va_start(args,methodID);
UNCHECKED()->CallStaticVoidMethodV(env,cls,methodID,args);
va_end(args);
- functionExit(env);
+ thr->set_pending_jni_exception_check("CallStaticVoidMethod");
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(void,
@@ -1276,7 +1355,8 @@ JNI_ENTRY_CHECKED(void,
jniCheck::validate_class(thr, cls, false);
)
UNCHECKED()->CallStaticVoidMethodV(env,cls,methodID,args);
- functionExit(env);
+ thr->set_pending_jni_exception_check("CallStaticVoidMethodV");
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(void,
@@ -1290,7 +1370,8 @@ JNI_ENTRY_CHECKED(void,
jniCheck::validate_class(thr, cls, false);
)
UNCHECKED()->CallStaticVoidMethodA(env,cls,methodID,args);
- functionExit(env);
+ thr->set_pending_jni_exception_check("CallStaticVoidMethodA");
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(jfieldID,
@@ -1303,7 +1384,7 @@ JNI_ENTRY_CHECKED(jfieldID,
jniCheck::validate_class(thr, clazz, false);
)
jfieldID result = UNCHECKED()->GetStaticFieldID(env,clazz,name,sig);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1320,7 +1401,7 @@ JNI_ENTRY_CHECKED(ReturnType, \
ReturnType result = UNCHECKED()->GetStatic##Result##Field(env, \
clazz, \
fieldID); \
- functionExit(env); \
+ functionExit(thr); \
return result; \
JNI_END
@@ -1346,7 +1427,7 @@ JNI_ENTRY_CHECKED(void, \
checkStaticFieldID(thr, fieldID, clazz, FieldType); \
) \
UNCHECKED()->SetStatic##Result##Field(env,clazz,fieldID,value); \
- functionExit(env); \
+ functionExit(thr); \
JNI_END
WRAPPER_SetStaticField(jobject, Object, T_OBJECT)
@@ -1366,7 +1447,7 @@ JNI_ENTRY_CHECKED(jstring,
jsize len))
functionEnter(thr);
jstring result = UNCHECKED()->NewString(env,unicode,len);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1378,7 +1459,7 @@ JNI_ENTRY_CHECKED(jsize,
checkString(thr, str);
)
jsize result = UNCHECKED()->GetStringLength(env,str);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1407,7 +1488,7 @@ JNI_ENTRY_CHECKED(const jchar *,
// Note that the dtrace arguments for the allocated memory will not match up with this solution.
FreeHeap((char*)result);
}
- functionExit(env);
+ functionExit(thr);
return new_result;
JNI_END
@@ -1442,7 +1523,7 @@ JNI_ENTRY_CHECKED(void,
UNCHECKED()->ReleaseStringChars(env, str,
(const jchar*) guarded.release_for_freeing());
}
- functionExit(env);
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(jstring,
@@ -1450,7 +1531,7 @@ JNI_ENTRY_CHECKED(jstring,
const char *utf))
functionEnter(thr);
jstring result = UNCHECKED()->NewStringUTF(env,utf);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1462,7 +1543,7 @@ JNI_ENTRY_CHECKED(jsize,
checkString(thr, str);
)
jsize result = UNCHECKED()->GetStringUTFLength(env,str);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1490,7 +1571,7 @@ JNI_ENTRY_CHECKED(const char *,
// Note that the dtrace arguments for the allocated memory will not match up with this solution.
FreeHeap((char*)result, mtInternal);
}
- functionExit(env);
+ functionExit(thr);
return new_result;
JNI_END
@@ -1525,7 +1606,7 @@ JNI_ENTRY_CHECKED(void,
UNCHECKED()->ReleaseStringUTFChars(env, str,
(const char*) guarded.release_for_freeing());
}
- functionExit(env);
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(jsize,
@@ -1536,7 +1617,7 @@ JNI_ENTRY_CHECKED(jsize,
check_is_array(thr, array);
)
jsize result = UNCHECKED()->GetArrayLength(env,array);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1547,7 +1628,7 @@ JNI_ENTRY_CHECKED(jobjectArray,
jobject init))
functionEnter(thr);
jobjectArray result = UNCHECKED()->NewObjectArray(env,len,clazz,init);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1560,7 +1641,7 @@ JNI_ENTRY_CHECKED(jobject,
check_is_obj_array(thr, array);
)
jobject result = UNCHECKED()->GetObjectArrayElement(env,array,index);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1574,7 +1655,7 @@ JNI_ENTRY_CHECKED(void,
check_is_obj_array(thr, array);
)
UNCHECKED()->SetObjectArrayElement(env,array,index,val);
- functionExit(env);
+ functionExit(thr);
JNI_END
#define WRAPPER_NewScalarArray(Return, Result) \
@@ -1583,7 +1664,7 @@ JNI_ENTRY_CHECKED(Return, \
jsize len)) \
functionEnter(thr); \
Return result = UNCHECKED()->New##Result##Array(env,len); \
- functionExit(env); \
+ functionExit(thr); \
return (Return) result; \
JNI_END
@@ -1611,7 +1692,7 @@ JNI_ENTRY_CHECKED(ElementType *, \
if (result != NULL) { \
result = (ElementType *) check_jni_wrap_copy_array(thr, array, result); \
} \
- functionExit(env); \
+ functionExit(thr); \
return result; \
JNI_END
@@ -1639,7 +1720,7 @@ JNI_ENTRY_CHECKED(void, \
ElementType* orig_result = (ElementType *) check_wrapped_array_release( \
thr, "checked_jni_Release"#Result"ArrayElements", array, elems, mode); \
UNCHECKED()->Release##Result##ArrayElements(env, array, orig_result, mode); \
- functionExit(env); \
+ functionExit(thr); \
JNI_END
WRAPPER_ReleaseScalarArrayElements(T_BOOLEAN,jboolean, Boolean, bool)
@@ -1663,7 +1744,7 @@ JNI_ENTRY_CHECKED(void, \
check_primitive_array_type(thr, array, ElementTag); \
) \
UNCHECKED()->Get##Result##ArrayRegion(env,array,start,len,buf); \
- functionExit(env); \
+ functionExit(thr); \
JNI_END
WRAPPER_GetScalarArrayRegion(T_BOOLEAN, jboolean, Boolean)
@@ -1687,7 +1768,7 @@ JNI_ENTRY_CHECKED(void, \
check_primitive_array_type(thr, array, ElementTag); \
) \
UNCHECKED()->Set##Result##ArrayRegion(env,array,start,len,buf); \
- functionExit(env); \
+ functionExit(thr); \
JNI_END
WRAPPER_SetScalarArrayRegion(T_BOOLEAN, jboolean, Boolean)
@@ -1706,7 +1787,7 @@ JNI_ENTRY_CHECKED(jint,
jint nMethods))
functionEnter(thr);
jint result = UNCHECKED()->RegisterNatives(env,clazz,methods,nMethods);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1715,7 +1796,7 @@ JNI_ENTRY_CHECKED(jint,
jclass clazz))
functionEnter(thr);
jint result = UNCHECKED()->UnregisterNatives(env,clazz);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1727,7 +1808,7 @@ JNI_ENTRY_CHECKED(jint,
jniCheck::validate_object(thr, obj);
)
jint result = UNCHECKED()->MonitorEnter(env,obj);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1739,7 +1820,7 @@ JNI_ENTRY_CHECKED(jint,
jniCheck::validate_object(thr, obj);
)
jint result = UNCHECKED()->MonitorExit(env,obj);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1748,7 +1829,7 @@ JNI_ENTRY_CHECKED(jint,
JavaVM **vm))
functionEnter(thr);
jint result = UNCHECKED()->GetJavaVM(env,vm);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1763,7 +1844,7 @@ JNI_ENTRY_CHECKED(void,
checkString(thr, str);
)
UNCHECKED()->GetStringRegion(env, str, start, len, buf);
- functionExit(env);
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(void,
@@ -1777,7 +1858,7 @@ JNI_ENTRY_CHECKED(void,
checkString(thr, str);
)
UNCHECKED()->GetStringUTFRegion(env, str, start, len, buf);
- functionExit(env);
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(void *,
@@ -1792,7 +1873,7 @@ JNI_ENTRY_CHECKED(void *,
if (result != NULL) {
result = check_jni_wrap_copy_array(thr, array, result);
}
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1808,7 +1889,7 @@ JNI_ENTRY_CHECKED(void,
// Check the element array...
void* orig_result = check_wrapped_array_release(thr, "ReleasePrimitiveArrayCritical", array, carray, mode);
UNCHECKED()->ReleasePrimitiveArrayCritical(env, array, orig_result, mode);
- functionExit(env);
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(const jchar*,
@@ -1820,7 +1901,7 @@ JNI_ENTRY_CHECKED(const jchar*,
checkString(thr, string);
)
const jchar *result = UNCHECKED()->GetStringCritical(env, string, isCopy);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1836,7 +1917,7 @@ JNI_ENTRY_CHECKED(void,
* string parameter as a minor sanity check
*/
UNCHECKED()->ReleaseStringCritical(env, str, chars);
- functionExit(env);
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(jweak,
@@ -1849,7 +1930,7 @@ JNI_ENTRY_CHECKED(jweak,
}
)
jweak result = UNCHECKED()->NewWeakGlobalRef(env, obj);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1858,14 +1939,15 @@ JNI_ENTRY_CHECKED(void,
jweak ref))
functionEnterExceptionAllowed(thr);
UNCHECKED()->DeleteWeakGlobalRef(env, ref);
- functionExit(env);
+ functionExit(thr);
JNI_END
JNI_ENTRY_CHECKED(jboolean,
checked_jni_ExceptionCheck(JNIEnv *env))
+ thr->clear_pending_jni_exception_check();
functionEnterExceptionAllowed(thr);
jboolean result = UNCHECKED()->ExceptionCheck(env);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1875,7 +1957,7 @@ JNI_ENTRY_CHECKED(jobject,
jlong capacity))
functionEnter(thr);
jobject result = UNCHECKED()->NewDirectByteBuffer(env, address, capacity);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1884,7 +1966,7 @@ JNI_ENTRY_CHECKED(void *,
jobject buf))
functionEnter(thr);
void* result = UNCHECKED()->GetDirectBufferAddress(env, buf);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1893,7 +1975,7 @@ JNI_ENTRY_CHECKED(jlong,
jobject buf))
functionEnter(thr);
jlong result = UNCHECKED()->GetDirectBufferCapacity(env, buf);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1906,7 +1988,7 @@ JNI_ENTRY_CHECKED(jobjectRefType,
jniCheck::validate_object(thr, obj);
)
jobjectRefType result = UNCHECKED()->GetObjectRefType(env, obj);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
@@ -1915,7 +1997,7 @@ JNI_ENTRY_CHECKED(jint,
checked_jni_GetVersion(JNIEnv *env))
functionEnter(thr);
jint result = UNCHECKED()->GetVersion(env);
- functionExit(env);
+ functionExit(thr);
return result;
JNI_END
diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp
index 5586ed7de..76886fbcc 100644
--- a/src/share/vm/prims/jvm.cpp
+++ b/src/share/vm/prims/jvm.cpp
@@ -1372,7 +1372,11 @@ JVM_ENTRY(jobject, JVM_DoPrivileged(JNIEnv *env, jclass cls, jobject action, job
if (HAS_PENDING_EXCEPTION) {
pending_exception = Handle(THREAD, PENDING_EXCEPTION);
CLEAR_PENDING_EXCEPTION;
-
+ // JVMTI has already reported the pending exception
+ // JVMTI internal flag reset is needed in order to report PrivilegedActionException
+ if (THREAD->is_Java_thread()) {
+ JvmtiExport::clear_detected_exception((JavaThread*) THREAD);
+ }
if ( pending_exception->is_a(SystemDictionary::Exception_klass()) &&
!pending_exception->is_a(SystemDictionary::RuntimeException_klass())) {
// Throw a java.security.PrivilegedActionException(Exception e) exception
diff --git a/src/share/vm/prims/jvmtiClassFileReconstituter.hpp b/src/share/vm/prims/jvmtiClassFileReconstituter.hpp
index 4170cf915..1ac45efb9 100644
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.hpp
+++ b/src/share/vm/prims/jvmtiClassFileReconstituter.hpp
@@ -68,11 +68,11 @@ class JvmtiConstantPoolReconstituter : public StackObj {
~JvmtiConstantPoolReconstituter() {
if (_symmap != NULL) {
- os::free(_symmap, mtClass);
+ delete _symmap;
_symmap = NULL;
}
if (_classmap != NULL) {
- os::free(_classmap, mtClass);
+ delete _classmap;
_classmap = NULL;
}
}
diff --git a/src/share/vm/prims/jvmtiExport.cpp b/src/share/vm/prims/jvmtiExport.cpp
index 86a173ff2..719a4c3d2 100644
--- a/src/share/vm/prims/jvmtiExport.cpp
+++ b/src/share/vm/prims/jvmtiExport.cpp
@@ -2163,6 +2163,15 @@ void JvmtiExport::cleanup_thread(JavaThread* thread) {
}
}
+void JvmtiExport::clear_detected_exception(JavaThread* thread) {
+ assert(JavaThread::current() == thread, "thread is not current");
+
+ JvmtiThreadState* state = thread->jvmti_thread_state();
+ if (state != NULL) {
+ state->clear_exception_detected();
+ }
+}
+
void JvmtiExport::oops_do(OopClosure* f) {
JvmtiCurrentBreakpoints::oops_do(f);
JvmtiVMObjectAllocEventCollector::oops_do_for_all_threads(f);
diff --git a/src/share/vm/prims/jvmtiExport.hpp b/src/share/vm/prims/jvmtiExport.hpp
index d2a7dec71..4b1ff34cf 100644
--- a/src/share/vm/prims/jvmtiExport.hpp
+++ b/src/share/vm/prims/jvmtiExport.hpp
@@ -363,6 +363,7 @@ class JvmtiExport : public AllStatic {
}
static void cleanup_thread (JavaThread* thread) NOT_JVMTI_RETURN;
+ static void clear_detected_exception (JavaThread* thread) NOT_JVMTI_RETURN;
static void oops_do(OopClosure* f) NOT_JVMTI_RETURN;
static void weak_oops_do(BoolObjectClosure* b, OopClosure* f) NOT_JVMTI_RETURN;
diff --git a/src/share/vm/prims/jvmtiImpl.cpp b/src/share/vm/prims/jvmtiImpl.cpp
index 0d6d00d4e..9b308d43f 100644
--- a/src/share/vm/prims/jvmtiImpl.cpp
+++ b/src/share/vm/prims/jvmtiImpl.cpp
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "interpreter/interpreter.hpp"
+#include "interpreter/oopMapCache.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
@@ -744,6 +745,13 @@ bool VM_GetOrSetLocal::doit_prologue() {
}
void VM_GetOrSetLocal::doit() {
+ InterpreterOopMap oop_mask;
+ _jvf->method()->mask_for(_jvf->bci(), &oop_mask);
+ if (oop_mask.is_dead(_index)) {
+ // The local can be invalid and uninitialized in the scope of current bci
+ _result = JVMTI_ERROR_INVALID_SLOT;
+ return;
+ }
if (_set) {
// Force deoptimization of frame if compiled because it's
// possible the compiler emitted some locals as constant values,
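
A hedged agent-side sketch: with this check in place, asking for a local that is dead at the current bci yields JVMTI_ERROR_INVALID_SLOT instead of a stale value ('jvmti', 'thread', 'depth' and 'slot' are assumed inputs).

```cpp
jobject value;
jvmtiError err = jvmti->GetLocalObject(thread, depth, slot, &value);
if (err == JVMTI_ERROR_INVALID_SLOT) {
  // the slot exists in the method but holds no live value at this bci
}
```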
diff --git a/src/share/vm/prims/jvmtiRedefineClasses.cpp b/src/share/vm/prims/jvmtiRedefineClasses.cpp
index 548057fe6..aa2db58a1 100644
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -777,9 +777,13 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions(
Method* idnum_owner = scratch_class->method_with_idnum(old_num);
if (idnum_owner != NULL) {
// There is already a method assigned this idnum -- switch them
+ // Take current and original idnum from the new_method
idnum_owner->set_method_idnum(new_num);
+ idnum_owner->set_orig_method_idnum(k_new_method->orig_method_idnum());
}
+ // Take current and original idnum from the old_method
k_new_method->set_method_idnum(old_num);
+ k_new_method->set_orig_method_idnum(k_old_method->orig_method_idnum());
if (thread->has_pending_exception()) {
return JVMTI_ERROR_OUT_OF_MEMORY;
}
@@ -812,9 +816,12 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions(
Method* idnum_owner = scratch_class->method_with_idnum(num);
if (idnum_owner != NULL) {
// There is already a method assigned this idnum -- switch them
+ // Take current and original idnum from the new_method
idnum_owner->set_method_idnum(new_num);
+ idnum_owner->set_orig_method_idnum(k_new_method->orig_method_idnum());
}
k_new_method->set_method_idnum(num);
+ k_new_method->set_orig_method_idnum(num);
if (thread->has_pending_exception()) {
return JVMTI_ERROR_OUT_OF_MEMORY;
}
@@ -3322,6 +3329,7 @@ void VM_RedefineClasses::AdjustCpoolCacheAndVtable::do_klass(Klass* k) {
// This is a very busy routine. We don't want too much tracing
// printed out.
bool trace_name_printed = false;
+ InstanceKlass *the_class = InstanceKlass::cast(_the_class_oop);
// Very noisy: only enable this call if you are trying to determine
// that a specific class gets found by this routine.
@@ -3333,10 +3341,8 @@ void VM_RedefineClasses::AdjustCpoolCacheAndVtable::do_klass(Klass* k) {
// If the class being redefined is java.lang.Object, we need to fix all
// array class vtables also
if (k->oop_is_array() && _the_class_oop == SystemDictionary::Object_klass()) {
- k->vtable()->adjust_method_entries(_matching_old_methods,
- _matching_new_methods,
- _matching_methods_length,
- &trace_name_printed);
+ k->vtable()->adjust_method_entries(the_class, &trace_name_printed);
+
} else if (k->oop_is_instance()) {
HandleMark hm(_thread);
InstanceKlass *ik = InstanceKlass::cast(k);
@@ -3372,18 +3378,15 @@ void VM_RedefineClasses::AdjustCpoolCacheAndVtable::do_klass(Klass* k) {
// not yet in the vtable, because the vtable setup is in progress.
// This must be done after we adjust the default_methods and
// default_vtable_indices for methods already in the vtable.
+ // If redefining Unsafe, walk all the vtables looking for entries.
if (ik->vtable_length() > 0 && (_the_class_oop->is_interface()
+ || _the_class_oop == SystemDictionary::misc_Unsafe_klass()
|| ik->is_subtype_of(_the_class_oop))) {
// ik->vtable() creates a wrapper object; rm cleans it up
ResourceMark rm(_thread);
- ik->vtable()->adjust_method_entries(_matching_old_methods,
- _matching_new_methods,
- _matching_methods_length,
- &trace_name_printed);
- ik->adjust_default_methods(_matching_old_methods,
- _matching_new_methods,
- _matching_methods_length,
- &trace_name_printed);
+
+ ik->vtable()->adjust_method_entries(the_class, &trace_name_printed);
+ ik->adjust_default_methods(the_class, &trace_name_printed);
}
// If the current class has an itable and we are either redefining an
@@ -3392,14 +3395,14 @@ void VM_RedefineClasses::AdjustCpoolCacheAndVtable::do_klass(Klass* k) {
// interface, then we have to call adjust_method_entries() for
// every InstanceKlass that has an itable since there isn't a
// subclass relationship between an interface and an InstanceKlass.
+ // If redefining Unsafe, walk all the itables looking for entries.
if (ik->itable_length() > 0 && (_the_class_oop->is_interface()
+ || _the_class_oop == SystemDictionary::misc_Unsafe_klass()
|| ik->is_subclass_of(_the_class_oop))) {
// ik->itable() creates a wrapper object; rm cleans it up
ResourceMark rm(_thread);
- ik->itable()->adjust_method_entries(_matching_old_methods,
- _matching_new_methods,
- _matching_methods_length,
- &trace_name_printed);
+
+ ik->itable()->adjust_method_entries(the_class, &trace_name_printed);
}
// The constant pools in other classes (other_cp) can refer to
@@ -3423,10 +3426,7 @@ void VM_RedefineClasses::AdjustCpoolCacheAndVtable::do_klass(Klass* k) {
other_cp = constantPoolHandle(ik->constants());
cp_cache = other_cp->cache();
if (cp_cache != NULL) {
- cp_cache->adjust_method_entries(_matching_old_methods,
- _matching_new_methods,
- _matching_methods_length,
- &trace_name_printed);
+ cp_cache->adjust_method_entries(the_class, &trace_name_printed);
}
}
@@ -3437,10 +3437,7 @@ void VM_RedefineClasses::AdjustCpoolCacheAndVtable::do_klass(Klass* k) {
other_cp = pv_node->prev_constant_pool();
cp_cache = other_cp->cache();
if (cp_cache != NULL) {
- cp_cache->adjust_method_entries(_matching_old_methods,
- _matching_new_methods,
- _matching_methods_length,
- &trace_name_printed);
+ cp_cache->adjust_method_entries(other_cp->pool_holder(), &trace_name_printed);
}
}
}
@@ -3555,6 +3552,7 @@ void VM_RedefineClasses::check_methods_and_mark_as_obsolete(
// obsolete methods need a unique idnum so they become new entries in
// the jmethodID cache in InstanceKlass
+ assert(old_method->method_idnum() == new_method->method_idnum(), "must match");
u2 num = InstanceKlass::cast(_the_class_oop)->next_method_idnum();
if (num != ConstMethod::UNSET_IDNUM) {
old_method->set_method_idnum(num);
@@ -3575,7 +3573,8 @@ void VM_RedefineClasses::check_methods_and_mark_as_obsolete(
assert(!old_method->has_vtable_index(),
"cannot delete methods with vtable entries");;
- // Mark all deleted methods as old and obsolete
+ // Mark all deleted methods as old, obsolete and deleted
+ old_method->set_is_deleted();
old_method->set_is_old();
old_method->set_is_obsolete();
++obsolete_count;
@@ -3756,7 +3755,7 @@ void VM_RedefineClasses::flush_dependent_code(instanceKlassHandle k_h, TRAPS) {
// Deoptimize all activations depending on marked nmethods
Deoptimization::deoptimize_dependents();
- // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
+ // Make the dependent methods not entrant
CodeCache::make_marked_nmethods_not_entrant();
// From now on we know that the dependency information is complete
@@ -3981,14 +3980,13 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass,
// the_class doesn't have a cache yet so copy it
the_class->set_cached_class_file(scratch_class->get_cached_class_file());
}
-#ifndef PRODUCT
- else {
- assert(the_class->get_cached_class_file_bytes() ==
- scratch_class->get_cached_class_file_bytes(), "cache ptrs must match");
- assert(the_class->get_cached_class_file_len() ==
- scratch_class->get_cached_class_file_len(), "cache lens must match");
+ else if (scratch_class->get_cached_class_file_bytes() !=
+ the_class->get_cached_class_file_bytes()) {
+ // The same class can be present twice in the scratch classes list, or there
+ // may be multiple concurrent RetransformClasses calls on different threads.
+ // In such cases we have to deallocate the scratch_class's cached_class_file.
+ os::free(scratch_class->get_cached_class_file());
}
-#endif
// NULL out in scratch class to not delete twice. The class to be redefined
// always owns these bytes.
@@ -4074,15 +4072,9 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass,
MemberNameTable* mnt = the_class->member_names();
if (mnt != NULL) {
bool trace_name_printed = false;
- mnt->adjust_method_entries(_matching_old_methods,
- _matching_new_methods,
- _matching_methods_length,
- &trace_name_printed);
+ mnt->adjust_method_entries(the_class(), &trace_name_printed);
}
- // Fix Resolution Error table also to remove old constant pools
- SystemDictionary::delete_resolution_error(old_constants);
-
if (the_class->oop_map_cache() != NULL) {
// Flush references to any obsolete methods from the oop map cache
// so that obsolete methods are not pinned.
@@ -4164,7 +4156,7 @@ void VM_RedefineClasses::CheckClass::do_klass(Klass* k) {
no_old_methods = false;
}
- // the constant pool cache should never contain old or obsolete methods
+ // the constant pool cache should never contain non-deleted old or obsolete methods
if (ik->constants() != NULL &&
ik->constants()->cache() != NULL &&
!ik->constants()->cache()->check_no_old_or_obsolete_entries()) {
diff --git a/src/share/vm/prims/jvmtiTagMap.cpp b/src/share/vm/prims/jvmtiTagMap.cpp
index 3967e5bb1..1ae7ce63d 100644
--- a/src/share/vm/prims/jvmtiTagMap.cpp
+++ b/src/share/vm/prims/jvmtiTagMap.cpp
@@ -1045,10 +1045,16 @@ static jint invoke_string_value_callback(jvmtiStringPrimitiveValueCallback cb,
{
assert(str->klass() == SystemDictionary::String_klass(), "not a string");
+ typeArrayOop s_value = java_lang_String::value(str);
+
+ // JDK-6584008: the value field may be null if a String instance is
+ // partially constructed.
+ if (s_value == NULL) {
+ return 0;
+ }
// get the string value and length
// (string value may be offset from the base)
int s_len = java_lang_String::length(str);
- typeArrayOop s_value = java_lang_String::value(str);
int s_offset = java_lang_String::offset(str);
jchar* value;
if (s_len > 0) {
diff --git a/src/share/vm/prims/methodHandles.cpp b/src/share/vm/prims/methodHandles.cpp
index 7c95d515f..62aece732 100644
--- a/src/share/vm/prims/methodHandles.cpp
+++ b/src/share/vm/prims/methodHandles.cpp
@@ -30,6 +30,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "prims/methodHandles.hpp"
+#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/reflection.hpp"
@@ -955,21 +956,41 @@ void MemberNameTable::add_member_name(jweak mem_name_wref) {
#if INCLUDE_JVMTI
// It is called at safepoint only for RedefineClasses
-void MemberNameTable::adjust_method_entries(Method** old_methods, Method** new_methods,
- int methods_length, bool *trace_name_printed) {
+void MemberNameTable::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
// For each redefined method
- for (int j = 0; j < methods_length; j++) {
- Method* old_method = old_methods[j];
- Method* new_method = new_methods[j];
-
- // search the MemberNameTable for uses of either obsolete or EMCP methods
- for (int idx = 0; idx < length(); idx++) {
- oop mem_name = JNIHandles::resolve(this->at(idx));
- if (mem_name != NULL) {
- java_lang_invoke_MemberName::adjust_vmtarget(mem_name, old_method, new_method,
- trace_name_printed);
+ for (int idx = 0; idx < length(); idx++) {
+ oop mem_name = JNIHandles::resolve(this->at(idx));
+ if (mem_name == NULL) {
+ continue;
+ }
+ Method* old_method = (Method*)java_lang_invoke_MemberName::vmtarget(mem_name);
+
+ if (old_method == NULL || !old_method->is_old()) {
+ continue; // skip uninteresting entries
+ }
+ if (old_method->is_deleted()) {
+ // skip entries with deleted methods
+ continue;
+ }
+ Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
+
+ assert(new_method != NULL, "method_with_idnum() should not be NULL");
+ assert(old_method != new_method, "sanity check");
+
+ java_lang_invoke_MemberName::set_vmtarget(mem_name, new_method);
+
+ if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
+ if (!(*trace_name_printed)) {
+ // RC_TRACE_MESG macro has an embedded ResourceMark
+ RC_TRACE_MESG(("adjust: name=%s",
+ old_method->method_holder()->external_name()));
+ *trace_name_printed = true;
}
+ // RC_TRACE macro has an embedded ResourceMark
+ RC_TRACE(0x00400000, ("MemberName method update: %s(%s)",
+ new_method->name()->as_C_string(),
+ new_method->signature()->as_C_string()));
}
}
}
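
The heart of the rework above distills to a single hedged lookup: instead of scanning parallel old/new method arrays, each stale vmtarget is remapped through its original method idnum on the holder class ('holder' and 'old_method' as in the diff).

```cpp
Method* replacement = holder->method_with_idnum(old_method->orig_method_idnum());
```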
diff --git a/src/share/vm/prims/methodHandles.hpp b/src/share/vm/prims/methodHandles.hpp
index 9caf8004e..c28d29460 100644
--- a/src/share/vm/prims/methodHandles.hpp
+++ b/src/share/vm/prims/methodHandles.hpp
@@ -240,10 +240,8 @@ class MemberNameTable : public GrowableArray<jweak> {
#if INCLUDE_JVMTI
// RedefineClasses() API support:
- // If a MemberName refers to old_method then update it
- // to refer to new_method.
- void adjust_method_entries(Method** old_methods, Method** new_methods,
- int methods_length, bool *trace_name_printed);
+ // If a MemberName refers to old_method then update it to refer to new_method.
+ void adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed);
#endif // INCLUDE_JVMTI
};
diff --git a/src/share/vm/prims/unsafe.cpp b/src/share/vm/prims/unsafe.cpp
index 8879f21d1..ba2d85626 100644
--- a/src/share/vm/prims/unsafe.cpp
+++ b/src/share/vm/prims/unsafe.cpp
@@ -322,10 +322,33 @@ UNSAFE_ENTRY(void, Unsafe_SetObjectVolatile(JNIEnv *env, jobject unsafe, jobject
UNSAFE_END
#ifndef SUPPORTS_NATIVE_CX8
-// Keep old code for platforms which may not have atomic jlong (8 bytes) instructions
-// Volatile long versions must use locks if !VM_Version::supports_cx8().
-// support_cx8 is a surrogate for 'supports atomic long memory ops'.
+// VM_Version::supports_cx8() is a surrogate for 'supports atomic long memory ops'.
+//
+// On platforms which do not support atomic compare-and-swap of jlong (8 byte)
+// values we have to use a lock-based scheme to enforce atomicity. This has to be
+// applied to all Unsafe operations that set the value of a jlong field. Even so
+// the compareAndSwapLong operation will not be atomic with respect to direct stores
+// to the field from Java code. It is important therefore that any Java code that
+// utilizes these Unsafe jlong operations does not perform direct stores. To permit
+// direct loads of the field from Java code we must also use Atomic::store within the
+// locked regions. And for good measure, in case there are direct stores, we also
+// employ Atomic::load within those regions. Note that the field in question must be
+// volatile and so must have atomic load/store accesses applied at the Java level.
+//
+// The locking scheme could utilize a range of strategies for controlling the locking
+// granularity: from a lock per-field through to a single global lock. The latter is
+// the simplest and is used for the current implementation. Note that the Java object
+// that contains the field, can not, in general, be used for locking. To do so can lead
+// to deadlocks as we may introduce locking into what appears to the Java code to be a
+// lock-free path.
+//
+// As all the locked-regions are very short and themselves non-blocking we can treat
+// them as leaf routines and elide safepoint checks (i.e. we don't perform any thread
+// state transitions even when blocking for the lock). Note that if we do choose to
+// add safepoint checks and thread state transitions, we must ensure that we calculate
+// the address of the field _after_ we have acquired the lock, else the object may have
+// been moved by the GC.
UNSAFE_ENTRY(jlong, Unsafe_GetLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset))
UnsafeWrapper("Unsafe_GetLongVolatile");
@@ -337,8 +360,8 @@ UNSAFE_ENTRY(jlong, Unsafe_GetLongVolatile(JNIEnv *env, jobject unsafe, jobject
else {
Handle p (THREAD, JNIHandles::resolve(obj));
jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
- ObjectLocker ol(p, THREAD);
- jlong value = *addr;
+ MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
+ jlong value = Atomic::load(addr);
return value;
}
}
@@ -353,8 +376,8 @@ UNSAFE_ENTRY(void, Unsafe_SetLongVolatile(JNIEnv *env, jobject unsafe, jobject o
else {
Handle p (THREAD, JNIHandles::resolve(obj));
jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
- ObjectLocker ol(p, THREAD);
- *addr = x;
+ MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
+ Atomic::store(x, addr);
}
}
UNSAFE_END
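
The locking scheme described in the comment block above distills to the following hedged shape, which the compareAndSwapLong emulation further down also follows ('addr', 'expected' and 'new_value' are illustrative names; UnsafeJlong_lock is the real mutex from this patch):

```cpp
{
  MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
  if (Atomic::load(addr) == expected) {  // emulated compare ...
    Atomic::store(new_value, addr);      // ... and swap, under the lock
  }
}
```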
@@ -463,8 +486,8 @@ UNSAFE_ENTRY(void, Unsafe_SetOrderedLong(JNIEnv *env, jobject unsafe, jobject ob
else {
Handle p (THREAD, JNIHandles::resolve(obj));
jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
- ObjectLocker ol(p, THREAD);
- *addr = x;
+ MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
+ Atomic::store(x, addr);
}
}
#endif
@@ -1213,14 +1236,19 @@ UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapLong(JNIEnv *env, jobject unsafe, jo
UnsafeWrapper("Unsafe_CompareAndSwapLong");
Handle p (THREAD, JNIHandles::resolve(obj));
jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
+#ifdef SUPPORTS_NATIVE_CX8
+ return (jlong)(Atomic::cmpxchg(x, addr, e)) == e;
+#else
if (VM_Version::supports_cx8())
return (jlong)(Atomic::cmpxchg(x, addr, e)) == e;
else {
jboolean success = false;
- ObjectLocker ol(p, THREAD);
- if (*addr == e) { *addr = x; success = true; }
+ MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
+ jlong val = Atomic::load(addr);
+ if (val == e) { Atomic::store(x, addr); success = true; }
return success;
}
+#endif
UNSAFE_END
UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time))
diff --git a/src/share/vm/prims/whitebox.cpp b/src/share/vm/prims/whitebox.cpp
index 0132cb13e..ef1da19d2 100644
--- a/src/share/vm/prims/whitebox.cpp
+++ b/src/share/vm/prims/whitebox.cpp
@@ -45,6 +45,7 @@
#if INCLUDE_ALL_GCS
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/concurrentMarkThread.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#endif // INCLUDE_ALL_GCS
@@ -73,6 +74,13 @@ WB_ENTRY(jint, WB_GetHeapOopSize(JNIEnv* env, jobject o))
return heapOopSize;
WB_END
+WB_ENTRY(jint, WB_GetVMPageSize(JNIEnv* env, jobject o))
+ return os::vm_page_size();
+WB_END
+
+WB_ENTRY(jlong, WB_GetVMLargePageSize(JNIEnv* env, jobject o))
+ return os::large_page_size();
+WB_END
class WBIsKlassAliveClosure : public KlassClosure {
Symbol* _name;
@@ -302,6 +310,12 @@ WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
return hr->isHumongous();
WB_END
+WB_ENTRY(jlong, WB_G1NumMaxRegions(JNIEnv* env, jobject o))
+ G1CollectedHeap* g1 = G1CollectedHeap::heap();
+ size_t nr = g1->max_regions();
+ return (jlong)nr;
+WB_END
+
WB_ENTRY(jlong, WB_G1NumFreeRegions(JNIEnv* env, jobject o))
G1CollectedHeap* g1 = G1CollectedHeap::heap();
size_t nr = g1->num_free_regions();
@@ -310,13 +324,29 @@ WB_END
WB_ENTRY(jboolean, WB_G1InConcurrentMark(JNIEnv* env, jobject o))
G1CollectedHeap* g1 = G1CollectedHeap::heap();
- ConcurrentMark* cm = g1->concurrent_mark();
- return cm->concurrent_marking_in_progress();
+ return g1->concurrent_mark()->cmThread()->during_cycle();
+WB_END
+
+WB_ENTRY(jboolean, WB_G1StartMarkCycle(JNIEnv* env, jobject o))
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ if (!g1h->concurrent_mark()->cmThread()->during_cycle()) {
+ g1h->collect(GCCause::_wb_conc_mark);
+ return true;
+ }
+ return false;
WB_END
WB_ENTRY(jint, WB_G1RegionSize(JNIEnv* env, jobject o))
return (jint)HeapRegion::GrainBytes;
WB_END
+
+WB_ENTRY(jobject, WB_G1AuxiliaryMemoryUsage(JNIEnv* env))
+ ResourceMark rm(THREAD);
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ MemoryUsage usage = g1h->get_auxiliary_data_memory_usage();
+ Handle h = MemoryService::create_MemoryUsage_obj(usage, CHECK_NULL);
+ return JNIHandles::make_local(env, h());
+WB_END
#endif // INCLUDE_ALL_GCS
#if INCLUDE_NMT
@@ -884,6 +914,16 @@ WB_ENTRY(jlong, WB_MetaspaceCapacityUntilGC(JNIEnv* env, jobject wb))
return (jlong) MetaspaceGC::capacity_until_GC();
WB_END
+WB_ENTRY(jboolean, WB_IsMonitorInflated(JNIEnv* env, jobject wb, jobject obj))
+ oop obj_oop = JNIHandles::resolve(obj);
+ return (jboolean) obj_oop->mark()->has_monitor();
+WB_END
+
+WB_ENTRY(void, WB_ForceSafepoint(JNIEnv* env, jobject wb))
+ VM_ForceSafepoint force_safepoint_op;
+ VMThread::execute(&force_safepoint_op);
+WB_END
+
// Some convenience methods to deal with objects from Java
int WhiteBox::offset_for_field(const char* field_name, oop object,
Symbol* signature_symbol) {
@@ -906,7 +946,7 @@ int WhiteBox::offset_for_field(const char* field_name, oop object,
if (res == NULL) {
tty->print_cr("Invalid layout of %s at %s", ik->external_name(),
name_symbol->as_C_string());
- fatal("Invalid layout of preloaded class");
+ vm_exit_during_initialization("Invalid layout of preloaded class: use -XX:+TraceClassLoading to see the origin of the problem class");
}
//fetch the field at the offset we've found
@@ -972,6 +1012,8 @@ static JNINativeMethod methods[] = {
{CC"getObjectSize", CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectSize },
{CC"isObjectInOldGen", CC"(Ljava/lang/Object;)Z", (void*)&WB_isObjectInOldGen },
{CC"getHeapOopSize", CC"()I", (void*)&WB_GetHeapOopSize },
+ {CC"getVMPageSize", CC"()I", (void*)&WB_GetVMPageSize },
+ {CC"getVMLargePageSize", CC"()J", (void*)&WB_GetVMLargePageSize},
{CC"isClassAlive0", CC"(Ljava/lang/String;)Z", (void*)&WB_IsClassAlive },
{CC"classKnownToNotExist",
CC"(Ljava/lang/ClassLoader;Ljava/lang/String;)Z",(void*)&WB_ClassKnownToNotExist},
@@ -995,8 +1037,12 @@ static JNINativeMethod methods[] = {
#if INCLUDE_ALL_GCS
{CC"g1InConcurrentMark", CC"()Z", (void*)&WB_G1InConcurrentMark},
{CC"g1IsHumongous", CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous },
+ {CC"g1NumMaxRegions", CC"()J", (void*)&WB_G1NumMaxRegions },
{CC"g1NumFreeRegions", CC"()J", (void*)&WB_G1NumFreeRegions },
{CC"g1RegionSize", CC"()I", (void*)&WB_G1RegionSize },
+ {CC"g1StartConcMarkCycle", CC"()Z", (void*)&WB_G1StartMarkCycle },
+ {CC"g1AuxiliaryMemoryUsage", CC"()Ljava/lang/management/MemoryUsage;",
+ (void*)&WB_G1AuxiliaryMemoryUsage },
#endif // INCLUDE_ALL_GCS
#if INCLUDE_NMT
{CC"NMTMalloc", CC"(J)J", (void*)&WB_NMTMalloc },
@@ -1067,6 +1113,8 @@ static JNINativeMethod methods[] = {
{CC"getCPUFeatures", CC"()Ljava/lang/String;", (void*)&WB_GetCPUFeatures },
{CC"getNMethod", CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
(void*)&WB_GetNMethod },
+ {CC"isMonitorInflated", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsMonitorInflated },
+ {CC"forceSafepoint", CC"()V", (void*)&WB_ForceSafepoint },
};
#undef CC
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index 0bee43f03..5e7c6d10c 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -1372,6 +1372,12 @@ void Arguments::set_cms_and_parnew_gc_flags() {
if (!FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
}
+
+ if (!ClassUnloading) {
+ FLAG_SET_CMDLINE(bool, CMSClassUnloadingEnabled, false);
+ FLAG_SET_CMDLINE(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false);
+ }
+
if (PrintGCDetails && Verbose) {
tty->print_cr("MarkStackSize: %uk MarkStackSizeMax: %uk",
(unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
@@ -1572,7 +1578,7 @@ void Arguments::select_gc_ergonomically() {
void Arguments::select_gc() {
if (!gc_selected()) {
- ArgumentsExt::select_gc_ergonomically();
+ select_gc_ergonomically();
}
}
@@ -2067,7 +2073,7 @@ bool Arguments::verify_MaxHeapFreeRatio(FormatBuffer<80>& err_msg, uintx max_hea
}
// Check consistency of GC selection
-bool Arguments::check_gc_consistency_user() {
+bool Arguments::check_gc_consistency() {
check_gclog_consistency();
bool status = true;
// Ensure that the user has not selected conflicting sets
@@ -2233,7 +2239,7 @@ bool Arguments::check_vm_args_consistency() {
FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
}
- status = status && check_gc_consistency_user();
+ status = status && check_gc_consistency();
status = status && check_stack_pages();
if (CMSIncrementalMode) {
@@ -2310,7 +2316,7 @@ bool Arguments::check_vm_args_consistency() {
"G1ConcMarkStepDurationMillis");
status = status && verify_interval(G1ConcRSHotCardLimit, 0, max_jubyte,
"G1ConcRSHotCardLimit");
- status = status && verify_interval(G1ConcRSLogCacheSize, 0, 31,
+ status = status && verify_interval(G1ConcRSLogCacheSize, 0, 27,
"G1ConcRSLogCacheSize");
status = status && verify_interval(StringDeduplicationAgeThreshold, 1, markOopDesc::max_age,
"StringDeduplicationAgeThreshold");
@@ -3408,7 +3414,7 @@ void Arguments::fix_appclasspath() {
}
if (!PrintSharedArchiveAndExit) {
- ClassLoader::trace_class_path("[classpath: ", _java_class_path->value());
+ ClassLoader::trace_class_path(tty, "[classpath: ", _java_class_path->value());
}
}
@@ -3837,8 +3843,8 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
CommandLineFlags::printFlags(tty, false);
vm_exit(0);
}
-#if INCLUDE_NMT
if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
+#if INCLUDE_NMT
// The launcher did not setup nmt environment variable properly.
if (!MemTracker::check_launcher_nmt_support(tail)) {
warning("Native Memory Tracking did not setup properly, using wrong launcher?");
@@ -3853,8 +3859,12 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
} else {
vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
}
- }
+#else
+ jio_fprintf(defaultStream::error_stream(),
+ "Native Memory Tracking is not supported in this VM\n");
+ return JNI_ERR;
#endif
+ }
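With this restructuring the option is parsed uniformly; builds without INCLUDE_NMT now reject it up front instead of silently ignoring it. For example (syntax as accepted above):

    java -XX:NativeMemoryTracking=summary -version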
#ifndef PRODUCT
@@ -4006,7 +4016,7 @@ jint Arguments::apply_ergo() {
set_shared_spaces_flags();
// Check the GC selections again.
- if (!ArgumentsExt::check_gc_consistency_ergo()) {
+ if (!check_gc_consistency()) {
return JNI_EINVAL;
}
diff --git a/src/share/vm/runtime/arguments.hpp b/src/share/vm/runtime/arguments.hpp
index e42f9acb6..8fd3e3f2f 100644
--- a/src/share/vm/runtime/arguments.hpp
+++ b/src/share/vm/runtime/arguments.hpp
@@ -466,8 +466,7 @@ class Arguments : AllStatic {
static bool verify_MaxHeapFreeRatio(FormatBuffer<80>& err_msg, uintx max_heap_free_ratio);
// Check for consistency in the selection of the garbage collector.
- static bool check_gc_consistency_user(); // Check user-selected gc
- static inline bool check_gc_consistency_ergo(); // Check ergonomic-selected gc
+ static bool check_gc_consistency(); // Check user-selected gc
static void check_deprecated_gcs();
static void check_deprecated_gc_flags();
// Check consistency or otherwise of VM argument settings
@@ -615,8 +614,4 @@ bool Arguments::gc_selected() {
UseParNewGC || UseSerialGC;
}
-bool Arguments::check_gc_consistency_ergo() {
- return check_gc_consistency_user();
-}
-
#endif // SHARE_VM_RUNTIME_ARGUMENTS_HPP
diff --git a/src/share/vm/runtime/arguments_ext.hpp b/src/share/vm/runtime/arguments_ext.hpp
index b1451229d..26a311f7c 100644
--- a/src/share/vm/runtime/arguments_ext.hpp
+++ b/src/share/vm/runtime/arguments_ext.hpp
@@ -30,22 +30,12 @@
class ArgumentsExt: AllStatic {
public:
- static inline void select_gc_ergonomically();
static inline void set_gc_specific_flags();
- static inline bool check_gc_consistency_ergo();
static void process_options(const JavaVMInitArgs* args) {}
};
-void ArgumentsExt::select_gc_ergonomically() {
- Arguments::select_gc_ergonomically();
-}
-
void ArgumentsExt::set_gc_specific_flags() {
Arguments::set_gc_specific_flags();
}
-bool ArgumentsExt::check_gc_consistency_ergo() {
- return Arguments::check_gc_consistency_ergo();
-}
-
#endif // SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
diff --git a/src/share/vm/runtime/basicLock.cpp b/src/share/vm/runtime/basicLock.cpp
index 992f73550..5d69b30ae 100644
--- a/src/share/vm/runtime/basicLock.cpp
+++ b/src/share/vm/runtime/basicLock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,9 @@
void BasicLock::print_on(outputStream* st) const {
st->print("monitor");
+ markOop moop = displaced_header();
+ if (moop != NULL)
+ moop->print_on(st);
}
void BasicLock::move_to(oop obj, BasicLock* dest) {
diff --git a/src/share/vm/runtime/deoptimization.cpp b/src/share/vm/runtime/deoptimization.cpp
index 41fc29905..c21b86c03 100644
--- a/src/share/vm/runtime/deoptimization.cpp
+++ b/src/share/vm/runtime/deoptimization.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -66,25 +66,17 @@
# include "vmreg_ppc.inline.hpp"
#endif
#ifdef COMPILER2
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined AD_MD_HPP
+# include AD_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "adfiles/ad_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/ad_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif
#endif // COMPILER2
@@ -1477,7 +1469,10 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
//
// The other actions cause immediate removal of the present code.
- bool update_trap_state = true;
+ // Traps caused by injected profile shouldn't pollute trap counts.
+ bool injected_profile_trap = trap_method->has_injected_profile() &&
+ (reason == Reason_intrinsic || reason == Reason_unreached);
+ bool update_trap_state = !injected_profile_trap;
bool make_not_entrant = false;
bool make_not_compilable = false;
bool reprofile = false;
diff --git a/src/share/vm/runtime/frame.hpp b/src/share/vm/runtime/frame.hpp
index f715f68b5..c83808b6a 100644
--- a/src/share/vm/runtime/frame.hpp
+++ b/src/share/vm/runtime/frame.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,25 +31,17 @@
#include "runtime/registerMap.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined ADGLOBALS_MD_HPP
+# include ADGLOBALS_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/adGlobals_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/adGlobals_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/adGlobals_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/adGlobals_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "adfiles/adGlobals_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/adGlobals_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/adGlobals_ppc_64.hpp"
#endif
#endif // COMPILER2
@@ -91,6 +83,15 @@ class frame VALUE_OBJ_CLASS_SPEC {
// Constructors
frame();
+#ifndef PRODUCT
+ // This is a generic constructor which is only used by pns() in debug.cpp.
+ // pns (i.e. print native stack) uses this constructor to create a starting
+ // frame for stack walking. The implementation of this constructor is platform
+ // dependent (i.e. SPARC doesn't need an 'fp' argument and will ignore it) but
+ // we want to keep the signature generic because pns() is shared code.
+ frame(void* sp, void* fp, void* pc);
+#endif
+
// Accessors
// pc: Returns the pc at which this frame will continue normally.
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index d3389308b..1e01b2e7a 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1279,6 +1279,13 @@ class CommandLineFlags {
"Decay time (in milliseconds) to re-enable bulk rebiasing of a " \
"type after previous bulk rebias") \
\
+ product(bool, ExitOnOutOfMemoryError, false, \
+ "JVM exits on the first occurrence of an out-of-memory error") \
+ \
+ product(bool, CrashOnOutOfMemoryError, false, \
+ "JVM aborts, producing an error log and core/mini dump, on the " \
+ "first occurrence of an out-of-memory error") \
+ \
/* tracing */ \
\
notproduct(bool, TraceRuntimeCalls, false, \
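A hedged usage sketch of the two flags defined above; they differ in whether the VM exits quietly or aborts with an error log and core/mini dump on the first out-of-memory error (MyApp is a placeholder):

    java -XX:+ExitOnOutOfMemoryError -Xmx64m MyApp    # exit immediately on first OOME
    java -XX:+CrashOnOutOfMemoryError -Xmx64m MyApp   # abort, producing hs_err log / dump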
@@ -1494,7 +1501,7 @@ class CommandLineFlags {
"How much the GC can expand the eden by while the GC locker " \
"is active (as a percentage)") \
\
- diagnostic(intx, GCLockerRetryAllocationCount, 2, \
+ diagnostic(uintx, GCLockerRetryAllocationCount, 2, \
"Number of times to retry allocations when " \
"blocked by the GC locker") \
\
@@ -2048,9 +2055,6 @@ class CommandLineFlags {
"Provide more detailed and expensive TLAB statistics " \
"(with PrintTLAB)") \
\
- EMBEDDED_ONLY(product(bool, LowMemoryProtection, true, \
- "Enable LowMemoryProtection")) \
- \
product_pd(bool, NeverActAsServerClassMachine, \
"Never act like a server-class machine") \
\
@@ -3943,7 +3947,11 @@ class CommandLineFlags {
"Enable event-based tracing") \
\
product(bool, UseLockedTracing, false, \
- "Use locked-tracing when doing event-based tracing")
+ "Use locked-tracing when doing event-based tracing") \
+ \
+ product_pd(bool, PreserveFramePointer, \
+ "Use the FP register for holding the frame pointer " \
+ "and not as a general purpose register.")
/*
* Macros for factoring of globals
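PreserveFramePointer trades one general-purpose register for walkable native stacks; a typical (assumed) use is enabling frame-pointer-based external profilers:

    java -XX:+PreserveFramePointer MyApp   # MyApp is a placeholder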
diff --git a/src/share/vm/runtime/interfaceSupport.cpp b/src/share/vm/runtime/interfaceSupport.cpp
index 3b338de6f..e78bc1210 100644
--- a/src/share/vm/runtime/interfaceSupport.cpp
+++ b/src/share/vm/runtime/interfaceSupport.cpp
@@ -185,19 +185,22 @@ void InterfaceSupport::zap_dead_locals_old() {
# endif
-
+// invocation counter for InterfaceSupport::deoptimizeAll/zombieAll functions
int deoptimizeAllCounter = 0;
int zombieAllCounter = 0;
-
void InterfaceSupport::zombieAll() {
- if (is_init_completed() && zombieAllCounter > ZombieALotInterval) {
+ // This method is called by all threads when a thread makes a
+ // transition to the VM state (for example, runtime calls).
+ // Divide the number of calls by the number of threads to keep the
+ // frequency of ZombieAll events independent of the thread count.
+ int value = zombieAllCounter / Threads::number_of_threads();
+ if (is_init_completed() && value > ZombieALotInterval) {
zombieAllCounter = 0;
VM_ZombieAll op;
VMThread::execute(&op);
- } else {
- zombieAllCounter++;
}
+ zombieAllCounter++;
}
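Worked example with hypothetical numbers:

    // 8 threads, ZombieALotInterval = 5:
    // value = zombieAllCounter / 8 > 5  =>  zombieAllCounter > 40,
    // i.e. one VM_ZombieAll op roughly every 40 VM-state transitions overall,
    // rather than every 5 transitions per thread.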
void InterfaceSupport::unlinkSymbols() {
@@ -206,12 +209,17 @@ void InterfaceSupport::unlinkSymbols() {
}
void InterfaceSupport::deoptimizeAll() {
- if (is_init_completed() ) {
- if (DeoptimizeALot && deoptimizeAllCounter > DeoptimizeALotInterval) {
+ // This method is called by all threads when a thread makes a
+ // transition to the VM state (for example, runtime calls).
+ // Divide the number of calls by the number of threads to keep the
+ // frequency of DeoptimizeAll events independent of the thread count.
+ int value = deoptimizeAllCounter / Threads::number_of_threads();
+ if (is_init_completed()) {
+ if (DeoptimizeALot && value > DeoptimizeALotInterval) {
deoptimizeAllCounter = 0;
VM_DeoptimizeAll op;
VMThread::execute(&op);
- } else if (DeoptimizeRandom && (deoptimizeAllCounter & 0x1f) == (os::random() & 0x1f)) {
+ } else if (DeoptimizeRandom && (value & 0x1F) == (os::random() & 0x1F)) {
VM_DeoptimizeAll op;
VMThread::execute(&op);
}
diff --git a/src/share/vm/runtime/jniHandles.cpp b/src/share/vm/runtime/jniHandles.cpp
index 4acf50b0d..675b9269e 100644
--- a/src/share/vm/runtime/jniHandles.cpp
+++ b/src/share/vm/runtime/jniHandles.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -296,6 +296,7 @@ JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread) {
block->_top = 0;
block->_next = NULL;
block->_pop_frame_link = NULL;
+ block->_planned_capacity = block_size_in_oops;
// _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
debug_only(block->_last = NULL);
debug_only(block->_free_list = NULL);
@@ -529,6 +530,12 @@ int JNIHandleBlock::length() const {
return result;
}
+const size_t JNIHandleBlock::get_number_of_live_handles() {
+ CountHandleClosure counter;
+ oops_do(&counter);
+ return counter.count();
+}
+
// This method is not thread-safe, i.e., must be called while holding a lock on the
// structure.
long JNIHandleBlock::memory_usage() const {
diff --git a/src/share/vm/runtime/jniHandles.hpp b/src/share/vm/runtime/jniHandles.hpp
index 1b6b6b81f..1f749536f 100644
--- a/src/share/vm/runtime/jniHandles.hpp
+++ b/src/share/vm/runtime/jniHandles.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -112,6 +112,9 @@ class JNIHandleBlock : public CHeapObj<mtInternal> {
oop* _free_list; // Handle free list
int _allocate_before_rebuild; // Number of blocks to allocate before rebuilding free list
+ // Checked JNI: "planned capacity" for the current frame (or push/ensure)
+ size_t _planned_capacity;
+
#ifndef PRODUCT
JNIHandleBlock* _block_list_link; // Link for list below
static JNIHandleBlock* _block_list; // List of all allocated blocks (for debugging only)
@@ -152,6 +155,11 @@ class JNIHandleBlock : public CHeapObj<mtInternal> {
// Traversal of weak handles. Unreachable oops are cleared.
void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
+ // Checked JNI support
+ void set_planned_capacity(size_t planned_capacity) { _planned_capacity = planned_capacity; }
+ const size_t get_planned_capacity() { return _planned_capacity; }
+ const size_t get_number_of_live_handles();
+
// Debugging
bool chain_contains(jobject handle) const; // Does this block or following blocks contain handle
bool contains(jobject handle) const; // Does this block contain handle
diff --git a/src/share/vm/runtime/mutexLocker.cpp b/src/share/vm/runtime/mutexLocker.cpp
index 136bc4d28..f358c75ea 100644
--- a/src/share/vm/runtime/mutexLocker.cpp
+++ b/src/share/vm/runtime/mutexLocker.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -119,7 +119,6 @@ Monitor* SecondaryFreeList_lock = NULL;
Mutex* OldSets_lock = NULL;
Monitor* RootRegionScan_lock = NULL;
Mutex* MMUTracker_lock = NULL;
-Mutex* HotCardCache_lock = NULL;
Monitor* GCTaskManager_lock = NULL;
@@ -135,6 +134,10 @@ Mutex* JfrStream_lock = NULL;
Mutex* JfrThreadGroups_lock = NULL;
#endif
+#ifndef SUPPORTS_NATIVE_CX8
+Mutex* UnsafeJlong_lock = NULL;
+#endif
+
#define MAX_NUM_MUTEX 128
static Monitor * _mutex_array[MAX_NUM_MUTEX];
static int _num_mutex;
@@ -196,7 +199,6 @@ void mutex_init() {
def(OldSets_lock , Mutex , leaf , true );
def(RootRegionScan_lock , Monitor, leaf , true );
def(MMUTracker_lock , Mutex , leaf , true );
- def(HotCardCache_lock , Mutex , special , true );
def(EvacFailureStack_lock , Mutex , nonleaf , true );
def(StringDedupQueue_lock , Monitor, leaf, true );
@@ -280,12 +282,15 @@ void mutex_init() {
#ifdef INCLUDE_TRACE
def(JfrMsg_lock , Monitor, leaf, true);
- def(JfrBuffer_lock , Mutex, nonleaf+1, true);
- def(JfrThreadGroups_lock , Mutex, nonleaf+1, true);
- def(JfrStream_lock , Mutex, nonleaf+2, true);
- def(JfrStacktrace_lock , Mutex, special, true );
+ def(JfrBuffer_lock , Mutex, leaf, true);
+ def(JfrThreadGroups_lock , Mutex, leaf, true);
+ def(JfrStream_lock , Mutex, nonleaf, true);
+ def(JfrStacktrace_lock , Mutex, special, true);
#endif
+#ifndef SUPPORTS_NATIVE_CX8
+ def(UnsafeJlong_lock , Mutex, special, false);
+#endif
}
GCMutexLocker::GCMutexLocker(Monitor * mutex) {
diff --git a/src/share/vm/runtime/mutexLocker.hpp b/src/share/vm/runtime/mutexLocker.hpp
index 0aa19a33a..be86bac71 100644
--- a/src/share/vm/runtime/mutexLocker.hpp
+++ b/src/share/vm/runtime/mutexLocker.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -137,7 +137,6 @@ extern Mutex* OldSets_lock; // protects the old region sets
extern Monitor* RootRegionScan_lock; // used to notify that the CM threads have finished scanning the IM snapshot regions
extern Mutex* MMUTracker_lock; // protects the MMU
// tracker data structures
-extern Mutex* HotCardCache_lock; // protects the hot card cache
extern Mutex* Management_lock; // a lock used to serialize JVM management
extern Monitor* Service_lock; // a lock used for service thread operation
@@ -151,6 +150,10 @@ extern Mutex* JfrStream_lock; // protects JFR stream access
extern Mutex* JfrThreadGroups_lock; // protects JFR access to Thread Groups
#endif
+#ifndef SUPPORTS_NATIVE_CX8
+extern Mutex* UnsafeJlong_lock; // provides Unsafe atomic updates to jlongs on platforms that don't support cx8
+#endif
+
// A MutexLocker provides mutual exclusion with respect to a given mutex
// for the scope which contains the locker. The lock is an OS lock, not
// an object lock, and the two do not interoperate. Do not use Mutex-based
diff --git a/src/share/vm/runtime/objectMonitor.cpp b/src/share/vm/runtime/objectMonitor.cpp
index 20fe5e8e5..31f899b12 100644
--- a/src/share/vm/runtime/objectMonitor.cpp
+++ b/src/share/vm/runtime/objectMonitor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -226,7 +226,8 @@ static volatile int InitDone = 0 ;
//
// * The monitor entry list operations avoid locks, but strictly speaking
// they're not lock-free. Enter is lock-free, exit is not.
-// See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
+// For a description of 'Methods and apparatus providing non-blocking access
+// to a resource,' see U.S. Pat. No. 7844973.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
// detaching thread. This mechanism is immune from the ABA corruption.
@@ -1955,7 +1956,8 @@ void ObjectMonitor::notifyAll(TRAPS) {
// (duration) or we can fix the count at approximately the duration of
// a context switch and vary the frequency. Of course we could also
// vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
-// See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
+// For a description of 'Adaptive spin-then-block mutual exclusion in
+// multi-threaded processing,' see U.S. Pat. No. 8046758.
//
// This implementation varies the duration "D", where D varies with
// the success rate of recent spin attempts. (D is capped at approximately
diff --git a/src/share/vm/runtime/os.cpp b/src/share/vm/runtime/os.cpp
index 3a830617a..e50f8888f 100644
--- a/src/share/vm/runtime/os.cpp
+++ b/src/share/vm/runtime/os.cpp
@@ -1315,24 +1315,17 @@ bool os::stack_shadow_pages_available(Thread *thread, methodHandle method) {
return (sp > (stack_limit + reserved_area));
}
-size_t os::page_size_for_region(size_t region_min_size, size_t region_max_size,
- uint min_pages)
-{
+size_t os::page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) {
assert(min_pages > 0, "sanity");
if (UseLargePages) {
- const size_t max_page_size = region_max_size / min_pages;
-
- for (unsigned int i = 0; _page_sizes[i] != 0; ++i) {
- const size_t sz = _page_sizes[i];
- const size_t mask = sz - 1;
- if ((region_min_size & mask) == 0 && (region_max_size & mask) == 0) {
- // The largest page size with no fragmentation.
- return sz;
- }
+ const size_t max_page_size = region_size / min_pages;
- if (sz <= max_page_size) {
- // The largest page size that satisfies the min_pages requirement.
- return sz;
+ for (size_t i = 0; _page_sizes[i] != 0; ++i) {
+ const size_t page_size = _page_sizes[i];
+ if (page_size <= max_page_size) {
+ if (!must_be_aligned || is_size_aligned(region_size, page_size)) {
+ return page_size;
+ }
}
}
}
@@ -1340,6 +1333,14 @@ size_t os::page_size_for_region(size_t region_min_size, size_t region_max_size,
return vm_page_size();
}
+size_t os::page_size_for_region_aligned(size_t region_size, size_t min_pages) {
+ return page_size_for_region(region_size, min_pages, true);
+}
+
+size_t os::page_size_for_region_unaligned(size_t region_size, size_t min_pages) {
+ return page_size_for_region(region_size, min_pages, false);
+}
+
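A worked pass through the selection loop above, assuming _page_sizes = {4M, 4K, 0} (sorted descending, as os.hpp notes):

    // aligned:   region_size = 8M, min_pages = 2 -> max_page_size = 4M; 8M is
    //            4M-aligned, so 4M is returned.
    // aligned:   region_size = 6M, min_pages = 1 -> 4M fits, but 6M is not
    //            4M-aligned, so the scan falls through to 4K.
    // unaligned: region_size = 6M, min_pages = 1 -> 4M is returned.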
#ifndef PRODUCT
void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count)
{
@@ -1513,6 +1514,11 @@ bool os::release_memory(char* addr, size_t bytes) {
return res;
}
+void os::pretouch_memory(char* start, char* end) {
+ for (volatile char *p = start; p < end; p += os::vm_page_size()) {
+ *p = 0;
+ }
+}
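The store through a volatile char* is deliberate: a plain read of an unused value could be optimized away, as explained in the comment removed from VirtualSpace::expand_by later in this diff.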
char* os::map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
@@ -1569,3 +1575,95 @@ os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::Stat
return result;
}
#endif
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define assert_eq(a,b) assert(a == b, err_msg(SIZE_FORMAT " != " SIZE_FORMAT, a, b))
+
+class TestOS : AllStatic {
+ static size_t small_page_size() {
+ return os::vm_page_size();
+ }
+
+ static size_t large_page_size() {
+ const size_t large_page_size_example = 4 * M;
+ return os::page_size_for_region_aligned(large_page_size_example, 1);
+ }
+
+ static void test_page_size_for_region_aligned() {
+ if (UseLargePages) {
+ const size_t small_page = small_page_size();
+ const size_t large_page = large_page_size();
+
+ if (large_page > small_page) {
+ size_t num_small_pages_in_large = large_page / small_page;
+ size_t page = os::page_size_for_region_aligned(large_page, num_small_pages_in_large);
+
+ assert_eq(page, small_page);
+ }
+ }
+ }
+
+ static void test_page_size_for_region_alignment() {
+ if (UseLargePages) {
+ const size_t small_page = small_page_size();
+ const size_t large_page = large_page_size();
+ if (large_page > small_page) {
+ const size_t unaligned_region = large_page + 17;
+ size_t page = os::page_size_for_region_aligned(unaligned_region, 1);
+ assert_eq(page, small_page);
+
+ const size_t num_pages = 5;
+ const size_t aligned_region = large_page * num_pages;
+ page = os::page_size_for_region_aligned(aligned_region, num_pages);
+ assert_eq(page, large_page);
+ }
+ }
+ }
+
+ static void test_page_size_for_region_unaligned() {
+ if (UseLargePages) {
+ // Given exact page size, should return that page size.
+ for (size_t i = 0; os::_page_sizes[i] != 0; i++) {
+ size_t expected = os::_page_sizes[i];
+ size_t actual = os::page_size_for_region_unaligned(expected, 1);
+ assert_eq(expected, actual);
+ }
+
+ // Given slightly larger size than a page size, return the page size.
+ for (size_t i = 0; os::_page_sizes[i] != 0; i++) {
+ size_t expected = os::_page_sizes[i];
+ size_t actual = os::page_size_for_region_unaligned(expected + 17, 1);
+ assert_eq(expected, actual);
+ }
+
+ // Given a slightly smaller size than a page size,
+ // return the next smaller page size.
+ if (os::_page_sizes[1] > os::_page_sizes[0]) {
+ size_t expected = os::_page_sizes[0];
+ size_t actual = os::page_size_for_region_unaligned(os::_page_sizes[1] - 17, 1);
+ assert_eq(actual, expected);
+ }
+
+ // Return small page size for values less than a small page.
+ size_t small_page = small_page_size();
+ size_t actual = os::page_size_for_region_unaligned(small_page - 17, 1);
+ assert_eq(small_page, actual);
+ }
+ }
+
+ public:
+ static void run_tests() {
+ test_page_size_for_region_aligned();
+ test_page_size_for_region_alignment();
+ test_page_size_for_region_unaligned();
+ }
+};
+
+void TestOS_test() {
+ TestOS::run_tests();
+}
+
+#endif // PRODUCT
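These Test*_test hooks are only compiled into non-product builds; assuming the internal test runner registers TestOS_test() alongside the existing hooks, they can be exercised with:

    java -XX:+ExecuteInternalVMTests -version   # debug/non-product builds only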
diff --git a/src/share/vm/runtime/os.hpp b/src/share/vm/runtime/os.hpp
index c1e9d45b7..944ec6431 100644
--- a/src/share/vm/runtime/os.hpp
+++ b/src/share/vm/runtime/os.hpp
@@ -149,6 +149,7 @@ class os: AllStatic {
static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);
+ static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned);
public:
static void init(void); // Called before command line parsing
@@ -159,7 +160,6 @@ class os: AllStatic {
static void init_globals(void) { // Called from init_globals() in init.cpp
init_globals_ext();
}
- static void init_3(void); // Called at the end of vm init
// File names are case-insensitive on windows only
// Override me as needed
@@ -266,19 +266,16 @@ class os: AllStatic {
// Return the default page size.
static int vm_page_size();
- // Return the page size to use for a region of memory. The min_pages argument
- // is a hint intended to limit fragmentation; it says the returned page size
- // should be <= region_max_size / min_pages. Because min_pages is a hint,
- // this routine may return a size larger than region_max_size / min_pages.
- //
- // The current implementation ignores min_pages if a larger page size is an
- // exact multiple of both region_min_size and region_max_size. This allows
- // larger pages to be used when doing so would not cause fragmentation; in
- // particular, a single page can be used when region_min_size ==
- // region_max_size == a supported page size.
- static size_t page_size_for_region(size_t region_min_size,
- size_t region_max_size,
- uint min_pages);
+ // Returns the page size to use for a region of memory.
+ // region_size / min_pages will always be greater than or equal to the
+ // returned value. The returned value will divide region_size.
+ static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages);
+
+ // Returns the page size to use for a region of memory.
+ // region_size / min_pages will always be greater than or equal to the
+ // returned value. The returned value might not divide region_size.
+ static size_t page_size_for_region_unaligned(size_t region_size, size_t min_pages);
+
// Return the largest page size that can be used
static size_t max_page_size() {
// The _page_sizes array is sorted in descending order.
@@ -320,6 +317,12 @@ class os: AllStatic {
static bool uncommit_memory(char* addr, size_t bytes);
static bool release_memory(char* addr, size_t bytes);
+ // Touch memory pages that cover the memory range from start to end (exclusive)
+ // to make the OS back the memory range with actual memory.
+ // Current implementation may not touch the last page if unaligned addresses
+ // are passed.
+ static void pretouch_memory(char* start, char* end);
+
enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
static bool protect_memory(char* addr, size_t bytes, ProtType prot,
bool is_committed = true);
diff --git a/src/share/vm/runtime/reflection.cpp b/src/share/vm/runtime/reflection.cpp
index f0df88c7e..e2aeab328 100644
--- a/src/share/vm/runtime/reflection.cpp
+++ b/src/share/vm/runtime/reflection.cpp
@@ -36,6 +36,7 @@
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "prims/jvm.h"
+#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
@@ -942,6 +943,11 @@ oop Reflection::invoke(instanceKlassHandle klass, methodHandle reflected_method,
// Method resolution threw an exception; wrap it in an InvocationTargetException
oop resolution_exception = PENDING_EXCEPTION;
CLEAR_PENDING_EXCEPTION;
+ // JVMTI has already reported the pending exception
+ // JVMTI internal flag reset is needed in order to report InvocationTargetException
+ if (THREAD->is_Java_thread()) {
+ JvmtiExport::clear_detected_exception((JavaThread*) THREAD);
+ }
JavaCallArguments args(Handle(THREAD, resolution_exception));
THROW_ARG_0(vmSymbols::java_lang_reflect_InvocationTargetException(),
vmSymbols::throwable_void_signature(),
@@ -1074,6 +1080,12 @@ oop Reflection::invoke(instanceKlassHandle klass, methodHandle reflected_method,
// Method threw an exception; wrap it in an InvocationTargetException
oop target_exception = PENDING_EXCEPTION;
CLEAR_PENDING_EXCEPTION;
+ // JVMTI has already reported the pending exception
+ // JVMTI internal flag reset is needed in order to report InvocationTargetException
+ if (THREAD->is_Java_thread()) {
+ JvmtiExport::clear_detected_exception((JavaThread*) THREAD);
+ }
+
JavaCallArguments args(Handle(THREAD, target_exception));
THROW_ARG_0(vmSymbols::java_lang_reflect_InvocationTargetException(),
vmSymbols::throwable_void_signature(),
diff --git a/src/share/vm/runtime/safepoint.cpp b/src/share/vm/runtime/safepoint.cpp
index 52f59be8c..cf64606e5 100644
--- a/src/share/vm/runtime/safepoint.cpp
+++ b/src/share/vm/runtime/safepoint.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -739,80 +739,12 @@ void SafepointSynchronize::block(JavaThread *thread) {
// ------------------------------------------------------------------------------------------------------
// Exception handlers
-#ifndef PRODUCT
-
-#ifdef SPARC
-
-#ifdef _LP64
-#define PTR_PAD ""
-#else
-#define PTR_PAD " "
-#endif
-
-static void print_ptrs(intptr_t oldptr, intptr_t newptr, bool wasoop) {
- bool is_oop = newptr ? (cast_to_oop(newptr))->is_oop() : false;
- tty->print_cr(PTR_FORMAT PTR_PAD " %s %c " PTR_FORMAT PTR_PAD " %s %s",
- oldptr, wasoop?"oop":" ", oldptr == newptr ? ' ' : '!',
- newptr, is_oop?"oop":" ", (wasoop && !is_oop) ? "STALE" : ((wasoop==false&&is_oop==false&&oldptr !=newptr)?"STOMP":" "));
-}
-
-static void print_longs(jlong oldptr, jlong newptr, bool wasoop) {
- bool is_oop = newptr ? (cast_to_oop(newptr))->is_oop() : false;
- tty->print_cr(PTR64_FORMAT " %s %c " PTR64_FORMAT " %s %s",
- oldptr, wasoop?"oop":" ", oldptr == newptr ? ' ' : '!',
- newptr, is_oop?"oop":" ", (wasoop && !is_oop) ? "STALE" : ((wasoop==false&&is_oop==false&&oldptr !=newptr)?"STOMP":" "));
-}
-
-static void print_me(intptr_t *new_sp, intptr_t *old_sp, bool *was_oops) {
-#ifdef _LP64
- tty->print_cr("--------+------address-----+------before-----------+-------after----------+");
- const int incr = 1; // Increment to skip a long, in units of intptr_t
-#else
- tty->print_cr("--------+--address-+------before-----------+-------after----------+");
- const int incr = 2; // Increment to skip a long, in units of intptr_t
-#endif
- tty->print_cr("---SP---|");
- for( int i=0; i<16; i++ ) {
- tty->print("blob %c%d |"PTR_FORMAT" ","LO"[i>>3],i&7,new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++); }
- tty->print_cr("--------|");
- for( int i1=0; i1<frame::memory_parameter_word_sp_offset-16; i1++ ) {
- tty->print("argv pad|"PTR_FORMAT" ",new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++); }
- tty->print(" pad|"PTR_FORMAT" ",new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++);
- tty->print_cr("--------|");
- tty->print(" G1 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr;
- tty->print(" G3 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr;
- tty->print(" G4 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr;
- tty->print(" G5 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr;
- tty->print_cr(" FSR |"PTR_FORMAT" "PTR64_FORMAT" "PTR64_FORMAT,new_sp,*(jlong*)old_sp,*(jlong*)new_sp);
- old_sp += incr; new_sp += incr; was_oops += incr;
- // Skip the floats
- tty->print_cr("--Float-|"PTR_FORMAT,new_sp);
- tty->print_cr("---FP---|");
- old_sp += incr*32; new_sp += incr*32; was_oops += incr*32;
- for( int i2=0; i2<16; i2++ ) {
- tty->print("call %c%d |"PTR_FORMAT" ","LI"[i2>>3],i2&7,new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++); }
- tty->cr();
-}
-#endif // SPARC
-#endif // PRODUCT
-
void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) {
assert(thread->is_Java_thread(), "polling reference encountered by VM thread");
assert(thread->thread_state() == _thread_in_Java, "should come from Java code");
assert(SafepointSynchronize::is_synchronizing(), "polling encountered outside safepoint synchronization");
- // Uncomment this to get some serious before/after printing of the
- // Sparc safepoint-blob frame structure.
- /*
- intptr_t* sp = thread->last_Java_sp();
- intptr_t stack_copy[150];
- for( int i=0; i<150; i++ ) stack_copy[i] = sp[i];
- bool was_oops[150];
- for( int i=0; i<150; i++ )
- was_oops[i] = stack_copy[i] ? ((oop)stack_copy[i])->is_oop() : false;
- */
-
if (ShowSafepointMsgs) {
tty->print("handle_polling_page_exception: ");
}
@@ -824,7 +756,6 @@ void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) {
ThreadSafepointState* state = thread->safepoint_state();
state->handle_polling_page_exception();
- // print_me(sp,stack_copy,was_oops);
}
diff --git a/src/share/vm/runtime/sharedRuntime.cpp b/src/share/vm/runtime/sharedRuntime.cpp
index 3d409ed25..5638dd5ca 100644
--- a/src/share/vm/runtime/sharedRuntime.cpp
+++ b/src/share/vm/runtime/sharedRuntime.cpp
@@ -1230,7 +1230,7 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
#endif
// JSR 292 key invariant:
- // If the resolved method is a MethodHandle invoke target the call
+ // If the resolved method is a MethodHandle invoke target, the call
// site must be a MethodHandle call site, because the lambda form might tail-call
// leaving the stack in a state unknown to either caller or callee
// TODO detune for now but we might need it again
diff --git a/src/share/vm/runtime/stubRoutines.hpp b/src/share/vm/runtime/stubRoutines.hpp
index 1c56a2d90..819b9c42e 100644
--- a/src/share/vm/runtime/stubRoutines.hpp
+++ b/src/share/vm/runtime/stubRoutines.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -99,29 +99,20 @@ class StubRoutines: AllStatic {
// Dependencies
friend class StubGenerator;
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined STUBROUTINES_MD_HPP
+# include STUBROUTINES_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "stubRoutines_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "stubRoutines_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "stubRoutines_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "stubRoutines_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "stubRoutines_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "stubRoutines_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "stubRoutines_ppc_64.hpp"
#endif
-
static jint _verify_oop_count;
static address _verify_oop_subroutine_entry;
diff --git a/src/share/vm/runtime/sweeper.cpp b/src/share/vm/runtime/sweeper.cpp
index 4eba81deb..b4954ff86 100644
--- a/src/share/vm/runtime/sweeper.cpp
+++ b/src/share/vm/runtime/sweeper.cpp
@@ -538,10 +538,14 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
} else if (nm->is_not_entrant()) {
// If there are no current activations of this method on the
// stack we can safely convert it to a zombie method
- if (nm->can_not_entrant_be_converted()) {
+ if (nm->can_convert_to_zombie()) {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
}
+ // Clear ICStubs to prevent back patching stubs of zombie or unloaded
+ // nmethods during the next safepoint (see ICStub::finalize).
+ MutexLocker cl(CompiledIC_lock);
+ nm->clear_ic_stubs();
// Code cache state change is tracked in make_zombie()
nm->make_zombie();
_zombified_count++;
@@ -567,6 +571,12 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
release_nmethod(nm);
_flushed_count++;
} else {
+ {
+ // Clean ICs of unloaded nmethods as well because they may reference other
+ // unloaded nmethods that may be flushed earlier in the sweeper cycle.
+ MutexLocker cl(CompiledIC_lock);
+ nm->cleanup_inline_caches();
+ }
// Code cache state change is tracked in make_zombie()
nm->make_zombie();
_zombified_count++;
diff --git a/src/share/vm/runtime/synchronizer.cpp b/src/share/vm/runtime/synchronizer.cpp
index 33dfbb205..b35f86fc2 100644
--- a/src/share/vm/runtime/synchronizer.cpp
+++ b/src/share/vm/runtime/synchronizer.cpp
@@ -121,7 +121,7 @@ HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
} \
}
-#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_PROBE_WAITED
+#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \
{ \
diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp
index 83dc76d11..4866ec1ef 100644
--- a/src/share/vm/runtime/thread.cpp
+++ b/src/share/vm/runtime/thread.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -141,8 +141,8 @@ HS_DTRACE_PROBE_DECL5(hotspot, thread__stop, char*, intptr_t,
#else /* USDT2 */
-#define HOTSPOT_THREAD_PROBE_start HOTSPOT_THREAD_PROBE_START
-#define HOTSPOT_THREAD_PROBE_stop HOTSPOT_THREAD_PROBE_STOP
+#define HOTSPOT_THREAD_PROBE_start HOTSPOT_THREAD_START
+#define HOTSPOT_THREAD_PROBE_stop HOTSPOT_THREAD_STOP
#define DTRACE_THREAD_PROBE(probe, javathread) \
{ \
@@ -1465,6 +1465,7 @@ void JavaThread::initialize() {
_thread_stat = new ThreadStatistics();
_blocked_on_compilation = false;
_jni_active_critical = 0;
+ _pending_jni_exception_check_fn = NULL;
_do_not_unlock_if_synchronized = false;
_cached_monitor_info = NULL;
_parker = Parker::Allocate(this) ;
@@ -3306,6 +3307,9 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
extern void JDK_Version_init();
+ // Preinitialize version info.
+ VM_Version::early_initialize();
+
// Check version
if (!is_supported_jni_version(args->version)) return JNI_EVERSION;
@@ -3689,9 +3693,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
}
}
- // Give os specific code one last chance to start
- os::init_3();
-
create_vm_timer.end();
#ifdef ASSERT
_vm_complete = true;
diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp
index 2bffb5d74..1117c872b 100644
--- a/src/share/vm/runtime/thread.hpp
+++ b/src/share/vm/runtime/thread.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -926,6 +926,9 @@ class JavaThread: public Thread {
// support for JNI critical regions
jint _jni_active_critical; // count of entries into JNI critical region
+ // Checked JNI: function name requires exception check
+ char* _pending_jni_exception_check_fn;
+
// For deadlock detection.
int _depth_first_number;
@@ -1408,6 +1411,12 @@ class JavaThread: public Thread {
assert(_jni_active_critical >= 0,
"JNI critical nesting problem?"); }
+ // Checked JNI: records whether the caller must check for exceptions, and which function name requires the check
+ bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; }
+ void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = NULL; }
+ const char* get_pending_jni_exception_check() const { return _pending_jni_exception_check_fn; }
+ void set_pending_jni_exception_check(const char* fn_name) { _pending_jni_exception_check_fn = (char*) fn_name; }
+
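These fields back the checked-JNI mode (java -Xcheck:jni): when a native call requires a subsequent exception check, the function name is recorded here so a warning can be issued if the next JNI call is made without first checking for a pending exception. A hedged usage sketch:

    java -Xcheck:jni MyApp   # MyApp is a placeholder; enables these checks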
// For deadlock detection
int depth_first_number() { return _depth_first_number; }
void set_depth_first_number(int dfn) { _depth_first_number = dfn; }
diff --git a/src/share/vm/runtime/threadLocalStorage.cpp b/src/share/vm/runtime/threadLocalStorage.cpp
index 639118fe4..30dacbb6a 100644
--- a/src/share/vm/runtime/threadLocalStorage.cpp
+++ b/src/share/vm/runtime/threadLocalStorage.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,11 @@
# include "os_bsd.inline.hpp"
#endif
+// Solaris no longer has this kind of ThreadLocalStorage implementation.
+// This will be removed from all platforms in the near future.
+
+#ifndef SOLARIS
+
// static member initialization
int ThreadLocalStorage::_thread_index = -1;
@@ -65,3 +70,5 @@ void ThreadLocalStorage::init() {
bool ThreadLocalStorage::is_initialized() {
return (thread_index() != -1);
}
+
+#endif // SOLARIS
diff --git a/src/share/vm/runtime/threadLocalStorage.hpp b/src/share/vm/runtime/threadLocalStorage.hpp
index 0f570f3d3..4883ee5b1 100644
--- a/src/share/vm/runtime/threadLocalStorage.hpp
+++ b/src/share/vm/runtime/threadLocalStorage.hpp
@@ -38,10 +38,14 @@ extern "C" Thread* get_thread();
extern "C" uintptr_t _raw_thread_id();
class ThreadLocalStorage : AllStatic {
+
+ // Exported API
public:
static void set_thread(Thread* thread);
static Thread* get_thread_slow();
static void invalidate_all() { pd_invalidate_all(); }
+ static void init();
+ static bool is_initialized();
// Machine dependent stuff
#ifdef TARGET_OS_ARCH_linux_x86
@@ -78,17 +82,12 @@ class ThreadLocalStorage : AllStatic {
# include "threadLS_bsd_zero.hpp"
#endif
-
+#ifndef SOLARIS
public:
// Accessor
static inline int thread_index() { return _thread_index; }
static inline void set_thread_index(int index) { _thread_index = index; }
- // Initialization
- // Called explicitly from VMThread::activate_system instead of init_globals.
- static void init();
- static bool is_initialized();
-
private:
static int _thread_index;
@@ -97,6 +96,9 @@ class ThreadLocalStorage : AllStatic {
// Processor dependent parts of set_thread and initialization
static void pd_set_thread(Thread* thread);
static void pd_init();
+
+#endif // SOLARIS
+
// Invalidate any thread cacheing or optimization schemes.
static void pd_invalidate_all();
diff --git a/src/share/vm/runtime/vframe.cpp b/src/share/vm/runtime/vframe.cpp
index a47f96c28..0d5524118 100644
--- a/src/share/vm/runtime/vframe.cpp
+++ b/src/share/vm/runtime/vframe.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -148,8 +148,7 @@ static void print_locked_object_class_name(outputStream* st, Handle obj, const c
if (obj.not_null()) {
st->print("\t- %s <" INTPTR_FORMAT "> ", lock_state, (address)obj());
if (obj->klass() == SystemDictionary::Class_klass()) {
- Klass* target_klass = java_lang_Class::as_Klass(obj());
- st->print_cr("(a java.lang.Class for %s)", InstanceKlass::cast(target_klass)->external_name());
+ st->print_cr("(a java.lang.Class for %s)", java_lang_Class::as_external_name(obj()));
} else {
Klass* k = obj->klass();
st->print_cr("(a %s)", k->external_name());
@@ -189,6 +188,7 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {
if (monitor->eliminated() && is_compiled_frame()) { // Eliminated in compiled code
if (monitor->owner_is_scalar_replaced()) {
Klass* k = java_lang_Class::as_Klass(monitor->owner_klass());
+ // The lockbits format below matches this one.
st->print("\t- eliminated <owner is scalar replaced> (a %s)", k->external_name());
} else {
oop obj = monitor->owner();
@@ -206,9 +206,10 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {
// see if we have completed the lock or we are blocked trying to
// acquire it - we can only be blocked if the monitor is inflated
+ markOop mark = NULL;
const char *lock_state = "locked"; // assume we have the monitor locked
if (!found_first_monitor && frame_count == 0) {
- markOop mark = monitor->owner()->mark();
+ mark = monitor->owner()->mark();
if (mark->has_monitor() &&
( // we have marked ourself as pending on this monitor
mark->monitor() == thread()->current_pending_monitor() ||
@@ -216,11 +217,19 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {
!mark->monitor()->is_entered(thread())
)) {
lock_state = "waiting to lock";
+ } else {
+ mark = NULL; // Disable printing below
}
}
+ print_locked_object_class_name(st, monitor->owner(), lock_state);
+ if (Verbose && mark != NULL) {
+ // Match the format above, replacing "-" with " ".
+ st->print("\t lockbits=");
+ mark->print_on(st);
+ st->cr();
+ }
found_first_monitor = true;
- print_locked_object_class_name(st, monitor->owner(), lock_state);
}
}
}
@@ -578,10 +587,15 @@ void javaVFrame::print() {
tty->print("( null )");
} else {
monitor->owner()->print_value();
- tty->print("(" INTPTR_FORMAT ")", (address)monitor->owner());
+ tty->print("(owner=" INTPTR_FORMAT ")", (address)monitor->owner());
+ }
+ if (monitor->eliminated()) {
+ if (is_compiled_frame()) {
+ tty->print(" ( lock is eliminated in compiled frame )");
+ } else {
+ tty->print(" ( lock is eliminated, frame not compiled )");
+ }
}
- if (monitor->eliminated() && is_compiled_frame())
- tty->print(" ( lock is eliminated )");
tty->cr();
tty->print("\t ");
monitor->lock()->print_on(tty);
diff --git a/src/share/vm/runtime/vframe.hpp b/src/share/vm/runtime/vframe.hpp
index 70f31b14c..2adaf841f 100644
--- a/src/share/vm/runtime/vframe.hpp
+++ b/src/share/vm/runtime/vframe.hpp
@@ -389,12 +389,12 @@ inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
decode_offset < 0 ||
decode_offset >= nm()->scopes_data_size()) {
// 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
- // If we attempt to read nmethod::scopes_data at serialized_null (== 0),
- // or if we read some at other crazy offset,
- // we will decode garbage and make wild references into the heap,
- // leading to crashes in product mode.
- // (This isn't airtight, of course, since there are internal
- // offsets which are also crazy.)
+ // If we read nmethod::scopes_data at serialized_null (== 0)
+ // or at some other invalid offset, invalid values will be decoded.
+ // Based on these values, invalid heap locations could be referenced,
+ // possibly leading to crashes in product mode.
+ // Therefore, do not use the decode offset if it is invalid; instead fill
+ // the frame as if it were a native compiled frame (no Java-level assumptions).
#ifdef ASSERT
if (WizardMode) {
tty->print_cr("Error in fill_from_frame: pc_desc for "
@@ -514,9 +514,15 @@ inline void vframeStreamCommon::fill_from_interpreter_frame() {
intptr_t bcx = _frame.interpreter_frame_bcx();
int bci = method->validate_bci_from_bcx(bcx);
// 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
+ // AsyncGetCallTrace interrupts the VM asynchronously. As a result
+ // it is possible to access an interpreter frame for which
+ // no Java-level information is yet available (e.g., because
+ // the frame was being created when the VM interrupted it).
+ // In this scenario, pretend that the interpreter is at the point
+ // of entering the method.
if (bci < 0) {
found_bad_method_frame();
- bci = 0; // pretend it's on the point of entering
+ bci = 0;
}
_mode = interpreted_mode;
_method = method;
diff --git a/src/share/vm/runtime/virtualspace.cpp b/src/share/vm/runtime/virtualspace.cpp
index c3c4ba632..14dbd74b1 100644
--- a/src/share/vm/runtime/virtualspace.cpp
+++ b/src/share/vm/runtime/virtualspace.cpp
@@ -52,12 +52,22 @@ ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
_alignment(0), _special(false), _executable(false) {
}
-ReservedSpace::ReservedSpace(size_t size) {
- size_t page_size = os::page_size_for_region(size, size, 1);
+ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
+ bool has_preferred_page_size = preferred_page_size != 0;
+ // Want to use large pages where possible and pad with small pages.
+ size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
bool large_pages = page_size != (size_t)os::vm_page_size();
- // Don't force the alignment to be large page aligned,
- // since that will waste memory.
- size_t alignment = os::vm_allocation_granularity();
+ size_t alignment;
+ if (large_pages && has_preferred_page_size) {
+ alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
+ // ReservedSpace initialization requires size to be aligned to the given
+ // alignment. Align the size up.
+ size = align_size_up(size, alignment);
+ } else {
+ // Don't force the alignment to be large page aligned,
+ // since that will waste memory.
+ alignment = os::vm_allocation_granularity();
+ }
initialize(size, alignment, large_pages, NULL, 0, false);
}
@@ -372,7 +382,7 @@ VirtualSpace::VirtualSpace() {
bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
- const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1);
+ const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}
@@ -630,19 +640,7 @@ bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
}
if (pre_touch || AlwaysPreTouch) {
- int vm_ps = os::vm_page_size();
- for (char* curr = previous_high;
- curr < unaligned_new_high;
- curr += vm_ps) {
- // Note the use of a write here; originally we tried just a read, but
- // since the value read was unused, the optimizer removed the read.
- // If we ever have a concurrent touchahead thread, we'll want to use
- // a read, to avoid the potential of overwriting data (if a mutator
- // thread beats the touchahead thread to a page). There are various
- // ways of making sure this read is not optimized away: for example,
- // generating the code for a read procedure at runtime.
- *curr = 0;
- }
+ os::pretouch_memory(previous_high, unaligned_new_high);
}
_high += bytes;
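The hand-written loop is factored out into os::pretouch_memory. A minimal sketch of such a helper, based directly on the loop deleted above (the page_size parameter stands in for os::vm_page_size()):

    #include <stddef.h>

    // Write one byte per page so the OS backs the whole range with real
    // memory. A write is used rather than a read because a read whose result
    // is unused can be optimized away, as the deleted comment explains.
    static void pretouch_memory(char* start, char* end, size_t page_size) {
      for (char* p = start; p < end; p += page_size) {
        *p = 0;
      }
    }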
@@ -1007,7 +1005,7 @@ class TestVirtualSpace : AllStatic {
case Disable:
return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
case Commit:
- return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1));
+ return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
}
}
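A compact sketch of the sizing decision the new constructor makes, with stand-in helpers for align_size_up(), os::vm_allocation_granularity() and os::vm_page_size():

    #include <stddef.h>

    // Assumes a power-of-two alignment, as the real align_size_up() does.
    static size_t align_up(size_t s, size_t a) { return (s + a - 1) & ~(a - 1); }

    // Mirrors the constructor above: only when large pages are in use and the
    // caller supplied a preferred page size is the reservation aligned (and
    // the size padded) up to that page size; otherwise stick to the
    // allocation granularity so no memory is wasted.
    static size_t choose_alignment(size_t* size, size_t page_size, bool preferred,
                                   size_t small_page, size_t granularity) {
      if (page_size != small_page && preferred) {
        size_t alignment = page_size > granularity ? page_size : granularity;
        *size = align_up(*size, alignment);
        return alignment;
      }
      return granularity;
    }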
diff --git a/src/share/vm/runtime/virtualspace.hpp b/src/share/vm/runtime/virtualspace.hpp
index e614d0d67..808e45cde 100644
--- a/src/share/vm/runtime/virtualspace.hpp
+++ b/src/share/vm/runtime/virtualspace.hpp
@@ -54,7 +54,11 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC {
public:
// Constructor
ReservedSpace();
- ReservedSpace(size_t size);
+ // Initialize the reserved space with the given size. If preferred_page_size
+ // is set, use it as the minimum page size and alignment. This may waste some
+ // space if the given size is not aligned to that value, as the reservation
+ // will be aligned up to the final alignment in this case.
+ ReservedSpace(size_t size, size_t preferred_page_size = 0);
ReservedSpace(size_t size, size_t alignment, bool large,
char* requested_address = NULL,
const size_t noaccess_prefix = 0);
diff --git a/src/share/vm/runtime/vmStructs.cpp b/src/share/vm/runtime/vmStructs.cpp
index 42c56236e..f1200e339 100644
--- a/src/share/vm/runtime/vmStructs.cpp
+++ b/src/share/vm/runtime/vmStructs.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -103,6 +103,7 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/macros.hpp"
+
#ifdef TARGET_ARCH_x86
# include "vmStructs_x86.hpp"
#endif
@@ -167,6 +168,11 @@
#include "gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp"
#include "gc_implementation/g1/vmStructs_g1.hpp"
#endif // INCLUDE_ALL_GCS
+
+#if INCLUDE_TRACE
+ #include "runtime/vmStructs_trace.hpp"
+#endif
+
#ifdef COMPILER2
#include "opto/addnode.hpp"
#include "opto/block.hpp"
@@ -186,25 +192,17 @@
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "opto/vectornode.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
+#if defined ADGLOBALS_MD_HPP
+# include ADGLOBALS_MD_HPP
+#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/adGlobals_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
+#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/adGlobals_x86_64.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_sparc
+#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/adGlobals_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_zero
+#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/adGlobals_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_arm
-# include "adfiles/adGlobals_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "adfiles/adGlobals_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
+#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/adGlobals_ppc_64.hpp"
#endif
#endif // COMPILER2
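Converting the repeated #ifdef blocks into a single #if/#elif chain also lets ADGLOBALS_MD_HPP take priority: a port whose model is not listed here can, hypothetically, inject its generated header from the build system, e.g. by passing -DADGLOBALS_MD_HPP='"adfiles/adGlobals_myport.hpp"' (an illustrative name) to the compiler instead of patching this file; the dropped arm and ppc_32 models would otherwise each have needed their own branch.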
@@ -248,7 +246,6 @@ typedef TwoOopHashtable<Klass*, mtClass> KlassTwoOopHashtable;
typedef Hashtable<Klass*, mtClass> KlassHashtable;
typedef HashtableEntry<Klass*, mtClass> KlassHashtableEntry;
typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
-typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > MetablockTreeDictionary;
//--------------------------------------------------------------------------------
// VM_STRUCTS
@@ -1290,11 +1287,8 @@ typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > MetablockTreeDicti
volatile_nonstatic_field(FreeChunk, _size, size_t) \
nonstatic_field(FreeChunk, _next, FreeChunk*) \
nonstatic_field(FreeChunk, _prev, FreeChunk*) \
- nonstatic_field(FreeList<FreeChunk>, _size, size_t) \
- nonstatic_field(FreeList<Metablock>, _size, size_t) \
- nonstatic_field(FreeList<FreeChunk>, _count, ssize_t) \
- nonstatic_field(FreeList<Metablock>, _count, ssize_t) \
- nonstatic_field(MetablockTreeDictionary, _total_size, size_t)
+ nonstatic_field(AdaptiveFreeList<FreeChunk>, _size, size_t) \
+ nonstatic_field(AdaptiveFreeList<FreeChunk>, _count, ssize_t)
//--------------------------------------------------------------------------------
@@ -1386,6 +1380,8 @@ typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > MetablockTreeDicti
/* unsigned short on Win32 */ \
declare_unsigned_integer_type(u1) \
declare_unsigned_integer_type(u2) \
+ declare_unsigned_integer_type(u4) \
+ declare_unsigned_integer_type(u8) \
declare_unsigned_integer_type(unsigned) \
\
/*****************************/ \
@@ -2166,14 +2162,8 @@ typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > MetablockTreeDicti
\
/* freelist */ \
declare_toplevel_type(FreeChunk*) \
- declare_toplevel_type(Metablock*) \
- declare_toplevel_type(FreeBlockDictionary<FreeChunk>*) \
- declare_toplevel_type(FreeList<FreeChunk>*) \
- declare_toplevel_type(FreeList<FreeChunk>) \
- declare_toplevel_type(FreeBlockDictionary<Metablock>*) \
- declare_toplevel_type(FreeList<Metablock>*) \
- declare_toplevel_type(FreeList<Metablock>) \
- declare_type(MetablockTreeDictionary, FreeBlockDictionary<Metablock>)
+ declare_toplevel_type(AdaptiveFreeList<FreeChunk>*) \
+ declare_toplevel_type(AdaptiveFreeList<FreeChunk>)
//--------------------------------------------------------------------------------
@@ -2915,6 +2905,11 @@ VMStructEntry VMStructs::localHotSpotVMStructs[] = {
GENERATE_STATIC_VM_STRUCT_ENTRY)
#endif // INCLUDE_ALL_GCS
+#if INCLUDE_TRACE
+ VM_STRUCTS_TRACE(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
+ GENERATE_STATIC_VM_STRUCT_ENTRY)
+#endif
+
VM_STRUCTS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
GENERATE_STATIC_VM_STRUCT_ENTRY,
GENERATE_UNCHECKED_NONSTATIC_VM_STRUCT_ENTRY,
@@ -2960,6 +2955,11 @@ VMTypeEntry VMStructs::localHotSpotVMTypes[] = {
GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
#endif // INCLUDE_ALL_GCS
+#if INCLUDE_TRACE
+ VM_TYPES_TRACE(GENERATE_VM_TYPE_ENTRY,
+ GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
+#endif
+
VM_TYPES_CPU(GENERATE_VM_TYPE_ENTRY,
GENERATE_TOPLEVEL_VM_TYPE_ENTRY,
GENERATE_OOP_VM_TYPE_ENTRY,
@@ -2995,6 +2995,10 @@ VMIntConstantEntry VMStructs::localHotSpotVMIntConstants[] = {
VM_INT_CONSTANTS_PARNEW(GENERATE_VM_INT_CONSTANT_ENTRY)
#endif // INCLUDE_ALL_GCS
+#if INCLUDE_TRACE
+ VM_INT_CONSTANTS_TRACE(GENERATE_VM_INT_CONSTANT_ENTRY)
+#endif
+
VM_INT_CONSTANTS_CPU(GENERATE_VM_INT_CONSTANT_ENTRY,
GENERATE_PREPROCESSOR_VM_INT_CONSTANT_ENTRY,
GENERATE_C1_VM_INT_CONSTANT_ENTRY,
@@ -3057,8 +3061,14 @@ VMStructs::init() {
VM_STRUCTS_G1(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
CHECK_STATIC_VM_STRUCT_ENTRY);
+
#endif // INCLUDE_ALL_GCS
+#if INCLUDE_TRACE
+ VM_STRUCTS_TRACE(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_STATIC_VM_STRUCT_ENTRY);
+#endif
+
VM_STRUCTS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
CHECK_STATIC_VM_STRUCT_ENTRY,
CHECK_NO_OP,
@@ -3097,8 +3107,14 @@ VMStructs::init() {
VM_TYPES_G1(CHECK_VM_TYPE_ENTRY,
CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
+
#endif // INCLUDE_ALL_GCS
+#if INCLUDE_TRACE
+ VM_TYPES_TRACE(CHECK_VM_TYPE_ENTRY,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
+#endif
+
VM_TYPES_CPU(CHECK_VM_TYPE_ENTRY,
CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
@@ -3161,6 +3177,12 @@ VMStructs::init() {
debug_only(VM_STRUCTS_G1(ENSURE_FIELD_TYPE_PRESENT,
ENSURE_FIELD_TYPE_PRESENT));
#endif // INCLUDE_ALL_GCS
+
+#if INCLUDE_TRACE
+ debug_only(VM_STRUCTS_TRACE(ENSURE_FIELD_TYPE_PRESENT,
+ ENSURE_FIELD_TYPE_PRESENT));
+#endif
+
debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT,
ENSURE_FIELD_TYPE_PRESENT,
CHECK_NO_OP,
diff --git a/src/share/vm/runtime/vmStructs_trace.hpp b/src/share/vm/runtime/vmStructs_trace.hpp
new file mode 100644
index 000000000..f4a9f4d89
--- /dev/null
+++ b/src/share/vm/runtime/vmStructs_trace.hpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_VMSTRUCTS_TRACE_HPP
+#define SHARE_VM_RUNTIME_VMSTRUCTS_TRACE_HPP
+
+#define VM_INT_CONSTANTS_TRACE(a)
+
+#define VM_STRUCTS_TRACE(a, b)
+
+#define VM_TYPES_TRACE(a, b)
+
+
+#endif // SHARE_VM_RUNTIME_VMSTRUCTS_TRACE_HPP
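The new header intentionally defines all three macros as empty: with these placeholders, the VM_STRUCTS_TRACE, VM_TYPES_TRACE and VM_INT_CONSTANTS_TRACE expansions added to vmStructs.cpp contribute no entries in this build, while a build shipping a real trace backend could, hypothetically, substitute definitions that do, for example:

    // hypothetical non-empty replacement (TraceTypesStub is illustrative):
    // #define VM_TYPES_TRACE(declare_type, declare_toplevel_type) \
    //   declare_toplevel_type(TraceTypesStub)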
diff --git a/src/share/vm/runtime/vm_operations.cpp b/src/share/vm/runtime/vm_operations.cpp
index d46e1f775..a4219d2fd 100644
--- a/src/share/vm/runtime/vm_operations.cpp
+++ b/src/share/vm/runtime/vm_operations.cpp
@@ -106,8 +106,8 @@ void VM_Deoptimize::doit() {
// Deoptimize all activations depending on marked nmethods
Deoptimization::deoptimize_dependents();
- // Make the dependent methods zombies
- CodeCache::make_marked_nmethods_zombies();
+ // Make the dependent methods not entrant
+ CodeCache::make_marked_nmethods_not_entrant();
}
diff --git a/src/share/vm/runtime/vm_version.cpp b/src/share/vm/runtime/vm_version.cpp
index 6888c0919..a046d8142 100644
--- a/src/share/vm/runtime/vm_version.cpp
+++ b/src/share/vm/runtime/vm_version.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -180,17 +180,17 @@ const char* Abstract_VM_Version::jre_release_version() {
AIX_ONLY("aix") \
BSD_ONLY("bsd")
+#ifndef CPU
#ifdef ZERO
#define CPU ZERO_LIBARCH
#else
#define CPU IA32_ONLY("x86") \
IA64_ONLY("ia64") \
AMD64_ONLY("amd64") \
- ARM_ONLY("arm") \
- PPC32_ONLY("ppc") \
PPC64_ONLY("ppc64") \
SPARC_ONLY("sparc")
#endif // ZERO
+#endif
const char *Abstract_VM_Version::vm_platform_string() {
return OS "-" CPU;
@@ -251,12 +251,6 @@ const char* Abstract_VM_Version::internal_vm_info_string() {
#ifndef FLOAT_ARCH
#if defined(__SOFTFP__)
#define FLOAT_ARCH_STR "-sflt"
- #elif defined(E500V2)
- #define FLOAT_ARCH_STR "-e500v2"
- #elif defined(ARM)
- #define FLOAT_ARCH_STR "-vfp"
- #elif defined(PPC32)
- #define FLOAT_ARCH_STR "-hflt"
#else
#define FLOAT_ARCH_STR ""
#endif
diff --git a/src/share/vm/runtime/vm_version.hpp b/src/share/vm/runtime/vm_version.hpp
index d48b45422..07ff4ba8d 100644
--- a/src/share/vm/runtime/vm_version.hpp
+++ b/src/share/vm/runtime/vm_version.hpp
@@ -56,6 +56,13 @@ class Abstract_VM_Version: AllStatic {
public:
static void initialize();
+ // This allows for early initialization of VM_Version information
+ // that may be needed later in the initialization sequence but before
+ // full VM_Version initialization is possible. It cannot depend on any
+ // other part of the VM being initialized when called. Platforms that
+ // need to specialize this define VM_Version::early_initialize().
+ static void early_initialize() { }
+
// Name
static const char* vm_name();
// Vendor
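A runnable stub sketching the two-phase contract the comment describes (VMVersionStub is a stand-in; the real class is Abstract_VM_Version):

    #include <cstdio>

    struct VMVersionStub {
      // May only do self-contained work (e.g., probe hardware state);
      // no other VM subsystem exists yet when this runs.
      static void early_initialize() { std::puts("early: hardware probing only"); }
      // Runs later in startup, once the rest of the VM is available.
      static void initialize()       { std::puts("full initialization"); }
    };

    int main() {
      VMVersionStub::early_initialize();   // very early in VM startup
      // ... other subsystems come up in between ...
      VMVersionStub::initialize();
      return 0;
    }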
diff --git a/src/share/vm/services/classLoadingService.cpp b/src/share/vm/services/classLoadingService.cpp
index 51dfa9b1b..92a548ba5 100644
--- a/src/share/vm/services/classLoadingService.cpp
+++ b/src/share/vm/services/classLoadingService.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@ HS_DTRACE_PROBE_DECL4(hotspot, class__unloaded, char*, int, oop, bool);
len = name->utf8_length(); \
} \
HS_DTRACE_PROBE4(hotspot, class__##type, \
- data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), (shared)); \
+ data, len, (void *)(clss)->class_loader(), (shared)); \
}
#else /* USDT2 */
diff --git a/src/share/vm/services/management.cpp b/src/share/vm/services/management.cpp
index 2775781eb..a8e6b0b27 100644
--- a/src/share/vm/services/management.cpp
+++ b/src/share/vm/services/management.cpp
@@ -155,11 +155,14 @@ void Management::initialize(TRAPS) {
// Load and initialize the sun.management.Agent class
// invoke startAgent method to start the management server
Handle loader = Handle(THREAD, SystemDictionary::java_system_loader());
- Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::sun_management_Agent(),
+ Klass* k = SystemDictionary::resolve_or_null(vmSymbols::sun_management_Agent(),
loader,
Handle(),
- true,
- CHECK);
+ THREAD);
+ if (k == NULL) {
+ vm_exit_during_initialization("Management agent initialization failure: "
+ "class sun.management.Agent not found.");
+ }
instanceKlassHandle ik (THREAD, k);
JavaValue result(T_VOID);
@@ -1104,6 +1107,8 @@ static void do_thread_dump(ThreadDumpResult* dump_result,
bool with_locked_monitors,
bool with_locked_synchronizers,
TRAPS) {
+ // No need to actually perform a thread dump if no TIDs are specified.
+ if (num_threads == 0) return;
// First get an array of threadObj handles.
// A JavaThread may terminate before we get the stack trace.
diff --git a/src/share/vm/trace/trace.xml b/src/share/vm/trace/trace.xml
index fc3419836..52852deb7 100644
--- a/src/share/vm/trace/trace.xml
+++ b/src/share/vm/trace/trace.xml
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
This code is free software; you can redistribute it and/or modify it
@@ -352,6 +352,12 @@ Declares a structure type that can be used in other events.
<value type="UTF8" field="name" label="Name" />
</event>
+ <event id="AllocationRequiringGC" path="vm/gc/detailed/allocation_requiring_gc" label="Allocation Requiring GC"
+ has_thread="true" has_stacktrace="true" is_instant="true">
+ <value type="UINT" field="gcId" label="Pending GC ID" relation="GC_ID" />
+ <value type="BYTES64" field="size" label="Allocation Size" />
+ </event>
+
<!-- Compiler events -->
<event id="Compilation" path="vm/compiler/compilation" label="Compilation"
diff --git a/src/share/vm/utilities/accessFlags.hpp b/src/share/vm/utilities/accessFlags.hpp
index 555a6ad91..a66bc9ec0 100644
--- a/src/share/vm/utilities/accessFlags.hpp
+++ b/src/share/vm/utilities/accessFlags.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,7 +54,8 @@ enum {
JVM_ACC_IS_OLD = 0x00010000, // RedefineClasses() has replaced this method
JVM_ACC_IS_OBSOLETE = 0x00020000, // RedefineClasses() has made method obsolete
JVM_ACC_IS_PREFIXED_NATIVE = 0x00040000, // JVMTI has prefixed this native method
- JVM_ACC_ON_STACK = 0x00080000, // RedefinedClasses() is used on the stack
+ JVM_ACC_ON_STACK = 0x00080000, // RedefineClasses() was used on the stack
+ JVM_ACC_IS_DELETED = 0x00008000, // RedefineClasses() has deleted this method
// Klass* flags
JVM_ACC_HAS_MIRANDA_METHODS = 0x10000000, // True if this class has miranda methods in it's vtable
@@ -131,6 +132,7 @@ class AccessFlags VALUE_OBJ_CLASS_SPEC {
bool has_jsrs () const { return (_flags & JVM_ACC_HAS_JSRS ) != 0; }
bool is_old () const { return (_flags & JVM_ACC_IS_OLD ) != 0; }
bool is_obsolete () const { return (_flags & JVM_ACC_IS_OBSOLETE ) != 0; }
+ bool is_deleted () const { return (_flags & JVM_ACC_IS_DELETED ) != 0; }
bool is_prefixed_native () const { return (_flags & JVM_ACC_IS_PREFIXED_NATIVE ) != 0; }
// Klass* flags
@@ -196,6 +198,7 @@ class AccessFlags VALUE_OBJ_CLASS_SPEC {
void set_has_jsrs() { atomic_set_bits(JVM_ACC_HAS_JSRS); }
void set_is_old() { atomic_set_bits(JVM_ACC_IS_OLD); }
void set_is_obsolete() { atomic_set_bits(JVM_ACC_IS_OBSOLETE); }
+ void set_is_deleted() { atomic_set_bits(JVM_ACC_IS_DELETED); }
void set_is_prefixed_native() { atomic_set_bits(JVM_ACC_IS_PREFIXED_NATIVE); }
void clear_not_c1_compilable() { atomic_clear_bits(JVM_ACC_NOT_C1_COMPILABLE); }
diff --git a/src/share/vm/utilities/debug.cpp b/src/share/vm/utilities/debug.cpp
index f39aa667c..232f35016 100644
--- a/src/share/vm/utilities/debug.cpp
+++ b/src/share/vm/utilities/debug.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -302,6 +302,16 @@ void report_java_out_of_memory(const char* message) {
VMError err(message);
err.report_java_out_of_memory();
}
+
+ if (CrashOnOutOfMemoryError) {
+ tty->print_cr("Aborting due to java.lang.OutOfMemoryError: %s", message);
+ fatal(err_msg("OutOfMemory encountered: %s", message));
+ }
+
+ if (ExitOnOutOfMemoryError) {
+ tty->print_cr("Terminating due to java.lang.OutOfMemoryError: %s", message);
+ exit(3);
+ }
}
}
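These two flags give services a fail-fast option: ExitOnOutOfMemoryError terminates the process with exit status 3, while CrashOnOutOfMemoryError aborts through fatal(), producing a fatal error report. A typical (illustrative) invocation:

    java -XX:+ExitOnOutOfMemoryError -jar server.jar

where server.jar is a placeholder for an application supervised by a process manager that restarts it on exit.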
@@ -666,6 +676,13 @@ void help() {
tty->print_cr(" pm(int pc) - print Method* given compiled PC");
tty->print_cr(" findm(intptr_t pc) - finds Method*");
tty->print_cr(" find(intptr_t x) - finds & prints nmethod/stub/bytecode/oop based on pointer into it");
+ tty->print_cr(" pns(void* sp, void* fp, void* pc) - print native (i.e. mixed) stack trace. E.g.");
+ tty->print_cr(" pns($sp, $rbp, $pc) on Linux/amd64 and Solaris/amd64 or");
+ tty->print_cr(" pns($sp, $ebp, $pc) on Linux/x86 or");
+ tty->print_cr(" pns($sp, 0, $pc) on Linux/ppc64 or");
+ tty->print_cr(" pns($sp + 0x7ff, 0, $pc) on Solaris/SPARC");
+ tty->print_cr(" - in gdb do 'set overload-resolution off' before calling pns()");
+ tty->print_cr(" - in dbx do 'frame 1' before calling pns()");
tty->print_cr("misc.");
tty->print_cr(" flush() - flushes the log file");
@@ -678,3 +695,56 @@ void help() {
}
#endif // !PRODUCT
+
+void print_native_stack(outputStream* st, frame fr, Thread* t, char* buf, int buf_size) {
+
+ // see if it's a valid frame
+ if (fr.pc()) {
+ st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");
+
+ int count = 0;
+ while (count++ < StackPrintLimit) {
+ fr.print_on_error(st, buf, buf_size);
+ st->cr();
+ // Compiled code may use the EBP register on x86, so it can look like a
+ // non-walkable C frame. Use frame.sender() for Java frames.
+ if (t && t->is_Java_thread()) {
+ // Catch very first native frame by using stack address.
+ // For JavaThread stack_base and stack_size should be set.
+ if (!t->on_local_stack((address)(fr.real_fp() + 1))) {
+ break;
+ }
+ if (fr.is_java_frame() || fr.is_native_frame() || fr.is_runtime_frame()) {
+ RegisterMap map((JavaThread*)t, false); // No update
+ fr = fr.sender(&map);
+ } else {
+ fr = os::get_sender_for_C_frame(&fr);
+ }
+ } else {
+ // is_first_C_frame() performs only simple checks on the frame pointer;
+ // it will pass if compiled Java code has a pointer in EBP.
+ if (os::is_first_C_frame(&fr)) break;
+ fr = os::get_sender_for_C_frame(&fr);
+ }
+ }
+
+ if (count > StackPrintLimit) {
+ st->print_cr("...<more frames>...");
+ }
+
+ st->cr();
+ }
+}
+
+#ifndef PRODUCT
+
+extern "C" void pns(void* sp, void* fp, void* pc) { // print native stack
+ Command c("pns");
+ static char buf[O_BUFLEN];
+ Thread* t = ThreadLocalStorage::get_thread_slow();
+ // Call generic frame constructor (certain arguments may be ignored)
+ frame fr(sp, fp, pc);
+ print_native_stack(tty, fr, t, buf, sizeof(buf));
+}
+
+#endif // !PRODUCT
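Matching the updated help() text, the new pns() hook can be driven from a native debugger; for example, on Linux/amd64:

    (gdb) set overload-resolution off
    (gdb) call pns($sp, $rbp, $pc)

On platforms that don't need a frame pointer the second argument may be 0, as the help text shows for Linux/ppc64.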
diff --git a/src/share/vm/utilities/debug.hpp b/src/share/vm/utilities/debug.hpp
index 5c8f2c3e7..5b47e6142 100644
--- a/src/share/vm/utilities/debug.hpp
+++ b/src/share/vm/utilities/debug.hpp
@@ -265,4 +265,7 @@ NOT_PRODUCT(void test_error_handler();)
void pd_ps(frame f);
void pd_obfuscate_location(char *buf, size_t buflen);
+class outputStream;
+void print_native_stack(outputStream* st, frame fr, Thread* t, char* buf, int buf_size);
+
#endif // SHARE_VM_UTILITIES_DEBUG_HPP
diff --git a/src/share/vm/utilities/elfFile.cpp b/src/share/vm/utilities/elfFile.cpp
index 3d8959563..ac943bd15 100644
--- a/src/share/vm/utilities/elfFile.cpp
+++ b/src/share/vm/utilities/elfFile.cpp
@@ -261,7 +261,12 @@ bool ElfFile::specifies_noexecstack() {
}
}
}
+// AARCH64 defaults to noexecstack. All others default to execstack.
+#ifdef AARCH64
+ return true;
+#else
return false;
+#endif
}
#endif
diff --git a/src/share/vm/utilities/globalDefinitions_gcc.hpp b/src/share/vm/utilities/globalDefinitions_gcc.hpp
index 576b0235a..efa0b4e1a 100644
--- a/src/share/vm/utilities/globalDefinitions_gcc.hpp
+++ b/src/share/vm/utilities/globalDefinitions_gcc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,14 +44,6 @@
#endif // SOLARIS
#include <math.h>
-#ifndef FP_PZERO
-// Linux doesn't have positive/negative zero
-#define FP_PZERO FP_ZERO
-#endif
-#if (!defined fpclass) && ((!defined SPARC) || (!defined SOLARIS))
-#define fpclass fpclassify
-#endif
-
#include <time.h>
#include <fcntl.h>
#include <dlfcn.h>
@@ -220,7 +212,7 @@ extern "C" {
#define DEBUG_EXCEPTION ::abort();
-#ifdef ARM
+#ifdef ARM32
#ifdef SOLARIS
#define BREAKPOINT __asm__ volatile (".long 0xe1200070")
#else
diff --git a/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp b/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp
index b64bbf8da..ccd4d8b3f 100644
--- a/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp
+++ b/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,9 @@
# include <ctype.h>
+#define __USE_LEGACY_PROTOTYPES__
# include <dirent.h>
+#undef __USE_LEGACY_PROTOTYPES__
# include <string.h>
# include <strings.h> // for bsd'isms
# include <stdarg.h>
@@ -46,15 +48,6 @@
# include <ieeefp.h>
#endif
# include <math.h>
-#ifdef LINUX
-#ifndef FP_PZERO
- // Linux doesn't have positive/negative zero
- #define FP_PZERO FP_ZERO
-#endif
-#ifndef fpclass
- #define fpclass fpclassify
-#endif
-#endif
# include <time.h>
# include <fcntl.h>
# include <dlfcn.h>
diff --git a/src/share/vm/utilities/globalDefinitions_xlc.hpp b/src/share/vm/utilities/globalDefinitions_xlc.hpp
index ad71883f8..7241dc028 100644
--- a/src/share/vm/utilities/globalDefinitions_xlc.hpp
+++ b/src/share/vm/utilities/globalDefinitions_xlc.hpp
@@ -41,14 +41,6 @@
#include <wchar.h>
#include <math.h>
-#ifndef FP_PZERO
-// Linux doesn't have positive/negative zero
-#define FP_PZERO FP_ZERO
-#endif
-#if (!defined fpclass)
-#define fpclass fpclassify
-#endif
-
#include <time.h>
#include <fcntl.h>
#include <dlfcn.h>
diff --git a/src/share/vm/utilities/macros.hpp b/src/share/vm/utilities/macros.hpp
index b6fa44e14..da206d398 100644
--- a/src/share/vm/utilities/macros.hpp
+++ b/src/share/vm/utilities/macros.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -392,7 +392,6 @@
#define NOT_E500V2(code) code
#endif
-
#ifdef ARM
#define ARM_ONLY(code) code
#define NOT_ARM(code)
@@ -401,6 +400,14 @@
#define NOT_ARM(code) code
#endif
+#ifdef ARM32
+#define ARM32_ONLY(code) code
+#define NOT_ARM32(code)
+#else
+#define ARM32_ONLY(code)
+#define NOT_ARM32(code) code
+#endif
+
#ifdef JAVASE_EMBEDDED
#define EMBEDDED_ONLY(code) code
#define NOT_EMBEDDED(code)
diff --git a/src/share/vm/utilities/vmError.cpp b/src/share/vm/utilities/vmError.cpp
index e9abf2858..15f6bf484 100644
--- a/src/share/vm/utilities/vmError.cpp
+++ b/src/share/vm/utilities/vmError.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -229,7 +229,7 @@ char* VMError::error_string(char* buf, int buflen) {
if (signame) {
jio_snprintf(buf, buflen,
- "%s (0x%x) at pc=" PTR_FORMAT ", pid=%d, tid=" UINTX_FORMAT,
+ "%s (0x%x) at pc=" PTR_FORMAT ", pid=%d, tid=" INTPTR_FORMAT,
signame, _id, _pc,
os::current_process_id(), os::current_thread_id());
} else if (_filename != NULL && _lineno > 0) {
@@ -237,7 +237,7 @@ char* VMError::error_string(char* buf, int buflen) {
char separator = os::file_separator()[0];
const char *p = strrchr(_filename, separator);
int n = jio_snprintf(buf, buflen,
- "Internal Error at %s:%d, pid=%d, tid=" UINTX_FORMAT,
+ "Internal Error at %s:%d, pid=%d, tid=" INTPTR_FORMAT,
p ? p + 1 : _filename, _lineno,
os::current_process_id(), os::current_thread_id());
if (n >= 0 && n < buflen && _message) {
@@ -251,7 +251,7 @@ char* VMError::error_string(char* buf, int buflen) {
}
} else {
jio_snprintf(buf, buflen,
- "Internal Error (0x%x), pid=%d, tid=" UINTX_FORMAT,
+ "Internal Error (0x%x), pid=%d, tid=" INTPTR_FORMAT,
_id, os::current_process_id(), os::current_thread_id());
}
@@ -438,7 +438,7 @@ void VMError::report(outputStream* st) {
// process id, thread id
st->print(", pid=%d", os::current_process_id());
- st->print(", tid=" UINTX_FORMAT, os::current_thread_id());
+ st->print(", tid=" INTPTR_FORMAT, os::current_thread_id());
st->cr();
STEP(40, "(printing error message)")
@@ -576,7 +576,7 @@ void VMError::report(outputStream* st) {
STEP(120, "(printing native stack)" )
- if (_verbose) {
+ if (_verbose) {
if (os::platform_print_native_stack(st, _context, buf, sizeof(buf))) {
// We have printed the native stack in platform-specific code
// Windows/x64 needs special handling.
@@ -584,43 +584,7 @@ void VMError::report(outputStream* st) {
frame fr = _context ? os::fetch_frame_from_context(_context)
: os::current_frame();
- // see if it's a valid frame
- if (fr.pc()) {
- st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");
-
-
- int count = 0;
- while (count++ < StackPrintLimit) {
- fr.print_on_error(st, buf, sizeof(buf));
- st->cr();
- // Compiled code may use EBP register on x86 so it looks like
- // non-walkable C frame. Use frame.sender() for java frames.
- if (_thread && _thread->is_Java_thread()) {
- // Catch very first native frame by using stack address.
- // For JavaThread stack_base and stack_size should be set.
- if (!_thread->on_local_stack((address)(fr.sender_sp() + 1))) {
- break;
- }
- if (fr.is_java_frame()) {
- RegisterMap map((JavaThread*)_thread, false); // No update
- fr = fr.sender(&map);
- } else {
- fr = os::get_sender_for_C_frame(&fr);
- }
- } else {
- // is_first_C_frame() does only simple checks for frame pointer,
- // it will pass if java compiled code has a pointer in EBP.
- if (os::is_first_C_frame(&fr)) break;
- fr = os::get_sender_for_C_frame(&fr);
- }
- }
-
- if (count > StackPrintLimit) {
- st->print_cr("...<more frames>...");
- }
-
- st->cr();
- }
+ print_native_stack(st, fr, _thread, buf, sizeof(buf));
}
}
@@ -1049,7 +1013,9 @@ void VMError::report_and_die() {
out.print_raw (cmd);
out.print_raw_cr("\" ...");
- os::fork_and_exec(cmd);
+ if (os::fork_and_exec(cmd) < 0) {
+ out.print_cr("os::fork_and_exec failed: %s (%d)", strerror(errno), errno);
+ }
}
// done with OnError
@@ -1134,7 +1100,9 @@ void VM_ReportJavaOutOfMemory::doit() {
#endif
tty->print_cr("\"%s\"...", cmd);
- os::fork_and_exec(cmd);
+ if (os::fork_and_exec(cmd) < 0) {
+ tty->print_cr("os::fork_and_exec failed: %s (%d)", strerror(errno), errno);
+ }
}
}
diff --git a/test/TEST.groups b/test/TEST.groups
index 339ff38ca..9ec3c855d 100644
--- a/test/TEST.groups
+++ b/test/TEST.groups
@@ -96,7 +96,8 @@ needs_jdk = \
runtime/Thread/TestThreadDumpMonitorContention.java \
runtime/XCheckJniJsig/XCheckJSig.java \
serviceability/attach/AttachWithStalePidFile.java \
- serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java
+ serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java \
+ testlibrary_tests/
# JRE adds further tests to compact3
@@ -132,7 +133,8 @@ hotspot_compiler = \
sanity/ExecuteInternalVMTests.java
hotspot_gc = \
- sanity/ExecuteInternalVMTests.java
+ sanity/ExecuteInternalVMTests.java \
+ -gc/g1/TestGreyReclaimedHumongousObjects.java
hotspot_runtime = \
sanity/ExecuteInternalVMTests.java
@@ -148,6 +150,8 @@ hotspot_all = \
# Tests that require compact3 API's
#
needs_compact3 = \
+ compiler/jsr292/RedefineMethodUsedByMultipleMethodHandles.java \
+ compiler/rangechecks/TestRangeCheckSmearing.java \
compiler/whitebox/DeoptimizeMethodTest.java \
compiler/whitebox/SetForceInlineMethodTest.java \
compiler/whitebox/SetDontInlineMethodTest.java \
@@ -158,6 +162,7 @@ needs_compact3 = \
compiler/whitebox/IsMethodCompilableTest.java \
gc/6581734/Test6581734.java \
gc/7072527/TestFullGCCount.java \
+ gc/TestGCLogRotationViaJcmd.java \
gc/g1/TestHumongousAllocInitialMark.java \
gc/g1/TestHumongousShrinkHeap.java \
gc/arguments/TestG1HeapRegionSize.java \
@@ -176,6 +181,7 @@ needs_compact3 = \
serviceability/threads/TestFalseDeadLock.java \
serviceability/jvmti/GetObjectSizeOverflow.java \
serviceability/jvmti/TestRedefineWithUnresolvedClass.java \
+ serviceability/sa/jmap-hashcode/Test8028623.java \
compiler/tiered/NonTieredLevelsTest.java \
compiler/tiered/TieredLevelsTest.java \
compiler/intrinsics/bmi/verifycode
@@ -246,7 +252,6 @@ compact1_minimal = \
serviceability/ \
compiler/ \
testlibrary/ \
- testlibrary_tests/ \
sanity/ \
runtime/ \
gc/ \
diff --git a/test/compiler/codegen/IntRotateWithImmediate.java b/test/compiler/codegen/IntRotateWithImmediate.java
new file mode 100644
index 000000000..1d617a415
--- /dev/null
+++ b/test/compiler/codegen/IntRotateWithImmediate.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 SAP AG. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8080190
+ * @key regression
+ * @summary Test that the rotate distance used in the rotate instruction is properly masked with 0x1f
+ * @run main/othervm -Xbatch -XX:-UseOnStackReplacement IntRotateWithImmediate
+ * @author volker.simonis@gmail.com
+ */
+
+public class IntRotateWithImmediate {
+
+ // This is currently the same as Integer.rotateRight()
+ static int rotateRight(int i, int distance) {
+ // On some architectures (e.g., x86_64 and ppc64) the following computation is
+ // matched in the .ad file into a single MachNode which emits a single rotate
+ // machine instruction. It is important that the shift amount is masked to match
+ // the corresponding immediate width in the native instruction. On x86_64 the rotate
+ // left instruction ('rol') encodes an 8-bit immediate while the corresponding
+ // 'rotlwi' instruction on Power only encodes a 5-bit immediate.
+ return ((i >>> distance) | (i << -distance));
+ }
+
+ static int compute(int x) {
+ return rotateRight(x, 3);
+ }
+
+ public static void main(String args[]) {
+ int val = 4096;
+
+ int firstResult = compute(val);
+
+ for (int i = 0; i < 100000; i++) {
+ int newResult = compute(val);
+ if (firstResult != newResult) {
+ throw new InternalError(firstResult + " != " + newResult);
+ }
+ }
+ System.out.println("OK");
+ }
+
+}
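The invariant the new test exercises, restated as a self-contained C++ check: because the hardware instruction masks its immediate, a rotate distance is only meaningful modulo the operand width.

    #include <assert.h>
    #include <stdint.h>

    // 32-bit rotate-right with the distance masked to 5 bits, mirroring what
    // the matched rotate instruction does with its immediate.
    static uint32_t rotr32(uint32_t x, unsigned d) {
      d &= 31;
      return d == 0 ? x : (x >> d) | (x << (32 - d));
    }

    int main() {
      assert(rotr32(4096, 3) == rotr32(4096, 35));  // 35 == 3 (mod 32)
      return 0;
    }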
diff --git a/test/compiler/codegen/LoadWithMask.java b/test/compiler/codegen/LoadWithMask.java
index 13ec4e58f..06c4c14a4 100644
--- a/test/compiler/codegen/LoadWithMask.java
+++ b/test/compiler/codegen/LoadWithMask.java
@@ -25,7 +25,7 @@
* @test
* @bug 8032207
* @summary Invalid node sizing for loadUS2L_immI16 and loadI2L_immI
- * @run main/othervm -server -Xbatch -XX:-TieredCompilation -XX:CompileCommand=compileonly,LoadWithMask.foo LoadWithMask
+ * @run main/othervm -Xbatch -XX:CompileCommand=compileonly,LoadWithMask.foo LoadWithMask
*
*/
public class LoadWithMask {
diff --git a/test/compiler/codegen/LoadWithMask2.java b/test/compiler/codegen/LoadWithMask2.java
index c1ae52881..82c942d7a 100644
--- a/test/compiler/codegen/LoadWithMask2.java
+++ b/test/compiler/codegen/LoadWithMask2.java
@@ -25,7 +25,7 @@
* @test
* @bug 8031743
* @summary loadI2L_immI broken for negative memory values
- * @run main/othervm -server -Xbatch -XX:-TieredCompilation -XX:CompileCommand=compileonly,*.foo* LoadWithMask2
+ * @run main/othervm -Xbatch -XX:CompileCommand=compileonly,*.foo* LoadWithMask2
*
*/
public class LoadWithMask2 {
diff --git a/test/compiler/escapeAnalysis/TestEABadMergeMem.java b/test/compiler/escapeAnalysis/TestEABadMergeMem.java
new file mode 100644
index 000000000..236cbe742
--- /dev/null
+++ b/test/compiler/escapeAnalysis/TestEABadMergeMem.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8134031
+ * @summary Bad rewiring of memory edges when we split unique types during EA
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestEABadMergeMem::m_notinlined TestEABadMergeMem
+ *
+ */
+
+public class TestEABadMergeMem {
+
+ static class Box {
+ int i;
+ }
+
+ static void m_notinlined() {
+ }
+
+ static float dummy1;
+ static float dummy2;
+
+ static int test(Box a, Box c, int i, int j, int k, boolean flag1, boolean flag2) {
+ Box b = new Box(); // non escaping
+ a.i = i;
+ b.i = j;
+ c.i = k;
+
+ m_notinlined();
+
+ boolean flag3 = false;
+ if (flag1) {
+ for (int ii = 0; ii < 100; ii++) {
+ if (flag2) {
+ dummy1 = (float)ii;
+ } else {
+ dummy2 = (float)ii;
+ }
+ }
+ flag3 = true;
+ }
+ // A memory Phi forms here, with the projection of the not-inlined call as one edge and a MergeMem as the other.
+
+ if (flag3) { // will split through Phi during loopopts
+ int res = c.i + b.i;
+ m_notinlined(); // prevents split through phi during igvn
+ return res;
+ } else {
+ return 44 + 43;
+ }
+ }
+
+ static public void main(String[] args) {
+ for (int i = 0; i < 20000; i++) {
+ // m(2);
+ Box a = new Box();
+ Box c = new Box();
+ int res = test(a, c, 42, 43, 44, (i%2) == 0, (i%3) == 0);
+ if (res != 44 + 43) {
+ throw new RuntimeException("Bad result " + res);
+ }
+ }
+ }
+
+}
diff --git a/test/compiler/escapeAnalysis/TestEscapeThroughInvoke.java b/test/compiler/escapeAnalysis/TestEscapeThroughInvoke.java
new file mode 100644
index 000000000..7eac5e006
--- /dev/null
+++ b/test/compiler/escapeAnalysis/TestEscapeThroughInvoke.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8073956
+ * @summary Tests C2 EA with allocated object escaping through a call.
+ * @run main/othervm -XX:CompileCommand=dontinline,TestEscapeThroughInvoke::create TestEscapeThroughInvoke
+ */
+public class TestEscapeThroughInvoke {
+ private A a;
+
+ public static void main(String[] args) {
+ TestEscapeThroughInvoke test = new TestEscapeThroughInvoke();
+ test.a = new A(42);
+ // Make sure run gets compiled by C2
+ for (int i = 0; i < 100_000; ++i) {
+ test.run();
+ }
+ }
+
+ private void run() {
+ // Allocate something to trigger EA
+ new Object();
+ // Create a new escaping instance of A and
+ // verify that it is always equal to 'a.saved'.
+ A escapingA = create(42);
+ a.check(escapingA);
+ }
+
+ // Create and return a new instance of A that escaped through 'A::saveInto'.
+ // The 'dummy' parameters are needed to avoid EA skipping the methods.
+ private A create(Integer dummy) {
+ A result = new A(dummy);
+ result.saveInto(a, dummy); // result escapes into 'a' here
+ return result;
+ }
+}
+
+class A {
+ private A saved;
+
+ public A(Integer dummy) { }
+
+ public void saveInto(A other, Integer dummy) {
+ other.saved = this;
+ }
+
+ public void check(A other) {
+ if (this.saved != other) {
+ throw new RuntimeException("TEST FAILED: Objects not equal.");
+ }
+ }
+}
diff --git a/test/compiler/inlining/DefaultMethodsDependencies.java b/test/compiler/inlining/DefaultMethodsDependencies.java
new file mode 100644
index 000000000..5bdc60fda
--- /dev/null
+++ b/test/compiler/inlining/DefaultMethodsDependencies.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8069263
+ * @summary Deoptimization between array allocation and arraycopy may result in non initialized array
+ * @run main/othervm -XX:-BackgroundCompilation -XX:CompileOnly=DefaultMethodsDependencies::test -XX:CompileOnly=DefaultMethodsDependencies$I2::m1 DefaultMethodsDependencies
+ *
+ */
+
+public class DefaultMethodsDependencies {
+
+ interface I1 {
+ void m1();
+ // triggers processing of default methods in C1
+ default void m2() {
+ }
+ }
+
+ interface I2 extends I1 {
+ // added to C2 as default method
+ default void m1() {
+ }
+ }
+
+ static abstract class C1 implements I1 {
+ }
+
+ static class C2 extends C1 implements I2 {
+ }
+
+ static void test(C1 obj) {
+ obj.m1();
+ }
+
+ static public void main(String[] args) {
+ C2 obj = new C2();
+ for (int i = 0; i < 20000; i++) {
+ test(obj);
+ }
+ }
+}
diff --git a/test/compiler/jsr292/MHInlineTest.java b/test/compiler/jsr292/MHInlineTest.java
new file mode 100644
index 000000000..0587ab6ad
--- /dev/null
+++ b/test/compiler/jsr292/MHInlineTest.java
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+/**
+ * @test
+ * @bug 8062280
+ * @summary C2: inlining failure due to access checks being too strict
+ * @library /testlibrary
+ * @run main/othervm MHInlineTest
+ */
+import java.lang.invoke.*;
+import com.oracle.java.testlibrary.*;
+import static com.oracle.java.testlibrary.Asserts.*;
+
+public class MHInlineTest {
+ public static void main(String[] args) throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:+IgnoreUnrecognizedVMOptions", "-showversion",
+ "-server", "-XX:-TieredCompilation", "-Xbatch",
+ "-XX:+PrintCompilation", "-XX:+UnlockDiagnosticVMOptions", "-XX:+PrintInlining",
+ "-XX:CompileCommand=dontinline,MHInlineTest::test*",
+ "MHInlineTest$Launcher");
+
+ OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+
+ analyzer.shouldHaveExitValue(0);
+
+ // The test is applicable only to C2 (present in Server VM).
+ if (analyzer.getStderr().contains("Server VM")) {
+ analyzer.shouldContain("MHInlineTest$B::public_x (3 bytes) inline (hot)");
+ analyzer.shouldContain("MHInlineTest$B::protected_x (3 bytes) inline (hot)");
+ analyzer.shouldContain("MHInlineTest$B::package_x (3 bytes) inline (hot)");
+ analyzer.shouldContain("MHInlineTest$A::package_final_x (3 bytes) inline (hot)");
+ analyzer.shouldContain("MHInlineTest$B::private_x (3 bytes) inline (hot)");
+ analyzer.shouldContain("MHInlineTest$B::private_static_x (3 bytes) inline (hot)");
+ analyzer.shouldContain("MHInlineTest$A::package_static_x (3 bytes) inline (hot)");
+
+ analyzer.shouldNotContain("MHInlineTest$A::protected_x (3 bytes) virtual call");
+ analyzer.shouldNotContain("MHInlineTest$A::package_x (3 bytes) virtual call");
+ }
+ }
+
+ static class A {
+ public static final MethodHandles.Lookup LOOKUP = MethodHandles.lookup();
+
+ public Class<?> public_x() { return A.class; }
+ protected Class<?> protected_x() { return A.class; }
+ Class<?> package_x() { return A.class; }
+ final Class<?> package_final_x() { return A.class; }
+
+ static Class<?> package_static_x() { return A.class; }
+ }
+
+ static class B extends A {
+ public static final MethodHandles.Lookup LOOKUP = MethodHandles.lookup();
+
+ @Override public Class<?> public_x() { return B.class; }
+ @Override protected Class<?> protected_x() { return B.class; }
+ @Override Class<?> package_x() { return B.class; }
+
+ private Class<?> private_x() { return B.class; }
+ static Class<?> private_static_x() { return B.class; }
+ }
+
+ static final MethodHandle A_PUBLIC_X;
+ static final MethodHandle A_PROTECTED_X;
+ static final MethodHandle A_PACKAGE_X;
+ static final MethodHandle A_PACKAGE_STATIC_X;
+ static final MethodHandle A_PACKAGE_FINAL_X;
+
+ static final MethodHandle B_PRIVATE_X;
+ static final MethodHandle B_PRIVATE_STATIC_X;
+
+ static {
+ try {
+ MethodHandles.Lookup LOOKUP = MethodHandles.lookup();
+
+ A_PUBLIC_X = LOOKUP.findVirtual(
+ A.class, "public_x", MethodType.methodType(Class.class));
+ A_PROTECTED_X = LOOKUP.findVirtual(
+ A.class, "protected_x", MethodType.methodType(Class.class));
+ A_PACKAGE_X = LOOKUP.findVirtual(
+ A.class, "package_x", MethodType.methodType(Class.class));
+ A_PACKAGE_FINAL_X = LOOKUP.findVirtual(
+ A.class, "package_final_x", MethodType.methodType(Class.class));
+ A_PACKAGE_STATIC_X = LOOKUP.findStatic(
+ A.class, "package_static_x", MethodType.methodType(Class.class));
+
+ B_PRIVATE_X = B.LOOKUP.findVirtual(
+ B.class, "private_x", MethodType.methodType(Class.class));
+ B_PRIVATE_STATIC_X = B.LOOKUP.findStatic(
+ B.class, "private_static_x", MethodType.methodType(Class.class));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ static final A a = new B();
+
+ private static void testPublicMH() {
+ try {
+ Class<?> r = (Class<?>)A_PUBLIC_X.invokeExact(a);
+ assertEquals(r, B.class);
+ } catch (Throwable throwable) {
+ throw new Error(throwable);
+ }
+ }
+
+ private static void testProtectedMH() {
+ try {
+ Class<?> r = (Class<?>)A_PROTECTED_X.invokeExact(a);
+ assertEquals(r, B.class);
+ } catch (Throwable throwable) {
+ throw new Error(throwable);
+ }
+ }
+
+ private static void testPackageMH() {
+ try {
+ Class<?> r = (Class<?>)A_PACKAGE_X.invokeExact(a);
+ assertEquals(r, B.class);
+ } catch (Throwable throwable) {
+ throw new Error(throwable);
+ }
+ }
+
+ private static void testPackageFinalMH() {
+ try {
+ Class<?> r = (Class<?>)A_PACKAGE_FINAL_X.invokeExact(a);
+ assertEquals(r, A.class);
+ } catch (Throwable throwable) {
+ throw new Error(throwable);
+ }
+ }
+
+ private static void testPackageStaticMH() {
+ try {
+ Class<?> r = (Class<?>)A_PACKAGE_STATIC_X.invokeExact();
+ assertEquals(r, A.class);
+ } catch (Throwable throwable) {
+ throw new Error(throwable);
+ }
+ }
+
+ private static void testPrivateMH() {
+ try {
+ Class<?> r = (Class<?>)B_PRIVATE_X.invokeExact((B)a);
+ assertEquals(r, B.class);
+ } catch (Throwable throwable) {
+ throw new Error(throwable);
+ }
+ }
+
+ private static void testPrivateStaticMH() {
+ try {
+ Class<?> r = (Class<?>)B_PRIVATE_STATIC_X.invokeExact();
+ assertEquals(r, B.class);
+ } catch (Throwable throwable) {
+ throw new Error(throwable);
+ }
+ }
+ static class Launcher {
+ public static void main(String[] args) throws Exception {
+ for (int i = 0; i < 20_000; i++) {
+ testPublicMH();
+ }
+ for (int i = 0; i < 20_000; i++) {
+ testProtectedMH();
+ }
+ for (int i = 0; i < 20_000; i++) {
+ testPackageMH();
+ }
+ for (int i = 0; i < 20_000; i++) {
+ testPackageFinalMH();
+ }
+ for (int i = 0; i < 20_000; i++) {
+ testPackageStaticMH();
+ }
+ for (int i = 0; i < 20_000; i++) {
+ testPrivateMH();
+ }
+ for (int i = 0; i < 20_000; i++) {
+ testPrivateStaticMH();
+ }
+ }
+ }
+}
diff --git a/test/compiler/jsr292/PollutedTrapCounts.java b/test/compiler/jsr292/PollutedTrapCounts.java
new file mode 100644
index 000000000..36d531117
--- /dev/null
+++ b/test/compiler/jsr292/PollutedTrapCounts.java
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8074551
+ * @library /testlibrary
+ * @run main PollutedTrapCounts
+ */
+import java.lang.invoke.*;
+import com.oracle.java.testlibrary.*;
+
+public class PollutedTrapCounts {
+ public static void main(String[] args) throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:+IgnoreUnrecognizedVMOptions",
+ "-XX:-TieredCompilation", "-Xbatch",
+ "-XX:PerBytecodeRecompilationCutoff=10", "-XX:PerMethodRecompilationCutoff=10",
+ "-XX:+PrintCompilation", "-XX:+UnlockDiagnosticVMOptions", "-XX:+PrintInlining",
+ "PollutedTrapCounts$Test");
+
+ OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+
+ analyzer.shouldHaveExitValue(0);
+
+ analyzer.shouldNotContain("not compilable (disabled)");
+ }
+
+ static class Test {
+ public static final MethodHandle test1;
+ public static final MethodHandle test2;
+ public static final MethodHandle empty;
+
+ static {
+ try {
+ Class<?> THIS_CLASS = Test.class;
+ MethodHandles.Lookup LOOKUP = MethodHandles.lookup();
+ test1 = LOOKUP.findStatic(THIS_CLASS, "test1", MethodType.methodType(boolean.class, boolean.class));
+ test2 = LOOKUP.findStatic(THIS_CLASS, "test2", MethodType.methodType(boolean.class, boolean.class));
+ empty = LOOKUP.findStatic(THIS_CLASS, "empty", MethodType.methodType(void.class, boolean.class));
+ } catch(Throwable e) {
+ throw new Error(e);
+ }
+ }
+
+ static boolean test1(boolean b) {
+ return b;
+ }
+ static boolean test2(boolean b) {
+ return true;
+ }
+ static void empty(boolean b) {}
+
+ static void test(boolean freqValue, boolean removeInlineBlocker) throws Throwable {
+ MethodHandle innerGWT = MethodHandles.guardWithTest(test1, empty, empty);
+ MethodHandle outerGWT = MethodHandles.guardWithTest(test2, innerGWT, innerGWT);
+
+ // Trigger compilation
+ for (int i = 0; i < 20_000; i++) {
+ outerGWT.invokeExact(freqValue);
+ }
+
+ // Trigger deopt & nmethod invalidation
+ outerGWT.invokeExact(!freqValue);
+
+ // Force inline blocker removal on rare-taken path
+ if (removeInlineBlocker) {
+ for (int i = 0; i < 100; i++) {
+ outerGWT.invokeExact(!freqValue);
+ }
+ }
+
+ // Trigger recompilation
+ for (int i = 0; i < 20_000; i++) {
+ outerGWT.invokeExact(freqValue);
+ }
+ }
+
+ public static void main(String[] args) throws Throwable {
+ boolean freqValue = true;
+ boolean removeInlineBlocker = false;
+ for (int i = 0; i < 20; i++) {
+ test(freqValue, removeInlineBlocker);
+ freqValue = !freqValue;
+ removeInlineBlocker = !removeInlineBlocker;
+ }
+ }
+ }
+}
diff --git a/test/compiler/loopopts/ConstFPVectorization.java b/test/compiler/loopopts/ConstFPVectorization.java
new file mode 100644
index 000000000..50cb0e614
--- /dev/null
+++ b/test/compiler/loopopts/ConstFPVectorization.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8074869
+ * @summary C2 code generator can replace -0.0f with +0.0f on Linux
+ * @run main ConstFPVectorization 8
+ * @author volker.simonis@gmail.com
+ *
+ */
+
+public class ConstFPVectorization {
+
+ static float[] f = new float[16];
+ static double[] d = new double[16];
+
+ static void floatLoop(int count) {
+ for (int i = 0; i < count; i++) {
+ f[i] = -0.0f;
+ }
+ }
+
+ static void doubleLoop(int count) {
+ for (int i = 0; i < count; i++) {
+ d[i] = -0.0d;
+ }
+ }
+
+ public static void main(String args[]) {
+ for (int i = 0; i < 10_000; i++) {
+ floatLoop(Integer.parseInt(args[0]));
+ doubleLoop(Integer.parseInt(args[0]));
+ }
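+ // -0.0 == +0.0 under floating-point comparison, so compare raw bit
+ // patterns to detect a flipped sign bit.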
+ for (int i = 0; i < Integer.parseInt(args[0]); i++) {
+ if (Float.floatToRawIntBits(f[i]) != Float.floatToRawIntBits(-0.0f))
+ throw new Error("Float error at index " + i);
+ if (Double.doubleToRawLongBits(d[i]) != Double.doubleToRawLongBits(-0.0d))
+ throw new Error("Double error at index " + i);
+ }
+ }
+}
diff --git a/test/compiler/loopopts/CountedLoopProblem.java b/test/compiler/loopopts/CountedLoopProblem.java
new file mode 100644
index 000000000..da206cb86
--- /dev/null
+++ b/test/compiler/loopopts/CountedLoopProblem.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8072753
+ * @summary Inner loop induction variable increment occurs before the compare, which causes an integer overflow
+ * @run main/othervm CountedLoopProblem
+ *
+ */
+
+import java.util.*;
+
+public class CountedLoopProblem {
+ public static void main(String[] args) throws Exception {
+ Random r = new Random(42);
+ int x = 0;
+ StringBuilder sb = new StringBuilder();
+ for(int i = 0; i < 1000000; ++i) {
+ int v = Math.abs(r.nextInt());
+ sb.append('+').append(v).append('\n');
+ x += v;
+ // To trigger the problem we must OSR into the following loop.
+ // To make the problem 100% reproducible, run with -XX:-TieredCompilation -XX:OSROnlyBCI=62.
+ while(x < 0) x += 1000000000;
+ sb.append('=').append(x).append('\n');
+ }
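+ // Hash of the expected log contents; a mismatch means the loop above
+ // miscomputed x in some iteration.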
+ if (sb.toString().hashCode() != 0xaba94591) {
+ throw new Exception("Unexpected result");
+ }
+ }
+}
+
diff --git a/test/compiler/loopopts/UseCountedLoopSafepoints.java b/test/compiler/loopopts/UseCountedLoopSafepoints.java
new file mode 100644
index 000000000..66d81a4c1
--- /dev/null
+++ b/test/compiler/loopopts/UseCountedLoopSafepoints.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6869327
+ * @summary Test that C2 flag UseCountedLoopSafepoints ensures a safepoint is kept in a CountedLoop
+ * @library /testlibrary
+ * @run main UseCountedLoopSafepoints
+ */
+
+import java.util.concurrent.atomic.AtomicLong;
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class UseCountedLoopSafepoints {
+ private static final AtomicLong _num = new AtomicLong(0);
+
+ // Uses the fact that an EnableBiasedLocking VM operation will be
+ // started after 500ms, while we are still in the loop. If there is a
+ // safepoint poll in the counted loop, then we will reach the safepoint
+ // very quickly. Otherwise the SafepointTimeout will be hit.
+ public static void main (String args[]) throws Exception {
+ if (args.length == 1) {
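+ // Child VM: spin in a simple counted loop long enough for the
+ // delayed EnableBiasedLocking VM operation to request a safepoint.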
+ final int loops = Integer.parseInt(args[0]);
+ for (int i = 0; i < loops; i++) {
+ _num.addAndGet(1);
+ }
+ } else {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:+IgnoreUnrecognizedVMOptions",
+ "-XX:-TieredCompilation",
+ "-XX:+UseBiasedLocking",
+ "-XX:BiasedLockingStartupDelay=500",
+ "-XX:+SafepointTimeout",
+ "-XX:SafepointTimeoutDelay=2000",
+ "-XX:+UseCountedLoopSafepoints",
+ "UseCountedLoopSafepoints",
+ "2000000000"
+ );
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ output.shouldNotContain("Timeout detected");
+ output.shouldHaveExitValue(0);
+ }
+ }
+}
diff --git a/test/compiler/loopopts/superword/TestVectorizationWithInvariant.java b/test/compiler/loopopts/superword/TestVectorizationWithInvariant.java
new file mode 100644
index 000000000..e36664aa2
--- /dev/null
+++ b/test/compiler/loopopts/superword/TestVectorizationWithInvariant.java
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import com.oracle.java.testlibrary.*;
+import sun.misc.Unsafe;
+
+/**
+ * @test
+ * @bug 8078497
+ * @summary Tests correct alignment of vectors with loop invariant offset.
+ * @library /testlibrary
+ * @run main TestVectorizationWithInvariant
+ */
+public class TestVectorizationWithInvariant {
+
+ private static Unsafe unsafe;
+ private static final long BYTE_ARRAY_OFFSET;
+ private static final long CHAR_ARRAY_OFFSET;
+
+ static {
+ unsafe = Utils.getUnsafe();
+ BYTE_ARRAY_OFFSET = unsafe.arrayBaseOffset(byte[].class);
+ CHAR_ARRAY_OFFSET = unsafe.arrayBaseOffset(char[].class);
+ }
+
+ public static void main(String[] args) throws Exception {
+ byte[] byte_array1 = new byte[1000];
+ byte[] byte_array2 = new byte[1000];
+ char[] char_array = new char[1000];
+
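+ // Warm up long enough for C2 to compile and vectorize the copy methods.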
+ for (int i = 0; i < 20_000; ++i) {
+ copyByteToChar(byte_array1, byte_array2, char_array, 1);
+ copyCharToByte(char_array, byte_array1, 1);
+ copyCharToByteAligned(char_array, byte_array1);
+ copyCharToByteUnaligned(char_array, byte_array1);
+ }
+ }
+
+ /*
+ * Copy multiple consecutive chars from a given offset in a byte array to a char array
+ * to trigger C2's superword optimization. The offset into the byte array is independent
+ * of the loop induction variable and can be set to an arbitrary value. It may then be
+ * impossible to align both the LoadUS and the StoreC operations. Therefore, vectorization
+ * should only be done in this case if unaligned memory accesses are allowed.
+ */
+ public static void copyByteToChar(byte[] src1, byte[] src2, char[] dst, int off) {
+ off = (int) BYTE_ARRAY_OFFSET + (off << 1);
+ byte[] src = src1;
+ for (int i = (int) CHAR_ARRAY_OFFSET; i < 100; i = i + 8) {
+ // Copy 8 chars from src to dst
+ unsafe.putChar(dst, i + 0, unsafe.getChar(src, off + 0));
+ unsafe.putChar(dst, i + 2, unsafe.getChar(src, off + 2));
+ unsafe.putChar(dst, i + 4, unsafe.getChar(src, off + 4));
+ unsafe.putChar(dst, i + 6, unsafe.getChar(src, off + 6));
+ unsafe.putChar(dst, i + 8, unsafe.getChar(src, off + 8));
+ unsafe.putChar(dst, i + 10, unsafe.getChar(src, off + 10));
+ unsafe.putChar(dst, i + 12, unsafe.getChar(src, off + 12));
+ unsafe.putChar(dst, i + 14, unsafe.getChar(src, off + 14));
+
+ // Prevent loop invariant code motion of char read.
+ src = (src == src1) ? src2 : src1;
+ }
+ }
+
+ /*
+ * Copy multiple consecutive chars from a char array to a given offset in a byte array
+ * to trigger C2's superword optimization. Checks for problems similar to those in
+ * 'copyByteToChar'.
+ */
+ public static void copyCharToByte(char[] src, byte[] dst, int off) {
+ off = (int) BYTE_ARRAY_OFFSET + (off << 1);
+ for (int i = 0; i < 100; i = i + 8) {
+ // Copy 8 chars from src to dst
+ unsafe.putChar(dst, off + 0, src[i + 0]);
+ unsafe.putChar(dst, off + 2, src[i + 1]);
+ unsafe.putChar(dst, off + 4, src[i + 2]);
+ unsafe.putChar(dst, off + 6, src[i + 3]);
+ unsafe.putChar(dst, off + 8, src[i + 4]);
+ unsafe.putChar(dst, off + 10, src[i + 5]);
+ unsafe.putChar(dst, off + 12, src[i + 6]);
+ unsafe.putChar(dst, off + 14, src[i + 7]);
+ }
+ }
+
+ /*
+ * Variant of copyCharToByte with a constant destination array offset.
+ * The loop should always be vectorized because both the LoadUS and StoreC
+ * operations can be aligned.
+ */
+ public static void copyCharToByteAligned(char[] src, byte[] dst) {
+ final int off = (int) BYTE_ARRAY_OFFSET;
+ for (int i = 8; i < 100; i = i + 8) {
+ // Copy 8 chars from src to dst
+ unsafe.putChar(dst, off + 0, src[i + 0]);
+ unsafe.putChar(dst, off + 2, src[i + 1]);
+ unsafe.putChar(dst, off + 4, src[i + 2]);
+ unsafe.putChar(dst, off + 6, src[i + 3]);
+ unsafe.putChar(dst, off + 8, src[i + 4]);
+ unsafe.putChar(dst, off + 10, src[i + 5]);
+ unsafe.putChar(dst, off + 12, src[i + 6]);
+ unsafe.putChar(dst, off + 14, src[i + 7]);
+ }
+ }
+
+ /*
+ * Variant of copyCharToByte with a constant but misaligned destination array
+ * offset. The loop should only be vectorized if unaligned memory operations
+ * are allowed because the LoadUS and the StoreC cannot both be aligned.
+ */
+ public static void copyCharToByteUnaligned(char[] src, byte[] dst) {
+ final int off = (int) BYTE_ARRAY_OFFSET + 2;
+ for (int i = 0; i < 100; i = i + 8) {
+ // Copy 8 chars from src to dst
+ unsafe.putChar(dst, off + 0, src[i + 0]);
+ unsafe.putChar(dst, off + 2, src[i + 1]);
+ unsafe.putChar(dst, off + 4, src[i + 2]);
+ unsafe.putChar(dst, off + 6, src[i + 3]);
+ unsafe.putChar(dst, off + 8, src[i + 4]);
+ unsafe.putChar(dst, off + 10, src[i + 5]);
+ unsafe.putChar(dst, off + 12, src[i + 6]);
+ unsafe.putChar(dst, off + 14, src[i + 7]);
+ }
+ }
+}
diff --git a/test/compiler/regalloc/C1ObjectSpillInLogicOp.java b/test/compiler/regalloc/C1ObjectSpillInLogicOp.java
index 17571820b..f8fc18796 100644
--- a/test/compiler/regalloc/C1ObjectSpillInLogicOp.java
+++ b/test/compiler/regalloc/C1ObjectSpillInLogicOp.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
/*
* @test
* @bug 8027751
+ * @requires vm.gc=="G1" | vm.gc=="null"
* @summary C1 crashes generating G1 post-barrier in Unsafe.getAndSetObject() intrinsic because of the new value spill
* @run main/othervm -XX:+UseG1GC C1ObjectSpillInLogicOp
*
diff --git a/test/compiler/rtm/locking/TestRTMAbortRatio.java b/test/compiler/rtm/locking/TestRTMAbortRatio.java
index 2c179d8ee..865255e39 100644
--- a/test/compiler/rtm/locking/TestRTMAbortRatio.java
+++ b/test/compiler/rtm/locking/TestRTMAbortRatio.java
@@ -126,10 +126,7 @@ public class TestRTMAbortRatio extends CommandLineOptionTest {
@Override
public String[] getMethodsToCompileNames() {
- return new String[] {
- getMethodWithLockName(),
- Unsafe.class.getName() + "::addressSize"
- };
+ return new String[] { getMethodWithLockName() };
}
public void lock(boolean abort) {
@@ -147,10 +144,12 @@ public class TestRTMAbortRatio extends CommandLineOptionTest {
public static void main(String args[]) throws Throwable {
Asserts.assertGTE(args.length, 1, "One argument required.");
Test t = new Test();
- if (Boolean.valueOf(args[0])) {
+ boolean shouldBeInflated = Boolean.valueOf(args[0]);
+ if (shouldBeInflated) {
AbortProvoker.inflateMonitor(t.monitor);
}
for (int i = 0; i < Test.TOTAL_ITERATIONS; i++) {
+ AbortProvoker.verifyMonitorState(t.monitor, shouldBeInflated);
t.lock(i >= Test.WARMUP_ITERATIONS);
}
}
diff --git a/test/compiler/rtm/locking/TestRTMAfterNonRTMDeopt.java b/test/compiler/rtm/locking/TestRTMAfterNonRTMDeopt.java
index 2cf81f7f6..8bde16056 100644
--- a/test/compiler/rtm/locking/TestRTMAfterNonRTMDeopt.java
+++ b/test/compiler/rtm/locking/TestRTMAfterNonRTMDeopt.java
@@ -156,10 +156,7 @@ public class TestRTMAfterNonRTMDeopt extends CommandLineOptionTest {
@Override
public String[] getMethodsToCompileNames() {
- return new String[] {
- getMethodWithLockName(),
- sun.misc.Unsafe.class.getName() + "::forceAbort"
- };
+ return new String[] { getMethodWithLockName() };
}
public void forceAbort(int a[], boolean abort) {
@@ -182,13 +179,15 @@ public class TestRTMAfterNonRTMDeopt extends CommandLineOptionTest {
public static void main(String args[]) throws Throwable {
Test t = new Test();
- if (Boolean.valueOf(args[0])) {
+ boolean shouldBeInflated = Boolean.valueOf(args[0]);
+ if (shouldBeInflated) {
AbortProvoker.inflateMonitor(t.monitor);
}
int tmp[] = new int[1];
for (int i = 0; i < Test.ITERATIONS; i++ ) {
+ AbortProvoker.verifyMonitorState(t.monitor, shouldBeInflated);
if (i == Test.RANGE_CHECK_AT) {
t.forceAbort(new int[0], false);
} else {
diff --git a/test/compiler/rtm/locking/TestRTMDeoptOnLowAbortRatio.java b/test/compiler/rtm/locking/TestRTMDeoptOnLowAbortRatio.java
index bcadaab6c..e18adb242 100644
--- a/test/compiler/rtm/locking/TestRTMDeoptOnLowAbortRatio.java
+++ b/test/compiler/rtm/locking/TestRTMDeoptOnLowAbortRatio.java
@@ -129,10 +129,7 @@ public class TestRTMDeoptOnLowAbortRatio extends CommandLineOptionTest {
@Override
public String[] getMethodsToCompileNames() {
- return new String[] {
- getMethodWithLockName(),
- sun.misc.Unsafe.class.getName() + "::addressSize"
- };
+ return new String[] { getMethodWithLockName() };
}
public void forceAbort(boolean abort) {
@@ -150,11 +147,12 @@ public class TestRTMDeoptOnLowAbortRatio extends CommandLineOptionTest {
public static void main(String args[]) throws Throwable {
Asserts.assertGTE(args.length, 1, "One argument required.");
Test t = new Test();
-
- if (Boolean.valueOf(args[0])) {
+ boolean shouldBeInflated = Boolean.valueOf(args[0]);
+ if (shouldBeInflated) {
AbortProvoker.inflateMonitor(t.monitor);
}
for (int i = 0; i < AbortProvoker.DEFAULT_ITERATIONS; i++) {
+ AbortProvoker.verifyMonitorState(t.monitor, shouldBeInflated);
t.forceAbort(
i == TestRTMDeoptOnLowAbortRatio.LOCKING_THRESHOLD);
}
diff --git a/test/compiler/rtm/locking/TestRTMLockingThreshold.java b/test/compiler/rtm/locking/TestRTMLockingThreshold.java
index 548a09c6e..e1414f9fd 100644
--- a/test/compiler/rtm/locking/TestRTMLockingThreshold.java
+++ b/test/compiler/rtm/locking/TestRTMLockingThreshold.java
@@ -142,10 +142,7 @@ public class TestRTMLockingThreshold extends CommandLineOptionTest {
@Override
public String[] getMethodsToCompileNames() {
- return new String[] {
- getMethodWithLockName(),
- sun.misc.Unsafe.class.getName() + "::addressSize"
- };
+ return new String[] { getMethodWithLockName() };
}
public void lock(boolean abort) {
@@ -163,11 +160,12 @@ public class TestRTMLockingThreshold extends CommandLineOptionTest {
public static void main(String args[]) throws Throwable {
Asserts.assertGTE(args.length, 1, "One argument required.");
Test t = new Test();
-
- if (Boolean.valueOf(args[0])) {
+ boolean shouldBeInflated = Boolean.valueOf(args[0]);
+ if (shouldBeInflated) {
AbortProvoker.inflateMonitor(t.monitor);
}
for (int i = 0; i < Test.TOTAL_ITERATIONS; i++) {
+ AbortProvoker.verifyMonitorState(t.monitor, shouldBeInflated);
t.lock(i % 2 == 1);
}
}
diff --git a/test/compiler/rtm/locking/TestRTMTotalCountIncrRate.java b/test/compiler/rtm/locking/TestRTMTotalCountIncrRate.java
index e1f94d31d..5f1c64cad 100644
--- a/test/compiler/rtm/locking/TestRTMTotalCountIncrRate.java
+++ b/test/compiler/rtm/locking/TestRTMTotalCountIncrRate.java
@@ -116,9 +116,7 @@ public class TestRTMTotalCountIncrRate extends CommandLineOptionTest {
@Override
public String[] getMethodsToCompileNames() {
- return new String[] {
- getMethodWithLockName()
- };
+ return new String[] { getMethodWithLockName() };
}
public void lock() {
@@ -134,11 +132,13 @@ public class TestRTMTotalCountIncrRate extends CommandLineOptionTest {
public static void main(String args[]) throws Throwable {
Asserts.assertGTE(args.length, 1, "One argument required.");
Test test = new Test();
-
- if (Boolean.valueOf(args[0])) {
+ boolean shouldBeInflated = Boolean.valueOf(args[0]);
+ if (shouldBeInflated) {
AbortProvoker.inflateMonitor(test.monitor);
}
for (long i = 0L; i < Test.TOTAL_ITERATIONS; i++) {
+ AbortProvoker.verifyMonitorState(test.monitor,
+ shouldBeInflated);
test.lock();
}
}
diff --git a/test/compiler/rtm/locking/TestUseRTMAfterLockInflation.java b/test/compiler/rtm/locking/TestUseRTMAfterLockInflation.java
index f1fa1393f..026af2ded 100644
--- a/test/compiler/rtm/locking/TestUseRTMAfterLockInflation.java
+++ b/test/compiler/rtm/locking/TestUseRTMAfterLockInflation.java
@@ -51,7 +51,7 @@ import rtm.predicate.SupportedVM;
* The compiled method is invoked {@code AbortProvoker.DEFAULT_ITERATIONS} times before
* lock inflation and the same number of times after inflation.
* As a result total locks count should be equal to
- * {@code 2*AbortProvoker.DEFAULT_ITERATIONS}.
+ * {@code 2 * AbortProvoker.DEFAULT_ITERATIONS}.
* It is a pretty strict assertion which could fail if some retriable abort
* happened: it could be {@code AbortType.RETRIABLE} or
* {@code AbortType.MEM_CONFLICT}, but unfortunately an abort can have both these
@@ -100,7 +100,6 @@ public class TestUseRTMAfterLockInflation extends CommandLineOptionTest {
}
public static class Test {
-
/**
* Usage:
* Test &lt;provoker type&gt;
@@ -112,10 +111,12 @@ public class TestUseRTMAfterLockInflation extends CommandLineOptionTest {
AbortProvoker provoker
= AbortType.lookup(Integer.valueOf(args[0])).provoker();
for (int i = 0; i < AbortProvoker.DEFAULT_ITERATIONS; i++) {
+ AbortProvoker.verifyMonitorState(provoker, false /*deflated*/);
provoker.forceAbort();
}
provoker.inflateMonitor();
for (int i = 0; i < AbortProvoker.DEFAULT_ITERATIONS; i++) {
+ AbortProvoker.verifyMonitorState(provoker, true /*inflated*/);
provoker.forceAbort();
}
}
diff --git a/test/compiler/stable/TestStableBoolean.java b/test/compiler/stable/TestStableBoolean.java
index 37641d1b1..dfcf1edff 100644
--- a/test/compiler/stable/TestStableBoolean.java
+++ b/test/compiler/stable/TestStableBoolean.java
@@ -53,44 +53,32 @@
* java/lang/invoke/TestStableBoolean$DefaultStaticValue
* java/lang/invoke/TestStableBoolean$ObjectArrayLowerDim2
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableBoolean
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableBoolean
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableBoolean
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableBoolean
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:+FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableBoolean
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:-FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableBoolean
*/
package java.lang.invoke;
diff --git a/test/compiler/stable/TestStableByte.java b/test/compiler/stable/TestStableByte.java
index c62bc3508..3b2f5969b 100644
--- a/test/compiler/stable/TestStableByte.java
+++ b/test/compiler/stable/TestStableByte.java
@@ -53,44 +53,32 @@
* java/lang/invoke/TestStableByte$DefaultStaticValue
* java/lang/invoke/TestStableByte$ObjectArrayLowerDim2
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableByte
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableByte
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableByte
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableByte
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:+FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableByte
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:-FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableByte
*/
package java.lang.invoke;
diff --git a/test/compiler/stable/TestStableChar.java b/test/compiler/stable/TestStableChar.java
index 55bf749bf..84deb6461 100644
--- a/test/compiler/stable/TestStableChar.java
+++ b/test/compiler/stable/TestStableChar.java
@@ -53,44 +53,32 @@
* java/lang/invoke/TestStableChar$DefaultStaticValue
* java/lang/invoke/TestStableChar$ObjectArrayLowerDim2
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableChar
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableChar
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableChar
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableChar
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:+FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableChar
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:-FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableChar
*/
package java.lang.invoke;
diff --git a/test/compiler/stable/TestStableDouble.java b/test/compiler/stable/TestStableDouble.java
index 112d83633..f2f85ce4f 100644
--- a/test/compiler/stable/TestStableDouble.java
+++ b/test/compiler/stable/TestStableDouble.java
@@ -53,44 +53,32 @@
* java/lang/invoke/TestStableDouble$DefaultStaticValue
* java/lang/invoke/TestStableDouble$ObjectArrayLowerDim2
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableDouble
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableDouble
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableDouble
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableDouble
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:+FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableDouble
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:-FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableDouble
*/
package java.lang.invoke;
diff --git a/test/compiler/stable/TestStableFloat.java b/test/compiler/stable/TestStableFloat.java
index 973a60165..40e46bd6a 100644
--- a/test/compiler/stable/TestStableFloat.java
+++ b/test/compiler/stable/TestStableFloat.java
@@ -53,44 +53,32 @@
* java/lang/invoke/TestStableFloat$DefaultStaticValue
* java/lang/invoke/TestStableFloat$ObjectArrayLowerDim2
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableFloat
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableFloat
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableFloat
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableFloat
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:+FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableFloat
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:-FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableFloat
*/
package java.lang.invoke;
diff --git a/test/compiler/stable/TestStableInt.java b/test/compiler/stable/TestStableInt.java
index bf7fe337b..beeb3808f 100644
--- a/test/compiler/stable/TestStableInt.java
+++ b/test/compiler/stable/TestStableInt.java
@@ -53,44 +53,32 @@
* java/lang/invoke/TestStableInt$DefaultStaticValue
* java/lang/invoke/TestStableInt$ObjectArrayLowerDim2
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableInt
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableInt
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableInt
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableInt
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:+FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableInt
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:-FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableInt
*/
package java.lang.invoke;
diff --git a/test/compiler/stable/TestStableLong.java b/test/compiler/stable/TestStableLong.java
index f97e7e143..69c90fc29 100644
--- a/test/compiler/stable/TestStableLong.java
+++ b/test/compiler/stable/TestStableLong.java
@@ -53,44 +53,32 @@
* java/lang/invoke/TestStableLong$DefaultStaticValue
* java/lang/invoke/TestStableLong$ObjectArrayLowerDim2
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableLong
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableLong
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableLong
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableLong
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:+FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableLong
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:-FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableLong
*/
package java.lang.invoke;
diff --git a/test/compiler/stable/TestStableObject.java b/test/compiler/stable/TestStableObject.java
index dec466adc..7b2f31e14 100644
--- a/test/compiler/stable/TestStableObject.java
+++ b/test/compiler/stable/TestStableObject.java
@@ -54,44 +54,32 @@
* java/lang/invoke/TestStableObject$DefaultStaticValue
* java/lang/invoke/TestStableObject$ObjectArrayLowerDim2
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableObject
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableObject
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableObject
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableObject
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:+FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableObject
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:-FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableObject
*/
package java.lang.invoke;
diff --git a/test/compiler/stable/TestStableShort.java b/test/compiler/stable/TestStableShort.java
index 67a7f0453..52cf6477e 100644
--- a/test/compiler/stable/TestStableShort.java
+++ b/test/compiler/stable/TestStableShort.java
@@ -53,44 +53,32 @@
* java/lang/invoke/TestStableShort$DefaultStaticValue
* java/lang/invoke/TestStableShort$ObjectArrayLowerDim2
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableShort
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:-TieredCompilation
+ * -XX:-TieredCompilation
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableShort
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:+FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableShort
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -server -XX:+TieredCompilation -XX:TieredStopAtLevel=1
+ * -XX:+TieredCompilation -XX:TieredStopAtLevel=1
* -XX:-FoldStableValues
* -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
* java.lang.invoke.TestStableShort
*
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:+FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableShort
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xcomp
- * -client -XX:-TieredCompilation
- * -XX:-FoldStableValues
- * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
- * java.lang.invoke.TestStableShort
*/
package java.lang.invoke;
diff --git a/test/compiler/stringopts/TestOptimizeStringConcat.java b/test/compiler/stringopts/TestOptimizeStringConcat.java
new file mode 100644
index 000000000..771ffb0bd
--- /dev/null
+++ b/test/compiler/stringopts/TestOptimizeStringConcat.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2015 SAP AG. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8068909
+ * @key regression
+ * @summary Test that string concatenation optimization produces code that doesn't lead to a crash.
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestOptimizeStringConcat
+ * @author axel.siebenborn@sap.com
+ */
+public class TestOptimizeStringConcat {
+
+ static boolean checkArgumentSyntax(String value, String allowedchars, String notallowedchars, String logmsg) {
+ String rc = null;
+
+ int maxchar = 99999;
+ int minchar = 1;
+ if ((allowedchars != null && notallowedchars != null) || minchar > maxchar) {
+ rc = "internal error";
+ } else {
+ if (value == null) {
+ rc = "the value null is not allowed, it is missing";
+ } else if (value != null && minchar > 0 && value.trim().equals("")) {
+ rc = "the value must not be empty";
+ } else if (value != null) {
+ if (value.length() < minchar || value.length() > maxchar) {
+ if (rc == null) {
+ rc = "the value length must be between +minchar+ and +maxchar";
+ }
+ }
+ char[] _value = value.toCharArray();
+ boolean dotfound = false;
+ int i = 1;
+ if (_value[i] == '.' && !dotfound) {
+ dotfound = true;
+ } else if (allowedchars != null && allowedchars.indexOf(_value[i]) == -1) {
+ if (rc == null) {
+ rc = "the value contains an illegal character: '" + _value[i] + "', only following characters are allowed: '+allowedchars+'";
+ } else {
+ rc += " / the value contains an illegal character: '" + _value[i] + "', only following characters are allowed: '+allowedchars+'";
+ }
+ } else if (notallowedchars != null && notallowedchars.indexOf(_value[i]) != -1) {
+ if (rc == null) {
+ rc = "the value contains an illegal character: '" + _value[i] + "', following characters are not allowed '+notallowedchars+'";
+ } else {
+ rc += " / the value contains an illegal character: '" + _value[i] + "', following characters are not allowed '+notallowedchars+'";
+ }
+ }
+ }
+ }
+
+ if (rc != null) {
+ System.out.println(logmsg + " ==> " + rc);
+ return false;
+ }
+ return true;
+ }
+
+ public static void main(String[] args) {
+ boolean failed = false;
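+ // Iterate enough for checkArgumentSyntax to be compiled (the test runs
+ // with -XX:-BackgroundCompilation) with string concat optimization applied.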
+ for (int i = 0; i < 10000; i++) {
+ failed |= !checkArgumentSyntax("theName", null, "\"<&", "Error consistencyCheck: name in component definition");
+ failed |= !checkArgumentSyntax(null, null, "\"<&", "Error consistencyCheck: name in component definition");
+ failed |= !checkArgumentSyntax("42", "0123456789.", null, "Error consistencyCheck: counter in component definition");
+ }
+ System.out.println(failed);
+ }
+}
diff --git a/test/compiler/testlibrary/rtm/AbortProvoker.java b/test/compiler/testlibrary/rtm/AbortProvoker.java
index 1d129087d..b641ca084 100644
--- a/test/compiler/testlibrary/rtm/AbortProvoker.java
+++ b/test/compiler/testlibrary/rtm/AbortProvoker.java
@@ -29,8 +29,7 @@ import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import com.oracle.java.testlibrary.Asserts;
-import com.oracle.java.testlibrary.Utils;
-import sun.misc.Unsafe;
+import sun.hotspot.WhiteBox;
/**
* Base class for different transactional execution abortion
@@ -38,6 +37,9 @@ import sun.misc.Unsafe;
*/
public abstract class AbortProvoker implements CompilableTest {
public static final long DEFAULT_ITERATIONS = 10000L;
+ private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+ @SuppressWarnings("unused")
+ private static int sharedState = 0;
/**
* Inflates monitor associated with object {@code monitor}.
* Inflation is forced by entering the same monitor from
@@ -48,36 +50,76 @@ public abstract class AbortProvoker implements CompilableTest {
* @throws Exception if something went wrong.
*/
public static Object inflateMonitor(Object monitor) throws Exception {
- Unsafe unsafe = Utils.getUnsafe();
CyclicBarrier barrier = new CyclicBarrier(2);
Runnable inflatingRunnable = () -> {
- unsafe.monitorEnter(monitor);
- try {
- barrier.await();
- barrier.await();
- } catch (InterruptedException | BrokenBarrierException e) {
- throw new RuntimeException(
- "Synchronization issue occurred.", e);
- } finally {
- unsafe.monitorExit(monitor);
+ synchronized (monitor) {
+ try {
+ barrier.await();
+ } catch (BrokenBarrierException | InterruptedException e) {
+ throw new RuntimeException(
+ "Synchronization issue occurred.", e);
+ }
+ try {
+ monitor.wait();
+ } catch (InterruptedException e) {
+ throw new AssertionError("The thread waiting on an"
+ + " inflated monitor was interrupted, thus test"
+ + " results may be incorrect.", e);
+ }
}
};
Thread t = new Thread(inflatingRunnable);
+ t.setDaemon(true);
t.start();
// Wait until thread t enters the monitor.
barrier.await();
- // At this point monitor will be owned by thread t,
- // so our attempt to enter the same monitor will force
- // monitor inflation.
- Asserts.assertFalse(unsafe.tryMonitorEnter(monitor),
- "Not supposed to enter the monitor first");
- barrier.await();
- t.join();
+ synchronized (monitor) {
+ // At this point thread t is already waiting on the monitor.
+ // Modify a static field just to prevent elimination of the lock.
+ sharedState++;
+ }
+ verifyMonitorState(monitor, true /* inflated */);
return monitor;
}
+ /**
+ * Verifies that {@code monitor} is a stack-lock or inflated lock depending
+ * on {@code shouldBeInflated} value. If {@code monitor} is inflated while
+ * it is expected that it should be a stack-lock, then this method attempts
+ * to deflate it by forcing a safepoint and then verifies the state once
+ * again.
+ *
+ * @param monitor monitor to be verified.
+ * @param shouldBeInflated flag indicating whether or not monitor is
+ * expected to be inflated.
+ * @throws RuntimeException if the {@code monitor} is in a wrong state.
+ */
+ public static void verifyMonitorState(Object monitor,
+ boolean shouldBeInflated) {
+ if (!shouldBeInflated && WHITE_BOX.isMonitorInflated(monitor)) {
+ WHITE_BOX.forceSafepoint();
+ }
+ Asserts.assertEQ(WHITE_BOX.isMonitorInflated(monitor), shouldBeInflated,
+ "Monitor in a wrong state.");
+ }
+ /**
+ * Verifies that monitor used by the {@code provoker} is a stack-lock or
+ * inflated lock depending on {@code shouldBeInflated} value. If such
+ * monitor is inflated while it is expected that it should be a stack-lock,
+ * then this method attempts to deflate it by forcing a safepoint and then
+ * verifies the state once again.
+ *
+ * @param provoker AbortProvoker whose monitor's state should be verified.
+ * @param shouldBeInflated flag indicating whether or not monitor is
+ * expected to be inflated.
+ * @throws RuntimeException if the {@code monitor} is in a wrong state.
+ */
+ public static void verifyMonitorState(AbortProvoker provoker,
+ boolean shouldBeInflated) {
+ verifyMonitorState(provoker.monitor, shouldBeInflated);
+ }
/**
* Get instance of specified AbortProvoker, inflate associated monitor
@@ -120,6 +162,7 @@ public abstract class AbortProvoker implements CompilableTest {
}
for (long i = 0; i < iterations; i++) {
+ AbortProvoker.verifyMonitorState(provoker, monitorShouldBeInflated);
provoker.forceAbort();
}
}
diff --git a/test/compiler/testlibrary/rtm/BusyLock.java b/test/compiler/testlibrary/rtm/BusyLock.java
index 70e80f70c..55985b61b 100644
--- a/test/compiler/testlibrary/rtm/BusyLock.java
+++ b/test/compiler/testlibrary/rtm/BusyLock.java
@@ -77,7 +77,7 @@ public class BusyLock implements CompilableTest, Runnable {
}
}
- public void test() {
+ public void syncAndTest() {
try {
barrier.await();
// wait until monitor is locked by a ::run method
@@ -85,6 +85,10 @@ public class BusyLock implements CompilableTest, Runnable {
} catch (InterruptedException | BrokenBarrierException e) {
throw new RuntimeException("Synchronization error happened.", e);
}
+ test();
+ }
+
+ public void test() {
synchronized(monitor) {
BusyLock.field++;
}
@@ -130,7 +134,7 @@ public class BusyLock implements CompilableTest, Runnable {
Thread t = new Thread(busyLock);
t.start();
- busyLock.test();
+ busyLock.syncAndTest();
t.join();
}
}
diff --git a/test/compiler/testlibrary/rtm/MemoryConflictProvoker.java b/test/compiler/testlibrary/rtm/MemoryConflictProvoker.java
index 48cf799eb..670e97511 100644
--- a/test/compiler/testlibrary/rtm/MemoryConflictProvoker.java
+++ b/test/compiler/testlibrary/rtm/MemoryConflictProvoker.java
@@ -69,11 +69,6 @@ class MemoryConflictProvoker extends AbortProvoker {
* Accesses and modifies memory region from within the transaction.
*/
public void transactionalRegion() {
- try {
- barrier.await();
- } catch (InterruptedException | BrokenBarrierException e) {
- throw new RuntimeException(e);
- }
for (int i = 0; i < MemoryConflictProvoker.INNER_ITERATIONS; i++) {
synchronized(monitor) {
MemoryConflictProvoker.field--;
@@ -86,6 +81,11 @@ class MemoryConflictProvoker extends AbortProvoker {
try {
Thread t = new Thread(conflictingThread);
t.start();
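+ // Wait for the conflicting thread here, outside of the transactional
+ // region, so that waiting on the barrier cannot itself abort the
+ // transaction under test.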
+ try {
+ barrier.await();
+ } catch (InterruptedException | BrokenBarrierException e) {
+ throw new RuntimeException(e);
+ }
transactionalRegion();
t.join();
} catch (Exception e) {
diff --git a/test/compiler/testlibrary/rtm/RTMTestBase.java b/test/compiler/testlibrary/rtm/RTMTestBase.java
index 64adabc05..406443a65 100644
--- a/test/compiler/testlibrary/rtm/RTMTestBase.java
+++ b/test/compiler/testlibrary/rtm/RTMTestBase.java
@@ -238,10 +238,10 @@ public class RTMTestBase {
String[] filteredVMOpts = Utils.getFilteredTestJavaOpts(filters);
Collections.addAll(finalVMOpts, filteredVMOpts);
Collections.addAll(finalVMOpts, "-Xcomp", "-server",
- "-XX:-TieredCompilation",
+ "-XX:-TieredCompilation", "-XX:+UseRTMLocking",
CommandLineOptionTest.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
- "-XX:+UseRTMLocking");
+ "-Xbootclasspath/a:.", "-XX:+WhiteBoxAPI");
if (test != null) {
for (String method : test.getMethodsToCompileNames()) {
diff --git a/test/compiler/types/TestMeetExactConstantArrays.java b/test/compiler/types/TestMeetExactConstantArrays.java
new file mode 100644
index 000000000..951288ec2
--- /dev/null
+++ b/test/compiler/types/TestMeetExactConstantArrays.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8075587
+ * @summary meet of two constant arrays results in bottom
+ * @run main/othervm TestMeetExactConstantArrays
+ *
+ */
+
+public class TestMeetExactConstantArrays {
+ public abstract static class NumbersHolder {
+ public Number[] getNumbers() {
+ return null;
+ }
+ }
+
+ public static class IntegersHolder extends NumbersHolder {
+ private final static Integer integers[] = { new Integer(1) };
+
+ public Number[] getNumbers() {
+ return integers;
+ }
+ }
+
+ public static class LongsHolder extends NumbersHolder {
+ private final static Long longs[] = { new Long(1) };
+
+ public Number[] getNumbers() {
+ return longs;
+ }
+ }
+
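+ // main() calls this with both holder types, so two distinct constant
+ // arrays (Integer[] and Long[]) reach 'numbers'; computing the meet of
+ // their types is what used to produce bottom (see @summary above).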
+ public static final void loopNumbers(NumbersHolder numbersHolder) {
+ Number[] numbers = numbersHolder.getNumbers();
+ for (int i = 0; i < numbers.length; i++) {
+ numbers[i].longValue();
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ for (int i = 0; i < 10000; i++) {
+ IntegersHolder integersHolder = new IntegersHolder();
+ LongsHolder longsHolder = new LongsHolder();
+ loopNumbers(integersHolder);
+ loopNumbers(longsHolder);
+ }
+ }
+}
diff --git a/test/compiler/types/TestTypePropagationToCmpU.java b/test/compiler/types/TestTypePropagationToCmpU.java
new file mode 100644
index 000000000..9e08a8b4c
--- /dev/null
+++ b/test/compiler/types/TestTypePropagationToCmpU.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8080156 8060036
+ * @summary Test correctness of type propagation to CmpUNodes.
+ * @run main TestTypePropagationToCmpU
+ */
+public class TestTypePropagationToCmpU {
+ public static void main(String[] args) {
+ try {
+ // Trigger compilation
+ for (int i = 0; i < 100_000; ++i) {
+ test();
+ }
+ } catch (NullPointerException e) {
+ // Test should never throw a NullPointerException
+ throw new RuntimeException("Test failed");
+ }
+ }
+
+ static int global = 42;
+
+ public static void test() {
+ int a = Integer.MIN_VALUE;
+ int b = global;
+ char[] buf = { 0 };
+ for (int i = 0; i <= b; ++i) {
+ a = i - b;
+ }
+ // C2 adds a range check and an uncommon trap here to ensure that the array index
+ // is in bounds. If type information is not propagated correctly to the corresponding
+    // CmpUNode, this trap may always be taken. Because C2 also removes the unnecessary
+ // allocation of 'buf', a NullPointerException is thrown in this case.
+ char c = buf[(a * 11) / 2 - a]; // a is 0 here if global >= 0
+ buf[0] = 0;
+ }
+}
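
For reference, the bounds check C2 emits for the access to 'buf' is a single unsigned comparison (the CmpUNode named in the @summary): 0 <= i && i < length holds exactly when i, reinterpreted as unsigned, is below length. A minimal sketch of that equivalence, illustrative only:

    static boolean inBounds(int i, int length) {
        boolean signed   = 0 <= i && i < length;                    // two-sided check
        boolean unsigned = Integer.compareUnsigned(i, length) < 0;  // single CmpU
        assert signed == unsigned;  // equivalent whenever length >= 0
        return unsigned;
    }

If the type reaching the CmpU is too wide (here, if the loop's final value a == 0 is not propagated), the unsigned compare cannot be proven true and the trap is taken on every execution.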
diff --git a/test/compiler/uncommontrap/UncommonTrapStackBang.java b/test/compiler/uncommontrap/UncommonTrapStackBang.java
index 164453b7b..e0c093b7b 100644
--- a/test/compiler/uncommontrap/UncommonTrapStackBang.java
+++ b/test/compiler/uncommontrap/UncommonTrapStackBang.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,9 +27,7 @@
* @bug 8026775
* @summary Uncommon trap blob did not bang all the stack shadow pages
*
- * @run main/othervm -server -XX:+IgnoreUnrecognizedVMOptions -XX:+TieredCompilation UncommonTrapStackBang
- * @run main/othervm -server -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation UncommonTrapStackBang
- *
+ * @run main/othervm UncommonTrapStackBang
*
* Note: This test does not reproduce the problem with absolute
* certainty. Empirically the bug reproduces on Windows some 80+% of
diff --git a/test/compiler/unsafe/TestUnsafeLoadControl.java b/test/compiler/unsafe/TestUnsafeLoadControl.java
new file mode 100644
index 000000000..edd559753
--- /dev/null
+++ b/test/compiler/unsafe/TestUnsafeLoadControl.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8077504
+ * @summary Unsafe load can lose control dependency and cause crash
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestUnsafeLoadControl
+ *
+ */
+
+import java.lang.reflect.Field;
+import sun.misc.Unsafe;
+
+public class TestUnsafeLoadControl {
+
+ private static final Unsafe UNSAFE;
+
+ static {
+ try {
+ Field unsafeField = Unsafe.class.getDeclaredField("theUnsafe");
+ unsafeField.setAccessible(true);
+ UNSAFE = (Unsafe) unsafeField.get(null);
+ } catch(Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ static int val;
+ static void test1(int[] a, boolean[] flags, boolean flag, long j) {
+ for (int i = 0; i < 10; i++) {
+ if (flags[i]) {
+ if (flag) {
+ long address = (j << 2) + UNSAFE.ARRAY_INT_BASE_OFFSET;
+ int v = UNSAFE.getInt(a, address);
+ val = v;
+ }
+ }
+ }
+ }
+
+ static int test2(int[] a, boolean[] flags, boolean flag, long j) {
+ int sum = 0;
+ for (int i = 0; i < 10; i++) {
+ if (flags[i]) {
+ if (flag) {
+ long address = (j << 2) + UNSAFE.ARRAY_INT_BASE_OFFSET;
+ int v = UNSAFE.getInt(a, address);
+ if (v == 0) {
+ sum++;
+ }
+ }
+ }
+ }
+ return sum;
+ }
+
+ static public void main(String[] args) {
+ boolean[] flags = new boolean[10];
+ for (int i = 0; i < flags.length; i++) {
+ flags[i] = true;
+ }
+ int[] array = new int[10];
+ for (int i = 0; i < 20000; i++) {
+ test1(array, flags, true, 0);
+ }
+ for (int i = 0; i < flags.length; i++) {
+ flags[i] = false;
+ }
+ test1(array, flags, true, Long.MAX_VALUE/4);
+
+ for (int i = 0; i < flags.length; i++) {
+ flags[i] = true;
+ }
+ for (int i = 0; i < 20000; i++) {
+ test2(array, flags, true, 0);
+ }
+ for (int i = 0; i < flags.length; i++) {
+ flags[i] = false;
+ }
+ test2(array, flags, true, Long.MAX_VALUE/4);
+ }
+}
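
The address arithmetic in test1/test2 is the standard Unsafe layout for int[]: element i lives at ARRAY_INT_BASE_OFFSET + 4*i, which is what (j << 2) + UNSAFE.ARRAY_INT_BASE_OFFSET computes. A minimal sketch reusing the reflective UNSAFE handle from above; illustrative only:

    static int readElement(int[] a, long i) {
        long offset = Unsafe.ARRAY_INT_BASE_OFFSET + (i << 2);  // 4-byte int scale
        return UNSAFE.getInt(a, offset);  // valid only if 0 <= i < a.length
    }

The final calls pass j = Long.MAX_VALUE/4 with all flags false, so the load must never execute; if C2 loses the control dependency on the flag tests (bug 8077504), the load can float above them and dereference the wild address, crashing the VM.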
diff --git a/test/gc/6581734/Test6581734.java b/test/gc/6581734/Test6581734.java
index 143340dc0..9ec55c857 100644
--- a/test/gc/6581734/Test6581734.java
+++ b/test/gc/6581734/Test6581734.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
/*
* @test Test6581734.java
* @bug 6581734
+ * @requires vm.gc=="ConcMarkSweep" | vm.gc=="null"
* @summary CMS Old Gen's collection usage is zero after GC which is incorrect
* @run main/othervm -Xmx512m -verbose:gc -XX:+UseConcMarkSweepGC Test6581734
*
diff --git a/test/gc/TestSystemGC.java b/test/gc/TestSystemGC.java
index b882f9fc7..d03465951 100644
--- a/test/gc/TestSystemGC.java
+++ b/test/gc/TestSystemGC.java
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
/*
* @test TestSystemGC
* @key gc
+ * @requires vm.gc=="null"
* @summary Runs System.gc() with different flags.
* @run main/othervm TestSystemGC
* @run main/othervm -XX:+UseSerialGC TestSystemGC
diff --git a/test/gc/arguments/TestAlignmentToUseLargePages.java b/test/gc/arguments/TestAlignmentToUseLargePages.java
index 125c1aabd..45d447840 100644
--- a/test/gc/arguments/TestAlignmentToUseLargePages.java
+++ b/test/gc/arguments/TestAlignmentToUseLargePages.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
* @bug 8024396
* @key gc
* @key regression
+ * @requires vm.gc=="null"
* @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages
* @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:-UseLargePages TestAlignmentToUseLargePages
* @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages
diff --git a/test/gc/arguments/TestG1HeapRegionSize.java b/test/gc/arguments/TestG1HeapRegionSize.java
index 193dacf98..b55dc8b32 100644
--- a/test/gc/arguments/TestG1HeapRegionSize.java
+++ b/test/gc/arguments/TestG1HeapRegionSize.java
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,11 +25,12 @@
* @test TestG1HeapRegionSize
* @key gc
* @bug 8021879
+ * @requires vm.gc=="G1" | vm.gc=="null"
* @summary Verify that the flag G1HeapRegionSize is updated properly
* @run main/othervm -Xmx64m TestG1HeapRegionSize 1048576
- * @run main/othervm -XX:G1HeapRegionSize=2m -Xmx64m TestG1HeapRegionSize 2097152
- * @run main/othervm -XX:G1HeapRegionSize=3m -Xmx64m TestG1HeapRegionSize 2097152
- * @run main/othervm -XX:G1HeapRegionSize=64m -Xmx256m TestG1HeapRegionSize 33554432
+ * @run main/othervm -XX:G1HeapRegionSize=2m -Xmx64m -XX:+UseG1GC TestG1HeapRegionSize 2097152
+ * @run main/othervm -XX:G1HeapRegionSize=3m -Xmx64m -XX:+UseG1GC TestG1HeapRegionSize 2097152
+ * @run main/othervm -XX:G1HeapRegionSize=64m -Xmx256m -XX:+UseG1GC TestG1HeapRegionSize 33554432
*/
import sun.management.ManagementFactoryHelper;
@@ -41,14 +42,8 @@ public class TestG1HeapRegionSize {
public static void main(String[] args) {
HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean();
- VMOption option = diagnostic.getVMOption("UseG1GC");
- if (option.getValue().equals("false")) {
- System.out.println("Skipping this test. It is only a G1 test.");
- return;
- }
-
String expectedValue = getExpectedValue(args);
- option = diagnostic.getVMOption("G1HeapRegionSize");
+ VMOption option = diagnostic.getVMOption("G1HeapRegionSize");
if (!expectedValue.equals(option.getValue())) {
throw new RuntimeException("Wrong value for G1HeapRegionSize. Expected " + expectedValue + " but got " + option.getValue());
}
diff --git a/test/gc/concurrentMarkSweep/DisableResizePLAB.java b/test/gc/concurrentMarkSweep/DisableResizePLAB.java
index c504d069a..be8ac5d95 100644
--- a/test/gc/concurrentMarkSweep/DisableResizePLAB.java
+++ b/test/gc/concurrentMarkSweep/DisableResizePLAB.java
@@ -26,6 +26,7 @@
* @key gc
* @bug 8060467
* @author filipp.zhinkin@oracle.com, john.coomes@oracle.com
+ * @requires vm.gc=="ConcMarkSweep" | vm.gc=="null"
* @summary Run CMS with PLAB resizing disabled and a small OldPLABSize
* @run main/othervm -XX:+UseConcMarkSweepGC -XX:-ResizePLAB -XX:OldPLABSize=1k -Xmx256m -XX:+PrintGCDetails DisableResizePLAB
*/
diff --git a/test/gc/g1/Test2GbHeap.java b/test/gc/g1/Test2GbHeap.java
new file mode 100644
index 000000000..6b0cd3b8d
--- /dev/null
+++ b/test/gc/g1/Test2GbHeap.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test Test2GbHeap
+ * @bug 8031686
+ * @summary Regression test to ensure we can start G1 with 2gb heap.
+ * @key gc
+ * @key regression
+ * @library /testlibrary
+ */
+
+import java.util.ArrayList;
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+public class Test2GbHeap {
+ public static void main(String[] args) throws Exception {
+ ArrayList<String> testArguments = new ArrayList<String>();
+
+ testArguments.add("-XX:+UseG1GC");
+ testArguments.add("-Xmx2g");
+ testArguments.add("-version");
+
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(testArguments.toArray(new String[0]));
+
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+        // Avoid failing the test on unsupported setups.
+ if (output.getOutput().contains("Could not reserve enough space for 2097152KB object heap")) {
+ // Will fail on machines with too little memory (and Windows 32-bit VM), ignore such failures.
+ output.shouldHaveExitValue(1);
+ } else if (output.getOutput().contains("G1 GC is disabled in this release")) {
+ // G1 is not supported on embedded, ignore such failures.
+ output.shouldHaveExitValue(1);
+ } else {
+ // Normally everything should be fine.
+ output.shouldHaveExitValue(0);
+ }
+ }
+}
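
Test2GbHeap uses the spawn-and-analyze pattern shared by most tests in this change: build a child VM with ProcessTools, wrap its output in an OutputAnalyzer, and whitelist known environment limitations before asserting the exit code. Stripped to its core, illustrative only:

    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
            "-XX:+UseG1GC", "-Xmx2g", "-version");
    OutputAnalyzer output = new OutputAnalyzer(pb.start());
    if (output.getOutput().contains("Could not reserve enough space")) {
        output.shouldHaveExitValue(1);  // environment limitation, not a failure
    } else {
        output.shouldHaveExitValue(0);  // the normal case
    }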
diff --git a/test/gc/g1/TestEagerReclaimHumongousRegions2.java b/test/gc/g1/TestEagerReclaimHumongousRegionsClearMarkBits.java
index dafbde0ed..5b4e69477 100644
--- a/test/gc/g1/TestEagerReclaimHumongousRegions2.java
+++ b/test/gc/g1/TestEagerReclaimHumongousRegionsClearMarkBits.java
@@ -22,7 +22,7 @@
*/
/*
- * @test TestEagerReclaimHumongousRegions2
+ * @test TestEagerReclaimHumongousRegionsClearMarkBits
* @bug 8051973
* @summary Test to make sure that eager reclaim of humongous objects correctly clears
* mark bitmaps at reclaim.
@@ -109,7 +109,7 @@ class ReclaimRegionFast {
}
}
-public class TestEagerReclaimHumongousRegions2 {
+public class TestEagerReclaimHumongousRegionsClearMarkBits {
public static void main(String[] args) throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UseG1GC",
diff --git a/test/gc/g1/TestEagerReclaimHumongousRegionsWithRefs.java b/test/gc/g1/TestEagerReclaimHumongousRegionsWithRefs.java
new file mode 100644
index 000000000..d12e25af7
--- /dev/null
+++ b/test/gc/g1/TestEagerReclaimHumongousRegionsWithRefs.java
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestEagerReclaimHumongousRegionsWithRefs
+ * @bug 8048179
+ * @summary Test to make sure that eager reclaim of humongous objects that have previously
+ *          been referenced by other old gen regions works. We simply try to fill
+ *          up the heap with humongous objects and create a remembered set entry for each one
+ *          from an object that we know is in the old gen. After changing this reference, the
+ *          humongous object should still be eagerly reclaimable, avoiding a Full GC.
+ * @key gc
+ * @library /testlibrary
+ */
+
+import java.util.regex.Pattern;
+import java.util.regex.Matcher;
+import java.util.LinkedList;
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+import static com.oracle.java.testlibrary.Asserts.*;
+
+class RefHolder {
+ Object ref;
+}
+
+class ReclaimRegionFast {
+
+ public static final int M = 1024*1024;
+
+ public static LinkedList<Object> garbageList = new LinkedList<Object>();
+
+ public static void genGarbage() {
+ for (int i = 0; i < 32*1024; i++) {
+ garbageList.add(new int[100]);
+ }
+ garbageList.clear();
+ }
+
+
+ // A large object referenced by a static.
+ static int[] filler = new int[10 * M];
+
+ // Old gen object referencing the large object, generating remembered
+ // set entries.
+ static RefHolder fromOld = new RefHolder();
+
+ public static void main(String[] args) {
+
+ int[] large = new int[M];
+
+ Object ref_from_stack = large;
+
+ for (int i = 0; i < 100; i++) {
+ // A large object that will be reclaimed eagerly.
+ large = new int[6*M];
+ fromOld.ref = large;
+ genGarbage();
+ }
+
+ // Keep the reference to the first object alive.
+ System.out.println(ref_from_stack);
+ }
+}
+
+public class TestEagerReclaimHumongousRegionsWithRefs {
+
+ public static void main(String[] args) throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UseG1GC",
+ "-Xms128M",
+ "-Xmx128M",
+ "-Xmn16M",
+ "-XX:+PrintGC",
+ ReclaimRegionFast.class.getName());
+
+ Pattern p = Pattern.compile("Full GC");
+
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+ int found = 0;
+ Matcher m = p.matcher(output.getStdout());
+ while (m.find()) {
+ found++;
+ }
+ System.out.println("Issued " + found + " Full GCs");
+
+        assertLessThan(found, 10, "Found that " + found + " Full GCs were issued. This is larger than the bound. Eager reclaim of objects once referenced from old gen does not seem to work at all");
+ output.shouldHaveExitValue(0);
+ }
+}
+
diff --git a/test/gc/g1/TestG1TraceEagerReclaimHumongousObjects.java b/test/gc/g1/TestG1TraceEagerReclaimHumongousObjects.java
new file mode 100644
index 000000000..e653554c9
--- /dev/null
+++ b/test/gc/g1/TestG1TraceEagerReclaimHumongousObjects.java
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestG1TraceEagerReclaimHumongousObjects
+ * @bug 8058801 8048179
+ * @summary Ensure that the output for the G1TraceEagerReclaimHumongousObjects
+ *          flag includes the expected necessary messages.
+ * @key gc
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import java.util.LinkedList;
+
+public class TestG1TraceEagerReclaimHumongousObjects {
+ public static void main(String[] args) throws Exception {
+ testGCLogs();
+ testHumongousObjectGCLogs();
+ }
+
+ private static void testGCLogs() throws Exception {
+
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+ "-Xms128M",
+ "-Xmx128M",
+ "-Xmn16M",
+ "-XX:G1HeapRegionSize=1M",
+ "-XX:+PrintGC",
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:G1LogLevel=finest",
+ "-XX:+G1TraceEagerReclaimHumongousObjects",
+ GCTest.class.getName());
+
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+        // As G1EagerReclaimHumongousObjects is set (the default), the logs below should be displayed.
+        // GCTest does not allocate humongous objects, so the reported values should be zero.
+ output.shouldContain("[Humongous Reclaim");
+ output.shouldContain("[Humongous Total: 0]");
+ output.shouldContain("[Humongous Candidate: 0]");
+ output.shouldContain("[Humongous Reclaimed: 0]");
+
+ output.shouldHaveExitValue(0);
+ }
+
+ private static void testHumongousObjectGCLogs() throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+ "-Xms128M",
+ "-Xmx128M",
+ "-Xmn16M",
+ "-XX:G1HeapRegionSize=1M",
+ "-XX:+PrintGC",
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:G1LogLevel=finest",
+ "-XX:+G1TraceEagerReclaimHumongousObjects",
+ GCWithHumongousObjectTest.class.getName());
+
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+        // As G1EagerReclaimHumongousObjects is set (the default), the logs below should be displayed.
+ output.shouldContain("[Humongous Reclaim");
+ output.shouldContain("[Humongous Total");
+ output.shouldContain("[Humongous Candidate");
+ output.shouldContain("[Humongous Reclaimed");
+
+        // As G1TraceEagerReclaimHumongousObjects is set and GCWithHumongousObjectTest allocates humongous objects,
+        // these logs should be displayed.
+ output.shouldContain("Live humongous");
+ output.shouldContain("Dead humongous region");
+ output.shouldHaveExitValue(0);
+ }
+
+ static class GCTest {
+ private static byte[] garbage;
+
+ public static void main(String [] args) {
+ System.out.println("Creating garbage");
+ // create 128MB of garbage. This should result in at least one GC
+ for (int i = 0; i < 1024; i++) {
+ garbage = new byte[128 * 1024];
+ }
+ System.out.println("Done");
+ }
+ }
+
+ static class GCWithHumongousObjectTest {
+
+ public static final int M = 1024*1024;
+ public static LinkedList<Object> garbageList = new LinkedList<Object>();
+ // A large object referenced by a static.
+ static int[] filler = new int[10 * M];
+
+ public static void genGarbage() {
+ for (int i = 0; i < 32*1024; i++) {
+ garbageList.add(new int[100]);
+ }
+ garbageList.clear();
+ }
+
+ public static void main(String[] args) {
+
+ int[] large = new int[M];
+ Object ref = large;
+
+ System.out.println("Creating garbage");
+ for (int i = 0; i < 100; i++) {
+ // A large object that will be reclaimed eagerly.
+ large = new int[6*M];
+ genGarbage();
+ // Make sure that the compiler cannot completely remove
+ // the allocation of the large object until here.
+ System.out.println(large);
+ }
+
+ // Keep the reference to the first object alive.
+ System.out.println(ref);
+ System.out.println("Done");
+ }
+ }
+}
diff --git a/test/gc/g1/TestGCLogMessages.java b/test/gc/g1/TestGCLogMessages.java
index ee338013f..1a4d6532b 100644
--- a/test/gc/g1/TestGCLogMessages.java
+++ b/test/gc/g1/TestGCLogMessages.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
/*
* @test TestGCLogMessages
- * @bug 8035406 8027295 8035398 8019342 8027959
+ * @bug 8035406 8027295 8035398 8019342 8027959 8048179 8027962
* @summary Ensure that the PrintGCDetails output for a minor GC with G1
* includes the expected necessary messages.
* @key gc
@@ -34,128 +34,159 @@ import com.oracle.java.testlibrary.ProcessTools;
import com.oracle.java.testlibrary.OutputAnalyzer;
public class TestGCLogMessages {
- public static void main(String[] args) throws Exception {
- testNormalLogs();
- testWithToSpaceExhaustionLogs();
- }
-
- private static void testNormalLogs() throws Exception {
-
- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
- "-Xmx10M",
- GCTest.class.getName());
-
- OutputAnalyzer output = new OutputAnalyzer(pb.start());
-
- output.shouldNotContain("[Redirty Cards");
- output.shouldNotContain("[Parallel Redirty");
- output.shouldNotContain("[Redirtied Cards");
- output.shouldNotContain("[Code Root Purge");
- output.shouldNotContain("[String Dedup Fixup");
- output.shouldNotContain("[Young Free CSet");
- output.shouldNotContain("[Non-Young Free CSet");
- output.shouldNotContain("[Humongous Reclaim");
- output.shouldHaveExitValue(0);
-
- pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
- "-XX:+UseStringDeduplication",
- "-Xmx10M",
- "-XX:+PrintGCDetails",
- GCTest.class.getName());
-
- output = new OutputAnalyzer(pb.start());
-
- output.shouldContain("[Redirty Cards");
- output.shouldNotContain("[Parallel Redirty");
- output.shouldNotContain("[Redirtied Cards");
- output.shouldContain("[Code Root Purge");
- output.shouldContain("[String Dedup Fixup");
- output.shouldNotContain("[Young Free CSet");
- output.shouldNotContain("[Non-Young Free CSet");
- output.shouldContain("[Humongous Reclaim");
- output.shouldNotContain("[Humongous Total");
- output.shouldNotContain("[Humongous Candidate");
- output.shouldNotContain("[Humongous Reclaimed");
- output.shouldHaveExitValue(0);
-
- pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
- "-XX:+UseStringDeduplication",
- "-Xmx10M",
- "-XX:+PrintGCDetails",
- "-XX:+UnlockExperimentalVMOptions",
- "-XX:G1LogLevel=finest",
- GCTest.class.getName());
-
- output = new OutputAnalyzer(pb.start());
-
- output.shouldContain("[Redirty Cards");
- output.shouldContain("[Parallel Redirty");
- output.shouldContain("[Redirtied Cards");
- output.shouldContain("[Code Root Purge");
- output.shouldContain("[String Dedup Fixup");
- output.shouldContain("[Young Free CSet");
- output.shouldContain("[Non-Young Free CSet");
- output.shouldContain("[Humongous Reclaim");
- output.shouldContain("[Humongous Total");
- output.shouldContain("[Humongous Candidate");
- output.shouldContain("[Humongous Reclaimed");
- output.shouldHaveExitValue(0);
- }
-
- private static void testWithToSpaceExhaustionLogs() throws Exception {
- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
- "-Xmx10M",
- "-Xmn5M",
- "-XX:+PrintGCDetails",
- GCTestWithToSpaceExhaustion.class.getName());
-
- OutputAnalyzer output = new OutputAnalyzer(pb.start());
- output.shouldContain("[Evacuation Failure");
- output.shouldNotContain("[Recalculate Used");
- output.shouldNotContain("[Remove Self Forwards");
- output.shouldNotContain("[Restore RemSet");
- output.shouldHaveExitValue(0);
-
- pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
- "-Xmx10M",
- "-Xmn5M",
- "-XX:+PrintGCDetails",
- "-XX:+UnlockExperimentalVMOptions",
- "-XX:G1LogLevel=finest",
- GCTestWithToSpaceExhaustion.class.getName());
-
- output = new OutputAnalyzer(pb.start());
- output.shouldContain("[Evacuation Failure");
- output.shouldContain("[Recalculate Used");
- output.shouldContain("[Remove Self Forwards");
- output.shouldContain("[Restore RemSet");
- output.shouldHaveExitValue(0);
- }
-
- static class GCTest {
- private static byte[] garbage;
- public static void main(String [] args) {
- System.out.println("Creating garbage");
- // create 128MB of garbage. This should result in at least one GC
- for (int i = 0; i < 1024; i++) {
- garbage = new byte[128 * 1024];
- }
- System.out.println("Done");
+
+ private enum Level {
+ OFF, FINER, FINEST;
+ public boolean lessOrEqualTo(Level other) {
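+            // Note: despite the name this is a strict less-than comparison; a
+            // message whose level equals the run's level must still be present.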
+ return this.compareTo(other) < 0;
+ }
}
- }
-
- static class GCTestWithToSpaceExhaustion {
- private static byte[] garbage;
- private static byte[] largeObject;
- public static void main(String [] args) {
- largeObject = new byte[5*1024*1024];
- System.out.println("Creating garbage");
- // create 128MB of garbage. This should result in at least one GC,
- // some of them with to-space exhaustion.
- for (int i = 0; i < 1024; i++) {
- garbage = new byte[128 * 1024];
- }
- System.out.println("Done");
+
+ private class LogMessageWithLevel {
+ String message;
+ Level level;
+
+ public LogMessageWithLevel(String message, Level level) {
+ this.message = message;
+ this.level = level;
+ }
+ };
+
+ private LogMessageWithLevel allLogMessages[] = new LogMessageWithLevel[] {
+ // Ext Root Scan
+ new LogMessageWithLevel("Thread Roots (ms)", Level.FINEST),
+ new LogMessageWithLevel("StringTable Roots (ms)", Level.FINEST),
+ new LogMessageWithLevel("Universe Roots (ms)", Level.FINEST),
+ new LogMessageWithLevel("JNI Handles Roots (ms)", Level.FINEST),
+ new LogMessageWithLevel("ObjectSynchronizer Roots (ms)", Level.FINEST),
+ new LogMessageWithLevel("FlatProfiler Roots", Level.FINEST),
+ new LogMessageWithLevel("Management Roots", Level.FINEST),
+ new LogMessageWithLevel("SystemDictionary Roots", Level.FINEST),
+ new LogMessageWithLevel("CLDG Roots", Level.FINEST),
+ new LogMessageWithLevel("JVMTI Roots", Level.FINEST),
+ new LogMessageWithLevel("CodeCache Roots", Level.FINEST),
+ new LogMessageWithLevel("SATB Filtering", Level.FINEST),
+ new LogMessageWithLevel("CM RefProcessor Roots", Level.FINEST),
+ new LogMessageWithLevel("Wait For Strong CLD", Level.FINEST),
+ new LogMessageWithLevel("Weak CLD Roots", Level.FINEST),
+ // Redirty Cards
+ new LogMessageWithLevel("Redirty Cards", Level.FINER),
+ new LogMessageWithLevel("Parallel Redirty", Level.FINEST),
+ new LogMessageWithLevel("Redirtied Cards", Level.FINEST),
+ // Misc Top-level
+ new LogMessageWithLevel("Code Root Purge", Level.FINER),
+ new LogMessageWithLevel("String Dedup Fixup", Level.FINER),
+ // Free CSet
+ new LogMessageWithLevel("Young Free CSet", Level.FINEST),
+ new LogMessageWithLevel("Non-Young Free CSet", Level.FINEST),
+ // Humongous Eager Reclaim
+ new LogMessageWithLevel("Humongous Reclaim", Level.FINER),
+ new LogMessageWithLevel("Humongous Register", Level.FINER),
+ };
+
+ void checkMessagesAtLevel(OutputAnalyzer output, LogMessageWithLevel messages[], Level level) throws Exception {
+ for (LogMessageWithLevel l : messages) {
+ if (level.lessOrEqualTo(l.level)) {
+ output.shouldNotContain(l.message);
+ } else {
+ output.shouldContain(l.message);
+ }
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ new TestGCLogMessages().testNormalLogs();
+ new TestGCLogMessages().testWithToSpaceExhaustionLogs();
+ }
+
+ private void testNormalLogs() throws Exception {
+
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+ "-Xmx10M",
+ GCTest.class.getName());
+
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ checkMessagesAtLevel(output, allLogMessages, Level.OFF);
+ output.shouldHaveExitValue(0);
+
+ pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+ "-XX:+UseStringDeduplication",
+ "-Xmx10M",
+ "-XX:+PrintGCDetails",
+ GCTest.class.getName());
+
+ output = new OutputAnalyzer(pb.start());
+ checkMessagesAtLevel(output, allLogMessages, Level.FINER);
+
+ pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+ "-XX:+UseStringDeduplication",
+ "-Xmx10M",
+ "-XX:+PrintGCDetails",
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:G1LogLevel=finest",
+ GCTest.class.getName());
+
+ output = new OutputAnalyzer(pb.start());
+ checkMessagesAtLevel(output, allLogMessages, Level.FINEST);
+ output.shouldHaveExitValue(0);
+ }
+
+ LogMessageWithLevel exhFailureMessages[] = new LogMessageWithLevel[] {
+ new LogMessageWithLevel("Evacuation Failure", Level.FINER),
+ new LogMessageWithLevel("Recalculate Used", Level.FINEST),
+ new LogMessageWithLevel("Remove Self Forwards", Level.FINEST),
+ new LogMessageWithLevel("Restore RemSet", Level.FINEST),
+ };
+
+ private void testWithToSpaceExhaustionLogs() throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+ "-Xmx32M",
+ "-Xmn16M",
+ "-XX:+PrintGCDetails",
+ GCTestWithToSpaceExhaustion.class.getName());
+
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ checkMessagesAtLevel(output, exhFailureMessages, Level.FINER);
+ output.shouldHaveExitValue(0);
+
+ pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+ "-Xmx32M",
+ "-Xmn16M",
+ "-XX:+PrintGCDetails",
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:G1LogLevel=finest",
+ GCTestWithToSpaceExhaustion.class.getName());
+
+ output = new OutputAnalyzer(pb.start());
+ checkMessagesAtLevel(output, exhFailureMessages, Level.FINEST);
+ output.shouldHaveExitValue(0);
+ }
+
+ static class GCTest {
+ private static byte[] garbage;
+ public static void main(String [] args) {
+ System.out.println("Creating garbage");
+ // create 128MB of garbage. This should result in at least one GC
+ for (int i = 0; i < 1024; i++) {
+ garbage = new byte[128 * 1024];
+ }
+ System.out.println("Done");
+ }
+ }
+
+ static class GCTestWithToSpaceExhaustion {
+ private static byte[] garbage;
+ private static byte[] largeObject;
+ public static void main(String [] args) {
+ largeObject = new byte[16*1024*1024];
+ System.out.println("Creating garbage");
+ // create 128MB of garbage. This should result in at least one GC,
+ // some of them with to-space exhaustion.
+ for (int i = 0; i < 1024; i++) {
+ garbage = new byte[128 * 1024];
+ }
+ System.out.println("Done");
+ }
}
- }
}
+
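
As a quick check of the matrix checkMessagesAtLevel encodes, illustrative only:

    // run level OFF    : "Redirty Cards" (FINER) absent,  "Parallel Redirty" (FINEST) absent
    // run level FINER  : "Redirty Cards" present,         "Parallel Redirty" absent
    //                    (reached with -XX:+PrintGCDetails)
    // run level FINEST : "Redirty Cards" present,         "Parallel Redirty" present
    //                    (reached with -XX:G1LogLevel=finest)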
diff --git a/test/gc/g1/TestGreyReclaimedHumongousObjects.java b/test/gc/g1/TestGreyReclaimedHumongousObjects.java
new file mode 100644
index 000000000..a9a0bfe7e
--- /dev/null
+++ b/test/gc/g1/TestGreyReclaimedHumongousObjects.java
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestGreyReclaimedHumongousObjects.java
+ * @bug 8069367
+ * @requires vm.gc == "G1" | vm.gc == "null"
+ * @summary Test handling of marked but unscanned reclaimed humongous objects.
+ * @key gc
+ * @run main/othervm -XX:+UseG1GC -Xss32m -Xmx128m -XX:G1HeapRegionSize=1m
+ * -XX:+UnlockExperimentalVMOptions
+ * -XX:+G1EagerReclaimHumongousObjects
+ * -XX:+G1EagerReclaimHumongousObjectsWithStaleRefs
+ * TestGreyReclaimedHumongousObjects 1048576 90
+ */
+
+// This test spawns a bunch of threads, each of them rapidly
+// allocating large objects and storing them into a circular buffer
+// associated with the thread. The circular buffer results in these
+// objects becoming dead in fairly short order.
+//
+// The situation we're trying to provoke is
+//
+// (1) A humongous object H is marked and added to the mark stack.
+//
+// (2) An evacuation pause determines H is no longer live, and
+// reclaims it. This occurs before concurrent marking has gotten
+// around to processing the mark stack entry for H.
+//
+// (3) Concurrent marking processes the mark stack entry for H. The
+// bug is that it would attempt to scan the now dead object.
+//
+// Unfortunately, this test is *very* sensitive to configuration.
+// Among the parameters that affect whether / how often we'll get into
+// the desired situation within a reasonable amount of time are:
+//
+// - THREAD_COUNT: The number of allocating threads.
+//
+// - OLD_COUNT: The number of objects each thread keeps.
+//
+// - MAX_MEMORY: The maximum heap size.
+//
+// - G1HeapRegionSize
+//
+// - The size of the objects being allocated.
+//
+// The parameter values specified here:
+//
+// - THREAD_COUNT = 12
+// - OLD_COUNT == 4
+// - MAX_MEMORY == 128m
+// - G1HeapRegionSize = 1m
+// - Object size = 1048576 (2 regions after header overhead and roundup)
+//
+// seems to work well at provoking the desired state fairly quickly.
+// Even relatively small perturbations may change that. The key
+// factors seem to be keeping the heap mostly full of live objects but
+// having them become dead fairly quickly.
+
+import java.util.Date;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import sun.management.ManagementFactoryHelper;
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+
+public class TestGreyReclaimedHumongousObjects {
+
+ static class NamedThreadFactory implements ThreadFactory {
+ private int threadNum = 0;
+
+ @Override
+ public Thread newThread(Runnable r) {
+ return new Thread(r, THREAD_NAME + (threadNum++));
+ }
+ }
+
+ static class Runner extends Thread {
+ private final Date startDate = new Date();
+ private final int obj_size;
+ private final Object[] old_garbage;
+ private int old_index = 0;
+
+ public Runner(int obj_size) {
+ this.obj_size = obj_size;
+ old_garbage = new Object[OLD_COUNT];
+ }
+
+ private void allocate_garbage() {
+ byte[] garbage = new byte[obj_size];
+ old_garbage[Math.abs(++old_index % OLD_COUNT)] = garbage;
+ }
+
+ @Override
+ public void run() {
+ try {
+ while (!isInterrupted()) {
+ allocate_garbage();
+                    Thread.sleep(0); // Yield, to ensure interruptible.
+ }
+ } catch (InterruptedException e) {
+ System.out.println("Aborted after "
+ + (new Date().getTime() - startDate.getTime())
+ + " ms");
+ interrupt();
+ }
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean();
+
+ System.out.println("Max memory= " + MAX_MEMORY + " bytes");
+
+ int obj_size = 0;
+ long seconds_to_run = 0;
+ if (args.length != 2) {
+ throw new RuntimeException("Object size argument must be supplied");
+ } else {
+ obj_size = Integer.parseInt(args[0]);
+ seconds_to_run = Integer.parseInt(args[1]);
+ }
+ System.out.println("Objects size= " + obj_size + " bytes");
+ System.out.println("Seconds to run=" + seconds_to_run);
+
+ int region_size =
+ Integer.parseInt(diagnostic.getVMOption("G1HeapRegionSize").getValue());
+ if (obj_size < (region_size / 2)) {
+ throw new RuntimeException("Object size " + obj_size +
+ " is not humongous with region size " + region_size);
+ }
+
+ ExecutorService executor =
+ Executors.newFixedThreadPool(THREAD_COUNT, new NamedThreadFactory());
+ System.out.println("Starting " + THREAD_COUNT + " threads");
+
+ for (int i = 0; i < THREAD_COUNT; i++) {
+ executor.execute(new Runner(obj_size));
+ }
+
+ Thread.sleep(seconds_to_run * 1000);
+ executor.shutdownNow();
+
+ if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
+ System.err.println("Thread pool did not terminate after 10 seconds after shutdown");
+ }
+ }
+
+ private static final long MAX_MEMORY = Runtime.getRuntime().maxMemory();
+ private static final int OLD_COUNT = 4;
+ private static final int THREAD_COUNT = 12;
+ private static final String THREAD_NAME = "TestGreyRH-";
+}
+
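
The precondition enforced in main() above is the standard G1 humongous rule: an object is humongous once it is at least half a heap region, and is then allocated in dedicated contiguous regions. A minimal sketch, illustrative only:

    static boolean isHumongous(long objSizeBytes, long regionSizeBytes) {
        return objSizeBytes >= regionSizeBytes / 2;
    }
    // e.g. the 1048576-byte arrays used with -XX:G1HeapRegionSize=1m qualify,
    // and span two regions once header overhead and roundup are included.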
diff --git a/test/gc/g1/TestHumongousShrinkHeap.java b/test/gc/g1/TestHumongousShrinkHeap.java
index b350bcd86..764cfab3d 100644
--- a/test/gc/g1/TestHumongousShrinkHeap.java
+++ b/test/gc/g1/TestHumongousShrinkHeap.java
@@ -24,6 +24,7 @@
/**
* @test TestHumongousShrinkHeap
* @bug 8036025 8056043
+ * @requires vm.gc=="G1" | vm.gc=="null"
* @summary Verify that heap shrinks after GC in the presence of fragmentation
* due to humongous objects
* @library /testlibrary
diff --git a/test/gc/g1/TestLargePageUseForAuxMemory.java b/test/gc/g1/TestLargePageUseForAuxMemory.java
new file mode 100644
index 000000000..ef8ada703
--- /dev/null
+++ b/test/gc/g1/TestLargePageUseForAuxMemory.java
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestLargePageUseForAuxMemory.java
+ * @bug 8058354
+ * @key gc
+ * @library /testlibrary /testlibrary/whitebox
+ * @requires (vm.gc=="G1" | vm.gc=="null")
+ * @build TestLargePageUseForAuxMemory
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @summary Test that auxiliary data structures are allocated using large pages if available.
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UseG1GC -XX:+WhiteBoxAPI -XX:+IgnoreUnrecognizedVMOptions -XX:+UseLargePages TestLargePageUseForAuxMemory
+ */
+
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class TestLargePageUseForAuxMemory {
+ static final int HEAP_REGION_SIZE = 4 * 1024 * 1024;
+ static long largePageSize;
+ static long smallPageSize;
+
+ static void checkSmallTables(OutputAnalyzer output, long expectedPageSize) throws Exception {
+ output.shouldContain("G1 'Block offset table': pg_sz=" + expectedPageSize);
+ output.shouldContain("G1 'Card counts table': pg_sz=" + expectedPageSize);
+ }
+
+ static void checkBitmaps(OutputAnalyzer output, long expectedPageSize) throws Exception {
+ output.shouldContain("G1 'Prev Bitmap': pg_sz=" + expectedPageSize);
+ output.shouldContain("G1 'Next Bitmap': pg_sz=" + expectedPageSize);
+ }
+
+ static void testVM(long heapsize, boolean cardsShouldUseLargePages, boolean bitmapShouldUseLargePages) throws Exception {
+ ProcessBuilder pb;
+ // Test with large page enabled.
+ pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+ "-XX:G1HeapRegionSize=" + HEAP_REGION_SIZE,
+ "-Xms" + 10 * HEAP_REGION_SIZE,
+ "-Xmx" + heapsize,
+ "-XX:+TracePageSizes",
+ "-XX:+UseLargePages",
+ "-XX:+IgnoreUnrecognizedVMOptions", // there is on ObjectAlignmentInBytes in 32 bit builds
+ "-XX:ObjectAlignmentInBytes=8",
+ "-version");
+
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ checkSmallTables(output, (cardsShouldUseLargePages ? largePageSize : smallPageSize));
+ checkBitmaps(output, (bitmapShouldUseLargePages ? largePageSize : smallPageSize));
+ output.shouldHaveExitValue(0);
+
+ // Test with large page disabled.
+ pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+ "-XX:G1HeapRegionSize=" + HEAP_REGION_SIZE,
+ "-Xms" + 10 * HEAP_REGION_SIZE,
+ "-Xmx" + heapsize,
+ "-XX:+TracePageSizes",
+ "-XX:-UseLargePages",
+ "-XX:+IgnoreUnrecognizedVMOptions", // there is on ObjectAlignmentInBytes in 32 bit builds
+ "-XX:ObjectAlignmentInBytes=8",
+ "-version");
+
+ output = new OutputAnalyzer(pb.start());
+ checkSmallTables(output, smallPageSize);
+ checkBitmaps(output, smallPageSize);
+ output.shouldHaveExitValue(0);
+ }
+
+ public static void main(String[] args) throws Exception {
+ if (!Platform.isDebugBuild()) {
+ System.out.println("Skip tests on non-debug builds because the required option TracePageSizes is a debug-only option.");
+ return;
+ }
+
+ WhiteBox wb = WhiteBox.getWhiteBox();
+ smallPageSize = wb.getVMPageSize();
+ largePageSize = wb.getVMLargePageSize();
+
+ if (largePageSize == 0) {
+ System.out.println("Skip tests because large page support does not seem to be available on this platform.");
+ return;
+ }
+
+ // To get large pages for the card table etc. we need at least a 1G heap (with 4k page size).
+ // 32 bit systems will have problems reserving such an amount of contiguous space, so skip the
+ // test there.
+ if (!Platform.is32bit()) {
+ // Size that a single card covers.
+ final int cardSize = 512;
+
+ final long heapSizeForCardTableUsingLargePages = largePageSize * cardSize;
+
+ testVM(heapSizeForCardTableUsingLargePages, true, true);
+ testVM(heapSizeForCardTableUsingLargePages + HEAP_REGION_SIZE, true, true);
+ testVM(heapSizeForCardTableUsingLargePages - HEAP_REGION_SIZE, false, true);
+ }
+
+ // Minimum heap requirement to get large pages for bitmaps is 128M heap. This seems okay to test
+ // everywhere.
+ final int bitmapTranslationFactor = 8 * 8; // ObjectAlignmentInBytes * BitsPerByte
+ final long heapSizeForBitmapUsingLargePages = largePageSize * bitmapTranslationFactor;
+
+ testVM(heapSizeForBitmapUsingLargePages, false, true);
+ testVM(heapSizeForBitmapUsingLargePages + HEAP_REGION_SIZE, false, true);
+ testVM(heapSizeForBitmapUsingLargePages - HEAP_REGION_SIZE, false, false);
+ }
+}
+
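
The heap sizes chosen in main() fall out of the card table geometry: each card covers 512 bytes of heap, so the card table is heap/512 bytes and only spans a full large page once the heap reaches largePageSize * cardSize. Worked for a common 2M large page, illustrative only:

    long largePageSize = 2L * 1024 * 1024;         // typical x86_64 huge page
    long cardSize      = 512;                      // heap bytes covered per card
    long heapNeeded    = largePageSize * cardSize; // 2M * 512 = 1 GB of heap

This matches the in-code comment that at least a 1G heap is needed before the card table itself can be backed by large pages.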
diff --git a/test/gc/g1/TestRegionAlignment.java b/test/gc/g1/TestRegionAlignment.java
index 08cf02e54..7c7600f8b 100644
--- a/test/gc/g1/TestRegionAlignment.java
+++ b/test/gc/g1/TestRegionAlignment.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
/*
* @test TestRegionAlignment.java
* @bug 8013791
+ * @requires vm.gc=="G1" | vm.gc=="null"
* @summary Make sure that G1 ergonomics pick a heap size that is aligned with the region size
* @run main/othervm -XX:+UseG1GC -XX:G1HeapRegionSize=32m -XX:MaxRAM=555m TestRegionAlignment
*
diff --git a/test/gc/g1/TestShrinkAuxiliaryData.java b/test/gc/g1/TestShrinkAuxiliaryData.java
index cba813882..3145eb638 100644
--- a/test/gc/g1/TestShrinkAuxiliaryData.java
+++ b/test/gc/g1/TestShrinkAuxiliaryData.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -21,7 +21,7 @@
* questions.
*/
-import static com.oracle.java.testlibrary.Asserts.assertLessThanOrEqual;
+import com.oracle.java.testlibrary.Asserts;
import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.Platform;
import com.oracle.java.testlibrary.ProcessTools;
@@ -36,23 +36,29 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
-import sun.misc.Unsafe;
+import sun.misc.Unsafe; // for ADDRESS_SIZE
+import sun.hotspot.WhiteBox;
public class TestShrinkAuxiliaryData {
+ private static final int REGION_SIZE = 1024 * 1024;
+
private final static String[] initialOpts = new String[]{
"-XX:MinHeapFreeRatio=10",
"-XX:MaxHeapFreeRatio=11",
"-XX:+UseG1GC",
- "-XX:G1HeapRegionSize=1m",
+ "-XX:G1HeapRegionSize=" + REGION_SIZE,
"-XX:-ExplicitGCInvokesConcurrent",
- "-XX:+PrintGCDetails"
+ "-XX:+PrintGCDetails",
+ "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:+WhiteBoxAPI",
+ "-Xbootclasspath/a:.",
};
- private final int RSetCacheSize;
+ private final int hotCardTableSize;
- protected TestShrinkAuxiliaryData(int RSetCacheSize) {
- this.RSetCacheSize = RSetCacheSize;
+ protected TestShrinkAuxiliaryData(int hotCardTableSize) {
+ this.hotCardTableSize = hotCardTableSize;
}
protected void test() throws Exception {
@@ -60,19 +66,17 @@ public class TestShrinkAuxiliaryData {
Collections.addAll(vmOpts, initialOpts);
int maxCacheSize = Math.max(0, Math.min(31, getMaxCacheSize()));
- if (maxCacheSize < RSetCacheSize) {
+ if (maxCacheSize < hotCardTableSize) {
System.out.format("Skiping test for %d cache size due max cache size %d",
- RSetCacheSize, maxCacheSize
+ hotCardTableSize, maxCacheSize
);
return;
}
printTestInfo(maxCacheSize);
- vmOpts.add("-XX:G1ConcRSLogCacheSize=" + RSetCacheSize);
-
- vmOpts.addAll(Arrays.asList(Utils.getFilteredTestJavaOpts(
- ShrinkAuxiliaryDataTest.prohibitedVmOptions)));
+ vmOpts.add("-XX:G1ConcRSLogCacheSize=" + hotCardTableSize);
+ vmOpts.addAll(Arrays.asList(Utils.getTestJavaOpts()));
// for 32 bits ObjectAlignmentInBytes is not a option
if (Platform.is32bit()) {
@@ -94,11 +98,13 @@ public class TestShrinkAuxiliaryData {
private void performTest(List<String> opts) throws Exception {
ProcessBuilder pb
- = ProcessTools.createJavaProcessBuilder(
- opts.toArray(new String[opts.size()])
- );
+ = ProcessTools.createJavaProcessBuilder(
+ opts.toArray(new String[opts.size()])
+ );
OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ System.out.println(output.getStdout());
+ System.err.println(output.getStderr());
output.shouldHaveExitValue(0);
}
@@ -109,12 +115,13 @@ public class TestShrinkAuxiliaryData {
formatSymbols.setGroupingSeparator(' ');
grouped.setDecimalFormatSymbols(formatSymbols);
- System.out.format("Test will use %s bytes of memory of %s available%n"
+ System.out.format(
+ "Test will use %s bytes of memory of %s available%n"
+ "Available memory is %s with %d bytes pointer size - can save %s pointers%n"
+ "Max cache size: 2^%d = %s elements%n",
grouped.format(ShrinkAuxiliaryDataTest.getMemoryUsedByTest()),
- grouped.format(Runtime.getRuntime().freeMemory()),
- grouped.format(Runtime.getRuntime().freeMemory()
+ grouped.format(Runtime.getRuntime().maxMemory()),
+ grouped.format(Runtime.getRuntime().maxMemory()
- ShrinkAuxiliaryDataTest.getMemoryUsedByTest()),
Unsafe.ADDRESS_SIZE,
grouped.format((Runtime.getRuntime().freeMemory()
@@ -137,6 +144,7 @@ public class TestShrinkAuxiliaryData {
if (availableMemory <= 0) {
return 0;
}
+
long availablePointersCount = availableMemory / Unsafe.ADDRESS_SIZE;
return (63 - (int) Long.numberOfLeadingZeros(availablePointersCount));
}
@@ -144,17 +152,48 @@ public class TestShrinkAuxiliaryData {
static class ShrinkAuxiliaryDataTest {
public static void main(String[] args) throws IOException {
- int iterateCount = DEFAULT_ITERATION_COUNT;
- if (args.length > 0) {
- try {
- iterateCount = Integer.parseInt(args[0]);
- } catch (NumberFormatException e) {
- //num_iterate remains default
- }
+ ShrinkAuxiliaryDataTest testCase = new ShrinkAuxiliaryDataTest();
+
+ if (!testCase.checkEnvApplicability()) {
+ return;
}
- new ShrinkAuxiliaryDataTest().test(iterateCount);
+ testCase.test();
+ }
+
+ /**
+         * Checks whether this environment is suitable for running this test:
+         * - enough memory to decommit (the page size is not too big)
+         * - RSet cache size is not too big
+         *
+         * @return true if the test can run, false if it should be skipped
+ */
+ protected boolean checkEnvApplicability() {
+
+ int pageSize = WhiteBox.getWhiteBox().getVMPageSize();
+ System.out.println( "Page size = " + pageSize
+ + " region size = " + REGION_SIZE
+ + " aux data ~= " + (REGION_SIZE * 3 / 100));
+            // If the aux data size is less than the page size it won't be decommitted.
+            // Auxiliary data size is about 3.6% of the heap size.
+ if (pageSize >= REGION_SIZE * 3 / 100) {
+ System.out.format("Skipping test for too large page size = %d",
+ pageSize
+ );
+ return false;
+ }
+
+ if (REGION_SIZE * REGIONS_TO_ALLOCATE > Runtime.getRuntime().maxMemory()) {
+ System.out.format("Skipping test for too low available memory. "
+ + "Need %d, available %d",
+ REGION_SIZE * REGIONS_TO_ALLOCATE,
+ Runtime.getRuntime().maxMemory()
+ );
+ return false;
+ }
+
+ return true;
}
class GarbageObject {
@@ -179,41 +218,54 @@ public class TestShrinkAuxiliaryData {
private final List<GarbageObject> garbage = new ArrayList();
- public void test(int num_iterate) throws IOException {
+ public void test() throws IOException {
+
+ MemoryUsage muFull, muFree, muAuxDataFull, muAuxDataFree;
+ float auxFull, auxFree;
allocate();
link();
mutate();
- deallocate();
- MemoryUsage muBeforeHeap
- = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
- MemoryUsage muBeforeNonHeap
- = ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage();
+ muFull = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
+ long numUsedRegions = WhiteBox.getWhiteBox().g1NumMaxRegions()
+ - WhiteBox.getWhiteBox().g1NumFreeRegions();
+ muAuxDataFull = WhiteBox.getWhiteBox().g1AuxiliaryMemoryUsage();
+ auxFull = (float)muAuxDataFull.getUsed() / numUsedRegions;
- for (int i = 0; i < num_iterate; i++) {
- allocate();
- link();
- mutate();
- deallocate();
- }
+ System.out.format("Full aux data ratio= %f, regions max= %d, used= %d\n",
+ auxFull, WhiteBox.getWhiteBox().g1NumMaxRegions(), numUsedRegions
+ );
+ deallocate();
System.gc();
- MemoryUsage muAfterHeap
- = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
- MemoryUsage muAfterNonHeap
- = ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage();
-
- assertLessThanOrEqual(muAfterHeap.getCommitted(), muBeforeHeap.getCommitted(),
- String.format("heap decommit failed - after > before: %d > %d",
- muAfterHeap.getCommitted(), muBeforeHeap.getCommitted()
+
+ muFree = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
+ muAuxDataFree = WhiteBox.getWhiteBox().g1AuxiliaryMemoryUsage();
+
+ numUsedRegions = WhiteBox.getWhiteBox().g1NumMaxRegions()
+ - WhiteBox.getWhiteBox().g1NumFreeRegions();
+ auxFree = (float)muAuxDataFree.getUsed() / numUsedRegions;
+
+ System.out.format("Free aux data ratio= %f, regions max= %d, used= %d\n",
+ auxFree, WhiteBox.getWhiteBox().g1NumMaxRegions(), numUsedRegions
+ );
+
+ Asserts.assertLessThanOrEqual(muFree.getCommitted(), muFull.getCommitted(),
+ String.format("heap decommit failed - full > free: %d > %d",
+ muFree.getCommitted(), muFull.getCommitted()
)
);
- if (muAfterHeap.getCommitted() < muBeforeHeap.getCommitted()) {
- assertLessThanOrEqual(muAfterNonHeap.getCommitted(), muBeforeNonHeap.getCommitted(),
- String.format("non-heap decommit failed - after > before: %d > %d",
- muAfterNonHeap.getCommitted(), muBeforeNonHeap.getCommitted()
+ System.out.format("State used committed\n");
+ System.out.format("Full aux data: %10d %10d\n", muAuxDataFull.getUsed(), muAuxDataFull.getCommitted());
+ System.out.format("Free aux data: %10d %10d\n", muAuxDataFree.getUsed(), muAuxDataFree.getCommitted());
+
+            // if decommitted, check that the aux data shows the same ratio
+ if (muFree.getCommitted() < muFull.getCommitted()) {
+ Asserts.assertLessThanOrEqual(auxFree, auxFull,
+ String.format("auxiliary data decommit failed - full > free: %f > %f",
+ auxFree, auxFull
)
);
}
@@ -240,8 +292,7 @@ public class TestShrinkAuxiliaryData {
for (int i = 0; i < NUM_LINKS; i++) {
int regionToLink;
do {
- regionToLink = (int) (Math.random()
- * REGIONS_TO_ALLOCATE);
+ regionToLink = (int) (Math.random() * REGIONS_TO_ALLOCATE);
} while (regionToLink == regionNumber);
// get random garbage object from random region
@@ -267,21 +318,8 @@ public class TestShrinkAuxiliaryData {
return REGIONS_TO_ALLOCATE * REGION_SIZE;
}
- private static final int REGION_SIZE = 1024 * 1024;
- private static final int DEFAULT_ITERATION_COUNT = 1; // iterate main scenario
- private static final int REGIONS_TO_ALLOCATE = 5;
+ private static final int REGIONS_TO_ALLOCATE = 100;
private static final int NUM_OBJECTS_PER_REGION = 10;
private static final int NUM_LINKS = 20; // how many links create for each object
-
- private static final String[] prohibitedVmOptions = {
- // remove this when @requires option will be on duty
- "-XX:\\+UseParallelGC",
- "-XX:\\+UseSerialGC",
- "-XX:\\+UseConcMarkSweepGC",
- "-XX:\\+UseParallelOldGC",
- "-XX:\\+UseParNewGC",
- "-Xconcgc",
- "-Xincgc"
- };
}
}
diff --git a/test/gc/g1/TestShrinkAuxiliaryData00.java b/test/gc/g1/TestShrinkAuxiliaryData00.java
index 4395bb2d7..04523e50c 100644
--- a/test/gc/g1/TestShrinkAuxiliaryData00.java
+++ b/test/gc/g1/TestShrinkAuxiliaryData00.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,11 +23,15 @@
/**
* @test TestShrinkAuxiliaryData00
- * @bug 8038423
+ * @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @requires vm.gc=="G1" | vm.gc=="null"
* @library /testlibrary /testlibrary/whitebox
- * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData00
+ * @build com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
+ * TestShrinkAuxiliaryData TestShrinkAuxiliaryData00
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
* @run driver/timeout=720 TestShrinkAuxiliaryData00
*/
public class TestShrinkAuxiliaryData00 {
diff --git a/test/gc/g1/TestShrinkAuxiliaryData05.java b/test/gc/g1/TestShrinkAuxiliaryData05.java
index 910aad696..652c262fa 100644
--- a/test/gc/g1/TestShrinkAuxiliaryData05.java
+++ b/test/gc/g1/TestShrinkAuxiliaryData05.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,11 +23,15 @@
/**
* @test TestShrinkAuxiliaryData05
- * @bug 8038423
+ * @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @requires vm.gc=="G1" | vm.gc=="null"
* @library /testlibrary /testlibrary/whitebox
- * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData05
+ * @build com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
+ * TestShrinkAuxiliaryData TestShrinkAuxiliaryData05
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
* @run driver/timeout=720 TestShrinkAuxiliaryData05
*/
public class TestShrinkAuxiliaryData05 {
diff --git a/test/gc/g1/TestShrinkAuxiliaryData10.java b/test/gc/g1/TestShrinkAuxiliaryData10.java
index 5dec96d63..46af6f820 100644
--- a/test/gc/g1/TestShrinkAuxiliaryData10.java
+++ b/test/gc/g1/TestShrinkAuxiliaryData10.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,11 +23,15 @@
/**
* @test TestShrinkAuxiliaryData10
- * @bug 8038423
+ * @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @requires vm.gc=="G1" | vm.gc=="null"
* @library /testlibrary /testlibrary/whitebox
+ * @build com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
* @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData10
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
* @run driver/timeout=720 TestShrinkAuxiliaryData10
*/
public class TestShrinkAuxiliaryData10 {
diff --git a/test/gc/g1/TestShrinkAuxiliaryData15.java b/test/gc/g1/TestShrinkAuxiliaryData15.java
index 30455e4f4..a13af4ebc 100644
--- a/test/gc/g1/TestShrinkAuxiliaryData15.java
+++ b/test/gc/g1/TestShrinkAuxiliaryData15.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,11 +23,15 @@
/**
* @test TestShrinkAuxiliaryData15
- * @bug 8038423
+ * @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @requires vm.gc=="G1" | vm.gc=="null"
* @library /testlibrary /testlibrary/whitebox
+ * @build com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
* @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData15
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
* @run driver/timeout=720 TestShrinkAuxiliaryData15
*/
public class TestShrinkAuxiliaryData15 {
diff --git a/test/gc/g1/TestShrinkAuxiliaryData20.java b/test/gc/g1/TestShrinkAuxiliaryData20.java
index bdc3996ec..899acdbc5 100644
--- a/test/gc/g1/TestShrinkAuxiliaryData20.java
+++ b/test/gc/g1/TestShrinkAuxiliaryData20.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,11 +23,15 @@
/**
* @test TestShrinkAuxiliaryData20
- * @bug 8038423
+ * @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @requires vm.gc=="G1" | vm.gc=="null"
* @library /testlibrary /testlibrary/whitebox
+ * @build com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
* @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData20
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
* @run driver/timeout=720 TestShrinkAuxiliaryData20
*/
public class TestShrinkAuxiliaryData20 {
diff --git a/test/gc/g1/TestShrinkAuxiliaryData25.java b/test/gc/g1/TestShrinkAuxiliaryData25.java
index 4429ee503..1e982f8d2 100644
--- a/test/gc/g1/TestShrinkAuxiliaryData25.java
+++ b/test/gc/g1/TestShrinkAuxiliaryData25.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,11 +23,15 @@
/**
* @test TestShrinkAuxiliaryData25
- * @bug 8038423
+ * @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @requires vm.gc=="G1" | vm.gc=="null"
* @library /testlibrary /testlibrary/whitebox
+ * @build com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
* @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData25
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
* @run driver/timeout=720 TestShrinkAuxiliaryData25
*/
public class TestShrinkAuxiliaryData25 {
diff --git a/test/gc/g1/TestShrinkAuxiliaryData30.java b/test/gc/g1/TestShrinkAuxiliaryData30.java
index 2ad40ccdf..bf11e0e90 100644
--- a/test/gc/g1/TestShrinkAuxiliaryData30.java
+++ b/test/gc/g1/TestShrinkAuxiliaryData30.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,11 +23,15 @@
/**
* @test TestShrinkAuxiliaryData30
- * @bug 8038423
+ * @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @requires vm.gc=="G1" | vm.gc=="null"
* @library /testlibrary /testlibrary/whitebox
+ * @build com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
* @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData30
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
* @run driver/timeout=720 TestShrinkAuxiliaryData30
*/
public class TestShrinkAuxiliaryData30 {
diff --git a/test/gc/g1/TestShrinkToOneRegion.java b/test/gc/g1/TestShrinkToOneRegion.java
index 100741b69..0821223c6 100644
--- a/test/gc/g1/TestShrinkToOneRegion.java
+++ b/test/gc/g1/TestShrinkToOneRegion.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
/*
* @test TestShrinkToOneRegion.java
* @bug 8013872
+ * @requires vm.gc=="G1" | vm.gc=="null"
* @summary Shrinking the heap down to one region used to hit an assert
* @run main/othervm -XX:+UseG1GC -XX:G1HeapRegionSize=32m -Xmx256m TestShrinkToOneRegion
*
diff --git a/test/gc/metaspace/G1AddMetaspaceDependency.java b/test/gc/metaspace/G1AddMetaspaceDependency.java
index b8e8b1ad5..5c6256290 100644
--- a/test/gc/metaspace/G1AddMetaspaceDependency.java
+++ b/test/gc/metaspace/G1AddMetaspaceDependency.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
/*
* @test G1AddMetaspaceDependency
* @bug 8010196
+ * @requires vm.gc=="G1" | vm.gc=="null"
* @summary Checks that we don't get locking problems when adding metaspace dependencies with the G1 update buffer monitor
* @run main/othervm -XX:+UseG1GC -XX:G1UpdateBufferSize=1 G1AddMetaspaceDependency
*/
diff --git a/test/gc/metaspace/TestMetaspacePerfCounters.java b/test/gc/metaspace/TestMetaspacePerfCounters.java
index 974066cba..a02f5b45f 100644
--- a/test/gc/metaspace/TestMetaspacePerfCounters.java
+++ b/test/gc/metaspace/TestMetaspacePerfCounters.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@ import static com.oracle.java.testlibrary.Asserts.*;
/* @test TestMetaspacePerfCounters
* @bug 8014659
+ * @requires vm.gc=="null"
* @library /testlibrary
* @summary Tests that performance counters for metaspace and compressed class
* space exist and work.
diff --git a/test/gc/metaspace/TestPerfCountersAndMemoryPools.java b/test/gc/metaspace/TestPerfCountersAndMemoryPools.java
index ac708bf77..4aaa8ac17 100644
--- a/test/gc/metaspace/TestPerfCountersAndMemoryPools.java
+++ b/test/gc/metaspace/TestPerfCountersAndMemoryPools.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@ import static com.oracle.java.testlibrary.Asserts.*;
/* @test TestPerfCountersAndMemoryPools
* @bug 8023476
* @library /testlibrary
+ * @requires vm.gc=="Serial" | vm.gc=="null"
* @summary Tests that a MemoryPoolMXBeans and PerfCounters for metaspace
* report the same data.
* @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UseSerialGC -XX:+UsePerfData -Xint TestPerfCountersAndMemoryPools
diff --git a/test/gc/whitebox/TestConcMarkCycleWB.java b/test/gc/whitebox/TestConcMarkCycleWB.java
new file mode 100644
index 000000000..2d97bc739
--- /dev/null
+++ b/test/gc/whitebox/TestConcMarkCycleWB.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestConcMarkCycleWB
+ * @bug 8065579
+ * @requires vm.gc=="null" | vm.gc=="G1"
+ * @library /testlibrary /testlibrary/whitebox
+ * @build ClassFileInstaller com.oracle.java.testlibrary.* sun.hotspot.WhiteBox TestConcMarkCycleWB
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseG1GC TestConcMarkCycleWB
+ * @summary Verifies that the ConcurrentMarking-related WhiteBox methods work properly
+ */
+import static com.oracle.java.testlibrary.Asserts.assertFalse;
+import static com.oracle.java.testlibrary.Asserts.assertTrue;
+import sun.hotspot.WhiteBox;
+
+public class TestConcMarkCycleWB {
+
+ public static void main(String[] args) throws Exception {
+ WhiteBox wb = WhiteBox.getWhiteBox();
+
+ wb.youngGC();
+ assertTrue(wb.g1StartConcMarkCycle());
+ while (wb.g1InConcurrentMark()) {
+ Thread.sleep(5);
+ }
+
+ wb.fullGC();
+ assertTrue(wb.g1StartConcMarkCycle());
+ while (wb.g1InConcurrentMark()) {
+ Thread.sleep(5);
+ }
+ assertTrue(wb.g1StartConcMarkCycle());
+ }
+}
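The two while/sleep loops above poll WhiteBox until the concurrent mark cycle finishes and rely on jtreg's timeout to bound the wait. A hypothetical helper (not in the testlibrary) that adds an explicit deadline to the same 5 ms polling pattern:

import java.util.function.BooleanSupplier;

public class Poll {
    // Waits until the condition holds, or gives up after timeoutMs.
    public static boolean waitUntil(BooleanSupplier done, long timeoutMs)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!done.getAsBoolean()) {
            if (System.currentTimeMillis() > deadline) {
                return false;
            }
            Thread.sleep(5); // same granularity as the loops above
        }
        return true;
    }
}

With it, each loop above would collapse to Poll.waitUntil(() -> !wb.g1InConcurrentMark(), 60_000).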
diff --git a/test/runtime/6888954/vmerrors.sh b/test/runtime/6888954/vmerrors.sh
index ebd91490e..3864575d3 100644
--- a/test/runtime/6888954/vmerrors.sh
+++ b/test/runtime/6888954/vmerrors.sh
@@ -1,4 +1,4 @@
-# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -61,11 +61,12 @@ bad_data_ptr_re='(SIGILL|SIGSEGV|EXCEPTION_ACCESS_VIOLATION).* at pc='
# EXCEPTION_ACCESS_VIOLATION - Win-*
# SIGBUS - Solaris SPARC-64
# SIGSEGV - Linux-*, Solaris SPARC-32, Solaris X86-*
+# SIGILL - Aix
#
# Note: would like to use "pc=0x00*0f," in the pattern, but Solaris SPARC-*
# gets its signal at a PC in test_error_handler().
#
-bad_func_ptr_re='(SIGBUS|SIGSEGV|EXCEPTION_ACCESS_VIOLATION).* at pc='
+bad_func_ptr_re='(SIGBUS|SIGSEGV|SIGILL|EXCEPTION_ACCESS_VIOLATION).* at pc='
guarantee_re='guarantee[(](str|num).*failed: *'
fatal_re='fatal error: *'
tail_1='.*expected null'
diff --git a/test/runtime/ErrorHandling/TestCrashOnOutOfMemoryError.java b/test/runtime/ErrorHandling/TestCrashOnOutOfMemoryError.java
new file mode 100644
index 000000000..7e5983457
--- /dev/null
+++ b/test/runtime/ErrorHandling/TestCrashOnOutOfMemoryError.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestCrashOnOutOfMemoryError
+ * @summary Test using -XX:+CrashOnOutOfMemoryError
+ * @library /testlibrary
+ * @build jdk.test.lib.*
+ * @run driver TestCrashOnOutOfMemoryError
+ * @bug 8138745
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.InputStreamReader;
+import java.io.IOException;
+
+public class TestCrashOnOutOfMemoryError {
+
+ public static void main(String[] args) throws Exception {
+ if (args.length == 1) {
+ // This is guaranteed to throw:
+ // java.lang.OutOfMemoryError: Requested array size exceeds VM limit
+ try {
+ Object[] oa = new Object[Integer.MAX_VALUE];
+ throw new Error("OOME not triggered");
+ } catch (OutOfMemoryError err) {
+ throw new Error("OOME didn't abort JVM!");
+ }
+ }
+ // else this is the main test
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+CrashOnOutOfMemoryError",
+ "-Xmx64m", TestCrashOnOutOfMemoryError.class.getName(),"throwOOME");
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ int exitValue = output.getExitValue();
+ if (0 == exitValue) {
+ // expecting a non-zero exit value
+ throw new Error("Expected to get a non-zero exit value");
+ }
+
+ /* Output should look something like this. The actual text will depend on the OS and its core dump processing.
+ Aborting due to java.lang.OutOfMemoryError: Requested array size exceeds VM limit
+ # To suppress the following error report, specify this argument
+ # after -XX: or in .hotspotrc: SuppressErrorAt=/debug.cpp:303
+ #
+ # A fatal error has been detected by the Java Runtime Environment:
+ #
+ # Internal Error (/home/cheleswer/Desktop/jdk9/dev/hotspot/src/share/vm/utilities/debug.cpp:303), pid=6212, tid=6213
+ # fatal error: OutOfMemory encountered: Requested array size exceeds VM limit
+ #
+ # JRE version: OpenJDK Runtime Environment (9.0) (build 1.9.0-internal-debug-cheleswer_2015_10_20_14_32-b00)
+ # Java VM: OpenJDK 64-Bit Server VM (1.9.0-internal-debug-cheleswer_2015_10_20_14_32-b00, mixed mode, tiered, compressed oops, serial gc, linux-amd64)
+ # Core dump will be written. Default location: Core dumps may be processed with "/usr/share/apport/apport %p %s %c %P" (or dumping to
+ /home/cheleswer/Desktop/core.6212)
+ #
+ # An error report file with more information is saved as:
+ # /home/cheleswer/Desktop/hs_err_pid6212.log
+ #
+ # If you would like to submit a bug report, please visit:
+ # http://bugreport.java.com/bugreport/crash.jsp
+ #
+ Current thread is 6213
+ Dumping core ...
+ Aborted (core dumped)
+ */
+ output.shouldContain("Aborting due to java.lang.OutOfMemoryError: Requested array size exceeds VM limit");
+ // extract hs-err file
+ String hs_err_file = output.firstMatch("# *(\\S*hs_err_pid\\d+\\.log)", 1);
+ if (hs_err_file == null) {
+ throw new Error("Did not find hs-err file in output.\n");
+ }
+
+ /*
+ * Check that the hs_err file actually exists
+ */
+ File f = new File(hs_err_file);
+ if (!f.exists()) {
+ throw new Error("hs-err file missing at "+ f.getAbsolutePath() + ".\n");
+ }
+
+ System.out.println("PASSED");
+ }
+}
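The hs_err extraction above goes through OutputAnalyzer.firstMatch; reduced to plain java.util.regex, the same pattern behaves like this (sketch; the sample path is illustrative only):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HsErrPath {
    // The same pattern the test passes to OutputAnalyzer.firstMatch(...)
    private static final Pattern HS_ERR =
            Pattern.compile("# *(\\S*hs_err_pid\\d+\\.log)");

    public static String find(String vmOutput) {
        Matcher m = HS_ERR.matcher(vmOutput);
        return m.find() ? m.group(1) : null;
    }

    public static void main(String[] args) {
        String sample = "# An error report file with more information is saved as:\n"
                      + "# /home/user/hs_err_pid6212.log\n";
        System.out.println(find(sample)); // prints /home/user/hs_err_pid6212.log
    }
}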
diff --git a/test/runtime/ErrorHandling/TestExitOnOutOfMemoryError.java b/test/runtime/ErrorHandling/TestExitOnOutOfMemoryError.java
new file mode 100644
index 000000000..a3c1ce701
--- /dev/null
+++ b/test/runtime/ErrorHandling/TestExitOnOutOfMemoryError.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestExitOnOutOfMemoryError
+ * @summary Test using -XX:ExitOnOutOfMemoryError
+ * @library /testlibrary
+ * @build jdk.test.lib.*
+ * @run driver TestExitOnOutOfMemoryError
+ * @bug 8138745
+ */
+
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class TestExitOnOutOfMemoryError {
+
+ public static void main(String[] args) throws Exception {
+ if (args.length == 1) {
+ // This is guaranteed to throw:
+ // java.lang.OutOfMemoryError: Requested array size exceeds VM limit
+ try {
+ Object[] oa = new Object[Integer.MAX_VALUE];
+ throw new Error("OOME not triggered");
+ } catch (OutOfMemoryError err) {
+ throw new Error("OOME didn't terminate JVM!");
+ }
+ }
+
+ // else this is the main test
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+ExitOnOutOfMemoryError",
+ "-Xmx64m", TestExitOnOutOfMemoryError.class.getName(), "throwOOME");
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+ /*
+ * Actual output should look like this:
+ * Terminating due to java.lang.OutOfMemoryError: Requested array size exceeds VM limit
+ */
+ output.shouldHaveExitValue(3);
+ output.shouldContain("Terminating due to java.lang.OutOfMemoryError: Requested array size exceeds VM limit");
+ System.out.println("PASSED");
+ }
+}
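The test pins the child's exit status to 3, the value this flag produces. A sketch of the same check using only the plain JDK Process API instead of the testlibrary (the ooming main class name is hypothetical):

public class ExitCodeCheck {
    public static void main(String[] args) throws Exception {
        String java = System.getProperty("java.home") + "/bin/java";
        ProcessBuilder pb = new ProcessBuilder(java,
                "-cp", System.getProperty("java.class.path"),
                "-XX:+ExitOnOutOfMemoryError", "-Xmx64m",
                "SomeOomingMain"); // hypothetical class that exhausts the heap
        int exit = pb.inheritIO().start().waitFor();
        if (exit != 3) {
            throw new AssertionError("expected exit code 3, got " + exit);
        }
    }
}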
diff --git a/test/runtime/ErrorHandling/TestOnError.java b/test/runtime/ErrorHandling/TestOnError.java
new file mode 100644
index 000000000..62d66b380
--- /dev/null
+++ b/test/runtime/ErrorHandling/TestOnError.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestOnError
+ * @summary Test using -XX:OnError=<cmd>
+ * @library /testlibrary
+ * @build TestOnError com.oracle.java.testlibrary.*
+ * @run main TestOnError
+ * @bug 8078470
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class TestOnError {
+
+ public static void main(String[] args) throws Exception {
+ if (!Platform.isDebugBuild()) {
+ System.out.println("Test requires a non-product build - skipping");
+ return;
+ }
+
+ String msg = "Test Succeeded";
+
+ // Execute a new VM so that a fatal error is triggered and the OnError command runs
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:-TransmitErrorReport",
+ "-XX:ErrorHandlerTest=12", // trigger potential SEGV
+ "-XX:OnError=echo " + msg,
+ TestOnError.class.getName());
+
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+ /* Actual output will include:
+ #
+ # -XX:OnError="echo Test Succeeded"
+ # Executing /bin/sh -c "echo Test Succeeded"...
+ Test Succeeded
+
+ Both lines go to stdout, so the match is anchored at the start of a
+ line to avoid hitting the quoted "# Executing ..." line.
+ */
+ output.stdoutShouldMatch("^" + msg); // match start of line only
+ System.out.println("PASSED");
+ }
+}
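The "^" anchor only skips the quoted "# Executing ..." line if patterns are matched per line; OutputAnalyzer presumably compiles them with Pattern.MULTILINE. A self-contained illustration of that behaviour:

import java.util.regex.Pattern;

public class LineStartMatch {
    public static void main(String[] args) {
        String out = "# Executing /bin/sh -c \"echo Test Succeeded\"...\n"
                   + "Test Succeeded\n";
        // With MULTILINE, "^" anchors at every line start, so the echoed
        // message matches while the "# Executing ..." line does not.
        boolean hit = Pattern.compile("^Test Succeeded", Pattern.MULTILINE)
                             .matcher(out).find();
        System.out.println(hit); // true (false without MULTILINE)
    }
}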
diff --git a/test/runtime/ErrorHandling/TestOnOutOfMemoryError.java b/test/runtime/ErrorHandling/TestOnOutOfMemoryError.java
new file mode 100644
index 000000000..e28233144
--- /dev/null
+++ b/test/runtime/ErrorHandling/TestOnOutOfMemoryError.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestOnOutOfMemoryError
+ * @summary Test using -XX:OnOutOfMemoryError=<cmd>
+ * @library /testlibrary
+ * @build TestOnOutOfMemoryError com.oracle.java.testlibrary.*
+ * @run main TestOnOutOfMemoryError
+ * @bug 8078470
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class TestOnOutOfMemoryError {
+
+ public static void main(String[] args) throws Exception {
+ if (args.length == 1) {
+ // This is guaranteed to throw:
+ // java.lang.OutOfMemoryError: Requested array size exceeds VM limit
+ Object[] oa = new Object[Integer.MAX_VALUE];
+ return;
+ }
+
+ // else this is the main test
+ String msg = "Test Succeeded";
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:OnOutOfMemoryError=echo " + msg,
+ TestOnOutOfMemoryError.class.getName(),
+ "throwOOME");
+
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+ /* Actual output should look like this:
+ #
+ # java.lang.OutOfMemoryError: Requested array size exceeds VM limit
+ # -XX:OnOutOfMemoryError="echo Test Succeeded"
+ # Executing /bin/sh -c "echo Test Succeeded"...
+ Test Succeeded
+ Exception in thread "main" java.lang.OutOfMemoryError: Requested array size exceeds VM limit
+ at OOME.main(OOME.java:3)
+
+ Both lines go to stdout, so the match is anchored at the start of a
+ line to avoid hitting the quoted "# Executing ..." line.
+ */
+ output.shouldContain("Requested array size exceeds VM limit");
+ output.stdoutShouldMatch("^" + msg); // match start of line only
+ System.out.println("PASSED");
+ }
+}
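As a usage aside (ours, not from the changeset): unlike ExitOnOutOfMemoryError above, OnOutOfMemoryError leaves the VM running after the command fires, which is why the classic recovery idiom attaches a kill to the hook; "%p" is substituted with the pid of the ooming VM.

public class OnOomKillIdiom {
    public static void main(String[] args) throws Exception {
        ProcessBuilder pb = new ProcessBuilder(
                System.getProperty("java.home") + "/bin/java",
                "-XX:OnOutOfMemoryError=kill -9 %p", // %p -> pid of the child VM
                "-Xmx64m",
                "MyServer"); // hypothetical main class
        pb.inheritIO().start().waitFor();
    }
}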
diff --git a/test/runtime/InitialThreadOverflow/testme.sh b/test/runtime/InitialThreadOverflow/testme.sh
index 8bdd702a0..bb3485b55 100644
--- a/test/runtime/InitialThreadOverflow/testme.sh
+++ b/test/runtime/InitialThreadOverflow/testme.sh
@@ -1,6 +1,6 @@
#!/bin/sh
-# Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
diff --git a/test/runtime/NMT/JcmdWithNMTDisabled.java b/test/runtime/NMT/JcmdWithNMTDisabled.java
index 7b58a8413..9ef374349 100644
--- a/test/runtime/NMT/JcmdWithNMTDisabled.java
+++ b/test/runtime/NMT/JcmdWithNMTDisabled.java
@@ -26,10 +26,7 @@
* @key nmt jcmd
* @summary Verify that jcmd correctly reports that NMT is not enabled
* @library /testlibrary
- * First run without enabling NMT
- * @run main/othervm JcmdWithNMTDisabled
- * Then run with explicitly disabling NMT, should not be any difference
- * @run main/othervm -XX:NativeMemoryTracking=off JcmdWithNMTDisabled
+ * @run main JcmdWithNMTDisabled 1
*/
import com.oracle.java.testlibrary.*;
@@ -39,6 +36,27 @@ public class JcmdWithNMTDisabled {
static String pid;
public static void main(String args[]) throws Exception {
+
+ // This test explicitly needs to be run with the exact command lines below;
+ // not passing on arguments from the parent VM is a conscious choice to avoid
+ // NMT being turned on.
+ if (args.length > 0) {
+ ProcessBuilder pb;
+ OutputAnalyzer output;
+ String testjdkPath = System.getProperty("test.jdk");
+
+ // First run without enabling NMT
+ pb = ProcessTools.createJavaProcessBuilder("-Dtest.jdk=" + testjdkPath, "JcmdWithNMTDisabled");
+ output = new OutputAnalyzer(pb.start());
+ output.shouldHaveExitValue(0);
+
+ // Then run with explicitly disabling NMT, should not be any difference
+ pb = ProcessTools.createJavaProcessBuilder("-Dtest.jdk=" + testjdkPath, "-XX:NativeMemoryTracking=off", "JcmdWithNMTDisabled");
+ output = new OutputAnalyzer(pb.start());
+ output.shouldHaveExitValue(0);
+
+ return;
+ }
+
// Grab my own PID
pid = Integer.toString(ProcessTools.getProcessId());
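The driver/child split above generalizes: invoked with an argument, the test acts as a driver and spawns fresh JVMs whose command lines it controls completely, so nothing inherited from the parent jtreg VM can switch NMT on. A stripped-down sketch of the pattern (class name ours):

public class DriverChildPattern {
    public static void main(String[] args) throws Exception {
        if (args.length > 0) {
            // Driver mode: launch a child whose flags we control completely.
            String java = System.getProperty("java.home") + "/bin/java";
            ProcessBuilder pb = new ProcessBuilder(java,
                    "-cp", System.getProperty("java.class.path"),
                    DriverChildPattern.class.getName()); // note: no extra flags
            if (pb.inheritIO().start().waitFor() != 0) {
                throw new AssertionError("child failed");
            }
            return;
        }
        // Child mode: runs with exactly the flags given above and nothing
        // inherited from the parent VM's command line.
        System.out.println("child ran");
    }
}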
diff --git a/test/runtime/RedefineFinalizer/RedefineFinalizer.java b/test/runtime/RedefineFinalizer/RedefineFinalizer.java
new file mode 100644
index 000000000..227b9e818
--- /dev/null
+++ b/test/runtime/RedefineFinalizer/RedefineFinalizer.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 6904403
+ * @summary Don't assert if we redefine the finalize method
+ * @library /testlibrary
+ * @build RedefineClassHelper
+ * @run main RedefineClassHelper
+ * @run main/othervm -javaagent:redefineagent.jar RedefineFinalizer
+ */
+
+/*
+ * Regression test for hitting:
+ *
+ * assert(f == k->has_finalizer()) failed: inconsistent has_finalizer
+ *
+ * when redefining the finalizer method
+ */
+public class RedefineFinalizer {
+
+ public static String newB =
+ "class RedefineFinalizer$B {" +
+ " protected void finalize() { " +
+ " System.out.println(\"Finalizer called\");" +
+ " }" +
+ "}";
+
+ public static void main(String[] args) throws Exception {
+ RedefineClassHelper.redefineClass(B.class, newB);
+
+ A a = new A();
+ }
+
+ static class A extends B {
+ }
+
+ static class B {
+ protected void finalize() {
+ // should be empty
+ }
+ }
+}
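RedefineClassHelper hides the instrumentation plumbing; under the usual java.lang.instrument mechanics (an agent whose manifest declares Can-Redefine-Classes: true, the role redefineagent.jar plays here), the redefinition step reduces to roughly this sketch:

import java.lang.instrument.ClassDefinition;
import java.lang.instrument.Instrumentation;

class RedefineSketch {
    // 'inst' must come from the agent's premain/agentmain entry point.
    static void redefine(Instrumentation inst, Class<?> clazz, byte[] newBytecode)
            throws Exception {
        inst.redefineClasses(new ClassDefinition(clazz, newBytecode));
    }
}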
diff --git a/test/runtime/RedefineTests/RedefineRunningMethodsWithResolutionErrors.java b/test/runtime/RedefineTests/RedefineRunningMethodsWithResolutionErrors.java
new file mode 100644
index 000000000..cbfb353a5
--- /dev/null
+++ b/test/runtime/RedefineTests/RedefineRunningMethodsWithResolutionErrors.java
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8076110
+ * @summary Redefine running methods that have cached resolution errors
+ * @library /testlibrary
+ * @modules java.instrument
+ * java.base/jdk.internal.org.objectweb.asm
+ * @build RedefineClassHelper
+ * @run main RedefineClassHelper
+ * @run main/othervm -javaagent:redefineagent.jar RedefineRunningMethodsWithResolutionErrors
+ */
+
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.Label;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import jdk.internal.org.objectweb.asm.Opcodes;
+
+import java.lang.reflect.InvocationTargetException;
+
+public class RedefineRunningMethodsWithResolutionErrors extends ClassLoader implements Opcodes {
+
+ @Override
+ protected Class<?> findClass(String name) throws ClassNotFoundException {
+ if (name.equals("C")) {
+ byte[] b = loadC(false);
+ return defineClass(name, b, 0, b.length);
+ } else {
+ return super.findClass(name);
+ }
+ }
+
+ private static byte[] loadC(boolean redefine) {
+ ClassWriter cw = new ClassWriter(0);
+
+ cw.visit(52, ACC_SUPER | ACC_PUBLIC, "C", null, "java/lang/Object", null);
+ {
+ MethodVisitor mv;
+
+ mv = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, "m", "()V", null, null);
+ mv.visitCode();
+
+ // First time we run we will:
+ // 1) Cache resolution errors
+ // 2) Redefine the class / method
+ // 3) Try to read the resolution errors that were cached
+ //
+ // The redefined method will never run; throw an error to be sure
+ if (redefine) {
+ createThrowRuntimeExceptionCode(mv, "The redefined method was called");
+ } else {
+ createMethodBody(mv);
+ }
+ mv.visitMaxs(3, 0);
+ mv.visitEnd();
+ }
+ cw.visitEnd();
+ return cw.toByteArray();
+ }
+
+ private static void createMethodBody(MethodVisitor mv) {
+ Label classExists = new Label();
+
+ // Cache resolution errors
+ createLoadNonExistentClassCode(mv, classExists);
+
+ // Redefine our own class and method
+ mv.visitMethodInsn(INVOKESTATIC, "RedefineRunningMethodsWithResolutionErrors", "redefine", "()V");
+
+ // Provoke the same error again to make sure the resolution error cache works
+ createLoadNonExistentClassCode(mv, classExists);
+
+ // Test passed
+ mv.visitInsn(RETURN);
+
+ mv.visitFrame(F_SAME, 0, new Object[0], 0, new Object[0]);
+ mv.visitLabel(classExists);
+
+ createThrowRuntimeExceptionCode(mv, "Loaded class that shouldn't exist (\"NonExistentClass\")");
+ }
+
+ private static void createLoadNonExistentClassCode(MethodVisitor mv, Label classExists) {
+ Label tryLoadBegin = new Label();
+ Label tryLoadEnd = new Label();
+ Label catchLoadBlock = new Label();
+ mv.visitTryCatchBlock(tryLoadBegin, tryLoadEnd, catchLoadBlock, "java/lang/NoClassDefFoundError");
+
+ // Try to load a class that does not exist to provoke resolution errors
+ mv.visitLabel(tryLoadBegin);
+ mv.visitMethodInsn(INVOKESTATIC, "NonExistentClass", "nonExistentMethod", "()V");
+ mv.visitLabel(tryLoadEnd);
+
+ // No NoClassDefFoundError means NonExistentClass existed, which shouldn't happen
+ mv.visitJumpInsn(GOTO, classExists);
+
+ mv.visitFrame(F_SAME1, 0, new Object[0], 1, new Object[] { "java/lang/NoClassDefFoundError" });
+ mv.visitLabel(catchLoadBlock);
+
+ // Ignore the expected NoClassDefFoundError
+ mv.visitInsn(POP);
+ }
+
+ private static void createThrowRuntimeExceptionCode(MethodVisitor mv, String msg) {
+ mv.visitTypeInsn(NEW, "java/lang/RuntimeException");
+ mv.visitInsn(DUP);
+ mv.visitLdcInsn(msg);
+ mv.visitMethodInsn(INVOKESPECIAL, "java/lang/RuntimeException", "<init>", "(Ljava/lang/String;)V");
+ mv.visitInsn(ATHROW);
+ }
+
+ private static Class<?> c;
+
+ public static void redefine() throws Exception {
+ RedefineClassHelper.redefineClass(c, loadC(true));
+ }
+
+ public static void main(String[] args) throws ClassNotFoundException, NoSuchMethodException, IllegalAccessException, InvocationTargetException {
+ c = Class.forName("C", true, new RedefineRunningMethodsWithResolutionErrors());
+ c.getMethod("m").invoke(null);
+ }
+}
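Separated from the ASM generation, the findClass/defineClass override above is the standard load-from-bytes technique (sketch, names ours):

public class ByteArrayLoader extends ClassLoader {
    private final String name;
    private final byte[] bytes;

    public ByteArrayLoader(String name, byte[] bytes) {
        this.name = name;
        this.bytes = bytes;
    }

    @Override
    protected Class<?> findClass(String n) throws ClassNotFoundException {
        if (n.equals(name)) {
            // Define the class directly from the in-memory byte array.
            return defineClass(n, bytes, 0, bytes.length);
        }
        return super.findClass(n);
    }
}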
diff --git a/test/runtime/handlerInTry/HandlerInTry.jasm b/test/runtime/handlerInTry/HandlerInTry.jasm
new file mode 100644
index 000000000..39c367c8f
--- /dev/null
+++ b/test/runtime/handlerInTry/HandlerInTry.jasm
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * HandlerInTry contains a try block in a ctor whose handler is inside
+ * the same try block. The try block starts at line 74 (try t2;), ends at
+ * line 106 (endtry t2;), but its handler starts at line 101 (catch t2 #0;).
+ */
+super public class HandlerInTry
+ version 51:0
+{
+
+public static final synthetic Field ___transactionFactory_2002349702336125:"Ljava/lang/Object;";
+
+public Method "<init>":"(Ljava/lang/Object;)V"
+ stack 5 locals 5
+{
+ invokestatic Method ThreadLocalTransaction.getThreadLocalTransaction:"()Ljava/lang/Object;";
+ checkcast class java/lang/Object;
+ astore_2;
+ aload_2;
+ invokestatic Method TransactionLogicDonor.isActiveTransaction:"(Ljava/lang/Object;)Z";
+ ifeq L21;
+ aload_0;
+ aload_1;
+ aload_2;
+ invokespecial Method "<init>":"(Ljava/lang/Object;Ljava/lang/Object;)V";
+ return;
+ L21: stack_frame_type append;
+ locals_map class java/lang/Object;
+ aload_2;
+ getstatic Field ___transactionFactory_2002349702336125:"Ljava/lang/Object;";
+ invokestatic Method TransactionLogicDonor.createTransaction:"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;";
+ astore_2;
+ aload_2;
+ iconst_1;
+ pop;
+ aload_2;
+ invokestatic Method ThreadLocalTransaction.setThreadLocalTransaction:"(Ljava/lang/Object;)V";
+ try t0, t1;
+ aload_0;
+ aload_1;
+ aload_2;
+ invokespecial Method "<init>":"(Ljava/lang/Object;Ljava/lang/Object;)V";
+ aload_2;
+ pop;
+ aconst_null;
+ astore_2;
+ endtry t0, t1;
+ invokestatic Method ThreadLocalTransaction.clearThreadLocalTransaction:"()V";
+ pop;
+ goto L107;
+ catch t0 java/lang/Throwable;
+ try t2;
+ stack_frame_type full;
+ locals_map bogus, class java/lang/Object, class java/lang/Object;
+ stack_map class java/lang/Throwable;
+ astore_3;
+ aload_2;
+ pop;
+ aload_3;
+ instanceof class ControlFlowError;
+ ifeq L82;
+ new class java/lang/NullPointerException;
+ dup;
+ invokespecial Method java/lang/NullPointerException."<init>":"()V";
+ athrow;
+ L82: stack_frame_type append;
+ locals_map class java/lang/Throwable;
+ aload_3;
+ instanceof class java/lang/Error;
+ ifeq L94;
+ aload_3;
+ checkcast class java/lang/Error;
+ athrow;
+ L94: stack_frame_type same;
+ aload_3;
+ checkcast class java/lang/Exception;
+ athrow;
+ catch t1 #0;
+ catch t2 #0;
+ stack_frame_type full;
+ locals_map bogus, class java/lang/Object, class java/lang/Object;
+ stack_map class java/lang/Throwable;
+ astore 4;
+ endtry t2;
+ invokestatic Method ThreadLocalTransaction.clearThreadLocalTransaction:"()V";
+ aload 4;
+ athrow;
+ L107: stack_frame_type full;
+ locals_map class HandlerInTry, class java/lang/Object, null;
+ return;
+}
+
+} // end Class HandlerInTry
diff --git a/test/runtime/handlerInTry/IsolatedHandlerInTry.jasm b/test/runtime/handlerInTry/IsolatedHandlerInTry.jasm
new file mode 100644
index 000000000..513050eff
--- /dev/null
+++ b/test/runtime/handlerInTry/IsolatedHandlerInTry.jasm
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * IsolatedHandlerInTry contains a try block in a ctor whose handler is inside
+ * the same try block but the handler can only be reached if an exception
+ * occurs. The handler does a return. So, a VerifyError should be thrown.
+ * The try block starts at line 77 (try t2;) and ends at line 113 (endtry t2;).
+ * Its handler starts at line 107 (catch t2 #0;). The handler can only be reached
+ * by exception because of the athrow at line 106.
+ */
+super public class IsolatedHandlerInTry
+ version 51:0
+{
+
+public static final synthetic Field ___transactionFactory_2002349702336125:"Ljava/lang/Object;";
+
+public Method "<init>":"(Ljava/lang/Object;)V"
+ stack 5 locals 5
+{
+ invokestatic Method ThreadLocalTransaction.getThreadLocalTransaction:"()Ljava/lang/Object;";
+ checkcast class java/lang/Object;
+ astore_2;
+ aload_2;
+ invokestatic Method TransactionLogicDonor.isActiveTransaction:"(Ljava/lang/Object;)Z";
+ ifeq L21;
+ aload_0;
+ aload_1;
+ aload_2;
+ invokespecial Method "<init>":"(Ljava/lang/Object;Ljava/lang/Object;)V";
+ return;
+ L21: stack_frame_type append;
+ locals_map class java/lang/Object;
+ aload_2;
+ getstatic Field ___transactionFactory_2002349702336125:"Ljava/lang/Object;";
+ invokestatic Method TransactionLogicDonor.createTransaction:"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;";
+ astore_2;
+ aload_2;
+ iconst_1;
+ pop;
+ aload_2;
+ invokestatic Method ThreadLocalTransaction.setThreadLocalTransaction:"(Ljava/lang/Object;)V";
+ try t0, t1;
+ aload_0;
+ aload_1;
+ aload_2;
+ invokespecial Method "<init>":"(Ljava/lang/Object;Ljava/lang/Object;)V";
+ aload_2;
+ pop;
+ aconst_null;
+ astore_2;
+ endtry t0, t1;
+ invokestatic Method ThreadLocalTransaction.clearThreadLocalTransaction:"()V";
+ pop;
+ goto L107;
+ catch t0 java/lang/Throwable;
+ try t2;
+ stack_frame_type full;
+ locals_map bogus, class java/lang/Object, class java/lang/Object;
+ stack_map class java/lang/Throwable;
+ astore_3;
+ aload_2;
+ pop;
+ aload_3;
+ instanceof class ControlFlowError;
+ ifeq L82;
+ new class java/lang/NullPointerException;
+ dup;
+ invokespecial Method java/lang/NullPointerException."<init>":"()V";
+ athrow;
+ L82: stack_frame_type append;
+ locals_map class java/lang/Throwable;
+ aload_3;
+ instanceof class java/lang/Error;
+ ifeq L94;
+ aload_3;
+ checkcast class java/lang/Error;
+ athrow;
+ L94: stack_frame_type same;
+ aload_3;
+ checkcast class java/lang/Exception;
+ catch t1 #0;
+ stack_frame_type full;
+ locals_map bogus, class java/lang/Object, class java/lang/Object;
+ stack_map class java/lang/Throwable;
+ athrow;
+ catch t2 #0;
+ stack_frame_type full;
+ locals_map bogus, class java/lang/Object, class java/lang/Object;
+ stack_map class java/lang/Throwable;
+ astore 4;
+ return;
+ endtry t2;
+ stack_frame_type full;
+ locals_map bogus, class java/lang/Object, class java/lang/Object, class java/lang/Object;
+ stack_map class java/lang/Throwable;
+ invokestatic Method ThreadLocalTransaction.clearThreadLocalTransaction:"()V";
+ athrow;
+ L107: stack_frame_type full;
+ locals_map class IsolatedHandlerInTry, class java/lang/Object, null;
+ return;
+}
+
+} // end Class IsolatedHandlerInTry
diff --git a/test/runtime/handlerInTry/LoadHandlerInTry.java b/test/runtime/handlerInTry/LoadHandlerInTry.java
new file mode 100644
index 000000000..5fc726879
--- /dev/null
+++ b/test/runtime/handlerInTry/LoadHandlerInTry.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8075118
+ * @summary Allow a ctor to call super() from a switch bytecode.
+ * @compile HandlerInTry.jasm
+ * @compile IsolatedHandlerInTry.jasm
+ * @run main/othervm -Xverify:all LoadHandlerInTry
+ */
+
+/*
+ * This test has two cases:
+ *
+ * 1. class HandlerInTry: Class HandlerInTry contains a TRY block in a
+ * constructor whose handler is inside the same TRY block. The last
+ * few bytecodes and exception table look like this:
+ *
+ * ...
+ * 87: athrow
+ * 88: astore 4
+ * 90: invokestatic #9
+ * 93: aload 4
+ * 95: athrow
+ * 96: return
+ * Exception table:
+ * from to target type
+ * 36 46 53 Class java/lang/Throwable
+ * 36 46 88 any
+ * 53 90 88 any
+ *
+ * Note that the target for the third handler in the Exception table is
+ * inside its TRY block.
+ * Without the fix for bug JDK-8075118, this test will time out.
+ *
+ *
+ * 2. class IsolatedHandlerInTry: Class IsolatedHandlerInTry also contains
+ * a TRY block in a constructor whose handler is inside its TRY block.
+ * But the handler is only reachable if an exception is thrown. The
+ * handler's bytecodes will not get parsed as part of parsing the TRY
+ * block. They will only get parsed as a handler for the TRY block.
+ * Since the isolated handler does a 'return', a VerifyError exception
+ * should get thrown.
+ */
+
+public class LoadHandlerInTry {
+
+ public static void main(String[] args) throws Exception {
+ System.out.println("Regression test for bug 8075118");
+ try {
+ Class newClass = Class.forName("HandlerInTry");
+ } catch (Exception e) {
+ System.out.println("Failed: Exception was thrown: " + e.toString());
+ throw e;
+ }
+
+ try {
+ Class newClass = Class.forName("IsolatedHandlerInTry");
+ throw new RuntimeException(
+ "Failed to throw VerifyError for IsolatedHandlerInTry");
+ } catch (java.lang.VerifyError e) {
+ System.out.println("Passed: VerifyError exception was thrown");
+ }
+ }
+}
diff --git a/test/runtime/invokedynamic/BootstrapMethodErrorTest.java b/test/runtime/invokedynamic/BootstrapMethodErrorTest.java
new file mode 100644
index 000000000..558177185
--- /dev/null
+++ b/test/runtime/invokedynamic/BootstrapMethodErrorTest.java
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8051045
+ * @summary Test that exceptions from invokedynamic are wrapped in BootstrapMethodError
+ * @modules java.base/jdk.internal.org.objectweb.asm
+ * @run main BootstrapMethodErrorTest
+ */
+
+import java.lang.reflect.Method;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import static java.lang.invoke.MethodHandles.*;
+import static java.lang.invoke.MethodType.*;
+
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.Handle;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import jdk.internal.org.objectweb.asm.Opcodes;
+
+public class BootstrapMethodErrorTest extends ClassLoader implements Opcodes {
+
+ @Override
+ public Class findClass(String name) throws ClassNotFoundException {
+ byte[] b;
+ try {
+ b = loadClassData(name);
+ } catch (Throwable th) {
+ throw new ClassNotFoundException("Loading error", th);
+ }
+ return defineClass(name, b, 0, b.length);
+ }
+
+ private byte[] loadClassData(String name) throws Exception {
+ ClassWriter cw = new ClassWriter(0);
+ MethodVisitor mv;
+
+ if (name.equals("C")) {
+ cw.visit(52, ACC_SUPER | ACC_PUBLIC, "C", null, "java/lang/Object", null);
+ {
+ mv = cw.visitMethod(ACC_PRIVATE | ACC_STATIC, "m", "()V", null, null);
+ mv.visitCode();
+ mv.visitInsn(RETURN);
+ mv.visitMaxs(0, 1);
+ mv.visitEnd();
+ }
+ cw.visitEnd();
+ return cw.toByteArray();
+ } else if (name.equals("Exec")) {
+ cw.visit(52, ACC_SUPER | ACC_PUBLIC, "Exec", null, "java/lang/Object", null);
+ {
+ mv = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, "invokeRef", "()V", null, null);
+ mv.visitCode();
+ Handle h = new Handle(H_INVOKESTATIC, "C", "m", "()V");
+ mv.visitInvokeDynamicInsn("C", "()V", h);
+ mv.visitInsn(RETURN);
+ mv.visitMaxs(0, 0);
+ mv.visitEnd();
+ }
+ cw.visitEnd();
+ return cw.toByteArray();
+ }
+ return null;
+ }
+
+ public static void main(String[] args) throws ClassNotFoundException, IllegalAccessException, NoSuchMethodException {
+ new BootstrapMethodErrorTest().test();
+ }
+
+ public void test() throws ClassNotFoundException, IllegalAccessException, NoSuchMethodException {
+ Class.forName("C", true, this);
+ Class<?> exec = Class.forName("Exec", true, this);
+
+ try {
+ exec.getMethod("invokeRef").invoke(null);
+ } catch (Throwable e) {
+ Throwable c = e.getCause();
+ if (c == null) {
+ throw new RuntimeException(
+ "Expected BootstrapMethodError wrapped in an InvocationTargetException but it wasn't wrapped", e);
+ } else if (c instanceof BootstrapMethodError) {
+ // Only way to pass test, all else should throw
+ return;
+ } else {
+ throw new RuntimeException(
+ "Expected BootstrapMethodError but got another Error: "
+ + c.getClass().getName(),
+ c);
+ }
+ }
+ throw new RuntimeException("Expected BootstrapMethodError but no Error at all was thrown");
+ }
+}
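The Handle pointing at C.m()V deliberately has the wrong shape for a bootstrap method, so linkage fails and the JVM wraps the failure in BootstrapMethodError. For contrast, a well-formed bootstrap method must be linkable as (Lookup, String, MethodType[, extra args]) -> CallSite (sketch, names ours):

import java.lang.invoke.CallSite;
import java.lang.invoke.ConstantCallSite;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

class Bsm {
    public static CallSite bootstrap(MethodHandles.Lookup lookup, String name,
                                     MethodType type) throws Exception {
        // Bind the dynamic call site permanently to Bsm.target.
        return new ConstantCallSite(lookup.findStatic(Bsm.class, "target", type));
    }

    public static void target() {
        // invoked in place of the invokedynamic instruction
    }
}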
diff --git a/test/runtime/lambda-features/InvokespecialInterface.java b/test/runtime/lambda-features/InvokespecialInterface.java
index 345d94d5b..3b32dd7e5 100644
--- a/test/runtime/lambda-features/InvokespecialInterface.java
+++ b/test/runtime/lambda-features/InvokespecialInterface.java
@@ -28,7 +28,7 @@
* @bug 8025937
* @bug 8033528
* @summary [JDK 8] Test invokespecial and invokeinterface with the same JVM_CONSTANT_InterfaceMethodref
- * @run main/othervm -XX:+StressRewriter InvokespecialInterface
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+StressRewriter InvokespecialInterface
*/
import java.util.function.*;
import java.util.*;
diff --git a/test/runtime/memory/LargePages/TestLargePageSizeInBytes.java b/test/runtime/memory/LargePages/TestLargePageSizeInBytes.java
new file mode 100644
index 000000000..0f90d5fb1
--- /dev/null
+++ b/test/runtime/memory/LargePages/TestLargePageSizeInBytes.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test TestLargePageSizeInBytes
+ * @summary Tests that the flag -XX:LargePageSizeInBytes does not cause warnings on Solaris
+ * @bug 8049536
+ * @library /testlibrary
+ * @run driver TestLargePageSizeInBytes
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.ProcessTools;
+
+public class TestLargePageSizeInBytes {
+ private static long M = 1024L * 1024L;
+ private static long G = 1024L * M;
+
+ public static void main(String[] args) throws Exception {
+ if (!Platform.isSolaris()) {
+ // We only use the memcntl syscall on Solaris
+ return;
+ }
+
+ testLargePageSizeInBytes(4 * M);
+ testLargePageSizeInBytes(256 * M);
+ testLargePageSizeInBytes(512 * M);
+ testLargePageSizeInBytes(2 * G);
+ }
+
+ private static void testLargePageSizeInBytes(long size) throws Exception {
+ ProcessBuilder pb =
+ ProcessTools.createJavaProcessBuilder("-XX:+UseLargePages",
+ "-XX:LargePageSizeInBytes=" + size,
+ "-version");
+
+ OutputAnalyzer out = new OutputAnalyzer(pb.start());
+ out.shouldNotContain("Attempt to use MPSS failed.");
+ out.shouldHaveExitValue(0);
+ }
+}
diff --git a/test/runtime/memory/ReadVMPageSize.java b/test/runtime/memory/ReadVMPageSize.java
new file mode 100644
index 000000000..a32822040
--- /dev/null
+++ b/test/runtime/memory/ReadVMPageSize.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Using WhiteBox to get VM page size
+ * @library /testlibrary /testlibrary/whitebox
+ * @build ReadVMPageSize
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ReadVMPageSize
+ */
+
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class ReadVMPageSize {
+ public static void main(String args[]) throws Exception {
+ WhiteBox wb = WhiteBox.getWhiteBox();
+ int pageSize = wb.getVMPageSize();
+ if (pageSize < 0) {
+ throw new Exception("pageSize < 0");
+ } else {
+ System.out.println("Page size = " + pageSize);
+ }
+ }
+}
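
The new WhiteBox page-size probe can be cross-checked against sun.misc.Unsafe.pageSize(), which reports the OS page size without any WhiteBox setup. A minimal sketch, assuming the testlibrary's Utils.getUnsafe() helper visible in the Utils.java hunk further down:

    import sun.misc.Unsafe;
    import com.oracle.java.testlibrary.Utils;

    public class PageSizeCrossCheck {
        public static void main(String[] args) throws Exception {
            Unsafe unsafe = Utils.getUnsafe();
            // Unsafe.pageSize() returns the same OS page size the VM sees,
            // so it can serve as a plausibility check for wb.getVMPageSize().
            System.out.println("Unsafe page size = " + unsafe.pageSize());
        }
    }
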
diff --git a/test/runtime/stackMapCheck/BadMap.jasm b/test/runtime/stackMapCheck/BadMap.jasm
new file mode 100644
index 000000000..2a4c7856f
--- /dev/null
+++ b/test/runtime/stackMapCheck/BadMap.jasm
@@ -0,0 +1,152 @@
+ /*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * This class should throw VerifyError because the StackMap for bytecode index
+ * 45 (astore_2, line 123) is incorrect. The stack maps for bytecode indexes 45
+ * and 49 (astore, line 133) do not match because 45 does not supply enough
+ * locals to satisfy 49.
+ *
+ * The astore_2 bytecode at bytecode index 45 changes the type state,
+ * preventing detection of the stackmap mismatch. But, if the incoming type
+ * state is used, as required by JVM Spec 8, then the verifier will detect the
+ * stackmap mismatch and throw VerifyError.
+ */
+
+super public class BadMap
+ version 51:0
+{
+
+
+public Method "<init>":"()V"
+ stack 1 locals 1
+{
+ aload_0;
+ invokespecial Method java/lang/Object."<init>":"()V";
+ return;
+}
+
+public static Method main:"([Ljava/lang/String;)V"
+ throws java/lang/Throwable
+ stack 0 locals 1
+{
+ return;
+}
+
+public static Method foo:"()V"
+ stack 3 locals 5
+{
+ iconst_0;
+ ifne L5;
+ nop;
+ try t7;
+ L5: stack_frame_type full;
+ aconst_null;
+ dup;
+ astore_0;
+ astore_1;
+ try t0;
+ aconst_null;
+ astore_0;
+ endtry t0;
+ goto L19;
+ catch t0 java/io/IOException;
+ stack_frame_type full;
+ locals_map class java/lang/Object, null;
+ stack_map class java/io/IOException;
+ astore_2;
+ aconst_null;
+ dup;
+ astore_1;
+ astore_0;
+ try t1;
+ L19: stack_frame_type full;
+ locals_map class java/lang/Object, class java/lang/Object;
+ aconst_null;
+ astore_2;
+ endtry t1;
+ aload_1;
+ ifnonnull L37;
+ nop;
+ goto L37;
+ catch t1 #0;
+ catch t2 #0;
+ try t2;
+ stack_frame_type full;
+ locals_map class java/lang/Object, class java/lang/Object;
+ stack_map class java/lang/Throwable;
+ astore_3;
+ endtry t2;
+ aload_1;
+ ifnonnull L35;
+ nop;
+ L35: stack_frame_type full;
+ locals_map class java/lang/Object, class java/lang/Object, bogus, class java/lang/Throwable;
+ aload_3;
+ athrow;
+ try t3, t4;
+ L37: stack_frame_type full;
+ locals_map class java/lang/Object, class java/lang/Object, class java/lang/Object;
+ aload_1;
+ ifnonnull L42;
+ nop;
+ endtry t3, t4;
+ L42: stack_frame_type full;
+ locals_map class java/lang/Object, class java/lang/Object, class java/lang/Object;
+ goto L54;
+ catch t3 java/lang/Exception;
+ try t5;
+ stack_frame_type full;
+ locals_map class java/lang/Object, class java/lang/Object;
+ stack_map class java/lang/Exception;
+ astore_2; // The astore_2 at bci 45 that changes the type state.
+ endtry t5;
+ goto L54;
+ catch t4 #0;
+ catch t5 #0;
+ catch t6 #0;
+ try t6;
+ stack_frame_type full;
+ locals_map class java/lang/Object, class java/lang/Object, class java/lang/Object;
+ stack_map class java/lang/Throwable;
+ astore 4;
+ endtry t6;
+ aload 4;
+ athrow;
+ L54: stack_frame_type full;
+ locals_map class java/lang/Object, class java/lang/Object, class java/lang/Object;
+ goto L57;
+ L57: stack_frame_type full;
+ locals_map class java/lang/Object, class java/lang/Object, class java/lang/Object;
+ nop;
+ endtry t7;
+ return;
+ catch t7 #0;
+ stack_frame_type full;
+ stack_map class java/lang/Throwable;
+ nop;
+ athrow;
+}
+
+} // end Class BadMap
diff --git a/test/runtime/stackMapCheck/BadMapDstore.jasm b/test/runtime/stackMapCheck/BadMapDstore.jasm
new file mode 100644
index 000000000..16797fb9e
--- /dev/null
+++ b/test/runtime/stackMapCheck/BadMapDstore.jasm
@@ -0,0 +1,79 @@
+ /*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * This class should throw VerifyError because the StackMap for bytecode index
+ * 9 (dstore_2, line 60) is incorrect. The stack maps for bytecode indexes 9
+ * and 18 (astore_2, line 70) do not match because 9 does not supply enough
+ * locals to satisfy 18.
+ *
+ * The dstore_2 bytecode at bytecode index 9 changes the type state,
+ * preventing detection of the stackmap mismatch. But, if the incoming type
+ * state is used, as required by JVM Spec 8, then the verifier will detect the
+ * stackmap mismatch and throw VerifyError.
+ */
+
+super public class BadMapDstore
+ version 51:0
+{
+
+Field blah:I;
+
+public Method "<init>":"()V"
+ stack 1 locals 1
+{
+ aload_0;
+ invokespecial Method java/lang/Object."<init>":"()V";
+ return;
+}
+
+public static Method main:"([Ljava/lang/String;)V"
+ stack 4 locals 4
+{
+ new class BadMapDstore;
+ dup;
+ invokespecial Method "<init>":"()V";
+ astore_1;
+ dconst_1;
+ try t0;
+ dstore_2;
+ aload_1;
+ iconst_5;
+ putfield Field blah:"I";
+ endtry t0;
+ goto L22;
+ catch t0 java/lang/Throwable;
+ stack_frame_type full;
+ locals_map class "[Ljava/lang/String;", class BadMapDstore, double;
+ stack_map class java/lang/Throwable;
+ astore_2;
+ aload_1;
+ dconst_0;
+ dstore_2;
+ pop;
+ L22: stack_frame_type same;
+ return;
+}
+
+} // end Class BadMapDstore
diff --git a/test/runtime/stackMapCheck/BadMapIstore.jasm b/test/runtime/stackMapCheck/BadMapIstore.jasm
new file mode 100644
index 000000000..63eeaf358
--- /dev/null
+++ b/test/runtime/stackMapCheck/BadMapIstore.jasm
@@ -0,0 +1,79 @@
+ /*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * This class should throw VerifyError because the StackMap for bytecode index
+ * 9 (istore_2, line 60) is incorrect. The stack maps for bytecode indexes 9
+ * and 18 (astore_2, line 70) do not match because 9 does not supply enough
+ * locals to satisfy 18.
+ *
+ * The istore_2 bytecode at bytecode index 9 changes the type state,
+ * preventing detection of the stackmap mismatch. But, if the incoming type
+ * state is used, as required by JVM Spec 8, then the verifier will detect the
+ * stackmap mismatch and throw VerifyError.
+ */
+
+super public class BadMapIstore
+ version 51:0
+{
+
+Field blah:I;
+
+public Method "<init>":"()V"
+ stack 1 locals 1
+{
+ aload_0;
+ invokespecial Method java/lang/Object."<init>":"()V";
+ return;
+}
+
+public static Method main:"([Ljava/lang/String;)V"
+ stack 2 locals 3
+{
+ new class BadMapIstore;
+ dup;
+ invokespecial Method "<init>":"()V";
+ astore_1;
+ iconst_2;
+ try t0;
+ istore_2;
+ aload_1;
+ iconst_5;
+ putfield Field blah:"I";
+ endtry t0;
+ goto L22;
+ catch t0 java/lang/Throwable;
+ stack_frame_type full;
+ locals_map class "[Ljava/lang/String;", class BadMapIstore, int;
+ stack_map class java/lang/Throwable;
+ astore_2;
+ aload_1;
+ iconst_4;
+ istore_2;
+ pop;
+ L22: stack_frame_type same;
+ return;
+}
+
+} // end Class BadMapIstore
diff --git a/test/runtime/stackMapCheck/StackMapCheck.java b/test/runtime/stackMapCheck/StackMapCheck.java
new file mode 100644
index 000000000..56d171585
--- /dev/null
+++ b/test/runtime/stackMapCheck/StackMapCheck.java
@@ -0,0 +1,63 @@
+ /*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 7127066
+ * @summary Class verifier accepts an invalid class file
+ * @compile BadMap.jasm
+ * @compile BadMapDstore.jasm
+ * @compile BadMapIstore.jasm
+ * @run main/othervm -Xverify:all StackMapCheck
+ */
+
+public class StackMapCheck {
+ public static void main(String args[]) throws Throwable {
+
+ System.out.println("Regression test for bug 7127066");
+ try {
+ Class newClass = Class.forName("BadMap");
+ throw new RuntimeException(
+ "StackMapCheck failed, BadMap did not throw VerifyError");
+ } catch (java.lang.VerifyError e) {
+ System.out.println("BadMap passed, VerifyError was thrown");
+ }
+
+ try {
+ Class newClass = Class.forName("BadMapDstore");
+ throw new RuntimeException(
+ "StackMapCheck failed, BadMapDstore did not throw VerifyError");
+ } catch (java.lang.VerifyError e) {
+ System.out.println("BadMapDstore passed, VerifyError was thrown");
+ }
+
+ try {
+ Class newClass = Class.forName("BadMapIstore");
+ throw new RuntimeException(
+ "StackMapCheck failed, BadMapIstore did not throw VerifyError");
+ } catch (java.lang.VerifyError e) {
+ System.out.println("BadMapIstore passed, VerifyError was thrown");
+ }
+ }
+}
diff --git a/test/serviceability/jvmti/GetObjectSizeOverflow.java b/test/serviceability/jvmti/GetObjectSizeOverflow.java
index 9acefe511..3f0aad09e 100644
--- a/test/serviceability/jvmti/GetObjectSizeOverflow.java
+++ b/test/serviceability/jvmti/GetObjectSizeOverflow.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/test/serviceability/sa/jmap-hashcode/Test8028623.java b/test/serviceability/sa/jmap-hashcode/Test8028623.java
index 6f4b2fd68..cbf98473d 100644
--- a/test/serviceability/sa/jmap-hashcode/Test8028623.java
+++ b/test/serviceability/sa/jmap-hashcode/Test8028623.java
@@ -33,20 +33,25 @@
import com.oracle.java.testlibrary.JDKToolLauncher;
import com.oracle.java.testlibrary.OutputBuffer;
+import com.oracle.java.testlibrary.Platform;
import com.oracle.java.testlibrary.ProcessTools;
import java.io.File;
public class Test8028623 {
- public static int Ë = 1;
+ public static int \u00CB = 1;
public static String dumpFile = "heap.out";
public static void main (String[] args) {
- System.out.println(Ë);
+ System.out.println(\u00CB);
try {
+ if (!Platform.shouldSAAttach()) {
+ System.out.println("SA attach not expected to work - test skipped.");
+ return;
+ }
int pid = ProcessTools.getProcessId();
JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
.addToolArg("-F")
diff --git a/test/test_env.sh b/test/test_env.sh
index fa912dde2..27c653bf1 100644
--- a/test/test_env.sh
+++ b/test/test_env.sh
@@ -1,6 +1,6 @@
#!/bin/sh
#
-# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@ echo "TESTCLASSES=${TESTCLASSES}"
# set platform-dependent variables
OS=`uname -s`
case "$OS" in
- SunOS | Linux | Darwin )
+ AIX | Darwin | Linux | SunOS )
NULL=/dev/null
PS=":"
FS="/"
@@ -130,25 +130,30 @@ then
fi
VM_OS="unknown"
-grep "solaris" vm_version.out > ${NULL}
+grep "aix" vm_version.out > ${NULL}
if [ $? = 0 ]
then
- VM_OS="solaris"
+ VM_OS="aix"
+fi
+grep "bsd" vm_version.out > ${NULL}
+if [ $? = 0 ]
+then
+ VM_OS="bsd"
fi
grep "linux" vm_version.out > ${NULL}
if [ $? = 0 ]
then
VM_OS="linux"
fi
-grep "windows" vm_version.out > ${NULL}
+grep "solaris" vm_version.out > ${NULL}
if [ $? = 0 ]
then
- VM_OS="windows"
+ VM_OS="solaris"
fi
-grep "bsd" vm_version.out > ${NULL}
+grep "windows" vm_version.out > ${NULL}
if [ $? = 0 ]
then
- VM_OS="bsd"
+ VM_OS="windows"
fi
VM_CPU="unknown"
@@ -186,6 +191,11 @@ if [ $? = 0 ]
then
VM_CPU="ia64"
fi
+grep "aarch64" vm_version.out > ${NULL}
+if [ $? = 0 ]
+then
+ VM_CPU="aarch64"
+fi
export VM_TYPE VM_BITS VM_OS VM_CPU
echo "VM_TYPE=${VM_TYPE}"
echo "VM_BITS=${VM_BITS}"
diff --git a/test/testlibrary/RedefineClassHelper.java b/test/testlibrary/RedefineClassHelper.java
new file mode 100644
index 000000000..accc5447a
--- /dev/null
+++ b/test/testlibrary/RedefineClassHelper.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.io.PrintWriter;
+import java.lang.instrument.*;
+import com.oracle.java.testlibrary.*;
+
+/*
+ * Helper class to write tests that redefine classes.
+ * When the main method is run, it creates a redefineagent.jar that can be used
+ * with the -javaagent option to support redefining classes in jtreg tests.
+ *
+ * See sample test in test/testlibrary_tests/RedefineClassTest.java
+ */
+public class RedefineClassHelper {
+
+ public static Instrumentation instrumentation;
+ public static void premain(String agentArgs, Instrumentation inst) {
+ instrumentation = inst;
+ }
+
+ /**
+ * Redefine a class
+ *
+ * @param clazz Class to redefine
+ * @param javacode String with the new java code for the class to be redefined
+ */
+ public static void redefineClass(Class clazz, String javacode) throws Exception {
+ byte[] bytecode = InMemoryJavaCompiler.compile(clazz.getName(), javacode);
+ redefineClass(clazz, bytecode);
+ }
+
+ /**
+ * Redefine a class
+ *
+ * @param clazz Class to redefine
+ * @param bytecode byte[] with the new class
+ */
+ public static void redefineClass(Class clazz, byte[] bytecode) throws Exception {
+ instrumentation.redefineClasses(new ClassDefinition(clazz, bytecode));
+ }
+
+ /**
+ * Main method to be invoked before test to create the redefineagent.jar
+ */
+ public static void main(String[] args) throws Exception {
+ ClassFileInstaller.main("RedefineClassHelper");
+
+ PrintWriter pw = new PrintWriter("MANIFEST.MF");
+ pw.println("Premain-Class: RedefineClassHelper");
+ pw.println("Can-Redefine-Classes: true");
+ pw.close();
+
+ sun.tools.jar.Main jarTool = new sun.tools.jar.Main(System.out, System.err, "jar");
+ if (!jarTool.run(new String[] { "-cmf", "MANIFEST.MF", "redefineagent.jar", "RedefineClassHelper.class" })) {
+ throw new Exception("jar operation failed");
+ }
+ }
+}
diff --git a/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java b/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java
index 73b65165e..b81f21a01 100644
--- a/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java
+++ b/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java
@@ -74,11 +74,12 @@ public final class OutputAnalyzer {
* @param expectedString String that buffer should contain
* @throws RuntimeException If the string was not found
*/
- public void shouldContain(String expectedString) {
+ public OutputAnalyzer shouldContain(String expectedString) {
if (!stdout.contains(expectedString) && !stderr.contains(expectedString)) {
reportDiagnosticSummary();
throw new RuntimeException("'" + expectedString + "' missing from stdout/stderr \n");
}
+ return this;
}
/**
@@ -87,11 +88,12 @@ public final class OutputAnalyzer {
* @param expectedString String that buffer should contain
* @throws RuntimeException If the string was not found
*/
- public void stdoutShouldContain(String expectedString) {
+ public OutputAnalyzer stdoutShouldContain(String expectedString) {
if (!stdout.contains(expectedString)) {
reportDiagnosticSummary();
throw new RuntimeException("'" + expectedString + "' missing from stdout \n");
}
+ return this;
}
/**
@@ -100,11 +102,12 @@ public final class OutputAnalyzer {
* @param expectedString String that buffer should contain
* @throws RuntimeException If the string was not found
*/
- public void stderrShouldContain(String expectedString) {
+ public OutputAnalyzer stderrShouldContain(String expectedString) {
if (!stderr.contains(expectedString)) {
reportDiagnosticSummary();
throw new RuntimeException("'" + expectedString + "' missing from stderr \n");
}
+ return this;
}
/**
@@ -113,7 +116,7 @@ public final class OutputAnalyzer {
* @param expectedString String that the buffer should not contain
* @throws RuntimeException If the string was found
*/
- public void shouldNotContain(String notExpectedString) {
+ public OutputAnalyzer shouldNotContain(String notExpectedString) {
if (stdout.contains(notExpectedString)) {
reportDiagnosticSummary();
throw new RuntimeException("'" + notExpectedString + "' found in stdout \n");
@@ -122,6 +125,7 @@ public final class OutputAnalyzer {
reportDiagnosticSummary();
throw new RuntimeException("'" + notExpectedString + "' found in stderr \n");
}
+ return this;
}
/**
@@ -130,11 +134,12 @@ public final class OutputAnalyzer {
* @param expectedString String that the buffer should not contain
* @throws RuntimeException If the string was found
*/
- public void stdoutShouldNotContain(String notExpectedString) {
+ public OutputAnalyzer stdoutShouldNotContain(String notExpectedString) {
if (stdout.contains(notExpectedString)) {
reportDiagnosticSummary();
throw new RuntimeException("'" + notExpectedString + "' found in stdout \n");
}
+ return this;
}
/**
@@ -143,11 +148,12 @@ public final class OutputAnalyzer {
* @param expectedString String that the buffer should not contain
* @throws RuntimeException If the string was found
*/
- public void stderrShouldNotContain(String notExpectedString) {
+ public OutputAnalyzer stderrShouldNotContain(String notExpectedString) {
if (stderr.contains(notExpectedString)) {
reportDiagnosticSummary();
throw new RuntimeException("'" + notExpectedString + "' found in stderr \n");
}
+ return this;
}
/**
@@ -157,7 +163,7 @@ public final class OutputAnalyzer {
* @param pattern
* @throws RuntimeException If the pattern was not found
*/
- public void shouldMatch(String pattern) {
+ public OutputAnalyzer shouldMatch(String pattern) {
Matcher stdoutMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout);
Matcher stderrMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
if (!stdoutMatcher.find() && !stderrMatcher.find()) {
@@ -165,6 +171,7 @@ public final class OutputAnalyzer {
throw new RuntimeException("'" + pattern
+ "' missing from stdout/stderr \n");
}
+ return this;
}
/**
@@ -174,13 +181,14 @@ public final class OutputAnalyzer {
* @param pattern
* @throws RuntimeException If the pattern was not found
*/
- public void stdoutShouldMatch(String pattern) {
+ public OutputAnalyzer stdoutShouldMatch(String pattern) {
Matcher matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout);
if (!matcher.find()) {
reportDiagnosticSummary();
throw new RuntimeException("'" + pattern
+ "' missing from stdout \n");
}
+ return this;
}
/**
@@ -190,13 +198,14 @@ public final class OutputAnalyzer {
* @param pattern
* @throws RuntimeException If the pattern was not found
*/
- public void stderrShouldMatch(String pattern) {
+ public OutputAnalyzer stderrShouldMatch(String pattern) {
Matcher matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
if (!matcher.find()) {
reportDiagnosticSummary();
throw new RuntimeException("'" + pattern
+ "' missing from stderr \n");
}
+ return this;
}
/**
@@ -206,7 +215,7 @@ public final class OutputAnalyzer {
* @param pattern
* @throws RuntimeException If the pattern was found
*/
- public void shouldNotMatch(String pattern) {
+ public OutputAnalyzer shouldNotMatch(String pattern) {
Matcher matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout);
if (matcher.find()) {
reportDiagnosticSummary();
@@ -219,6 +228,7 @@ public final class OutputAnalyzer {
throw new RuntimeException("'" + pattern
+ "' found in stderr: '" + matcher.group() + "' \n");
}
+ return this;
}
/**
@@ -228,13 +238,14 @@ public final class OutputAnalyzer {
* @param pattern
* @throws RuntimeException If the pattern was found
*/
- public void stdoutShouldNotMatch(String pattern) {
+ public OutputAnalyzer stdoutShouldNotMatch(String pattern) {
Matcher matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout);
if (matcher.find()) {
reportDiagnosticSummary();
throw new RuntimeException("'" + pattern
+ "' found in stdout \n");
}
+ return this;
}
/**
@@ -244,13 +255,14 @@ public final class OutputAnalyzer {
* @param pattern
* @throws RuntimeException If the pattern was found
*/
- public void stderrShouldNotMatch(String pattern) {
+ public OutputAnalyzer stderrShouldNotMatch(String pattern) {
Matcher matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
if (matcher.find()) {
reportDiagnosticSummary();
throw new RuntimeException("'" + pattern
+ "' found in stderr \n");
}
+ return this;
}
/**
@@ -290,12 +302,13 @@ public final class OutputAnalyzer {
* @param expectedExitValue Expected exit value from process
* @throws RuntimeException If the exit value from the process did not match the expected value
*/
- public void shouldHaveExitValue(int expectedExitValue) {
+ public OutputAnalyzer shouldHaveExitValue(int expectedExitValue) {
if (getExitValue() != expectedExitValue) {
reportDiagnosticSummary();
throw new RuntimeException("Expected to get exit value of ["
+ expectedExitValue + "]\n");
}
+ return this;
}
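
The point of having every check return this is fluent chaining: a pipeline of assertions reads as one expression instead of repeated statements. A minimal sketch of the resulting call style (the flag and the matched strings are illustrative, not taken from the commit):

    import com.oracle.java.testlibrary.OutputAnalyzer;
    import com.oracle.java.testlibrary.ProcessTools;

    public class FluentChecks {
        public static void main(String[] args) throws Exception {
            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-version");
            // Each assertion returns the analyzer, so the checks chain.
            new OutputAnalyzer(pb.start())
                .shouldHaveExitValue(0)
                .shouldNotContain("Error occurred during initialization of VM")
                .shouldMatch("version \"?\\d");
        }
    }
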
diff --git a/test/testlibrary/com/oracle/java/testlibrary/Platform.java b/test/testlibrary/com/oracle/java/testlibrary/Platform.java
index fa4b36361..93841bf59 100644
--- a/test/testlibrary/com/oracle/java/testlibrary/Platform.java
+++ b/test/testlibrary/com/oracle/java/testlibrary/Platform.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,12 +23,17 @@
package com.oracle.java.testlibrary;
+import java.util.regex.Pattern;
+
+import com.oracle.java.testlibrary.Utils;
+
public class Platform {
private static final String osName = System.getProperty("os.name");
private static final String dataModel = System.getProperty("sun.arch.data.model");
private static final String vmVersion = System.getProperty("java.vm.version");
private static final String osArch = System.getProperty("os.arch");
private static final String vmName = System.getProperty("java.vm.name");
+ private static final String userName = System.getProperty("user.name");
public static boolean isClient() {
return vmName.endsWith(" Client VM");
@@ -58,20 +63,24 @@ public class Platform {
return dataModel.equals("64");
}
- public static boolean isSolaris() {
- return isOs("sunos");
+ public static boolean isAix() {
+ return isOs("aix");
}
- public static boolean isWindows() {
- return isOs("win");
+ public static boolean isLinux() {
+ return isOs("linux");
}
public static boolean isOSX() {
return isOs("mac");
}
- public static boolean isLinux() {
- return isOs("linux");
+ public static boolean isSolaris() {
+ return isOs("sunos");
+ }
+
+ public static boolean isWindows() {
+ return isOs("win");
}
private static boolean isOs(String osname) {
@@ -92,33 +101,93 @@ public class Platform {
// Returns true for sparc and sparcv9.
public static boolean isSparc() {
- return isArch("sparc");
+ return isArch("sparc.*");
}
public static boolean isARM() {
- return isArch("arm");
+ return isArch("arm.*");
}
public static boolean isPPC() {
- return isArch("ppc");
+ return isArch("ppc.*");
}
public static boolean isX86() {
- // On Linux it's 'i386', Windows 'x86'
- return (isArch("i386") || isArch("x86"));
+ // On Linux it's 'i386', Windows 'x86' without '_64' suffix.
+ return isArch("(i386)|(x86(?!_64))");
}
public static boolean isX64() {
// On OSX it's 'x86_64' and on other (Linux, Windows and Solaris) platforms it's 'amd64'
- return (isArch("amd64") || isArch("x86_64"));
+ return isArch("(amd64)|(x86_64)");
+ }
+
+ public static boolean isAArch64() {
+ return isArch("aarch64");
}
- private static boolean isArch(String archname) {
- return osArch.toLowerCase().startsWith(archname.toLowerCase());
+ private static boolean isArch(String archnameRE) {
+ return Pattern.compile(archnameRE, Pattern.CASE_INSENSITIVE)
+ .matcher(osArch)
+ .matches();
}
public static String getOsArch() {
return osArch;
}
+ /**
+ * Return whether we expect to be able to attach
+ * the SA to our own processes on this system.
+ */
+ public static boolean shouldSAAttach() throws Exception {
+
+ if (isAix()) {
+ return false; // SA not implemented.
+ } else if (isLinux()) {
+ return canPtraceAttachLinux();
+ } else if (isOSX()) {
+ return canAttachOSX();
+ } else {
+ // Other platforms expected to work:
+ return true;
+ }
+ }
+
+ /**
+ * On Linux, first check the SELinux boolean "deny_ptrace"; if it is "1" we
+ * expect to be denied, so return false. Next, if we are root, we expect
+ * permission to attach, so return true. Finally, return false for an expected
+ * denial if "ptrace_scope" is 1, and true otherwise.
+ */
+ public static boolean canPtraceAttachLinux() throws Exception {
+
+ // SELinux deny_ptrace:
+ String deny_ptrace = Utils.fileAsString("/sys/fs/selinux/booleans/deny_ptrace");
+ if (deny_ptrace != null && deny_ptrace.contains("1")) {
+ // ptrace will be denied:
+ return false;
+ }
+
+ if (userName.equals("root")) {
+ return true;
+ }
+
+ // ptrace_scope:
+ String ptrace_scope = Utils.fileAsString("/proc/sys/kernel/yama/ptrace_scope");
+ if (ptrace_scope != null && ptrace_scope.contains("1")) {
+ // ptrace will be denied:
+ return false;
+ }
+
+ // Otherwise expect to be permitted:
+ return true;
+ }
+
+ /**
+ * On OSX, expect permission to attach only if we are root.
+ */
+ public static boolean canAttachOSX() throws Exception {
+ return userName.equals("root");
+ }
}
diff --git a/test/testlibrary/com/oracle/java/testlibrary/Utils.java b/test/testlibrary/com/oracle/java/testlibrary/Utils.java
index ba4bea5e9..84215195c 100644
--- a/test/testlibrary/com/oracle/java/testlibrary/Utils.java
+++ b/test/testlibrary/com/oracle/java/testlibrary/Utils.java
@@ -299,6 +299,35 @@ public final class Utils {
}
/**
+ * Return the contents of the named file as a single String,
+ * or null if not found.
+ * @param filename name of the file to read
+ * @return String contents of file, or null if file not found.
+ */
+ public static String fileAsString(String filename) {
+ StringBuilder result = new StringBuilder();
+ try {
+ File file = new File(filename);
+ if (file.exists()) {
+ BufferedReader reader = new BufferedReader(new FileReader(file));
+ while (true) {
+ String line = reader.readLine();
+ if (line == null) {
+ break;
+ }
+ result.append(line).append("\n");
+ }
+ } else {
+ // Does not exist:
+ return null;
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ return result.toString();
+ }
+
+ /**
* @return Unsafe instance.
*/
public static synchronized Unsafe getUnsafe() {
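
Note that fileAsString never closes its BufferedReader and swallows exceptions, so a read failure yields an empty string rather than null. A roughly equivalent, more defensive variant with the same contract (an editorial sketch, not part of the commit):

    import java.io.BufferedReader;
    import java.io.File;
    import java.io.FileReader;
    import java.io.IOException;

    public class FileAsStringSketch {
        // Contents joined with '\n', or null when the file does not exist.
        public static String fileAsString(String filename) throws IOException {
            File file = new File(filename);
            if (!file.exists()) {
                return null;
            }
            StringBuilder result = new StringBuilder();
            try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    result.append(line).append("\n");
                }
            }
            return result.toString();
        }
    }
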
diff --git a/test/testlibrary/ctw/Makefile b/test/testlibrary/ctw/Makefile
index 5bca7754c..a4fc46264 100644
--- a/test/testlibrary/ctw/Makefile
+++ b/test/testlibrary/ctw/Makefile
@@ -8,7 +8,7 @@
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
diff --git a/test/testlibrary/ctw/README b/test/testlibrary/ctw/README
index babb08162..a3badc6d5 100644
--- a/test/testlibrary/ctw/README
+++ b/test/testlibrary/ctw/README
@@ -1,26 +1,26 @@
-#
-# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
+
+Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+This code is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License version 2 only, as
+published by the Free Software Foundation.
+
+This code is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+version 2 for more details (a copy is included in the LICENSE file that
+accompanied this code).
+
+You should have received a copy of the GNU General Public License version
+2 along with this work; if not, write to the Free Software Foundation,
+Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+or visit www.oracle.com if you need additional information or have any
+questions.
+
+
DESCRIPTION
diff --git a/test/testlibrary/whitebox/Makefile b/test/testlibrary/whitebox/Makefile
index 8a84a0a44..91ea89012 100644
--- a/test/testlibrary/whitebox/Makefile
+++ b/test/testlibrary/whitebox/Makefile
@@ -8,7 +8,7 @@
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
diff --git a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java
index acb1131e3..99bb67000 100644
--- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java
+++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
package sun.hotspot;
+import java.lang.management.MemoryUsage;
import java.lang.reflect.Executable;
import java.util.Arrays;
import java.util.List;
@@ -75,6 +76,9 @@ public class WhiteBox {
// Memory
public native long getObjectAddress(Object o);
public native int getHeapOopSize();
+ public native int getVMPageSize();
+ public native long getVMLargePageSize();
+
public native boolean isObjectInOldGen(Object o);
public native long getObjectSize(Object o);
@@ -84,6 +88,8 @@ public class WhiteBox {
return isClassAlive0(name.replace('.', '/'));
}
private native boolean isClassAlive0(String name);
+ public native boolean isMonitorInflated(Object obj);
+ public native void forceSafepoint();
// Resource/Class Lookup Cache
public native boolean classKnownToNotExist(ClassLoader loader, String name);
@@ -97,8 +103,10 @@ public class WhiteBox {
// G1
public native boolean g1InConcurrentMark();
public native boolean g1IsHumongous(Object o);
+ public native long g1NumMaxRegions();
public native long g1NumFreeRegions();
public native int g1RegionSize();
+ public native MemoryUsage g1AuxiliaryMemoryUsage();
public native Object[] parseCommandLine(String commandline, DiagnosticCommand[] args);
// NMT
@@ -166,12 +174,16 @@ public class WhiteBox {
public native long incMetaspaceCapacityUntilGC(long increment);
public native long metaspaceCapacityUntilGC();
- // force Young GC
+ // Force Young GC
public native void youngGC();
- // force Full GC
+ // Force Full GC
public native void fullGC();
+ // Tries to start a concurrent mark cycle.
+ // Returns false if the CM Thread is already in a concurrent cycle.
+ public native boolean g1StartConcMarkCycle();
+
// Tests on ReservedSpace/VirtualSpace classes
public native int stressVirtualSpaceResize(long reservedSpaceSize, long magnitude, long iterations);
public native void runMemoryUnitTests();
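
Among the new hooks, isMonitorInflated and forceSafepoint pair naturally: a test inflates a monitor (a timed Object.wait typically does it in HotSpot), asserts the inflated state, then forces a safepoint to give the VM a chance to deflate idle monitors. A minimal sketch under the same WhiteBox jtreg setup shown for ReadVMPageSize above:

    import sun.hotspot.WhiteBox;

    public class MonitorInflateSketch {
        public static void main(String[] args) throws Exception {
            WhiteBox wb = WhiteBox.getWhiteBox();
            Object lock = new Object();
            synchronized (lock) {
                lock.wait(1); // a timed wait typically inflates the monitor
                System.out.println("inflated: " + wb.isMonitorInflated(lock));
            }
            wb.forceSafepoint(); // idle monitors may be deflated at a safepoint
        }
    }
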
diff --git a/test/testlibrary_tests/RedefineClassTest.java b/test/testlibrary_tests/RedefineClassTest.java
new file mode 100644
index 000000000..e812e43cd
--- /dev/null
+++ b/test/testlibrary_tests/RedefineClassTest.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary
+ * @summary Proof of concept test for RedefineClassHelper
+ * @build RedefineClassHelper
+ * @run main RedefineClassHelper
+ * @run main/othervm -javaagent:redefineagent.jar RedefineClassTest
+ */
+
+import static com.oracle.java.testlibrary.Asserts.*;
+import com.oracle.java.testlibrary.*;
+
+/*
+ * Proof of concept test for the test utility class RedefineClassHelper
+ */
+public class RedefineClassTest {
+
+ public static String newClass = "class RedefineClassTest$A { public int Method() { return 2; } }";
+ public static void main(String[] args) throws Exception {
+ A a = new A();
+ assertTrue(a.Method() == 1);
+ RedefineClassHelper.redefineClass(A.class, newClass);
+ assertTrue(a.Method() == 2);
+ }
+
+ static class A {
+ public int Method() {
+ return 1;
+ }
+ }
+}
diff --git a/test/testlibrary_tests/TestMutuallyExclusivePlatformPredicates.java b/test/testlibrary_tests/TestMutuallyExclusivePlatformPredicates.java
new file mode 100644
index 000000000..7d56a4a3b
--- /dev/null
+++ b/test/testlibrary_tests/TestMutuallyExclusivePlatformPredicates.java
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.Platform;
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * @test
+ * @summary Verify that for each group of mutually exclusive predicates defined
+ * in com.oracle.java.testlibrary.Platform one and only one predicate
+ * evaluates to true.
+ * @library /testlibrary
+ * @run main TestMutuallyExclusivePlatformPredicates
+ */
+public class TestMutuallyExclusivePlatformPredicates {
+ private static enum MethodGroup {
+ ARCH("isARM", "isPPC", "isSparc", "isX86", "isX64", "isAArch64"),
+ BITNESS("is32bit", "is64bit"),
+ OS("isAix", "isLinux", "isSolaris", "isWindows", "isOSX"),
+ VM_TYPE("isClient", "isServer", "isGraal", "isMinimal"),
+ IGNORED("isEmbedded", "isDebugBuild", "shouldSAAttach",
+ "canPtraceAttachLinux", "canAttachOSX");
+
+ public final List<String> methodNames;
+
+ private MethodGroup(String... methodNames) {
+ this.methodNames = Collections.unmodifiableList(
+ Arrays.asList(methodNames));
+ }
+ }
+
+ public static void main(String args[]) {
+ EnumSet<MethodGroup> notIgnoredMethodGroups
+ = EnumSet.complementOf(EnumSet.of(MethodGroup.IGNORED));
+
+ notIgnoredMethodGroups.forEach(
+ TestMutuallyExclusivePlatformPredicates::verifyPredicates);
+
+ TestMutuallyExclusivePlatformPredicates.verifyCoverage();
+ }
+
+ /**
+ * Verifies that exactly one of the predicate methods defined in
+ * {@link com.oracle.java.testlibrary.Platform} whose names are included in
+ * methodGroup returns {@code true}.
+ * @param methodGroup The group of methods that should be tested.
+ */
+ private static void verifyPredicates(MethodGroup methodGroup) {
+ System.out.println("Verifying method group: " + methodGroup.name());
+ long truePredicatesCount = methodGroup.methodNames.stream()
+ .filter(TestMutuallyExclusivePlatformPredicates
+ ::evaluatePredicate)
+ .count();
+
+ Asserts.assertEQ(truePredicatesCount, 1L, String.format(
+ "Only one predicate from group %s should be evaluated to true "
+ + "(Actually %d predicates were evaluated to true).",
+ methodGroup.name(), truePredicatesCount));
+ }
+
+ /**
+ * Verifies that all predicates defined in
+ * {@link com.oracle.java.testlibrary.Platform} were either tested or
+ * explicitly ignored.
+ */
+ private static void verifyCoverage() {
+ Set<String> allMethods = new HashSet<>();
+ for (MethodGroup group : MethodGroup.values()) {
+ allMethods.addAll(group.methodNames);
+ }
+
+ for (Method m : Platform.class.getMethods()) {
+ if (m.getParameterCount() == 0
+ && m.getReturnType() == boolean.class) {
+ Asserts.assertTrue(allMethods.contains(m.getName()),
+ "All Platform's methods with signature '():Z' should "
+ + "be tested ");
+ }
+ }
+ }
+
+ /**
+ * Evaluates the predicate method named {@code name} defined in
+ * {@link com.oracle.java.testlibrary.Platform}.
+ *
+ * @param name The name of a predicate to be evaluated.
+ * @return evaluated predicate's value.
+ * @throws java.lang.Error if predicate is not defined or could not be
+ * evaluated.
+ */
+ private static boolean evaluatePredicate(String name) {
+ try {
+ System.out.printf("Trying to evaluate predicate with name %s%n",
+ name);
+ boolean value
+ = (Boolean) Platform.class.getMethod(name).invoke(null);
+ System.out.printf("Predicate evaluated to: %s%n", value);
+ return value;
+ } catch (NoSuchMethodException e) {
+ throw new Error("Predicate with name " + name
+ + " is not defined in " + Platform.class.getName(), e);
+ } catch (IllegalAccessException | InvocationTargetException e) {
+ throw new Error("Unable to evaluate predicate " + name, e);
+ }
+ }
+}