diff --git a/.hgtags b/.hgtags
index 79370ccbc33..b785cc1f512 100644
--- a/.hgtags
+++ b/.hgtags
@@ -500,8 +500,11 @@ e1b3def126240d5433902f3cb0e91a4c27f6db50 jdk-11+18
 ea900a7dc7d77dee30865c60eabd87fc24b1037c jdk-11+24
 331888ea4a788df801b1edf8836646cd25fc758b jdk-11+25
 945ba9278a272a5477ffb1b3ea1b04174fed8036 jdk-11+26
+9d7d74c6f2cbe522e39fa22dc557fdd3f79b32ad jdk-11+27
 69b438908512d3dfef5852c6a843a5778333a309 jdk-12+2
 990db216e7199b2ba9989d8fa20b657e0ca7d969 jdk-12+3
 499b873761d8e8a1cc4aa649daf04cbe98cbce77 jdk-12+4
 f8696e0ab9b795030429fc3374ec03e378fd9ed7 jdk-12+5
 7939b3c4e4088bf4f70ec5bbd8030393b653372f jdk-12+6
+ef57958c7c511162da8d9a75f0b977f0f7ac464e jdk-12+7
+492b366f8e5784cc4927c2c98f9b8a3f16c067eb jdk-12+8
diff --git a/doc/building.html b/doc/building.html
index d6f7b5a8c19..71c0d621ee2 100644
--- a/doc/building.html
+++ b/doc/building.html
@@ -72,6 +72,7 @@
  • Specifying the Target Platform
  • Toolchain Considerations
  • Native Libraries
  • Creating And Using Sysroots With qemu-debootstrap
  • Building for ARM/aarch64
  • Verifying the Build
@@ -634,6 +635,72 @@
cp: cannot stat `arm-linux-gnueabihf/libSM.so': No such file or directory
cp: cannot stat `arm-linux-gnueabihf/libXt.so': No such file or directory
  • If the X11 libraries are not properly detected by configure, you can point them out by --with-x.

Creating And Using Sysroots With qemu-debootstrap

Fortunately, you can create sysroots for foreign architectures with tools provided by your OS. On Debian/Ubuntu systems, you can use qemu-debootstrap to create the target system chroot, which contains the native libraries and headers specific to that target system. After that, you can use the cross-compiler on the build system, pointing it into the chroot to get the build dependencies right. This allows building for foreign architectures with native compilation speed.


    For example, cross-compiling to AArch64 from x86_64 could be done like this:

  • Install the cross-compiler on the build system:

        apt install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu

  • Create a chroot on the build system, configuring it for the target system:

        sudo qemu-debootstrap --arch=arm64 --verbose \
          --include=fakeroot,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng12-dev \
          --resolve-deps jessie /chroots/arm64 http://httpredir.debian.org/debian/

  • Configure and build with the newly created chroot as sysroot/toolchain-path:

        CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-g++ sh ./configure --openjdk-target=aarch64-linux-gnu --with-sysroot=/chroots/arm64/ --with-toolchain-path=/chroots/arm64/
        make images
        ls build/linux-aarch64-normal-server-release/

    The build does not create new files in that chroot, so it can be reused for multiple builds without additional cleanup.


    Architectures that are known to successfully cross-compile like this are:

    Target     CC                          CXX                          --arch=...   --openjdk-target=...
    x86        default                     default                      i386         i386-linux-gnu
    armhf      gcc-arm-linux-gnueabihf     g++-arm-linux-gnueabihf      armhf        arm-linux-gnueabihf
    aarch64    gcc-aarch64-linux-gnu       g++-aarch64-linux-gnu        arm64        aarch64-linux-gnu
    ppc64el    gcc-powerpc64le-linux-gnu   g++-powerpc64le-linux-gnu    ppc64el      powerpc64le-linux-gnu
    s390x      gcc-s390x-linux-gnu         g++-s390x-linux-gnu          s390x        s390x-linux-gnu

    Additional architectures might be supported by Debian/Ubuntu Ports.
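As an illustration, the armhf row of the table plugs into the same recipe shown above for AArch64; the chroot path and Debian release below are assumptions carried over from that arm64 example:

    sudo qemu-debootstrap --arch=armhf --verbose \
      --include=fakeroot,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng12-dev \
      --resolve-deps jessie /chroots/armhf http://httpredir.debian.org/debian/
    CC=arm-linux-gnueabihf-gcc CXX=arm-linux-gnueabihf-g++ sh ./configure \
      --openjdk-target=arm-linux-gnueabihf --with-sysroot=/chroots/armhf/ --with-toolchain-path=/chroots/armhf/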

    Building for ARM/aarch64

    A common cross-compilation target is the ARM CPU. When building for ARM, it is useful to set the ABI profile. A number of pre-defined ABI profiles are available using --with-abi-profile: arm-vfp-sflt, arm-vfp-hflt, arm-sflt, armv5-vfp-sflt, armv6-vfp-hflt. Note that soft-float ABIs are no longer properly supported by the JDK.
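A minimal sketch of selecting one of these profiles, assuming the armhf sysroot from the previous section (the profile choice itself is illustrative):

    sh ./configure --openjdk-target=arm-linux-gnueabihf --with-sysroot=/chroots/armhf/ --with-abi-profile=arm-vfp-hflt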

The JDK contains two different ports for the aarch64 platform: the original aarch64 port from the AArch64 Port Project, and a 64-bit version of the Oracle-contributed ARM port. When targeting aarch64, by default the original aarch64 port is used. To select the Oracle ARM 64 port, use --with-cpu-port=arm64. Also set the corresponding value (aarch64 or arm64) to --with-abi-profile, to ensure a consistent build.
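For example, a configure line selecting the Oracle ARM 64 port might look like this, assuming an arm64 sysroot set up as in the previous section:

    sh ./configure --openjdk-target=aarch64-linux-gnu --with-sysroot=/chroots/arm64/ --with-cpu-port=arm64 --with-abi-profile=arm64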

diff --git a/doc/building.md b/doc/building.md
index e5990a76300..653683d10d7 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -1018,6 +1018,51 @@ Note that X11 is needed even if you only want to build a headless JDK.
   * If the X11 libraries are not properly detected by `configure`, you can
     point them out by `--with-x`.
 
+### Creating And Using Sysroots With qemu-debootstrap
+
+Fortunately, you can create sysroots for foreign architectures with tools
+provided by your OS. On Debian/Ubuntu systems, you can use `qemu-debootstrap` to
+create the *target* system chroot, which contains the native libraries and headers
+specific to that *target* system. After that, you can use the cross-compiler on the *build*
+system, pointing it into the chroot to get the build dependencies right. This allows building
+for foreign architectures with native compilation speed.
+
+For example, cross-compiling to AArch64 from x86_64 could be done like this:
+
+  * Install the cross-compiler on the *build* system:
+```
+apt install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu
+```
+
+  * Create a chroot on the *build* system, configuring it for the *target* system:
+```
+sudo qemu-debootstrap --arch=arm64 --verbose \
+  --include=fakeroot,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng12-dev \
+  --resolve-deps jessie /chroots/arm64 http://httpredir.debian.org/debian/
+```
+
+  * Configure and build with the newly created chroot as sysroot/toolchain-path:
+```
+CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-g++ sh ./configure --openjdk-target=aarch64-linux-gnu --with-sysroot=/chroots/arm64/ --with-toolchain-path=/chroots/arm64/
+make images
+ls build/linux-aarch64-normal-server-release/
+```
+
+The build does not create new files in that chroot, so it can be reused for multiple builds
+without additional cleanup.
+
+Architectures that are known to successfully cross-compile like this are:
+
+  Target        `CC`                       `CXX`                        `--arch=...`  `--openjdk-target=...`
+  ------------  -------------------------  ---------------------------  ------------  ----------------------
+  x86           default                    default                      i386          i386-linux-gnu
+  armhf         gcc-arm-linux-gnueabihf    g++-arm-linux-gnueabihf      armhf         arm-linux-gnueabihf
+  aarch64       gcc-aarch64-linux-gnu      g++-aarch64-linux-gnu        arm64         aarch64-linux-gnu
+  ppc64el       gcc-powerpc64le-linux-gnu  g++-powerpc64le-linux-gnu    ppc64el       powerpc64le-linux-gnu
+  s390x         gcc-s390x-linux-gnu        g++-s390x-linux-gnu          s390x         s390x-linux-gnu
+
+Additional architectures might be supported by Debian/Ubuntu Ports.
+
 ### Building for ARM/aarch64
 
 A common cross-compilation target is the ARM CPU. When building for ARM, it is
diff --git a/make/CompileJavaModules.gmk b/make/CompileJavaModules.gmk
index 646aa1a4d0e..a889e79026a 100644
--- a/make/CompileJavaModules.gmk
+++ b/make/CompileJavaModules.gmk
@@ -511,6 +511,10 @@ jdk.aot_ADD_JAVAC_FLAGS += -parameters -XDstringConcat=inline \
     --add-exports jdk.internal.vm.ci/jdk.vm.ci.sparc=jdk.internal.vm.compiler,jdk.aot \
     #
 
+jdk.aot_EXCLUDES += \
+    jdk.tools.jaotc.test
+    #
+
 ################################################################################
 
 sun.charsets_COPY += .dat
diff --git a/make/common/TestFilesCompilation.gmk b/make/common/TestFilesCompilation.gmk
index 41c5a8e1efc..e90921e70af 100644
--- a/make/common/TestFilesCompilation.gmk
+++ b/make/common/TestFilesCompilation.gmk
@@ -94,7 +94,7 @@ define SetupTestFilesCompilationBody
         CFLAGS := $$($1_CFLAGS) $$($1_CFLAGS_$$(name)), \
         LDFLAGS := $$($1_LDFLAGS) $$($1_LDFLAGS_$$(name)), \
         LIBS := $$($1_LIBS_$$(name)), \
-        OPTIMIZATION := LOW, \
+        OPTIMIZATION := $$(if $$($1_OPTIMIZATION_$$(name)),$$($1_OPTIMIZATION_$$(name)),LOW), \
         COPY_DEBUG_SYMBOLS := false, \
         STRIP_SYMBOLS := false, \
     )) \
diff --git a/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk b/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk
index 6d2b1ab4c3a..f71ac568020 100644
--- a/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk
+++ b/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk
@@ -124,7 +124,7 @@ $(GENSRC_DIR)/module-info.java.extra: $(GENSRC_DIR)/_gensrc_proc_done
 	($(CD) $(GENSRC_DIR)/META-INF/providers && \
 	    p=""; \
 	    impl=""; \
-	    for i in $$($(LS) | $(SORT)); do \
+	    for i in $$($(GREP) '^' * | $(SORT) -t ':' -k 2 | $(SED) 's/:.*//'); do \
 	      c=$$($(CAT) $$i | $(TR) -d '\n\r'); \
 	      if test x$$p != x$$c; then \
 	        if test x$$p != x; then \
diff --git a/make/jdk/src/classes/build/tools/module/GenModuleInfoSource.java b/make/jdk/src/classes/build/tools/module/GenModuleInfoSource.java
index 478a90811b5..5189c995d53 100644
--- a/make/jdk/src/classes/build/tools/module/GenModuleInfoSource.java
+++ b/make/jdk/src/classes/build/tools/module/GenModuleInfoSource.java
@@ -431,14 +431,12 @@ public class GenModuleInfoSource {
                 }
                 uses.put(name, statement);
                 break;
-            /* Disable this check until jdk.internal.vm.compiler generated file is fixed.
case "provides": if (provides.containsKey(name)) { throw parser.newError("multiple " + keyword + " " + name); } provides.put(name, statement); break; - */ } String lookAhead = lookAhead(parser); if (lookAhead.equals(statement.qualifier)) { diff --git a/make/jdk/src/classes/build/tools/module/ModuleInfoExtraTest.java b/make/jdk/src/classes/build/tools/module/ModuleInfoExtraTest.java index 5f15a20bb58..f9a09ef3f3b 100644 --- a/make/jdk/src/classes/build/tools/module/ModuleInfoExtraTest.java +++ b/make/jdk/src/classes/build/tools/module/ModuleInfoExtraTest.java @@ -230,7 +230,11 @@ public class ModuleInfoExtraTest { new String[] { " uses s;", " uses s;" - }, ".*, line .*, multiple uses s.*" + }, ".*, line .*, multiple uses s.*", + new String[] { + " provides s with impl1;", + " provides s with impl2, impl3;" + }, ".*, line .*, multiple provides s.*" ); void errorCases() { diff --git a/make/test/JtregNativeHotspot.gmk b/make/test/JtregNativeHotspot.gmk index 19d352da797..eef272c6a5b 100644 --- a/make/test/JtregNativeHotspot.gmk +++ b/make/test/JtregNativeHotspot.gmk @@ -139,6 +139,15 @@ NSK_AOD_INCLUDES := \ -I$(VM_TESTBASE_DIR)/nsk/share/native \ -I$(VM_TESTBASE_DIR)/nsk/share/jni +NO_FRAMEPOINTER_CFLAGS := +ifeq ($(OPENJDK_TARGET_OS),linux) + NO_FRAMEPOINTER_CFLAGS := -fomit-frame-pointer +endif + +BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libNoFramePointer := $(NO_FRAMEPOINTER_CFLAGS) +# Optimization -O3 needed, HIGH == -O3 +BUILD_HOTSPOT_JTREG_LIBRARIES_OPTIMIZATION_libNoFramePointer := HIGH + BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libProcessUtils := $(VM_SHARE_INCLUDES) BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libThreadController := $(NSK_MONITORING_INCLUDES) diff --git a/src/hotspot/.mx.jvmci/suite.py b/src/hotspot/.mx.jvmci/suite.py index edea9f2d82e..d8211e709ca 100644 --- a/src/hotspot/.mx.jvmci/suite.py +++ b/src/hotspot/.mx.jvmci/suite.py @@ -43,7 +43,8 @@ suite = { "jdk.vm.ci.services" : { "subDir" : "../jdk.internal.vm.ci/share/classes", "sourceDirs" : ["src"], - "javaCompliance" : "9", + "javaCompliance" : "9+", + "checkstyleVersion" : "8.8", "workingSets" : "API,JVMCI", }, @@ -53,7 +54,7 @@ suite = { "subDir" : "../jdk.internal.vm.ci/share/classes", "sourceDirs" : ["src"], "checkstyle" : "jdk.vm.ci.services", - "javaCompliance" : "9", + "javaCompliance" : "9+", "workingSets" : "API,JVMCI", }, @@ -61,7 +62,7 @@ suite = { "subDir" : "../jdk.internal.vm.ci/share/classes", "sourceDirs" : ["src"], "checkstyle" : "jdk.vm.ci.services", - "javaCompliance" : "9", + "javaCompliance" : "9+", "workingSets" : "API,JVMCI", }, @@ -70,7 +71,7 @@ suite = { "sourceDirs" : ["src"], "dependencies" : ["jdk.vm.ci.meta"], "checkstyle" : "jdk.vm.ci.services", - "javaCompliance" : "9", + "javaCompliance" : "9+", "workingSets" : "API,JVMCI", }, @@ -85,7 +86,7 @@ suite = { "jdk.vm.ci.hotspot", ], "checkstyle" : "jdk.vm.ci.services", - "javaCompliance" : "9", + "javaCompliance" : "9+", "workingSets" : "API,JVMCI", }, @@ -97,7 +98,7 @@ suite = { "jdk.vm.ci.services", ], "checkstyle" : "jdk.vm.ci.services", - "javaCompliance" : "9", + "javaCompliance" : "9+", "workingSets" : "API,JVMCI", }, @@ -110,7 +111,7 @@ suite = { "jdk.vm.ci.runtime", ], "checkstyle" : "jdk.vm.ci.services", - "javaCompliance" : "9", + "javaCompliance" : "9+", "workingSets" : "API,JVMCI", }, @@ -121,7 +122,7 @@ suite = { "sourceDirs" : ["src"], "dependencies" : ["jdk.vm.ci.code"], "checkstyle" : "jdk.vm.ci.services", - "javaCompliance" : "9", + "javaCompliance" : "9+", "workingSets" : "JVMCI,AArch64", }, @@ -130,7 +131,7 @@ suite = { "sourceDirs" 
: ["src"], "dependencies" : ["jdk.vm.ci.code"], "checkstyle" : "jdk.vm.ci.services", - "javaCompliance" : "9", + "javaCompliance" : "9+", "workingSets" : "JVMCI,AMD64", }, @@ -139,7 +140,7 @@ suite = { "sourceDirs" : ["src"], "dependencies" : ["jdk.vm.ci.code"], "checkstyle" : "jdk.vm.ci.services", - "javaCompliance" : "9", + "javaCompliance" : "9+", "workingSets" : "JVMCI,SPARC", }, @@ -156,7 +157,7 @@ suite = { "jdk.internal.org.objectweb.asm", ], "checkstyle" : "jdk.vm.ci.services", - "javaCompliance" : "9", + "javaCompliance" : "9+", "workingSets" : "JVMCI", }, @@ -168,7 +169,7 @@ suite = { "jdk.vm.ci.hotspot", ], "checkstyle" : "jdk.vm.ci.services", - "javaCompliance" : "9", + "javaCompliance" : "9+", "workingSets" : "API,JVMCI", }, @@ -180,7 +181,7 @@ suite = { "jdk.vm.ci.hotspot", ], "checkstyle" : "jdk.vm.ci.services", - "javaCompliance" : "9", + "javaCompliance" : "9+", "workingSets" : "JVMCI,HotSpot,AArch64", }, @@ -192,7 +193,7 @@ suite = { "jdk.vm.ci.hotspot", ], "checkstyle" : "jdk.vm.ci.services", - "javaCompliance" : "9", + "javaCompliance" : "9+", "workingSets" : "JVMCI,HotSpot,AMD64", }, @@ -204,7 +205,7 @@ suite = { "jdk.vm.ci.hotspot", ], "checkstyle" : "jdk.vm.ci.services", - "javaCompliance" : "9", + "javaCompliance" : "9+", "workingSets" : "JVMCI,HotSpot,SPARC", }, diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad index f794d403389..abc26ae4005 100644 --- a/src/hotspot/cpu/aarch64/aarch64.ad +++ b/src/hotspot/cpu/aarch64/aarch64.ad @@ -1036,21 +1036,8 @@ class HandlerImpl { } }; - // graph traversal helpers - - MemBarNode *parent_membar(const Node *n); - MemBarNode *child_membar(const MemBarNode *n); - bool leading_membar(const MemBarNode *barrier); - - bool is_card_mark_membar(const MemBarNode *barrier); bool is_CAS(int opcode); - MemBarNode *leading_to_normal(MemBarNode *leading); - MemBarNode *normal_to_leading(const MemBarNode *barrier); - MemBarNode *card_mark_to_trailing(const MemBarNode *barrier); - MemBarNode *trailing_to_card_mark(const MemBarNode *trailing); - MemBarNode *trailing_to_leading(const MemBarNode *trailing); - // predicates controlling emit of ldr/ldar and associated dmb bool unnecessary_acquire(const Node *barrier); @@ -1272,605 +1259,6 @@ source %{ // relevant dmb instructions. // - // graph traversal helpers used for volatile put/get and CAS - // optimization - - // 1) general purpose helpers - - // if node n is linked to a parent MemBarNode by an intervening - // Control and Memory ProjNode return the MemBarNode otherwise return - // NULL. - // - // n may only be a Load or a MemBar. - - MemBarNode *parent_membar(const Node *n) - { - Node *ctl = NULL; - Node *mem = NULL; - Node *membar = NULL; - - if (n->is_Load()) { - ctl = n->lookup(LoadNode::Control); - mem = n->lookup(LoadNode::Memory); - } else if (n->is_MemBar()) { - ctl = n->lookup(TypeFunc::Control); - mem = n->lookup(TypeFunc::Memory); - } else { - return NULL; - } - - if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) { - return NULL; - } - - membar = ctl->lookup(0); - - if (!membar || !membar->is_MemBar()) { - return NULL; - } - - if (mem->lookup(0) != membar) { - return NULL; - } - - return membar->as_MemBar(); - } - - // if n is linked to a child MemBarNode by intervening Control and - // Memory ProjNodes return the MemBarNode otherwise return NULL. 
- - MemBarNode *child_membar(const MemBarNode *n) - { - ProjNode *ctl = n->proj_out_or_null(TypeFunc::Control); - ProjNode *mem = n->proj_out_or_null(TypeFunc::Memory); - - // MemBar needs to have both a Ctl and Mem projection - if (! ctl || ! mem) - return NULL; - - MemBarNode *child = NULL; - Node *x; - - for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) { - x = ctl->fast_out(i); - // if we see a membar we keep hold of it. we may also see a new - // arena copy of the original but it will appear later - if (x->is_MemBar()) { - child = x->as_MemBar(); - break; - } - } - - if (child == NULL) { - return NULL; - } - - for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { - x = mem->fast_out(i); - // if we see a membar we keep hold of it. we may also see a new - // arena copy of the original but it will appear later - if (x == child) { - return child; - } - } - return NULL; - } - - // helper predicate use to filter candidates for a leading memory - // barrier - // - // returns true if barrier is a MemBarRelease or a MemBarCPUOrder - // whose Ctl and Mem feeds come from a MemBarRelease otherwise false - - bool leading_membar(const MemBarNode *barrier) - { - int opcode = barrier->Opcode(); - // if this is a release membar we are ok - if (opcode == Op_MemBarRelease) { - return true; - } - // if its a cpuorder membar . . . - if (opcode != Op_MemBarCPUOrder) { - return false; - } - // then the parent has to be a release membar - MemBarNode *parent = parent_membar(barrier); - if (!parent) { - return false; - } - opcode = parent->Opcode(); - return opcode == Op_MemBarRelease; - } - - // 2) card mark detection helper - - // helper predicate which can be used to detect a volatile membar - // introduced as part of a conditional card mark sequence either by - // G1 or by CMS when UseCondCardMark is true. - // - // membar can be definitively determined to be part of a card mark - // sequence if and only if all the following hold - // - // i) it is a MemBarVolatile - // - // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is - // true - // - // iii) the node's Mem projection feeds a StoreCM node. - - bool is_card_mark_membar(const MemBarNode *barrier) - { - if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) { - return false; - } - - if (barrier->Opcode() != Op_MemBarVolatile) { - return false; - } - - ProjNode *mem = barrier->proj_out(TypeFunc::Memory); - - for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) { - Node *y = mem->fast_out(i); - if (y->Opcode() == Op_StoreCM) { - return true; - } - } - - return false; - } - - - // 3) helper predicates to traverse volatile put or CAS graphs which - // may contain GC barrier subgraphs - - // Preamble - // -------- - // - // for volatile writes we can omit generating barriers and employ a - // releasing store when we see a node sequence sequence with a - // leading MemBarRelease and a trailing MemBarVolatile as follows - // - // MemBarRelease - // { || } -- optional - // {MemBarCPUOrder} - // || \\ - // || StoreX[mo_release] - // | \ / - // | MergeMem - // | / - // {MemBarCPUOrder} -- optional - // { || } - // MemBarVolatile - // - // where - // || and \\ represent Ctl and Mem feeds via Proj nodes - // | \ and / indicate further routing of the Ctl and Mem feeds - // - // this is the graph we see for non-object stores. however, for a - // volatile Object store (StoreN/P) we may see other nodes below the - // leading membar because of the need for a GC pre- or post-write - // barrier. 
- // - // with most GC configurations we with see this simple variant which - // includes a post-write barrier card mark. - // - // MemBarRelease______________________________ - // || \\ Ctl \ \\ - // || StoreN/P[mo_release] CastP2X StoreB/CM - // | \ / . . . / - // | MergeMem - // | / - // || / - // {MemBarCPUOrder} -- optional - // { || } - // MemBarVolatile - // - // i.e. the leading membar feeds Ctl to a CastP2X (which converts - // the object address to an int used to compute the card offset) and - // Ctl+Mem to a StoreB node (which does the actual card mark). - // - // n.b. a StoreCM node will only appear in this configuration when - // using CMS or G1. StoreCM differs from a normal card mark write (StoreB) - // because it implies a requirement to order visibility of the card - // mark (StoreCM) relative to the object put (StoreP/N) using a - // StoreStore memory barrier (arguably this ought to be represented - // explicitly in the ideal graph but that is not how it works). This - // ordering is required for both non-volatile and volatile - // puts. Normally that means we need to translate a StoreCM using - // the sequence - // - // dmb ishst - // strb - // - // However, when using G1 or CMS with conditional card marking (as - // we shall see) we don't need to insert the dmb when translating - // StoreCM because there is already an intervening StoreLoad barrier - // between it and the StoreP/N. - // - // It is also possible to perform the card mark conditionally on it - // currently being unmarked in which case the volatile put graph - // will look slightly different - // - // MemBarRelease____________________________________________ - // || \\ Ctl \ Ctl \ \\ Mem \ - // || StoreN/P[mo_release] CastP2X If LoadB | - // | \ / \ | - // | MergeMem . . . StoreB - // | / / - // || / - // MemBarVolatile - // - // It is worth noting at this stage that both the above - // configurations can be uniquely identified by checking that the - // memory flow includes the following subgraph: - // - // MemBarRelease - // {MemBarCPUOrder} - // | \ . . . - // | StoreX[mo_release] . . . - // | / - // MergeMem - // | - // {MemBarCPUOrder} - // MemBarVolatile - // - // This is referred to as a *normal* subgraph. It can easily be - // detected starting from any candidate MemBarRelease, - // StoreX[mo_release] or MemBarVolatile. - // - // A simple variation on this normal case occurs for an unsafe CAS - // operation. The basic graph for a non-object CAS is - // - // MemBarRelease - // || - // MemBarCPUOrder - // || \\ . . . - // || CompareAndSwapX - // || | - // || SCMemProj - // | \ / - // | MergeMem - // | / - // MemBarCPUOrder - // || - // MemBarAcquire - // - // The same basic variations on this arrangement (mutatis mutandis) - // occur when a card mark is introduced. i.e. we se the same basic - // shape but the StoreP/N is replaced with CompareAndSawpP/N and the - // tail of the graph is a pair comprising a MemBarCPUOrder + - // MemBarAcquire. - // - // So, in the case of a CAS the normal graph has the variant form - // - // MemBarRelease - // MemBarCPUOrder - // | \ . . . - // | CompareAndSwapX . . . - // | | - // | SCMemProj - // | / . . . - // MergeMem - // | - // MemBarCPUOrder - // MemBarAcquire - // - // This graph can also easily be detected starting from any - // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire. 
- // - // the code below uses two helper predicates, leading_to_normal and - // normal_to_leading to identify these normal graphs, one validating - // the layout starting from the top membar and searching down and - // the other validating the layout starting from the lower membar - // and searching up. - // - // There are two special case GC configurations when a normal graph - // may not be generated: when using G1 (which always employs a - // conditional card mark); and when using CMS with conditional card - // marking configured. These GCs are both concurrent rather than - // stop-the world GCs. So they introduce extra Ctl+Mem flow into the - // graph between the leading and trailing membar nodes, in - // particular enforcing stronger memory serialisation beween the - // object put and the corresponding conditional card mark. CMS - // employs a post-write GC barrier while G1 employs both a pre- and - // post-write GC barrier. Of course the extra nodes may be absent -- - // they are only inserted for object puts/swaps. This significantly - // complicates the task of identifying whether a MemBarRelease, - // StoreX[mo_release] or MemBarVolatile forms part of a volatile put - // when using these GC configurations (see below). It adds similar - // complexity to the task of identifying whether a MemBarRelease, - // CompareAndSwapX or MemBarAcquire forms part of a CAS. - // - // In both cases the post-write subtree includes an auxiliary - // MemBarVolatile (StoreLoad barrier) separating the object put/swap - // and the read of the corresponding card. This poses two additional - // problems. - // - // Firstly, a card mark MemBarVolatile needs to be distinguished - // from a normal trailing MemBarVolatile. Resolving this first - // problem is straightforward: a card mark MemBarVolatile always - // projects a Mem feed to a StoreCM node and that is a unique marker - // - // MemBarVolatile (card mark) - // C | \ . . . - // | StoreCM . . . - // . . . - // - // The second problem is how the code generator is to translate the - // card mark barrier? It always needs to be translated to a "dmb - // ish" instruction whether or not it occurs as part of a volatile - // put. A StoreLoad barrier is needed after the object put to ensure - // i) visibility to GC threads of the object put and ii) visibility - // to the mutator thread of any card clearing write by a GC - // thread. Clearly a normal store (str) will not guarantee this - // ordering but neither will a releasing store (stlr). The latter - // guarantees that the object put is visible but does not guarantee - // that writes by other threads have also been observed. - // - // So, returning to the task of translating the object put and the - // leading/trailing membar nodes: what do the non-normal node graph - // look like for these 2 special cases? and how can we determine the - // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile - // in both normal and non-normal cases? - // - // A CMS GC post-barrier wraps its card write (StoreCM) inside an If - // which selects conditonal execution based on the value loaded - // (LoadB) from the card. Ctl and Mem are fed to the If via an - // intervening StoreLoad barrier (MemBarVolatile). 
- // - // So, with CMS we may see a node graph for a volatile object store - // which looks like this - // - // MemBarRelease - // {MemBarCPUOrder}_(leading)_________________ - // C | M \ \\ C \ - // | \ StoreN/P[mo_release] CastP2X - // | Bot \ / - // | MergeMem - // | / - // MemBarVolatile (card mark) - // C | || M | - // | LoadB | - // | | | - // | Cmp |\ - // | / | \ - // If | \ - // | \ | \ - // IfFalse IfTrue | \ - // \ / \ | \ - // \ / StoreCM | - // \ / | | - // Region . . . | - // | \ / - // | . . . \ / Bot - // | MergeMem - // | | - // {MemBarCPUOrder} - // MemBarVolatile (trailing) - // - // The first MergeMem merges the AliasIdxBot Mem slice from the - // leading membar and the oopptr Mem slice from the Store into the - // card mark membar. The trailing MergeMem merges the AliasIdxBot - // Mem slice from the card mark membar and the AliasIdxRaw slice - // from the StoreCM into the trailing membar (n.b. the latter - // proceeds via a Phi associated with the If region). - // - // The graph for a CAS varies slightly, the difference being - // that the StoreN/P node is replaced by a CompareAndSwapP/N node - // and the trailing MemBarVolatile by a MemBarCPUOrder + - // MemBarAcquire pair (also the MemBarCPUOrder nodes are not optional). - // - // MemBarRelease - // MemBarCPUOrder_(leading)_______________ - // C | M \ \\ C \ - // | \ CompareAndSwapN/P CastP2X - // | \ | - // | \ SCMemProj - // | Bot \ / - // | MergeMem - // | / - // MemBarVolatile (card mark) - // C | || M | - // | LoadB | - // | | | - // | Cmp |\ - // | / | \ - // If | \ - // | \ | \ - // IfFalse IfTrue | \ - // \ / \ | \ - // \ / StoreCM | - // \ / | | - // Region . . . | - // | \ / - // | . . . \ / Bot - // | MergeMem - // | | - // MemBarCPUOrder - // MemBarVolatile (trailing) - // - // - // G1 is quite a lot more complicated. The nodes inserted on behalf - // of G1 may comprise: a pre-write graph which adds the old value to - // the SATB queue; the releasing store itself; and, finally, a - // post-write graph which performs a card mark. - // - // The pre-write graph may be omitted, but only when the put is - // writing to a newly allocated (young gen) object and then only if - // there is a direct memory chain to the Initialize node for the - // object allocation. This will not happen for a volatile put since - // any memory chain passes through the leading membar. - // - // The pre-write graph includes a series of 3 If tests. The outermost - // If tests whether SATB is enabled (no else case). The next If tests - // whether the old value is non-NULL (no else case). The third tests - // whether the SATB queue index is > 0, if so updating the queue. The - // else case for this third If calls out to the runtime to allocate a - // new queue buffer. - // - // So with G1 the pre-write and releasing store subgraph looks like - // this (the nested Ifs are omitted). - // - // MemBarRelease - // {MemBarCPUOrder}_(leading)___________ - // C | || M \ M \ M \ M \ . . . - // | LoadB \ LoadL LoadN \ - // | / \ \ - // If |\ \ - // | \ | \ \ - // IfFalse IfTrue | \ \ - // | | | \ | - // | If | /\ | - // | | \ | - // | \ | - // | . . . \ | - // | / | / | | - // Region Phi[M] | | - // | \ | | | - // | \_____ | ___ | | - // C | C \ | C \ M | | - // | CastP2X | StoreN/P[mo_release] | - // | | | | - // C | M | M | M | - // \ | | / - // . . . - // (post write subtree elided) - // . . . - // C \ M / - // \ / - // {MemBarCPUOrder} - // MemBarVolatile (trailing) - // - // n.b. 
the LoadB in this subgraph is not the card read -- it's a - // read of the SATB queue active flag. - // - // The G1 post-write subtree is also optional, this time when the - // new value being written is either null or can be identified as a - // newly allocated (young gen) object with no intervening control - // flow. The latter cannot happen but the former may, in which case - // the card mark membar is omitted and the memory feeds form the - // leading membar and the SToreN/P are merged direct into the - // trailing membar as per the normal subgraph. So, the only special - // case which arises is when the post-write subgraph is generated. - // - // The kernel of the post-write G1 subgraph is the card mark itself - // which includes a card mark memory barrier (MemBarVolatile), a - // card test (LoadB), and a conditional update (If feeding a - // StoreCM). These nodes are surrounded by a series of nested Ifs - // which try to avoid doing the card mark. The top level If skips if - // the object reference does not cross regions (i.e. it tests if - // (adr ^ val) >> log2(regsize) != 0) -- intra-region references - // need not be recorded. The next If, which skips on a NULL value, - // may be absent (it is not generated if the type of value is >= - // OopPtr::NotNull). The 3rd If skips writes to young regions (by - // checking if card_val != young). n.b. although this test requires - // a pre-read of the card it can safely be done before the StoreLoad - // barrier. However that does not bypass the need to reread the card - // after the barrier. A final, 4th If tests if the card is already - // marked. - // - // (pre-write subtree elided) - // . . . . . . . . . . . . - // C | M | M | M | - // Region Phi[M] StoreN | - // | / \ | | - // / \_______ / \ | | - // C / C \ . . . \ | | - // If CastP2X . . . | | | - // / \ | | | - // / \ | | | - // IfFalse IfTrue | | | - // | | | | /| - // | If | | / | - // | / \ | | / | - // | / \ \ | / | - // | IfFalse IfTrue MergeMem | - // | . . . / \ / | - // | / \ / | - // | IfFalse IfTrue / | - // | . . . | / | - // | If / | - // | / \ / | - // | / \ / | - // | IfFalse IfTrue / | - // | . . . | / | - // | \ / | - // | \ / | - // | MemBarVolatile__(card mark) | - // | || C | M \ M \ | - // | LoadB If | | | - // | / \ | | | - // | . . . | | | - // | \ | | / - // | StoreCM | / - // | . . . | / - // | _________/ / - // | / _____________/ - // | . . . . . . | / / - // | | | / _________/ - // | | Phi[M] / / - // | | | / / - // | | | / / - // | Region . . . Phi[M] _____/ - // | / | / - // | | / - // | . . . . . . | / - // | / | / - // Region | | Phi[M] - // | | | / Bot - // \ MergeMem - // \ / - // {MemBarCPUOrder} - // MemBarVolatile - // - // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice - // from the leading membar and the oopptr Mem slice from the Store - // into the card mark membar i.e. the memory flow to the card mark - // membar still looks like a normal graph. - // - // The trailing MergeMem merges an AliasIdxBot Mem slice with other - // Mem slices (from the StoreCM and other card mark queue stores). - // However in this case the AliasIdxBot Mem slice does not come - // direct from the card mark membar. It is merged through a series - // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow - // from the leading membar with the Mem feed from the card mark - // membar. Each Phi corresponds to one of the Ifs which may skip - // around the card mark membar. 
So when the If implementing the NULL - // value check has been elided the total number of Phis is 2 - // otherwise it is 3. - // - // The CAS graph when using G1GC also includes a pre-write subgraph - // and an optional post-write subgraph. The same variations are - // introduced as for CMS with conditional card marking i.e. the - // StoreP/N is swapped for a CompareAndSwapP/N with a following - // SCMemProj, the trailing MemBarVolatile for a MemBarCPUOrder + - // MemBarAcquire pair. There may be an extra If test introduced in - // the CAS case, when the boolean result of the CAS is tested by the - // caller. In that case an extra Region and AliasIdxBot Phi may be - // introduced before the MergeMem - // - // So, the upshot is that in all cases the subgraph will include a - // *normal* memory subgraph betwen the leading membar and its child - // membar: either a normal volatile put graph including a releasing - // StoreX and terminating with a trailing volatile membar or card - // mark volatile membar; or a normal CAS graph including a - // CompareAndSwapX + SCMemProj pair and terminating with a card mark - // volatile membar or a trailing cpu order and acquire membar - // pair. If the child membar is not a (volatile) card mark membar - // then it marks the end of the volatile put or CAS subgraph. If the - // child is a card mark membar then the normal subgraph will form - // part of a larger volatile put or CAS subgraph if and only if the - // child feeds an AliasIdxBot Mem feed to a trailing barrier via a - // MergeMem. That feed is either direct (for CMS) or via 2, 3 or 4 - // Phi nodes merging the leading barrier memory flow (for G1). - // - // The predicates controlling generation of instructions for store - // and barrier nodes employ a few simple helper functions (described - // below) which identify the presence or absence of all these - // subgraph configurations and provide a means of traversing from - // one node in the subgraph to another. - // is_CAS(int opcode) // // return true if opcode is one of the possible CompareAndSwapX @@ -1910,674 +1298,7 @@ source %{ // traverse when searching from a card mark membar for the merge mem // feeding a trailing membar or vice versa - int max_phis() - { - if (UseG1GC) { - return 4; - } else if (UseConcMarkSweepGC && UseCondCardMark) { - return 1; - } else { - return 0; - } - } - - // leading_to_normal - // - // graph traversal helper which detects the normal case Mem feed - // from a release membar (or, optionally, its cpuorder child) to a - // dependent volatile or acquire membar i.e. it ensures that one of - // the following 3 Mem flow subgraphs is present. - // - // MemBarRelease - // {MemBarCPUOrder} {leading} - // | \ . . . - // | StoreN/P[mo_release] . . . - // | / - // MergeMem - // | - // {MemBarCPUOrder} - // MemBarVolatile {trailing or card mark} - // - // MemBarRelease - // MemBarCPUOrder {leading} - // | \ . . . - // | CompareAndSwapX . . . - // | / - // MergeMem - // | - // MemBarVolatile {card mark} - // - // MemBarRelease - // MemBarCPUOrder {leading} - // | \ . . . - // | CompareAndSwapX . . . - // | / - // MergeMem - // | - // MemBarCPUOrder - // MemBarAcquire {trailing} - // - // if the correct configuration is present returns the trailing - // or cardmark membar otherwise NULL. - // - // the input membar is expected to be either a cpuorder membar or a - // release membar. in the latter case it should not have a cpu membar - // child. 
- // - // the returned value may be a card mark or trailing membar - // - - MemBarNode *leading_to_normal(MemBarNode *leading) - { - assert((leading->Opcode() == Op_MemBarRelease || - leading->Opcode() == Op_MemBarCPUOrder), - "expecting a volatile or cpuroder membar!"); - - // check the mem flow - ProjNode *mem = leading->proj_out(TypeFunc::Memory); - - if (!mem) { - return NULL; - } - - Node *x = NULL; - StoreNode * st = NULL; - LoadStoreNode *cas = NULL; - MergeMemNode *mm = NULL; - - for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { - x = mem->fast_out(i); - if (x->is_MergeMem()) { - if (mm != NULL) { - return NULL; - } - // two merge mems is one too many - mm = x->as_MergeMem(); - } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) { - // two releasing stores/CAS nodes is one too many - if (st != NULL || cas != NULL) { - return NULL; - } - st = x->as_Store(); - } else if (is_CAS(x->Opcode())) { - if (st != NULL || cas != NULL) { - return NULL; - } - cas = x->as_LoadStore(); - } - } - - // must have a store or a cas - if (!st && !cas) { - return NULL; - } - - // must have a merge - if (!mm) { - return NULL; - } - - Node *feed = NULL; - if (cas) { - // look for an SCMemProj - for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) { - x = cas->fast_out(i); - if (x->Opcode() == Op_SCMemProj) { - feed = x; - break; - } - } - if (feed == NULL) { - return NULL; - } - } else { - feed = st; - } - // ensure the feed node feeds the existing mergemem; - for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) { - x = feed->fast_out(i); - if (x == mm) { - break; - } - } - if (x != mm) { - return NULL; - } - - MemBarNode *mbar = NULL; - // ensure the merge feeds to the expected type of membar - for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) { - x = mm->fast_out(i); - if (x->is_MemBar()) { - if (x->Opcode() == Op_MemBarCPUOrder) { - // with a store any cpu order membar should precede a - // trailing volatile membar. with a cas it should precede a - // trailing acquire membar. in either case try to skip to - // that next membar - MemBarNode *y = x->as_MemBar(); - y = child_membar(y); - if (y != NULL) { - // skip to this new membar to do the check - x = y; - } - - } - if (x->Opcode() == Op_MemBarVolatile) { - mbar = x->as_MemBar(); - // for a volatile store this can be either a trailing membar - // or a card mark membar. for a cas it must be a card mark - // membar - guarantee(cas == NULL || is_card_mark_membar(mbar), - "in CAS graph volatile membar must be a card mark"); - } else if (cas != NULL && x->Opcode() == Op_MemBarAcquire) { - mbar = x->as_MemBar(); - } - break; - } - } - - return mbar; - } - - // normal_to_leading - // - // graph traversal helper which detects the normal case Mem feed - // from either a card mark or a trailing membar to a preceding - // release membar (optionally its cpuorder child) i.e. it ensures - // that one of the following 3 Mem flow subgraphs is present. - // - // MemBarRelease - // {MemBarCPUOrder} {leading} - // | \ . . . - // | StoreN/P[mo_release] . . . - // | / - // MergeMem - // | - // {MemBarCPUOrder} - // MemBarVolatile {trailing or card mark} - // - // MemBarRelease - // MemBarCPUOrder {leading} - // | \ . . . - // | CompareAndSwapX . . . - // | / - // MergeMem - // | - // MemBarVolatile {card mark} - // - // MemBarRelease - // MemBarCPUOrder {leading} - // | \ . . . - // | CompareAndSwapX . . . 
- // | / - // MergeMem - // | - // MemBarCPUOrder - // MemBarAcquire {trailing} - // - // this predicate checks for the same flow as the previous predicate - // but starting from the bottom rather than the top. - // - // if the configuration is present returns the cpuorder member for - // preference or when absent the release membar otherwise NULL. - // - // n.b. the input membar is expected to be a MemBarVolatile but - // need not be a card mark membar. - - MemBarNode *normal_to_leading(const MemBarNode *barrier) - { - // input must be a volatile membar - assert((barrier->Opcode() == Op_MemBarVolatile || - barrier->Opcode() == Op_MemBarAcquire), - "expecting a volatile or an acquire membar"); - bool barrier_is_acquire = barrier->Opcode() == Op_MemBarAcquire; - - // if we have an intervening cpu order membar then start the - // search from it - - Node *x = parent_membar(barrier); - - if (x == NULL) { - // stick with the original barrier - x = (Node *)barrier; - } else if (x->Opcode() != Op_MemBarCPUOrder) { - // any other barrier means this is not the graph we want - return NULL; - } - - // the Mem feed to the membar should be a merge - x = x ->in(TypeFunc::Memory); - if (!x->is_MergeMem()) - return NULL; - - MergeMemNode *mm = x->as_MergeMem(); - - // the merge should get its Bottom mem feed from the leading membar - x = mm->in(Compile::AliasIdxBot); - - // ensure this is a non control projection - if (!x->is_Proj() || x->is_CFG()) { - return NULL; - } - // if it is fed by a membar that's the one we want - x = x->in(0); - - if (!x->is_MemBar()) { - return NULL; - } - - MemBarNode *leading = x->as_MemBar(); - // reject invalid candidates - if (!leading_membar(leading)) { - return NULL; - } - - // ok, we have a leading membar, now for the sanity clauses - - // the leading membar must feed Mem to a releasing store or CAS - ProjNode *mem = leading->proj_out(TypeFunc::Memory); - StoreNode *st = NULL; - LoadStoreNode *cas = NULL; - for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { - x = mem->fast_out(i); - if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) { - // two stores or CASes is one too many - if (st != NULL || cas != NULL) { - return NULL; - } - st = x->as_Store(); - } else if (is_CAS(x->Opcode())) { - if (st != NULL || cas != NULL) { - return NULL; - } - cas = x->as_LoadStore(); - } - } - - // we cannot have both a store and a cas - if (st == NULL && cas == NULL) { - // we have neither -- this is not a normal graph - return NULL; - } - if (st == NULL) { - // if we started from a volatile membar and found a CAS then the - // original membar ought to be for a card mark - guarantee((barrier_is_acquire || is_card_mark_membar(barrier)), - "unexpected volatile barrier (i.e. 
not card mark) in CAS graph"); - // check that the CAS feeds the merge we used to get here via an - // intermediary SCMemProj - Node *scmemproj = NULL; - for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) { - x = cas->fast_out(i); - if (x->Opcode() == Op_SCMemProj) { - scmemproj = x; - break; - } - } - if (scmemproj == NULL) { - return NULL; - } - for (DUIterator_Fast imax, i = scmemproj->fast_outs(imax); i < imax; i++) { - x = scmemproj->fast_out(i); - if (x == mm) { - return leading; - } - } - } else { - // we should not have found a store if we started from an acquire - guarantee(!barrier_is_acquire, - "unexpected trailing acquire barrier in volatile store graph"); - - // the store should feed the merge we used to get here - for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) { - if (st->fast_out(i) == mm) { - return leading; - } - } - } - - return NULL; - } - - // card_mark_to_trailing - // - // graph traversal helper which detects extra, non-normal Mem feed - // from a card mark volatile membar to a trailing membar i.e. it - // ensures that one of the following three GC post-write Mem flow - // subgraphs is present. - // - // 1) - // . . . - // | - // MemBarVolatile (card mark) - // | | - // | StoreCM - // | | - // | . . . - // Bot | / - // MergeMem - // | - // {MemBarCPUOrder} OR MemBarCPUOrder - // MemBarVolatile {trailing} MemBarAcquire {trailing} - // - // - // 2) - // MemBarRelease/CPUOrder (leading) - // | - // | - // |\ . . . - // | \ | - // | \ MemBarVolatile (card mark) - // | \ | | - // \ \ | StoreCM . . . - // \ \ | - // \ Phi - // \ / - // Phi . . . - // Bot | / - // MergeMem - // | - // {MemBarCPUOrder} OR MemBarCPUOrder - // MemBarVolatile {trailing} MemBarAcquire {trailing} - // - // 3) - // MemBarRelease/CPUOrder (leading) - // | - // |\ - // | \ - // | \ . . . - // | \ | - // |\ \ MemBarVolatile (card mark) - // | \ \ | | - // | \ \ | StoreCM . . . - // | \ \ | - // \ \ Phi - // \ \ / - // \ Phi - // \ / - // Phi . . . - // Bot | / - // MergeMem - // | - // | - // {MemBarCPUOrder} OR MemBarCPUOrder - // MemBarVolatile {trailing} MemBarAcquire {trailing} - // - // 4) - // MemBarRelease/CPUOrder (leading) - // | - // |\ - // | \ - // | \ - // | \ - // |\ \ - // | \ \ - // | \ \ . . . - // | \ \ | - // |\ \ \ MemBarVolatile (card mark) - // | \ \ \ / | - // | \ \ \ / StoreCM . . . - // | \ \ Phi - // \ \ \ / - // \ \ Phi - // \ \ / - // \ Phi - // \ / - // Phi . . . - // Bot | / - // MergeMem - // | - // | - // MemBarCPUOrder - // MemBarAcquire {trailing} - // - // configuration 1 is only valid if UseConcMarkSweepGC && - // UseCondCardMark - // - // configuration 2, is only valid if UseConcMarkSweepGC && - // UseCondCardMark or if UseG1GC - // - // configurations 3 and 4 are only valid if UseG1GC. - // - // if a valid configuration is present returns the trailing membar - // otherwise NULL. - // - // n.b. the supplied membar is expected to be a card mark - // MemBarVolatile i.e. 
the caller must ensure the input node has the - // correct operand and feeds Mem to a StoreCM node - - MemBarNode *card_mark_to_trailing(const MemBarNode *barrier) - { - // input must be a card mark volatile membar - assert(is_card_mark_membar(barrier), "expecting a card mark membar"); - - Node *feed = barrier->proj_out(TypeFunc::Memory); - Node *x; - MergeMemNode *mm = NULL; - - const int MAX_PHIS = max_phis(); // max phis we will search through - int phicount = 0; // current search count - - bool retry_feed = true; - while (retry_feed) { - // see if we have a direct MergeMem feed - for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) { - x = feed->fast_out(i); - // the correct Phi will be merging a Bot memory slice - if (x->is_MergeMem()) { - mm = x->as_MergeMem(); - break; - } - } - if (mm) { - retry_feed = false; - } else if (phicount++ < MAX_PHIS) { - // the barrier may feed indirectly via one or two Phi nodes - PhiNode *phi = NULL; - for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) { - x = feed->fast_out(i); - // the correct Phi will be merging a Bot memory slice - if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) { - phi = x->as_Phi(); - break; - } - } - if (!phi) { - return NULL; - } - // look for another merge below this phi - feed = phi; - } else { - // couldn't find a merge - return NULL; - } - } - - // sanity check this feed turns up as the expected slice - guarantee(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge"); - - MemBarNode *trailing = NULL; - // be sure we have a trailing membar fed by the merge - for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) { - x = mm->fast_out(i); - if (x->is_MemBar()) { - // if this is an intervening cpu order membar skip to the - // following membar - if (x->Opcode() == Op_MemBarCPUOrder) { - MemBarNode *y = x->as_MemBar(); - y = child_membar(y); - if (y != NULL) { - x = y; - } - } - if (x->Opcode() == Op_MemBarVolatile || - x->Opcode() == Op_MemBarAcquire) { - trailing = x->as_MemBar(); - } - break; - } - } - - return trailing; - } - - // trailing_to_card_mark - // - // graph traversal helper which detects extra, non-normal Mem feed - // from a trailing volatile membar to a preceding card mark volatile - // membar i.e. it identifies whether one of the three possible extra - // GC post-write Mem flow subgraphs is present - // - // this predicate checks for the same flow as the previous predicate - // but starting from the bottom rather than the top. - // - // if the configuration is present returns the card mark membar - // otherwise NULL - // - // n.b. the supplied membar is expected to be a trailing - // MemBarVolatile or MemBarAcquire i.e. 
the caller must ensure the - // input node has the correct opcode - - MemBarNode *trailing_to_card_mark(const MemBarNode *trailing) - { - assert(trailing->Opcode() == Op_MemBarVolatile || - trailing->Opcode() == Op_MemBarAcquire, - "expecting a volatile or acquire membar"); - assert(!is_card_mark_membar(trailing), - "not expecting a card mark membar"); - - Node *x = (Node *)trailing; - - // look for a preceding cpu order membar - MemBarNode *y = parent_membar(x->as_MemBar()); - if (y != NULL) { - // make sure it is a cpu order membar - if (y->Opcode() != Op_MemBarCPUOrder) { - // this is nto the graph we were looking for - return NULL; - } - // start the search from here - x = y; - } - - // the Mem feed to the membar should be a merge - x = x->in(TypeFunc::Memory); - if (!x->is_MergeMem()) { - return NULL; - } - - MergeMemNode *mm = x->as_MergeMem(); - - x = mm->in(Compile::AliasIdxBot); - // with G1 we may possibly see a Phi or two before we see a Memory - // Proj from the card mark membar - - const int MAX_PHIS = max_phis(); // max phis we will search through - int phicount = 0; // current search count - - bool retry_feed = !x->is_Proj(); - - while (retry_feed) { - if (x->is_Phi() && phicount++ < MAX_PHIS) { - PhiNode *phi = x->as_Phi(); - ProjNode *proj = NULL; - PhiNode *nextphi = NULL; - bool found_leading = false; - for (uint i = 1; i < phi->req(); i++) { - x = phi->in(i); - if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) { - nextphi = x->as_Phi(); - } else if (x->is_Proj()) { - int opcode = x->in(0)->Opcode(); - if (opcode == Op_MemBarVolatile) { - proj = x->as_Proj(); - } else if (opcode == Op_MemBarRelease || - opcode == Op_MemBarCPUOrder) { - // probably a leading membar - found_leading = true; - } - } - } - // if we found a correct looking proj then retry from there - // otherwise we must see a leading and a phi or this the - // wrong config - if (proj != NULL) { - x = proj; - retry_feed = false; - } else if (found_leading && nextphi != NULL) { - // retry from this phi to check phi2 - x = nextphi; - } else { - // not what we were looking for - return NULL; - } - } else { - return NULL; - } - } - // the proj has to come from the card mark membar - x = x->in(0); - if (!x->is_MemBar()) { - return NULL; - } - - MemBarNode *card_mark_membar = x->as_MemBar(); - - if (!is_card_mark_membar(card_mark_membar)) { - return NULL; - } - - return card_mark_membar; - } - - // trailing_to_leading - // - // graph traversal helper which checks the Mem flow up the graph - // from a (non-card mark) trailing membar attempting to locate and - // return an associated leading membar. it first looks for a - // subgraph in the normal configuration (relying on helper - // normal_to_leading). failing that it then looks for one of the - // possible post-write card mark subgraphs linking the trailing node - // to a the card mark membar (relying on helper - // trailing_to_card_mark), and then checks that the card mark membar - // is fed by a leading membar (once again relying on auxiliary - // predicate normal_to_leading). - // - // if the configuration is valid returns the cpuorder member for - // preference or when absent the release membar otherwise NULL. - // - // n.b. the input membar is expected to be either a volatile or - // acquire membar but in the former case must *not* be a card mark - // membar. 
- - MemBarNode *trailing_to_leading(const MemBarNode *trailing) - { - assert((trailing->Opcode() == Op_MemBarAcquire || - trailing->Opcode() == Op_MemBarVolatile), - "expecting an acquire or volatile membar"); - assert((trailing->Opcode() != Op_MemBarVolatile || - !is_card_mark_membar(trailing)), - "not expecting a card mark membar"); - - MemBarNode *leading = normal_to_leading(trailing); - - if (leading) { - return leading; - } - - // there is no normal path from trailing to leading membar. see if - // we can arrive via a card mark membar - - MemBarNode *card_mark_membar = trailing_to_card_mark(trailing); - - if (!card_mark_membar) { - return NULL; - } - - return normal_to_leading(card_mark_membar); - } - - // predicates controlling emit of ldr/ldar and associated dmb +// predicates controlling emit of ldr/ldar and associated dmb bool unnecessary_acquire(const Node *barrier) { @@ -2588,40 +1309,19 @@ bool unnecessary_acquire(const Node *barrier) return false; } - // a volatile read derived from bytecode (or also from an inlined - // SHA field read via LibraryCallKit::load_field_from_object) - // manifests as a LoadX[mo_acquire] followed by an acquire membar - // with a bogus read dependency on it's preceding load. so in those - // cases we will find the load node at the PARMS offset of the - // acquire membar. n.b. there may be an intervening DecodeN node. + MemBarNode* mb = barrier->as_MemBar(); - Node *x = barrier->lookup(TypeFunc::Parms); - if (x) { - // we are starting from an acquire and it has a fake dependency - // - // need to check for - // - // LoadX[mo_acquire] - // { |1 } - // {DecodeN} - // |Parms - // MemBarAcquire* - // - // where * tags node we were passed - // and |k means input k - if (x->is_DecodeNarrowPtr()) { - x = x->in(1); - } - - return (x->is_Load() && x->as_Load()->is_acquire()); + if (mb->trailing_load()) { + return true; } - // other option for unnecessary membar is that it is a trailing node - // belonging to a CAS + if (mb->trailing_load_store()) { + Node* load_store = mb->in(MemBarNode::Precedent); + assert(load_store->is_LoadStore(), "unexpected graph shape"); + return is_CAS(load_store->Opcode()); + } - MemBarNode *leading = trailing_to_leading(barrier->as_MemBar()); - - return leading != NULL; + return false; } bool needs_acquiring_load(const Node *n) @@ -2634,45 +1334,7 @@ bool needs_acquiring_load(const Node *n) LoadNode *ld = n->as_Load(); - if (!ld->is_acquire()) { - return false; - } - - // check if this load is feeding an acquire membar - // - // LoadX[mo_acquire] - // { |1 } - // {DecodeN} - // |Parms - // MemBarAcquire* - // - // where * tags node we were passed - // and |k means input k - - Node *start = ld; - Node *mbacq = NULL; - - // if we hit a DecodeNarrowPtr we reset the start node and restart - // the search through the outputs - restart: - - for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) { - Node *x = start->fast_out(i); - if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) { - mbacq = x; - } else if (!mbacq && - (x->is_DecodeNarrowPtr() || - (x->is_Mach() && x->Opcode() == Op_DecodeN))) { - start = x; - goto restart; - } - } - - if (mbacq) { - return true; - } - - return false; + return ld->is_acquire(); } bool unnecessary_release(const Node *n) @@ -2686,32 +1348,27 @@ bool unnecessary_release(const Node *n) return false; } - // if there is a dependent CPUOrder barrier then use that as the - // leading - MemBarNode *barrier = n->as_MemBar(); - // check for an intervening cpuorder membar - MemBarNode *b = 
child_membar(barrier); - if (b && b->Opcode() == Op_MemBarCPUOrder) { - // ok, so start the check from the dependent cpuorder barrier - barrier = b; - } - - // must start with a normal feed - MemBarNode *child_barrier = leading_to_normal(barrier); - - if (!child_barrier) { + if (!barrier->leading()) { return false; - } + } else { + Node* trailing = barrier->trailing_membar(); + MemBarNode* trailing_mb = trailing->as_MemBar(); + assert(trailing_mb->trailing(), "Not a trailing membar?"); + assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars"); - if (!is_card_mark_membar(child_barrier)) { - // this is the trailing membar and we are done - return true; + Node* mem = trailing_mb->in(MemBarNode::Precedent); + if (mem->is_Store()) { + assert(mem->as_Store()->is_release(), ""); + assert(trailing_mb->Opcode() == Op_MemBarVolatile, ""); + return true; + } else { + assert(mem->is_LoadStore(), ""); + assert(trailing_mb->Opcode() == Op_MemBarAcquire, ""); + return is_CAS(mem->Opcode()); + } } - - // must be sure this card mark feeds a trailing membar - MemBarNode *trailing = card_mark_to_trailing(child_barrier); - return (trailing != NULL); + return false; } bool unnecessary_volatile(const Node *n) @@ -2724,17 +1381,18 @@ bool unnecessary_volatile(const Node *n) MemBarNode *mbvol = n->as_MemBar(); - // first we check if this is part of a card mark. if so then we have - // to generate a StoreLoad barrier - - if (is_card_mark_membar(mbvol)) { - return false; + bool release = mbvol->trailing_store(); + assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), ""); +#ifdef ASSERT + if (release) { + Node* leading = mbvol->leading_membar(); + assert(leading->Opcode() == Op_MemBarRelease, ""); + assert(leading->as_MemBar()->leading_store(), ""); + assert(leading->as_MemBar()->trailing_membar() == mbvol, ""); } +#endif - // ok, if it's not a card mark then we still need to check if it is - // a trailing membar of a volatile put graph. - - return (trailing_to_leading(mbvol) != NULL); + return release; } // predicates controlling emit of str/stlr and associated dmbs @@ -2749,53 +1407,7 @@ bool needs_releasing_store(const Node *n) StoreNode *st = n->as_Store(); - // the store must be marked as releasing - if (!st->is_release()) { - return false; - } - - // the store must be fed by a membar - - Node *x = st->lookup(StoreNode::Memory); - - if (! x || !x->is_Proj()) { - return false; - } - - ProjNode *proj = x->as_Proj(); - - x = proj->lookup(0); - - if (!x || !x->is_MemBar()) { - return false; - } - - MemBarNode *barrier = x->as_MemBar(); - - // if the barrier is a release membar or a cpuorder mmebar fed by a - // release membar then we need to check whether that forms part of a - // volatile put graph. - - // reject invalid candidates - if (!leading_membar(barrier)) { - return false; - } - - // does this lead a normal subgraph? 
- MemBarNode *mbvol = leading_to_normal(barrier); - - if (!mbvol) { - return false; - } - - // all done unless this is a card mark - if (!is_card_mark_membar(mbvol)) { - return true; - } - - // we found a card mark -- just make sure we have a trailing barrier - - return (card_mark_to_trailing(mbvol) != NULL); + return st->trailing_membar() != NULL; } // predicate controlling translation of CAS @@ -2809,48 +1421,9 @@ bool needs_acquiring_load_exclusive(const Node *n) return false; } - // CAS nodes only ought to turn up in inlined unsafe CAS operations -#ifdef ASSERT - LoadStoreNode *st = n->as_LoadStore(); + LoadStoreNode* ldst = n->as_LoadStore(); + assert(ldst->trailing_membar() != NULL, "expected trailing membar"); - // the store must be fed by a membar - - Node *x = st->lookup(StoreNode::Memory); - - assert (x && x->is_Proj(), "CAS not fed by memory proj!"); - - ProjNode *proj = x->as_Proj(); - - x = proj->lookup(0); - - assert (x && x->is_MemBar(), "CAS not fed by membar!"); - - MemBarNode *barrier = x->as_MemBar(); - - // the barrier must be a cpuorder mmebar fed by a release membar - - guarantee(barrier->Opcode() == Op_MemBarCPUOrder, - "CAS not fed by cpuorder membar!"); - - MemBarNode *b = parent_membar(barrier); - assert ((b != NULL && b->Opcode() == Op_MemBarRelease), - "CAS not fed by cpuorder+release membar pair!"); - - // does this lead a normal subgraph? - MemBarNode *mbar = leading_to_normal(barrier); - - guarantee(mbar != NULL, "CAS not embedded in normal graph!"); - - // if this is a card mark membar check we have a trailing acquire - - if (is_card_mark_membar(mbar)) { - mbar = card_mark_to_trailing(mbar); - } - - guarantee(mbar != NULL, "card mark membar for CAS not embedded in normal graph!"); - - guarantee(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire"); -#endif // ASSERT // so we can just return true here return true; } @@ -11050,6 +9623,24 @@ instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) ins_pipe(imac_reg_reg); %} +// Combined Integer Multiply & Neg + +instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{ + match(Set dst (MulI (SubI zero src1) src2)); + match(Set dst (MulI src1 (SubI zero src2))); + + ins_cost(INSN_COST * 3); + format %{ "mneg $dst, $src1, $src2" %} + + ins_encode %{ + __ mnegw(as_Register($dst$$reg), + as_Register($src1$$reg), + as_Register($src2$$reg)); + %} + + ins_pipe(imac_reg_reg); +%} + // Combined Long Multiply & Add/Sub instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{ @@ -11084,6 +9675,24 @@ instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{ ins_pipe(lmac_reg_reg); %} +// Combined Long Multiply & Neg + +instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{ + match(Set dst (MulL (SubL zero src1) src2)); + match(Set dst (MulL src1 (SubL zero src2))); + + ins_cost(INSN_COST * 5); + format %{ "mneg $dst, $src1, $src2" %} + + ins_encode %{ + __ mneg(as_Register($dst$$reg), + as_Register($src1$$reg), + as_Register($src2$$reg)); + %} + + ins_pipe(lmac_reg_reg); +%} + // Integer Divide instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp index 821c00c6dcc..02a145382d0 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp @@ -2167,6 +2167,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { Register length 
= op->length()->as_register(); Register tmp = op->tmp()->as_register(); + __ resolve(ACCESS_READ, src); + __ resolve(ACCESS_WRITE, dst); + CodeStub* stub = op->stub(); int flags = op->flags(); BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; @@ -2510,6 +2513,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { scratch = op->scratch_opr()->as_register(); } assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); + __ resolve(ACCESS_READ | ACCESS_WRITE, obj); // add debug info for NullPointerException only if one is possible int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry()); if (op->info() != NULL) { diff --git a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp index e9e6af951e5..5fb75233a16 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp @@ -941,6 +941,10 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) { index = tmp; } + if (is_updateBytes) { + base_op = access_resolve(ACCESS_READ, base_op); + } + if (offset) { LIR_Opr tmp = new_pointer_register(); __ add(base_op, LIR_OprFact::intConst(offset), tmp); @@ -1019,6 +1023,10 @@ void LIRGenerator::do_update_CRC32C(Intrinsic* x) { index = tmp; } + if (is_updateBytes) { + base_op = access_resolve(ACCESS_READ, base_op); + } + if (offset) { LIR_Opr tmp = new_pointer_register(); __ add(base_op, LIR_OprFact::intConst(offset), tmp); diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp index df0ce767118..1f6908482cf 100644 --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp @@ -3038,6 +3038,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { Register length = op->length()->as_register(); Register tmp = op->tmp()->as_register(); + __ resolve(ACCESS_READ, src); + __ resolve(ACCESS_WRITE, dst); + CodeStub* stub = op->stub(); int flags = op->flags(); BasicType basic_type = default_type != NULL ? 
default_type->element_type()->basic_type() : T_ILLEGAL; @@ -3476,6 +3479,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { scratch = op->scratch_opr()->as_register(); } assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); + __ resolve(ACCESS_READ | ACCESS_WRITE, obj); // add debug info for NullPointerException only if one is possible int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry()); if (op->info() != NULL) { diff --git a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp index 90312cf9a46..5e57b3dc3c1 100644 --- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp @@ -997,6 +997,10 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) { } #endif + if (is_updateBytes) { + base_op = access_resolve(IS_NOT_NULL | ACCESS_READ, base_op); + } + LIR_Address* a = new LIR_Address(base_op, index, offset, @@ -1054,7 +1058,7 @@ void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) { constant_aOffset = result_aOffset->as_jlong(); result_aOffset = LIR_OprFact::illegalOpr; } - LIR_Opr result_a = a.result(); + LIR_Opr result_a = access_resolve(ACCESS_READ, a.result()); long constant_bOffset = 0; LIR_Opr result_bOffset = bOffset.result(); @@ -1062,7 +1066,7 @@ void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) { constant_bOffset = result_bOffset->as_jlong(); result_bOffset = LIR_OprFact::illegalOpr; } - LIR_Opr result_b = b.result(); + LIR_Opr result_b = access_resolve(ACCESS_READ, b.result()); #ifndef _LP64 result_a = new_register(T_INT); diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp index e8b4b7414d0..4901eeae6c7 100644 --- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp @@ -23,10 +23,12 @@ #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" +#include "code/codeBlob.hpp" #include "gc/z/zBarrier.inline.hpp" #include "gc/z/zBarrierSet.hpp" #include "gc/z/zBarrierSetAssembler.hpp" #include "gc/z/zBarrierSetRuntime.hpp" +#include "memory/resourceArea.hpp" #include "runtime/stubCodeGenerator.hpp" #include "utilities/macros.hpp" #ifdef COMPILER1 diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp index 92328f90cff..d6e106786d8 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -3123,6 +3123,16 @@ void MacroAssembler::store_double(Address dst) { } } +void MacroAssembler::push_zmm(XMMRegister reg) { + lea(rsp, Address(rsp, -64)); // Use lea to not affect flags + evmovdqul(Address(rsp, 0), reg, Assembler::AVX_512bit); +} + +void MacroAssembler::pop_zmm(XMMRegister reg) { + evmovdqul(reg, Address(rsp, 0), Assembler::AVX_512bit); + lea(rsp, Address(rsp, 64)); // Use lea to not affect flags +} + void MacroAssembler::fremr(Register tmp) { save_rax(tmp); { Label L; @@ -3848,33 +3858,25 @@ void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) { } else if ((dst_enc < 16) && (src_enc < 16)) { Assembler::pcmpeqb(dst, src); } else if (src_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::pcmpeqb(xmm0, src); movdqu(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else if (dst_enc < 16) { - subptr(rsp, 64); - 
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, src, Assembler::AVX_512bit); Assembler::pcmpeqb(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm0); + push_zmm(xmm1); movdqu(xmm0, src); movdqu(xmm1, dst); Assembler::pcmpeqb(xmm1, xmm0); movdqu(dst, xmm1); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); + pop_zmm(xmm0); } } @@ -3886,33 +3888,25 @@ void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) { } else if ((dst_enc < 16) && (src_enc < 16)) { Assembler::pcmpeqw(dst, src); } else if (src_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::pcmpeqw(xmm0, src); movdqu(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else if (dst_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, src, Assembler::AVX_512bit); Assembler::pcmpeqw(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm0); + push_zmm(xmm1); movdqu(xmm0, src); movdqu(xmm1, dst); Assembler::pcmpeqw(xmm1, xmm0); movdqu(dst, xmm1); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); + pop_zmm(xmm0); } } @@ -3921,13 +3915,11 @@ void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) { if (dst_enc < 16) { Assembler::pcmpestri(dst, src, imm8); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::pcmpestri(xmm0, src, imm8); movdqu(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } } @@ -3937,33 +3929,25 @@ void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { if ((dst_enc < 16) && (src_enc < 16)) { Assembler::pcmpestri(dst, src, imm8); } else if (src_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::pcmpestri(xmm0, src, imm8); movdqu(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else if (dst_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, src, Assembler::AVX_512bit); Assembler::pcmpestri(dst, xmm0, imm8); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm0); + push_zmm(xmm1); movdqu(xmm0, src); movdqu(xmm1, dst); Assembler::pcmpestri(xmm1, xmm0, imm8); movdqu(dst, xmm1); - 
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); + pop_zmm(xmm0); } } @@ -3975,33 +3959,25 @@ void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) { } else if ((dst_enc < 16) && (src_enc < 16)) { Assembler::pmovzxbw(dst, src); } else if (src_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::pmovzxbw(xmm0, src); movdqu(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else if (dst_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, src, Assembler::AVX_512bit); Assembler::pmovzxbw(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm0); + push_zmm(xmm1); movdqu(xmm0, src); movdqu(xmm1, dst); Assembler::pmovzxbw(xmm1, xmm0); movdqu(dst, xmm1); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); + pop_zmm(xmm0); } } @@ -4012,13 +3988,11 @@ void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) { } else if (dst_enc < 16) { Assembler::pmovzxbw(dst, src); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::pmovzxbw(xmm0, src); movdqu(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } } @@ -4027,12 +4001,10 @@ void MacroAssembler::pmovmskb(Register dst, XMMRegister src) { if (src_enc < 16) { Assembler::pmovmskb(dst, src); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, src, Assembler::AVX_512bit); Assembler::pmovmskb(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } } @@ -4042,31 +4014,23 @@ void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) { if ((dst_enc < 16) && (src_enc < 16)) { Assembler::ptest(dst, src); } else if (src_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::ptest(xmm0, src); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else if (dst_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, src, Assembler::AVX_512bit); Assembler::ptest(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm0); + push_zmm(xmm1); movdqu(xmm0, src); movdqu(xmm1, dst); Assembler::ptest(xmm1, xmm0); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); + pop_zmm(xmm0); } } @@ -4221,13 +4185,11 @@ void 
MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, A evmovdqul(dst, xmm0, Assembler::AVX_512bit); evmovdqul(xmm0, src, Assembler::AVX_512bit); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, nds, Assembler::AVX_512bit); vandps(xmm0, xmm0, negate_field, vector_len); evmovdqul(dst, xmm0, Assembler::AVX_512bit); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } } } @@ -4258,13 +4220,11 @@ void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, A evmovdqul(dst, xmm0, Assembler::AVX_512bit); evmovdqul(xmm0, src, Assembler::AVX_512bit); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, nds, Assembler::AVX_512bit); vandpd(xmm0, xmm0, negate_field, vector_len); evmovdqul(dst, xmm0, Assembler::AVX_512bit); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } } } @@ -4294,16 +4254,14 @@ void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, i evmovdqul(xmm0, nds, Assembler::AVX_512bit); } else { // worse case scenario, all regs are in the upper bank - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm1); evmovdqul(nds, xmm0, Assembler::AVX_512bit); evmovdqul(xmm1, src, Assembler::AVX_512bit); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::vpaddb(xmm0, xmm0, xmm1, vector_len); evmovdqul(dst, xmm0, Assembler::AVX_512bit); evmovdqul(xmm0, nds, Assembler::AVX_512bit); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); } } @@ -4353,16 +4311,14 @@ void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, i evmovdqul(xmm0, nds, Assembler::AVX_512bit); } else { // worse case scenario, all regs are in the upper bank - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm1); evmovdqul(nds, xmm0, Assembler::AVX_512bit); evmovdqul(xmm1, src, Assembler::AVX_512bit); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::vpaddw(xmm0, xmm0, xmm1, vector_len); evmovdqul(dst, xmm0, Assembler::AVX_512bit); evmovdqul(xmm0, nds, Assembler::AVX_512bit); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); } } @@ -4404,33 +4360,25 @@ void MacroAssembler::vpbroadcastw(XMMRegister dst, XMMRegister src) { } else if ((dst_enc < 16) && (src_enc < 16)) { Assembler::vpbroadcastw(dst, src); } else if (src_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::vpbroadcastw(xmm0, src); movdqu(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else if (dst_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, src, Assembler::AVX_512bit); Assembler::vpbroadcastw(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm0); + push_zmm(xmm1); movdqu(xmm0, src); movdqu(xmm1, dst); Assembler::vpbroadcastw(xmm1, xmm0); movdqu(dst, xmm1); - evmovdqul(xmm1, Address(rsp, 0), 
Assembler::AVX_512bit); - addptr(rsp, 64); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); + pop_zmm(xmm0); } } @@ -4442,33 +4390,25 @@ void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, if ((dst_enc < 16) && (src_enc < 16)) { Assembler::vpcmpeqb(dst, nds, src, vector_len); } else if (src_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::vpcmpeqb(xmm0, xmm0, src, vector_len); movdqu(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else if (dst_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, src, Assembler::AVX_512bit); Assembler::vpcmpeqb(dst, dst, xmm0, vector_len); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm0); + push_zmm(xmm1); movdqu(xmm0, src); movdqu(xmm1, dst); Assembler::vpcmpeqb(xmm1, xmm1, xmm0, vector_len); movdqu(dst, xmm1); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); + pop_zmm(xmm0); } } @@ -4480,33 +4420,25 @@ void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, if ((dst_enc < 16) && (src_enc < 16)) { Assembler::vpcmpeqw(dst, nds, src, vector_len); } else if (src_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::vpcmpeqw(xmm0, xmm0, src, vector_len); movdqu(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else if (dst_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, src, Assembler::AVX_512bit); Assembler::vpcmpeqw(dst, dst, xmm0, vector_len); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm0); + push_zmm(xmm1); movdqu(xmm0, src); movdqu(xmm1, dst); Assembler::vpcmpeqw(xmm1, xmm1, xmm0, vector_len); movdqu(dst, xmm1); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); + pop_zmm(xmm0); } } @@ -4517,13 +4449,11 @@ void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) { } else if (dst_enc < 16) { Assembler::vpmovzxbw(dst, src, vector_len); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::vpmovzxbw(xmm0, src, vector_len); movdqu(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } } @@ -4532,12 +4462,10 @@ void MacroAssembler::vpmovmskb(Register dst, XMMRegister src) { if (src_enc < 16) { Assembler::vpmovmskb(dst, src); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + 
push_zmm(xmm0); evmovdqul(xmm0, src, Assembler::AVX_512bit); Assembler::vpmovmskb(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } } @@ -4566,16 +4494,14 @@ void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, evmovdqul(xmm0, nds, Assembler::AVX_512bit); } else { // worse case scenario, all regs are in the upper bank - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm1); evmovdqul(nds, xmm0, Assembler::AVX_512bit); evmovdqul(xmm1, src, Assembler::AVX_512bit); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::vpmullw(xmm0, xmm0, xmm1, vector_len); evmovdqul(dst, xmm0, Assembler::AVX_512bit); evmovdqul(xmm0, nds, Assembler::AVX_512bit); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); } } @@ -4625,16 +4551,14 @@ void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, i evmovdqul(xmm0, nds, Assembler::AVX_512bit); } else { // worse case scenario, all regs are in the upper bank - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm1); evmovdqul(nds, xmm0, Assembler::AVX_512bit); evmovdqul(xmm1, src, Assembler::AVX_512bit); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::vpsubb(xmm0, xmm0, xmm1, vector_len); evmovdqul(dst, xmm0, Assembler::AVX_512bit); evmovdqul(xmm0, nds, Assembler::AVX_512bit); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); } } @@ -4684,16 +4608,14 @@ void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, i evmovdqul(xmm0, nds, Assembler::AVX_512bit); } else { // worse case scenario, all regs are in the upper bank - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm1); evmovdqul(nds, xmm0, Assembler::AVX_512bit); evmovdqul(xmm1, src, Assembler::AVX_512bit); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::vpsubw(xmm0, xmm0, xmm1, vector_len); evmovdqul(dst, xmm0, Assembler::AVX_512bit); evmovdqul(xmm0, nds, Assembler::AVX_512bit); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); } } @@ -4751,8 +4673,7 @@ void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, evmovdqul(dst, nds, Assembler::AVX_512bit); } else { // worse case scenario, all regs are in the upper bank - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm1); evmovdqul(nds, xmm0, Assembler::AVX_512bit); evmovdqul(xmm1, shift, Assembler::AVX_512bit); evmovdqul(xmm0, dst, Assembler::AVX_512bit); @@ -4760,8 +4681,7 @@ void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, evmovdqul(xmm1, dst, Assembler::AVX_512bit); evmovdqul(dst, xmm0, Assembler::AVX_512bit); evmovdqul(xmm0, nds, Assembler::AVX_512bit); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); } } @@ -4819,8 +4739,7 @@ void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, evmovdqul(dst, nds, Assembler::AVX_512bit); } else { // worse case scenario, all regs are in the upper bank - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm1); evmovdqul(nds, xmm0, Assembler::AVX_512bit); evmovdqul(xmm1, shift, Assembler::AVX_512bit); evmovdqul(xmm0, dst, Assembler::AVX_512bit); @@ -4828,8 +4747,7 @@ void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, 
XMMRegister shift, evmovdqul(xmm1, dst, Assembler::AVX_512bit); evmovdqul(dst, xmm0, Assembler::AVX_512bit); evmovdqul(xmm0, nds, Assembler::AVX_512bit); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); } } @@ -4887,8 +4805,7 @@ void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, evmovdqul(dst, nds, Assembler::AVX_512bit); } else { // worse case scenario, all regs are in the upper bank - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm1); evmovdqul(nds, xmm0, Assembler::AVX_512bit); evmovdqul(xmm1, shift, Assembler::AVX_512bit); evmovdqul(xmm0, dst, Assembler::AVX_512bit); @@ -4896,8 +4813,7 @@ void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, evmovdqul(xmm1, dst, Assembler::AVX_512bit); evmovdqul(dst, xmm0, Assembler::AVX_512bit); evmovdqul(xmm0, nds, Assembler::AVX_512bit); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); } } @@ -4928,31 +4844,23 @@ void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) { if ((dst_enc < 16) && (src_enc < 16)) { Assembler::vptest(dst, src); } else if (src_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::vptest(xmm0, src); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else if (dst_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, src, Assembler::AVX_512bit); Assembler::vptest(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm0); + push_zmm(xmm1); movdqu(xmm0, src); movdqu(xmm1, dst); Assembler::vptest(xmm1, xmm0); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); + pop_zmm(xmm0); } } @@ -4966,45 +4874,35 @@ void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) { if (dst_enc < 16) { Assembler::punpcklbw(dst, src); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::punpcklbw(xmm0, xmm0); evmovdqul(dst, xmm0, Assembler::AVX_512bit); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } } else { if ((src_enc < 16) && (dst_enc < 16)) { Assembler::punpcklbw(dst, src); } else if (src_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::punpcklbw(xmm0, src); evmovdqul(dst, xmm0, Assembler::AVX_512bit); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else if (dst_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, src, Assembler::AVX_512bit); Assembler::punpcklbw(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); - subptr(rsp, 64); - 
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm0); + push_zmm(xmm1); evmovdqul(xmm0, dst, Assembler::AVX_512bit); evmovdqul(xmm1, src, Assembler::AVX_512bit); Assembler::punpcklbw(xmm0, xmm1); evmovdqul(dst, xmm0, Assembler::AVX_512bit); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); + pop_zmm(xmm0); } } } else { @@ -5020,12 +4918,10 @@ void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) { if (dst_enc < 16) { Assembler::pshufd(dst, src, mode); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); Assembler::pshufd(xmm0, src, mode); evmovdqul(dst, xmm0, Assembler::AVX_512bit); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } } } @@ -5040,45 +4936,35 @@ void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { if (dst_enc < 16) { Assembler::pshuflw(dst, src, mode); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::pshuflw(xmm0, xmm0, mode); evmovdqul(dst, xmm0, Assembler::AVX_512bit); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } } else { if ((src_enc < 16) && (dst_enc < 16)) { Assembler::pshuflw(dst, src, mode); } else if (src_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, dst, Assembler::AVX_512bit); Assembler::pshuflw(xmm0, src, mode); evmovdqul(dst, xmm0, Assembler::AVX_512bit); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else if (dst_enc < 16) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); evmovdqul(xmm0, src, Assembler::AVX_512bit); Assembler::pshuflw(dst, xmm0, mode); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); + push_zmm(xmm0); + push_zmm(xmm1); evmovdqul(xmm0, dst, Assembler::AVX_512bit); evmovdqul(xmm1, src, Assembler::AVX_512bit); Assembler::pshuflw(xmm0, xmm1, mode); evmovdqul(dst, xmm0, Assembler::AVX_512bit); - evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm1); + pop_zmm(xmm0); } } } else { @@ -5166,13 +5052,11 @@ void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral if (VM_Version::supports_avx512novl() && (nds_upper_bank || dst_upper_bank)) { if (dst_upper_bank) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); movflt(xmm0, nds); vxorps(xmm0, xmm0, src, Assembler::AVX_128bit); movflt(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else { movflt(dst, nds); vxorps(dst, dst, src, Assembler::AVX_128bit); @@ -5190,13 +5074,11 @@ void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral if (VM_Version::supports_avx512novl() && (nds_upper_bank || dst_upper_bank)) { if (dst_upper_bank) { - subptr(rsp, 64); - evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); + push_zmm(xmm0); 
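// [Editor's note, sketch only] push_zmm/pop_zmm (defined earlier in this
// file's changes) fold the recurring spill/restore idiom into one call pair.
// Every wrapper in this run of hunks now follows the same shape; with opN
// standing in for any legacy SSE instruction that cannot encode xmm16-xmm31,
// the "dst lives in the upper bank, src is encodable" case reads:
//
//   push_zmm(xmm0);                               // preserve caller's xmm0 on the stack
//   evmovdqul(xmm0, dst, Assembler::AVX_512bit);  // stage dst in an encodable register
//   Assembler::opN(xmm0, src);                    // do the real work
//   movdqu(dst, xmm0);                            // write the result back to dst
//   pop_zmm(xmm0);                                // restore caller's xmm0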
movdbl(xmm0, nds); vxorpd(xmm0, xmm0, src, Assembler::AVX_128bit); movdbl(dst, xmm0); - evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); - addptr(rsp, 64); + pop_zmm(xmm0); } else { movdbl(dst, nds); vxorpd(dst, dst, src, Assembler::AVX_128bit); @@ -10567,7 +10449,7 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le XMMRegister tmp1Reg, XMMRegister tmp2Reg, XMMRegister tmp3Reg, XMMRegister tmp4Reg, Register tmp5, Register result) { - Label copy_chars_loop, return_length, return_zero, done, below_threshold; + Label copy_chars_loop, return_length, return_zero, done; // rsi: src // rdi: dst @@ -10590,13 +10472,12 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le set_vector_masking(); // opening of the stub context for programming mask registers - Label copy_32_loop, copy_loop_tail, restore_k1_return_zero; + Label copy_32_loop, copy_loop_tail, restore_k1_return_zero, below_threshold; - // alignement - Label post_alignement; + // alignment + Label post_alignment; - // if length of the string is less than 16, handle it in an old fashioned - // way + // if length of the string is less than 16, handle it in an old fashioned way testl(len, -32); jcc(Assembler::zero, below_threshold); @@ -10609,7 +10490,7 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le kmovql(k3, k1); testl(len, -64); - jcc(Assembler::zero, post_alignement); + jcc(Assembler::zero, post_alignment); movl(tmp5, dst); andl(tmp5, (32 - 1)); @@ -10618,7 +10499,7 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le // bail out when there is nothing to be done testl(tmp5, 0xFFFFFFFF); - jcc(Assembler::zero, post_alignement); + jcc(Assembler::zero, post_alignment); // ~(~0 << len), where len is the # of remaining elements to process movl(result, 0xFFFFFFFF); @@ -10638,8 +10519,8 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le addptr(dst, tmp5); subl(len, tmp5); - bind(post_alignement); - // end of alignement + bind(post_alignment); + // end of alignment movl(tmp5, len); andl(tmp5, (32 - 1)); // tail count (in chars) @@ -10694,11 +10575,12 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le jmp(return_zero); clear_vector_masking(); // closing of the stub context for programming mask registers - } - if (UseSSE42Intrinsics) { - Label copy_32_loop, copy_16, copy_tail; bind(below_threshold); + } + + if (UseSSE42Intrinsics) { + Label copy_32_loop, copy_16, copy_tail; movl(result, len); @@ -10812,8 +10694,7 @@ void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len Label copy_32_loop, copy_tail; Register tmp3_aliased = len; - // if length of the string is less than 16, handle it in an old fashioned - // way + // if length of the string is less than 16, handle it in an old fashioned way testl(len, -16); jcc(Assembler::zero, below_threshold); @@ -10927,7 +10808,10 @@ void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len addptr(dst, 8); bind(copy_bytes); + } else { + bind(below_threshold); } + testl(len, len); jccb(Assembler::zero, done); lea(src, Address(src, len, Address::times_1)); diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp index 62820d1dd4b..3a92cede04e 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp @@ -482,6 +482,10 @@ class MacroAssembler: public Assembler { // from register 
xmm0. Otherwise, the value is stored from the FPU stack. void store_double(Address dst); + // Save/restore ZMM (512bit) register on stack. + void push_zmm(XMMRegister reg); + void pop_zmm(XMMRegister reg); + // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack void push_fTOS(); diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad index 5d1b9318adb..6df775b9dcf 100644 --- a/src/hotspot/cpu/x86/x86_64.ad +++ b/src/hotspot/cpu/x86/x86_64.ad @@ -317,18 +317,6 @@ reg_class ptr_rsp_reg(RSP, RSP_H); // Singleton class for TLS pointer reg_class ptr_r15_reg(R15, R15_H); -// The registers which can be used for -// a thread local safepoint poll -// * R12 is reserved for heap base -// * R13 cannot be encoded for addressing without an offset byte -// * R15 is reserved for the JavaThread -reg_class ptr_rex_reg(R8, R8_H, - R9, R9_H, - R10, R10_H, - R11, R11_H, - R14, R14_H); - - // Class for all long registers (excluding RSP) reg_class long_reg_with_rbp(RAX, RAX_H, RDX, RDX_H, @@ -3557,16 +3545,6 @@ operand r15_RegP() interface(REG_INTER); %} -operand rex_RegP() -%{ - constraint(ALLOC_IN_RC(ptr_rex_reg)); - match(RegP); - match(rRegP); - - format %{ %} - interface(REG_INTER); -%} - operand rRegL() %{ constraint(ALLOC_IN_RC(long_reg)); @@ -12360,7 +12338,7 @@ instruct safePoint_poll_far(rFlagsReg cr, rRegP poll) ins_pipe(ialu_reg_mem); %} -instruct safePoint_poll_tls(rFlagsReg cr, rex_RegP poll) +instruct safePoint_poll_tls(rFlagsReg cr, rRegP poll) %{ predicate(SafepointMechanism::uses_thread_local_poll()); match(SafePoint poll); @@ -12369,13 +12347,12 @@ instruct safePoint_poll_tls(rFlagsReg cr, rex_RegP poll) format %{ "testl rax, [$poll]\t" "# Safepoint: poll for GC" %} ins_cost(125); - size(3); /* setting an explicit size will cause debug builds to assert if size is incorrect */ + size(4); /* setting an explicit size will cause debug builds to assert if size is incorrect */ ins_encode %{ __ relocate(relocInfo::poll_type); address pre_pc = __ pc(); __ testl(rax, Address($poll$$Register, 0)); - address post_pc = __ pc(); - guarantee(pre_pc[0] == 0x41 && pre_pc[1] == 0x85, "must emit #rex test-ax [reg]"); + assert(nativeInstruction_at(pre_pc)->is_safepoint_poll(), "must emit test %%eax [reg]"); %} ins_pipe(ialu_reg_mem); %} diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp index 2b93f1e6c49..45da50e8e0f 100644 --- a/src/hotspot/os/linux/os_linux.cpp +++ b/src/hotspot/os/linux/os_linux.cpp @@ -5793,11 +5793,21 @@ int os::get_core_path(char* buffer, size_t bufferSize) { core_pattern[ret] = '\0'; } + // Replace the %p in the core pattern with the process id. NOTE: we do this + // only if the pattern doesn't start with "|", and we support only one %p in + // the pattern. char *pid_pos = strstr(core_pattern, "%p"); + const char* tail = (pid_pos != NULL) ? 
(pid_pos + 2) : ""; // skip over the "%p" int written; if (core_pattern[0] == '/') { - written = jio_snprintf(buffer, bufferSize, "%s", core_pattern); + if (pid_pos != NULL) { + *pid_pos = '\0'; + written = jio_snprintf(buffer, bufferSize, "%s%d%s", core_pattern, + current_process_id(), tail); + } else { + written = jio_snprintf(buffer, bufferSize, "%s", core_pattern); + } } else { char cwd[PATH_MAX]; @@ -5810,6 +5820,10 @@ int os::get_core_path(char* buffer, size_t bufferSize) { written = jio_snprintf(buffer, bufferSize, "\"%s\" (or dumping to %s/core.%d)", &core_pattern[1], p, current_process_id()); + } else if (pid_pos != NULL) { + *pid_pos = '\0'; + written = jio_snprintf(buffer, bufferSize, "%s/%s%d%s", p, core_pattern, + current_process_id(), tail); } else { written = jio_snprintf(buffer, bufferSize, "%s/%s", p, core_pattern); } diff --git a/src/hotspot/share/aot/aotCodeHeap.cpp b/src/hotspot/share/aot/aotCodeHeap.cpp index dc270307bf9..c7d24bcae65 100644 --- a/src/hotspot/share/aot/aotCodeHeap.cpp +++ b/src/hotspot/share/aot/aotCodeHeap.cpp @@ -1006,7 +1006,7 @@ bool AOTCodeHeap::reconcile_dynamic_klass(AOTCompiledMethod *caller, InstanceKla InstanceKlass* dyno = InstanceKlass::cast(dyno_klass); - if (!dyno->is_anonymous()) { + if (!dyno->is_unsafe_anonymous()) { if (_klasses_got[dyno_data->_got_index] != dyno) { // compile-time class different from runtime class, fail and deoptimize sweep_dependent_methods(holder_data); diff --git a/src/hotspot/share/aot/aotCompiledMethod.cpp b/src/hotspot/share/aot/aotCompiledMethod.cpp index 30c5e6adc03..8c3ae8bdd59 100644 --- a/src/hotspot/share/aot/aotCompiledMethod.cpp +++ b/src/hotspot/share/aot/aotCompiledMethod.cpp @@ -362,7 +362,7 @@ void AOTCompiledMethod::log_identity(xmlStream* log) const { log->print(" aot='%2d'", _heap->dso_id()); } -void AOTCompiledMethod::log_state_change() const { +void AOTCompiledMethod::log_state_change(oop cause) const { if (LogCompilation) { ResourceMark m; if (xtty != NULL) { diff --git a/src/hotspot/share/aot/aotCompiledMethod.hpp b/src/hotspot/share/aot/aotCompiledMethod.hpp index e549fe63437..a0219606681 100644 --- a/src/hotspot/share/aot/aotCompiledMethod.hpp +++ b/src/hotspot/share/aot/aotCompiledMethod.hpp @@ -193,7 +193,7 @@ private: virtual int comp_level() const { return CompLevel_aot; } virtual address verified_entry_point() const { return _code + _meta->verified_entry_offset(); } virtual void log_identity(xmlStream* stream) const; - virtual void log_state_change() const; + virtual void log_state_change(oop cause = NULL) const; virtual bool make_entrant() NOT_TIERED({ ShouldNotReachHere(); return false; }); virtual bool make_not_entrant() { return make_not_entrant_helper(not_entrant); } virtual bool make_not_used() { return make_not_entrant_helper(not_used); } diff --git a/src/hotspot/share/aot/aotLoader.cpp b/src/hotspot/share/aot/aotLoader.cpp index 266479d85fc..b3cad5859fb 100644 --- a/src/hotspot/share/aot/aotLoader.cpp +++ b/src/hotspot/share/aot/aotLoader.cpp @@ -42,7 +42,7 @@ GrowableArray* AOTLoader::_libraries = new(ResourceObj::C_HEAP, mtCode) #define FOR_ALL_AOT_LIBRARIES(lib) for (GrowableArrayIterator lib = libraries()->begin(); lib != libraries()->end(); ++lib) void AOTLoader::load_for_klass(InstanceKlass* ik, Thread* thread) { - if (ik->is_anonymous()) { + if (ik->is_unsafe_anonymous()) { // don't even bother return; } @@ -54,7 +54,7 @@ void AOTLoader::load_for_klass(InstanceKlass* ik, Thread* thread) { } uint64_t AOTLoader::get_saved_fingerprint(InstanceKlass* ik) { - if 
(ik->is_anonymous()) {
+  if (ik->is_unsafe_anonymous()) {
     // don't even bother
     return 0;
   }
diff --git a/src/hotspot/share/c1/c1_Decorators.hpp b/src/hotspot/share/c1/c1_Decorators.hpp
index a1020690e01..7bbdbef8439 100644
--- a/src/hotspot/share/c1/c1_Decorators.hpp
+++ b/src/hotspot/share/c1/c1_Decorators.hpp
@@ -34,9 +34,5 @@ const DecoratorSet C1_NEEDS_PATCHING = DECORATOR_LAST << 1;
 // Use the C1_MASK_BOOLEAN decorator for boolean accesses where the value
 // needs to be masked.
 const DecoratorSet C1_MASK_BOOLEAN = DECORATOR_LAST << 2;
-// The C1_WRITE_ACCESS decorator is used to mark writing accesses.
-const DecoratorSet C1_WRITE_ACCESS = DECORATOR_LAST << 3;
-// The C1_READ_ACCESS decorator is used to mark reading accesses.
-const DecoratorSet C1_READ_ACCESS = DECORATOR_LAST << 4;
 
 #endif // SHARE_VM_C1_C1_DECORATORS_HPP
diff --git a/src/hotspot/share/c1/c1_GraphBuilder.cpp b/src/hotspot/share/c1/c1_GraphBuilder.cpp
index 2c083b17b07..15170bae9a4 100644
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp
@@ -1844,8 +1844,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
   // invoke-special-super
   if (bc_raw == Bytecodes::_invokespecial && !target->is_object_initializer()) {
     ciInstanceKlass* sender_klass =
-          calling_klass->is_anonymous() ? calling_klass->host_klass() :
-                                          calling_klass;
+          calling_klass->is_unsafe_anonymous() ? calling_klass->unsafe_anonymous_host() :
+                                                 calling_klass;
     if (sender_klass->is_interface()) {
       int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
       Value receiver = state()->stack_at(index);
diff --git a/src/hotspot/share/c1/c1_LIRAssembler.cpp b/src/hotspot/share/c1/c1_LIRAssembler.cpp
index b68fe8b7e92..0b695294436 100644
--- a/src/hotspot/share/c1/c1_LIRAssembler.cpp
+++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp
@@ -112,6 +112,9 @@ LIR_Assembler::LIR_Assembler(Compilation* c):
 
 LIR_Assembler::~LIR_Assembler() {
+  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
+  // Reset it here to avoid an assertion.
+  _unwind_handler_entry.reset();
 }
 
diff --git a/src/hotspot/share/c1/c1_LIRAssembler.hpp b/src/hotspot/share/c1/c1_LIRAssembler.hpp
index 7d9c93caeaf..fd230a61f70 100644
--- a/src/hotspot/share/c1/c1_LIRAssembler.hpp
+++ b/src/hotspot/share/c1/c1_LIRAssembler.hpp
@@ -71,11 +71,7 @@ class LIR_Assembler: public CompilationResourceObj {
   void record_non_safepoint_debug_info();
 
   // unified bailout support
-  void bailout(const char* msg) {
-    // reset the label in case it hits assertion in destructor.
-    _unwind_handler_entry.reset();
-    compilation()->bailout(msg);
-  }
+  void bailout(const char* msg) const { compilation()->bailout(msg); }
   bool bailed_out() const { return compilation()->bailed_out(); }
 
   // code emission patterns and accessors
diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp
index 16eea099c74..d37372be60e 100644
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp
@@ -1285,9 +1285,10 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
   // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
   // meaning of these two is mixed up (see JDK-8026837).
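  // [Editor's note] Klass::_java_mirror is an OopHandle, i.e. a pointer to an
  // oop slot rather than the oop itself, so the lowering below performs two
  // dependent loads: first the Klass*, then the handle's slot. The change
  // routes the final dereference through access_load(IN_NATIVE, ...) so that
  // collectors with load barriers can intercept the mirror load, instead of
  // the bare move_wide used before.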
__ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info); - __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), result); + __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), temp); // mirror = ((OopHandle)mirror)->resolve(); - __ move_wide(new LIR_Address(result, T_OBJECT), result); + access_load(IN_NATIVE, T_OBJECT, + LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), result); } // java.lang.Class::isPrimitive() @@ -1614,7 +1615,7 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) { void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type, LIRItem& base, LIR_Opr offset, LIR_Opr result, CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) { - decorators |= C1_READ_ACCESS; + decorators |= ACCESS_READ; LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info); if (access.is_raw()) { _barrier_set->BarrierSetC1::load_at(access, result); @@ -1623,10 +1624,22 @@ void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type, } } +void LIRGenerator::access_load(DecoratorSet decorators, BasicType type, + LIR_Opr addr, LIR_Opr result) { + decorators |= ACCESS_READ; + LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type); + access.set_resolved_addr(addr); + if (access.is_raw()) { + _barrier_set->BarrierSetC1::load(access, result); + } else { + _barrier_set->load(access, result); + } +} + void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type, LIRItem& base, LIR_Opr offset, LIR_Opr value, CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) { - decorators |= C1_WRITE_ACCESS; + decorators |= ACCESS_WRITE; LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info); if (access.is_raw()) { _barrier_set->BarrierSetC1::store_at(access, value); @@ -1637,9 +1650,9 @@ void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type, LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type, LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) { + decorators |= ACCESS_READ; + decorators |= ACCESS_WRITE; // Atomic operations are SEQ_CST by default - decorators |= C1_READ_ACCESS; - decorators |= C1_WRITE_ACCESS; decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0; LIRAccess access(this, decorators, base, offset, type); if (access.is_raw()) { @@ -1651,9 +1664,9 @@ LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicTyp LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type, LIRItem& base, LIRItem& offset, LIRItem& value) { + decorators |= ACCESS_READ; + decorators |= ACCESS_WRITE; // Atomic operations are SEQ_CST by default - decorators |= C1_READ_ACCESS; - decorators |= C1_WRITE_ACCESS; decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0; LIRAccess access(this, decorators, base, offset, type); if (access.is_raw()) { @@ -1665,9 +1678,9 @@ LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType t LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type, LIRItem& base, LIRItem& offset, LIRItem& value) { + decorators |= ACCESS_READ; + decorators |= ACCESS_WRITE; // Atomic operations are SEQ_CST by default - decorators |= C1_READ_ACCESS; - decorators |= C1_WRITE_ACCESS; decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? 
MO_SEQ_CST : 0; LIRAccess access(this, decorators, base, offset, type); if (access.is_raw()) { @@ -1677,6 +1690,15 @@ LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType ty } } +LIR_Opr LIRGenerator::access_resolve(DecoratorSet decorators, LIR_Opr obj) { + // Use stronger ACCESS_WRITE|ACCESS_READ by default. + if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) { + decorators |= ACCESS_READ | ACCESS_WRITE; + } + + return _barrier_set->resolve(this, decorators, obj); +} + void LIRGenerator::do_LoadField(LoadField* x) { bool needs_patching = x->needs_patching(); bool is_volatile = x->field()->is_volatile(); @@ -1754,11 +1776,12 @@ void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) { if (GenerateRangeChecks) { CodeEmitInfo* info = state_for(x); CodeStub* stub = new RangeCheckStub(info, index.result()); + LIR_Opr buf_obj = access_resolve(IS_NOT_NULL | ACCESS_READ, buf.result()); if (index.result()->is_constant()) { - cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info); + cmp_mem_int(lir_cond_belowEqual, buf_obj, java_nio_Buffer::limit_offset(), index.result()->as_jint(), info); __ branch(lir_cond_belowEqual, T_INT, stub); } else { - cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(), + cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf_obj, java_nio_Buffer::limit_offset(), T_INT, info); __ branch(lir_cond_aboveEqual, T_INT, stub); } diff --git a/src/hotspot/share/c1/c1_LIRGenerator.hpp b/src/hotspot/share/c1/c1_LIRGenerator.hpp index 60295fb23f5..5350447f336 100644 --- a/src/hotspot/share/c1/c1_LIRGenerator.hpp +++ b/src/hotspot/share/c1/c1_LIRGenerator.hpp @@ -288,6 +288,9 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure { LIRItem& base, LIR_Opr offset, LIR_Opr result, CodeEmitInfo* patch_info = NULL, CodeEmitInfo* load_emit_info = NULL); + void access_load(DecoratorSet decorators, BasicType type, + LIR_Opr addr, LIR_Opr result); + LIR_Opr access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type, LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value); @@ -297,6 +300,8 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure { LIR_Opr access_atomic_add_at(DecoratorSet decorators, BasicType type, LIRItem& base, LIRItem& offset, LIRItem& value); + LIR_Opr access_resolve(DecoratorSet decorators, LIR_Opr obj); + // These need to guarantee JMM volatile semantics are preserved on each platform // and requires one implementation per architecture. 
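  // [Editor's sketch, not part of the patch] access_resolve() is the new hook
  // a GC barrier set can use to hand back a safely-accessible version of an
  // object before raw memory is touched; when no decorator is supplied it
  // defaults to the stronger read|write resolution, as the definition added
  // in c1_LIRGenerator.cpp above shows:
  //
  //   if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
  //     decorators |= ACCESS_READ | ACCESS_WRITE;   // be conservative by default
  //   }
  //   return _barrier_set->resolve(this, decorators, obj);
  //
  // and a typical use site resolves the base object first, as in
  // do_NIOCheckIndex() above:
  //
  //   LIR_Opr buf_obj = access_resolve(IS_NOT_NULL | ACCESS_READ, buf.result());
  //   cmp_mem_int(lir_cond_belowEqual, buf_obj, java_nio_Buffer::limit_offset(), ...);
  //
  // (The atomic_cmpxchg/xchg/add declarations that follow are the
  // per-architecture primitives the JMM comment above refers to.)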
LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value); diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp index c3dc8b66fff..32bfbf61435 100644 --- a/src/hotspot/share/c1/c1_Runtime1.cpp +++ b/src/hotspot/share/c1/c1_Runtime1.cpp @@ -55,8 +55,9 @@ #include "runtime/atomic.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/compilationPolicy.hpp" -#include "runtime/interfaceSupport.inline.hpp" +#include "runtime/fieldDescriptor.inline.hpp" #include "runtime/frame.inline.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/threadCritical.hpp" diff --git a/src/hotspot/share/ci/ciField.cpp b/src/hotspot/share/ci/ciField.cpp index 9074f8dbe9a..3fcde7e9688 100644 --- a/src/hotspot/share/ci/ciField.cpp +++ b/src/hotspot/share/ci/ciField.cpp @@ -31,7 +31,7 @@ #include "interpreter/linkResolver.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" -#include "runtime/fieldDescriptor.hpp" +#include "runtime/fieldDescriptor.inline.hpp" #include "runtime/handles.inline.hpp" // ciField @@ -222,9 +222,9 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) { // Even if general trusting is disabled, trust system-built closures in these packages. if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke")) return true; - // Trust VM anonymous classes. They are private API (sun.misc.Unsafe) and can't be serialized, - // so there is no hacking of finals going on with them. - if (holder->is_anonymous()) + // Trust VM unsafe anonymous classes. They are private API (jdk.internal.misc.Unsafe) + // and can't be serialized, so there is no hacking of finals going on with them. + if (holder->is_unsafe_anonymous()) return true; // Trust final fields in all boxed classes if (holder->is_box_klass()) diff --git a/src/hotspot/share/ci/ciInstanceKlass.cpp b/src/hotspot/share/ci/ciInstanceKlass.cpp index a0d296cb3d8..3048cfd8646 100644 --- a/src/hotspot/share/ci/ciInstanceKlass.cpp +++ b/src/hotspot/share/ci/ciInstanceKlass.cpp @@ -33,7 +33,7 @@ #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "oops/fieldStreams.hpp" -#include "runtime/fieldDescriptor.hpp" +#include "runtime/fieldDescriptor.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/jniHandles.inline.hpp" @@ -62,7 +62,7 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) : _nonstatic_field_size = ik->nonstatic_field_size(); _has_nonstatic_fields = ik->has_nonstatic_fields(); _has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods(); - _is_anonymous = ik->is_anonymous(); + _is_unsafe_anonymous = ik->is_unsafe_anonymous(); _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields: _has_injected_fields = -1; _implementor = NULL; // we will fill these lazily @@ -73,13 +73,13 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) : // InstanceKlass are created for both weak and strong metadata. Ensuring this metadata // alive covers the cases where there are weak roots without performance cost. oop holder = ik->holder_phantom(); - if (ik->is_anonymous()) { + if (ik->is_unsafe_anonymous()) { // Though ciInstanceKlass records class loader oop, it's not enough to keep - // VM anonymous classes alive (loader == NULL). Klass holder should be used instead. - // It is enough to record a ciObject, since cached elements are never removed + // VM unsafe anonymous classes alive (loader == NULL). 
Klass holder should + // be used instead. It is enough to record a ciObject, since cached elements are never removed // during ciObjectFactory lifetime. ciObjectFactory itself is created for // every compilation and lives for the whole duration of the compilation. - assert(holder != NULL, "holder of anonymous class is the mirror which is never null"); + assert(holder != NULL, "holder of unsafe anonymous class is the mirror which is never null"); (void)CURRENT_ENV->get_object(holder); } @@ -122,7 +122,7 @@ ciInstanceKlass::ciInstanceKlass(ciSymbol* name, _has_nonstatic_fields = false; _nonstatic_fields = NULL; _has_injected_fields = -1; - _is_anonymous = false; + _is_unsafe_anonymous = false; _loader = loader; _protection_domain = protection_domain; _is_shared = false; @@ -615,12 +615,12 @@ ciInstanceKlass* ciInstanceKlass::implementor() { return impl; } -ciInstanceKlass* ciInstanceKlass::host_klass() { +ciInstanceKlass* ciInstanceKlass::unsafe_anonymous_host() { assert(is_loaded(), "must be loaded"); - if (is_anonymous()) { + if (is_unsafe_anonymous()) { VM_ENTRY_MARK - Klass* host_klass = get_instanceKlass()->host_klass(); - return CURRENT_ENV->get_instance_klass(host_klass); + Klass* unsafe_anonymous_host = get_instanceKlass()->unsafe_anonymous_host(); + return CURRENT_ENV->get_instance_klass(unsafe_anonymous_host); } return NULL; } diff --git a/src/hotspot/share/ci/ciInstanceKlass.hpp b/src/hotspot/share/ci/ciInstanceKlass.hpp index 3b6b98caacb..c5b7d78f4b9 100644 --- a/src/hotspot/share/ci/ciInstanceKlass.hpp +++ b/src/hotspot/share/ci/ciInstanceKlass.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,7 +53,7 @@ private: bool _has_subklass; bool _has_nonstatic_fields; bool _has_nonstatic_concrete_methods; - bool _is_anonymous; + bool _is_unsafe_anonymous; ciFlags _flags; jint _nonstatic_field_size; @@ -179,8 +179,8 @@ public: return _has_nonstatic_concrete_methods; } - bool is_anonymous() { - return _is_anonymous; + bool is_unsafe_anonymous() { + return _is_unsafe_anonymous; } ciInstanceKlass* get_canonical_holder(int offset); @@ -260,7 +260,7 @@ public: return NULL; } - ciInstanceKlass* host_klass(); + ciInstanceKlass* unsafe_anonymous_host(); bool can_be_instantiated() { assert(is_loaded(), "must be loaded"); diff --git a/src/hotspot/share/ci/ciReplay.cpp b/src/hotspot/share/ci/ciReplay.cpp index 8d25d3ee3cb..b14e09191e8 100644 --- a/src/hotspot/share/ci/ciReplay.cpp +++ b/src/hotspot/share/ci/ciReplay.cpp @@ -35,6 +35,7 @@ #include "memory/resourceArea.hpp" #include "oops/method.inline.hpp" #include "oops/oop.inline.hpp" +#include "runtime/fieldDescriptor.inline.hpp" #include "utilities/copy.hpp" #include "utilities/macros.hpp" diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp index 77059c0739f..a6bf54468fc 100644 --- a/src/hotspot/share/classfile/classFileParser.cpp +++ b/src/hotspot/share/classfile/classFileParser.cpp @@ -2091,7 +2091,7 @@ AnnotationCollector::annotation_index(const ClassLoaderData* loader_data, // Privileged code can use all annotations. Other code silently drops some. 
const bool privileged = loader_data->is_the_null_class_loader_data() || loader_data->is_platform_class_loader_data() || - loader_data->is_anonymous(); + loader_data->is_unsafe_anonymous(); switch (sid) { case vmSymbols::VM_SYMBOL_ENUM_NAME(reflect_CallerSensitive_signature): { if (_location != _in_method) break; // only allow for methods @@ -5591,7 +5591,7 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa ik->set_this_class_index(_this_class_index); - if (is_anonymous()) { + if (is_unsafe_anonymous()) { // _this_class_index is a CONSTANT_Class entry that refers to this // anonymous class itself. If this class needs to refer to its own methods or // fields, it would use a CONSTANT_MethodRef, etc, which would reference @@ -5607,9 +5607,9 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa ik->set_has_nonstatic_concrete_methods(_has_nonstatic_concrete_methods); ik->set_declares_nonstatic_concrete_methods(_declares_nonstatic_concrete_methods); - if (_host_klass != NULL) { - assert (ik->is_anonymous(), "should be the same"); - ik->set_host_klass(_host_klass); + if (_unsafe_anonymous_host != NULL) { + assert (ik->is_unsafe_anonymous(), "should be the same"); + ik->set_unsafe_anonymous_host(_unsafe_anonymous_host); } // Set PackageEntry for this_klass @@ -5760,15 +5760,15 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa debug_only(ik->verify();) } -// For an anonymous class that is in the unnamed package, move it to its host class's +// For an unsafe anonymous class that is in the unnamed package, move it to its host class's // package by prepending its host class's package name to its class name and setting // its _class_name field. -void ClassFileParser::prepend_host_package_name(const InstanceKlass* host_klass, TRAPS) { +void ClassFileParser::prepend_host_package_name(const InstanceKlass* unsafe_anonymous_host, TRAPS) { ResourceMark rm(THREAD); assert(strrchr(_class_name->as_C_string(), '/') == NULL, - "Anonymous class should not be in a package"); + "Unsafe anonymous class should not be in a package"); const char* host_pkg_name = - ClassLoader::package_from_name(host_klass->name()->as_C_string(), NULL); + ClassLoader::package_from_name(unsafe_anonymous_host->name()->as_C_string(), NULL); if (host_pkg_name != NULL) { size_t host_pkg_len = strlen(host_pkg_name); @@ -5778,7 +5778,7 @@ void ClassFileParser::prepend_host_package_name(const InstanceKlass* host_klass, // Copy host package name and trailing /. strncpy(new_anon_name, host_pkg_name, host_pkg_len); new_anon_name[host_pkg_len] = '/'; - // Append anonymous class name. The anonymous class name can contain odd + // Append unsafe anonymous class name. The unsafe anonymous class name can contain odd // characters. So, do a strncpy instead of using sprintf("%s..."). strncpy(new_anon_name + host_pkg_len + 1, (char *)_class_name->base(), class_name_len); @@ -5793,19 +5793,19 @@ void ClassFileParser::prepend_host_package_name(const InstanceKlass* host_klass, // nothing. If the anonymous class is in the unnamed package then move it to its // host's package. If the classes are in different packages then throw an IAE // exception. 
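
prepend_host_package_name above is careful C-string assembly into a resource-area buffer; the effect is easier to see with the buffers reduced to std::string. A simplified sketch, with invented class names and without HotSpot's allocation machinery:

    #include <cassert>
    #include <string>

    // Simplified model of prepend_host_package_name(): an unsafe anonymous
    // class in the unnamed package is moved into its host's package by
    // prepending "<host-package>/" to its own name.
    std::string prepend_host_package(const std::string& host_name,
                                     const std::string& anon_name) {
      size_t last_slash = host_name.rfind('/');
      if (last_slash == std::string::npos) {
        return anon_name;               // host is in the unnamed package too
      }
      // The host package is everything before the last '/', e.g. "java/lang/invoke".
      return host_name.substr(0, last_slash) + "/" + anon_name;
    }

    int main() {
      assert(prepend_host_package("java/lang/invoke/MethodHandle", "Foo$$Lambda")
             == "java/lang/invoke/Foo$$Lambda");
      return 0;
    }
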
-void ClassFileParser::fix_anonymous_class_name(TRAPS) { - assert(_host_klass != NULL, "Expected an anonymous class"); +void ClassFileParser::fix_unsafe_anonymous_class_name(TRAPS) { + assert(_unsafe_anonymous_host != NULL, "Expected an unsafe anonymous class"); const jbyte* anon_last_slash = UTF8::strrchr(_class_name->base(), _class_name->utf8_length(), '/'); if (anon_last_slash == NULL) { // Unnamed package - prepend_host_package_name(_host_klass, CHECK); + prepend_host_package_name(_unsafe_anonymous_host, CHECK); } else { - if (!_host_klass->is_same_class_package(_host_klass->class_loader(), _class_name)) { + if (!_unsafe_anonymous_host->is_same_class_package(_unsafe_anonymous_host->class_loader(), _class_name)) { ResourceMark rm(THREAD); THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), err_msg("Host class %s and anonymous class %s are in different packages", - _host_klass->name()->as_C_string(), _class_name->as_C_string())); + _unsafe_anonymous_host->name()->as_C_string(), _class_name->as_C_string())); } } } @@ -5825,14 +5825,14 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream, Symbol* name, ClassLoaderData* loader_data, Handle protection_domain, - const InstanceKlass* host_klass, + const InstanceKlass* unsafe_anonymous_host, GrowableArray* cp_patches, Publicity pub_level, TRAPS) : _stream(stream), _requested_name(name), _loader_data(loader_data), - _host_klass(host_klass), + _unsafe_anonymous_host(unsafe_anonymous_host), _cp_patches(cp_patches), _num_patched_klasses(0), _max_num_patched_klasses(0), @@ -6140,8 +6140,8 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream, // if this is an anonymous class fix up its name if it's in the unnamed // package. Otherwise, throw IAE if it is in a different package than // its host class. - if (_host_klass != NULL) { - fix_anonymous_class_name(CHECK); + if (_unsafe_anonymous_host != NULL) { + fix_unsafe_anonymous_class_name(CHECK); } // Verification prevents us from creating names with dots in them, this @@ -6166,9 +6166,9 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream, warning("DumpLoadedClassList and CDS are not supported in exploded build"); DumpLoadedClassList = NULL; } else if (SystemDictionaryShared::is_sharing_possible(_loader_data) && - _host_klass == NULL) { + _unsafe_anonymous_host == NULL) { // Only dump the classes that can be stored into CDS archive. - // Anonymous classes such as generated LambdaForm classes are also not included. + // Unsafe anonymous classes such as generated LambdaForm classes are also not included. 
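
The renamed fix_unsafe_anonymous_class_name above has three outcomes: adopt the host's package, keep the name, or throw IllegalArgumentException. A hypothetical condensation of that decision, not HotSpot code, with invented helper names:

    #include <cassert>
    #include <string>

    enum AnonNameAction { PREPEND_HOST_PACKAGE, KEEP_NAME, THROW_IAE };

    // The class keeps its own name only if it already sits in the host's package.
    AnonNameAction classify(const std::string& host_pkg, const std::string& anon_name) {
      size_t last_slash = anon_name.rfind('/');
      if (last_slash == std::string::npos) {
        return PREPEND_HOST_PACKAGE;    // unnamed package: move into the host's
      }
      return anon_name.substr(0, last_slash) == host_pkg ? KEEP_NAME : THROW_IAE;
    }

    int main() {
      assert(classify("java/lang/invoke", "Foo") == PREPEND_HOST_PACKAGE);
      assert(classify("java/lang/invoke", "java/lang/invoke/Foo") == KEEP_NAME);
      assert(classify("java/lang/invoke", "com/acme/Foo") == THROW_IAE);
      return 0;
    }
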
oop class_loader = _loader_data->class_loader(); ResourceMark rm(THREAD); bool skip = false; diff --git a/src/hotspot/share/classfile/classFileParser.hpp b/src/hotspot/share/classfile/classFileParser.hpp index 9521e15b15d..74baeca1a56 100644 --- a/src/hotspot/share/classfile/classFileParser.hpp +++ b/src/hotspot/share/classfile/classFileParser.hpp @@ -82,7 +82,7 @@ class ClassFileParser { const Symbol* _requested_name; Symbol* _class_name; mutable ClassLoaderData* _loader_data; - const InstanceKlass* _host_klass; + const InstanceKlass* _unsafe_anonymous_host; GrowableArray* _cp_patches; // overrides for CP entries int _num_patched_klasses; int _max_num_patched_klasses; @@ -173,8 +173,8 @@ class ClassFileParser { ConstantPool* cp, TRAPS); - void prepend_host_package_name(const InstanceKlass* host_klass, TRAPS); - void fix_anonymous_class_name(TRAPS); + void prepend_host_package_name(const InstanceKlass* unsafe_anonymous_host, TRAPS); + void fix_unsafe_anonymous_class_name(TRAPS); void fill_instance_klass(InstanceKlass* ik, bool cf_changed_in_CFLH, TRAPS); void set_klass(InstanceKlass* instance); @@ -501,7 +501,7 @@ class ClassFileParser { Symbol* name, ClassLoaderData* loader_data, Handle protection_domain, - const InstanceKlass* host_klass, + const InstanceKlass* unsafe_anonymous_host, GrowableArray* cp_patches, Publicity pub_level, TRAPS); @@ -524,10 +524,10 @@ class ClassFileParser { u2 this_class_index() const { return _this_class_index; } u2 super_class_index() const { return _super_class_index; } - bool is_anonymous() const { return _host_klass != NULL; } + bool is_unsafe_anonymous() const { return _unsafe_anonymous_host != NULL; } bool is_interface() const { return _access_flags.is_interface(); } - const InstanceKlass* host_klass() const { return _host_klass; } + const InstanceKlass* unsafe_anonymous_host() const { return _unsafe_anonymous_host; } const GrowableArray* cp_patches() const { return _cp_patches; } ClassLoaderData* loader_data() const { return _loader_data; } const Symbol* class_name() const { return _class_name; } diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp index 7bd04f8cf49..e37f2c5cc98 100644 --- a/src/hotspot/share/classfile/classLoader.cpp +++ b/src/hotspot/share/classfile/classLoader.cpp @@ -1400,7 +1400,7 @@ InstanceKlass* ClassLoader::load_class(Symbol* name, bool search_append_only, TR name, loader_data, protection_domain, - NULL, // host_klass + NULL, // unsafe_anonymous_host NULL, // cp_patches THREAD); if (HAS_PENDING_EXCEPTION) { @@ -1443,8 +1443,8 @@ void ClassLoader::record_result(InstanceKlass* ik, const ClassFileStream* stream assert(DumpSharedSpaces, "sanity"); assert(stream != NULL, "sanity"); - if (ik->is_anonymous()) { - // We do not archive anonymous classes. + if (ik->is_unsafe_anonymous()) { + // We do not archive unsafe anonymous classes. 
return; } diff --git a/src/hotspot/share/classfile/classLoaderData.cpp b/src/hotspot/share/classfile/classLoaderData.cpp index cbf51543323..e4e8a813d13 100644 --- a/src/hotspot/share/classfile/classLoaderData.cpp +++ b/src/hotspot/share/classfile/classLoaderData.cpp @@ -141,16 +141,16 @@ void ClassLoaderData::initialize_name(Handle class_loader) { _name_and_id = SymbolTable::new_symbol(cl_instance_name_and_id, CATCH); } -ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) : +ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_unsafe_anonymous) : _metaspace(NULL), _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true, Monitor::_safepoint_check_never)), - _unloading(false), _is_anonymous(is_anonymous), + _unloading(false), _is_unsafe_anonymous(is_unsafe_anonymous), _modified_oops(true), _accumulated_modified_oops(false), - // An anonymous class loader data doesn't have anything to keep - // it from being unloaded during parsing of the anonymous class. + // An unsafe anonymous class loader data doesn't have anything to keep + // it from being unloaded during parsing of the unsafe anonymous class. // The null-class-loader should always be kept alive. - _keep_alive((is_anonymous || h_class_loader.is_null()) ? 1 : 0), + _keep_alive((is_unsafe_anonymous || h_class_loader.is_null()) ? 1 : 0), _claimed(0), _handles(), _klasses(NULL), _packages(NULL), _modules(NULL), _unnamed_module(NULL), _dictionary(NULL), @@ -164,14 +164,14 @@ ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) : _class_loader_klass = h_class_loader->klass(); } - if (!is_anonymous) { - // The holder is initialized later for anonymous classes, and before calling anything + if (!is_unsafe_anonymous) { + // The holder is initialized later for unsafe anonymous classes, and before calling anything // that call class_loader(). initialize_holder(h_class_loader); - // A ClassLoaderData created solely for an anonymous class should never have a + // A ClassLoaderData created solely for an unsafe anonymous class should never have a // ModuleEntryTable or PackageEntryTable created for it. The defining package - // and module for an anonymous class will be found in its host class. + // and module for an unsafe anonymous class will be found in its host class. _packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size); if (h_class_loader.is_null()) { // Create unnamed module for boot loader @@ -287,20 +287,20 @@ bool ClassLoaderData::claim() { return (int) Atomic::cmpxchg(1, &_claimed, 0) == 0; } -// Anonymous classes have their own ClassLoaderData that is marked to keep alive +// Unsafe anonymous classes have their own ClassLoaderData that is marked to keep alive // while the class is being parsed, and if the class appears on the module fixup list. -// Due to the uniqueness that no other class shares the anonymous class' name or -// ClassLoaderData, no other non-GC thread has knowledge of the anonymous class while +// Due to the uniqueness that no other class shares the unsafe anonymous class' name or +// ClassLoaderData, no other non-GC thread has knowledge of the unsafe anonymous class while // it is being defined, therefore _keep_alive is not volatile or atomic. 
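
As the comment above explains, _keep_alive needs neither volatile nor atomics because an unsafe anonymous class's CLD is invisible to every other non-GC thread while the class is being parsed. A toy model of the refcount rule, with invented types:

    #include <cassert>

    // Only CLDs dedicated to one unsafe anonymous class are refcounted; all
    // other CLDs ignore inc/dec and live or die with their class loader.
    struct ToyCLD {
      bool is_unsafe_anonymous;
      int  keep_alive;   // constructed as 1 for unsafe anonymous CLDs

      void inc() { if (is_unsafe_anonymous) { assert(keep_alive >= 0); keep_alive++; } }
      void dec() { if (is_unsafe_anonymous) { assert(keep_alive > 0);  keep_alive--; } }
      bool kept_alive() const { return keep_alive > 0; }
    };

    int main() {
      ToyCLD cld = { true, 1 };  // parsing holds the initial reference
      cld.inc();
      cld.dec();
      cld.dec();
      assert(!cld.kept_alive()); // now only the GC weak handle can keep it alive
      return 0;
    }
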
void ClassLoaderData::inc_keep_alive() { - if (is_anonymous()) { + if (is_unsafe_anonymous()) { assert(_keep_alive >= 0, "Invalid keep alive increment count"); _keep_alive++; } } void ClassLoaderData::dec_keep_alive() { - if (is_anonymous()) { + if (is_unsafe_anonymous()) { assert(_keep_alive > 0, "Invalid keep alive decrement count"); _keep_alive--; } @@ -402,20 +402,20 @@ void ClassLoaderData::record_dependency(const Klass* k) { // Do not need to record dependency if the dependency is to a class whose // class loader data is never freed. (i.e. the dependency's class loader // is one of the three builtin class loaders and the dependency is not - // anonymous.) + // unsafe anonymous.) if (to_cld->is_permanent_class_loader_data()) { return; } oop to; - if (to_cld->is_anonymous()) { - // Just return if an anonymous class is attempting to record a dependency - // to itself. (Note that every anonymous class has its own unique class + if (to_cld->is_unsafe_anonymous()) { + // Just return if an unsafe anonymous class is attempting to record a dependency + // to itself. (Note that every unsafe anonymous class has its own unique class // loader data.) if (to_cld == from_cld) { return; } - // Anonymous class dependencies are through the mirror. + // Unsafe anonymous class dependencies are through the mirror. to = k->java_mirror(); } else { to = to_cld->class_loader(); @@ -640,7 +640,7 @@ const int _boot_loader_dictionary_size = 1009; const int _default_loader_dictionary_size = 107; Dictionary* ClassLoaderData::create_dictionary() { - assert(!is_anonymous(), "anonymous class loader data do not have a dictionary"); + assert(!is_unsafe_anonymous(), "unsafe anonymous class loader data do not have a dictionary"); int size; bool resizable = false; if (_the_null_class_loader_data == NULL) { @@ -655,7 +655,7 @@ Dictionary* ClassLoaderData::create_dictionary() { size = _default_loader_dictionary_size; resizable = true; } - if (!DynamicallyResizeSystemDictionaries || DumpSharedSpaces || UseSharedSpaces) { + if (!DynamicallyResizeSystemDictionaries || DumpSharedSpaces) { resizable = false; } return new Dictionary(this, size, resizable); @@ -677,7 +677,7 @@ oop ClassLoaderData::holder_phantom() const { // Unloading support bool ClassLoaderData::is_alive() const { - bool alive = keep_alive() // null class loader and incomplete anonymous klasses. + bool alive = keep_alive() // null class loader and incomplete unsafe anonymous klasses. || (_holder.peek() != NULL); // and not cleaned by the GC weak handle processing. return alive; @@ -767,13 +767,13 @@ ClassLoaderData::~ClassLoaderData() { // Returns true if this class loader data is for the app class loader // or a user defined system class loader. (Note that the class loader -// data may be anonymous.) +// data may be unsafe anonymous.) bool ClassLoaderData::is_system_class_loader_data() const { return SystemDictionary::is_system_class_loader(class_loader()); } // Returns true if this class loader data is for the platform class loader. -// (Note that the class loader data may be anonymous.) +// (Note that the class loader data may be unsafe anonymous.) bool ClassLoaderData::is_platform_class_loader_data() const { return SystemDictionary::is_platform_class_loader(class_loader()); } @@ -781,7 +781,7 @@ bool ClassLoaderData::is_platform_class_loader_data() const { // Returns true if the class loader for this class loader data is one of // the 3 builtin (boot application/system or platform) class loaders, // including a user-defined system class loader. 
Note that if the class -// loader data is for an anonymous class then it may get freed by a GC +// loader data is for an unsafe anonymous class then it may get freed by a GC // even if its class loader is one of these loaders. bool ClassLoaderData::is_builtin_class_loader_data() const { return (is_boot_class_loader_data() || @@ -790,10 +790,10 @@ bool ClassLoaderData::is_builtin_class_loader_data() const { } // Returns true if this class loader data is a class loader data -// that is not ever freed by a GC. It must be one of the builtin -// class loaders and not anonymous. +// that is not ever freed by a GC. It must be the CLD for one of the builtin +// class loaders and not the CLD for an unsafe anonymous class. bool ClassLoaderData::is_permanent_class_loader_data() const { - return is_builtin_class_loader_data() && !is_anonymous(); + return is_builtin_class_loader_data() && !is_unsafe_anonymous(); } ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() { @@ -810,8 +810,8 @@ ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() { if (this == the_null_class_loader_data()) { assert (class_loader() == NULL, "Must be"); metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::BootMetaspaceType); - } else if (is_anonymous()) { - metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType); + } else if (is_unsafe_anonymous()) { + metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::UnsafeAnonymousMetaspaceType); } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) { metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType); } else { @@ -962,8 +962,8 @@ void ClassLoaderData::free_deallocate_list_C_heap_structures() { } } -// These anonymous class loaders are to contain classes used for JSR292 -ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(Handle loader) { +// These CLDs are to contain unsafe anonymous classes used for JSR292 +ClassLoaderData* ClassLoaderData::unsafe_anonymous_class_loader_data(Handle loader) { // Add a new class loader data to the graph. 
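
unsafe_anonymous_class_loader_data above calls add(loader, true), so each such class gets a private CLD that is never published into the java.lang.ClassLoader oop; only a loader's primary CLD is installed there, via compare-and-swap, and the loser of the race frees its copy. A sketch of that idiom with std::atomic and stand-in types:

    #include <atomic>
    #include <cassert>

    struct CLD { bool is_unsafe_anonymous; };

    // Stand-in for the hidden loader_data field of a java.lang.ClassLoader.
    std::atomic<CLD*> loader_data_field{nullptr};

    CLD* add_to_graph(bool is_unsafe_anonymous) {
      CLD* cld = new CLD{is_unsafe_anonymous};
      if (!is_unsafe_anonymous) {
        CLD* expected = nullptr;
        if (!loader_data_field.compare_exchange_strong(expected, cld)) {
          delete cld;        // lost the race: reuse the already-installed CLD
          return expected;
        }
      }
      return cld;            // unsafe anonymous CLDs stay private, one per class
    }

    int main() {
      CLD* a = add_to_graph(true);
      CLD* b = add_to_graph(true);
      assert(a != b);                        // one CLD per unsafe anonymous class
      CLD* p = add_to_graph(false);
      assert(loader_data_field.load() == p); // primary CLD published exactly once
      return 0;
    }
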
return ClassLoaderDataGraph::add(loader, true); } @@ -1005,8 +1005,8 @@ void ClassLoaderData::print_value_on(outputStream* out) const { // loader data: 0xsomeaddr of 'bootstrap' out->print("loader data: " INTPTR_FORMAT " of %s", p2i(this), loader_name_and_id()); } - if (is_anonymous()) { - out->print(" anonymous"); + if (is_unsafe_anonymous()) { + out->print(" unsafe anonymous"); } } @@ -1014,7 +1014,7 @@ void ClassLoaderData::print_value_on(outputStream* out) const { void ClassLoaderData::print_on(outputStream* out) const { out->print("ClassLoaderData CLD: " PTR_FORMAT ", loader: " PTR_FORMAT ", loader_klass: %s {", p2i(this), p2i(_class_loader.ptr_raw()), loader_name_and_id()); - if (is_anonymous()) out->print(" anonymous"); + if (is_unsafe_anonymous()) out->print(" unsafe anonymous"); if (claimed()) out->print(" claimed"); if (is_unloading()) out->print(" unloading"); out->print(" metaspace: " INTPTR_FORMAT, p2i(metaspace_or_null())); @@ -1032,8 +1032,8 @@ void ClassLoaderData::verify() { assert_locked_or_safepoint(_metaspace_lock); oop cl = class_loader(); - guarantee(this == class_loader_data(cl) || is_anonymous(), "Must be the same"); - guarantee(cl != NULL || this == ClassLoaderData::the_null_class_loader_data() || is_anonymous(), "must be"); + guarantee(this == class_loader_data(cl) || is_unsafe_anonymous(), "Must be the same"); + guarantee(cl != NULL || this == ClassLoaderData::the_null_class_loader_data() || is_unsafe_anonymous(), "must be"); // Verify the integrity of the allocated space. if (metaspace_or_null() != NULL) { @@ -1069,14 +1069,14 @@ bool ClassLoaderDataGraph::_metaspace_oom = false; // Add a new class loader data node to the list. Assign the newly created // ClassLoaderData into the java/lang/ClassLoader object as a hidden field -ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_anonymous) { +ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_unsafe_anonymous) { NoSafepointVerifier no_safepoints; // we mustn't GC until we've installed the // ClassLoaderData in the graph since the CLD // contains oops in _handles that must be walked. - ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous); + ClassLoaderData* cld = new ClassLoaderData(loader, is_unsafe_anonymous); - if (!is_anonymous) { + if (!is_unsafe_anonymous) { // First, Atomically set it ClassLoaderData* old = java_lang_ClassLoader::cmpxchg_loader_data(cld, loader(), NULL); if (old != NULL) { @@ -1109,8 +1109,8 @@ ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_anony } while (true); } -ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous) { - ClassLoaderData* loader_data = add_to_graph(loader, is_anonymous); +ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_unsafe_anonymous) { + ClassLoaderData* loader_data = add_to_graph(loader, is_unsafe_anonymous); // Initialize _name and _name_and_id after the loader data is added to the // CLDG because adding the Symbol for _name and _name_and_id might safepoint. 
if (loader.not_null()) { @@ -1119,28 +1119,6 @@ ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous) { return loader_data; } -void ClassLoaderDataGraph::oops_do(OopClosure* f, bool must_claim) { - for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) { - cld->oops_do(f, must_claim); - } -} - -void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, bool must_claim) { - for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) { - if (cld->keep_alive()) { - cld->oops_do(f, must_claim); - } - } -} - -void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, bool must_claim) { - if (ClassUnloading) { - keep_alive_oops_do(f, must_claim); - } else { - oops_do(f, must_claim); - } -} - void ClassLoaderDataGraph::cld_do(CLDClosure* cl) { for (ClassLoaderData* cld = _head; cl != NULL && cld != NULL; cld = cld->next()) { cl->do_cld(cld); @@ -1166,13 +1144,9 @@ void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) { } } -void ClassLoaderDataGraph::keep_alive_cld_do(CLDClosure* cl) { - roots_cld_do(cl, NULL); -} - void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) { if (ClassUnloading) { - keep_alive_cld_do(cl); + roots_cld_do(cl, NULL); } else { cld_do(cl); } @@ -1280,15 +1254,6 @@ void ClassLoaderDataGraph::dictionary_classes_do(void f(InstanceKlass*, TRAPS), } } -// Walks all entries in the dictionary including entries initiated by this class loader. -void ClassLoaderDataGraph::dictionary_all_entries_do(void f(InstanceKlass*, ClassLoaderData*)) { - Thread* thread = Thread::current(); - FOR_ALL_DICTIONARY(cld) { - Handle holder(thread, cld->holder_phantom()); - cld->dictionary()->all_entries_do(f); - } -} - void ClassLoaderDataGraph::verify_dictionary() { FOR_ALL_DICTIONARY(cld) { cld->dictionary()->verify(); diff --git a/src/hotspot/share/classfile/classLoaderData.hpp b/src/hotspot/share/classfile/classLoaderData.hpp index 0d393b57ede..7744822f25c 100644 --- a/src/hotspot/share/classfile/classLoaderData.hpp +++ b/src/hotspot/share/classfile/classLoaderData.hpp @@ -92,29 +92,24 @@ class ClassLoaderDataGraph : public AllStatic { static volatile size_t _num_instance_classes; static volatile size_t _num_array_classes; - static ClassLoaderData* add_to_graph(Handle class_loader, bool anonymous); - static ClassLoaderData* add(Handle class_loader, bool anonymous); + static ClassLoaderData* add_to_graph(Handle class_loader, bool is_unsafe_anonymous); + static ClassLoaderData* add(Handle class_loader, bool is_unsafe_anonymous); public: static ClassLoaderData* find_or_create(Handle class_loader); static void clean_module_and_package_info(); static void purge(); static void clear_claimed_marks(); - // oops do - static void oops_do(OopClosure* f, bool must_claim); - static void keep_alive_oops_do(OopClosure* blk, bool must_claim); - static void always_strong_oops_do(OopClosure* blk, bool must_claim); - // cld do + // Iteration through CLDG inside a safepoint; GC support static void cld_do(CLDClosure* cl); static void cld_unloading_do(CLDClosure* cl); static void roots_cld_do(CLDClosure* strong, CLDClosure* weak); - static void keep_alive_cld_do(CLDClosure* cl); static void always_strong_cld_do(CLDClosure* cl); // klass do // Walking classes through the ClassLoaderDataGraph include array classes. It also includes // classes that are allocated but not loaded, classes that have errors, and scratch classes // for redefinition. These classes are removed during the next class unloading. 
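
The oops_do/keep_alive_oops_do walks removed above had become unused wrappers, and always_strong_cld_do now spells "strong roots only" directly as roots_cld_do(cl, NULL). The strong/weak split in miniature, with toy types but the same shape as the real walk:

    #include <cassert>
    #include <vector>

    struct CLD { bool keep_alive; };
    struct CLDClosure { int visited = 0; void do_cld(CLD*) { visited++; } };

    std::vector<CLD> graph = { {true}, {false}, {true} };

    // Keep-alive CLDs go to the strong closure, the rest to the weak one;
    // passing a null weak closure skips them entirely.
    void roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
      for (CLD& cld : graph) {
        CLDClosure* cl = cld.keep_alive ? strong : weak;
        if (cl != nullptr) cl->do_cld(&cld);
      }
    }

    int main() {
      CLDClosure strong_only;
      roots_cld_do(&strong_only, nullptr);  // what always_strong_cld_do now does
      assert(strong_only.visited == 2);     // only the keep-alive CLDs were walked
      return 0;
    }
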
- // Walking the ClassLoaderDataGraph also includes anonymous classes. + // Walking the ClassLoaderDataGraph also includes unsafe anonymous classes. static void classes_do(KlassClosure* klass_closure); static void classes_do(void f(Klass* const)); static void methods_do(void f(Method*)); @@ -139,9 +134,6 @@ class ClassLoaderDataGraph : public AllStatic { // Added for initialize_itable_for_klass to handle exceptions. static void dictionary_classes_do(void f(InstanceKlass*, TRAPS), TRAPS); - // Iterate all classes and their class loaders, including initiating class loaders. - static void dictionary_all_entries_do(void f(InstanceKlass*, ClassLoaderData*)); - // VM_CounterDecay iteration support static InstanceKlass* try_get_next_class(); @@ -238,16 +230,17 @@ class ClassLoaderData : public CHeapObj { // classes in the class loader are allocated. Mutex* _metaspace_lock; // Locks the metaspace for allocations and setup. bool _unloading; // true if this class loader goes away - bool _is_anonymous; // if this CLD is for an anonymous class + bool _is_unsafe_anonymous; // CLD is dedicated to one class and that class determines the CLDs lifecycle. + // For example, an unsafe anonymous class. // Remembered sets support for the oops in the class loader data. bool _modified_oops; // Card Table Equivalent (YC/CMS support) bool _accumulated_modified_oops; // Mod Union Equivalent (CMS support) s2 _keep_alive; // if this CLD is kept alive. - // Used for anonymous classes and the boot class + // Used for unsafe anonymous classes and the boot class // loader. _keep_alive does not need to be volatile or - // atomic since there is one unique CLD per anonymous class. + // atomic since there is one unique CLD per unsafe anonymous class. volatile int _claimed; // true if claimed, for example during GC traces. // To avoid applying oop closure more than once. @@ -283,7 +276,7 @@ class ClassLoaderData : public CHeapObj { void set_next(ClassLoaderData* next) { _next = next; } ClassLoaderData* next() const { return _next; } - ClassLoaderData(Handle h_class_loader, bool is_anonymous); + ClassLoaderData(Handle h_class_loader, bool is_unsafe_anonymous); ~ClassLoaderData(); // The CLD are not placed in the Heap, so the Card Table or @@ -337,7 +330,7 @@ class ClassLoaderData : public CHeapObj { Mutex* metaspace_lock() const { return _metaspace_lock; } - bool is_anonymous() const { return _is_anonymous; } + bool is_unsafe_anonymous() const { return _is_unsafe_anonymous; } static void init_null_class_loader_data(); @@ -346,15 +339,15 @@ class ClassLoaderData : public CHeapObj { } // Returns true if this class loader data is for the system class loader. - // (Note that the class loader data may be anonymous.) + // (Note that the class loader data may be unsafe anonymous.) bool is_system_class_loader_data() const; // Returns true if this class loader data is for the platform class loader. - // (Note that the class loader data may be anonymous.) + // (Note that the class loader data may be unsafe anonymous.) bool is_platform_class_loader_data() const; // Returns true if this class loader data is for the boot class loader. - // (Note that the class loader data may be anonymous.) + // (Note that the class loader data may be unsafe anonymous.) 
inline bool is_boot_class_loader_data() const; bool is_builtin_class_loader_data() const; @@ -372,7 +365,7 @@ class ClassLoaderData : public CHeapObj { return _unloading; } - // Used to refcount an anonymous class's CLD in order to + // Used to refcount an unsafe anonymous class's CLD in order to // indicate their aliveness. void inc_keep_alive(); void dec_keep_alive(); @@ -412,7 +405,7 @@ class ClassLoaderData : public CHeapObj { static ClassLoaderData* class_loader_data(oop loader); static ClassLoaderData* class_loader_data_or_null(oop loader); - static ClassLoaderData* anonymous_class_loader_data(Handle loader); + static ClassLoaderData* unsafe_anonymous_class_loader_data(Handle loader); // Returns Klass* of associated class loader, or NULL if associated loader is 'bootstrap'. // Also works if unloading. diff --git a/src/hotspot/share/classfile/classLoaderData.inline.hpp b/src/hotspot/share/classfile/classLoaderData.inline.hpp index a345cf0c459..732f71365e1 100644 --- a/src/hotspot/share/classfile/classLoaderData.inline.hpp +++ b/src/hotspot/share/classfile/classLoaderData.inline.hpp @@ -94,9 +94,12 @@ void ClassLoaderDataGraph::dec_array_classes(size_t count) { } bool ClassLoaderDataGraph::should_clean_metaspaces_and_reset() { - bool do_cleaning = _safepoint_cleanup_needed && _should_clean_deallocate_lists; + // Only clean metaspaces after full GC. + bool do_cleaning = _safepoint_cleanup_needed; #if INCLUDE_JVMTI - do_cleaning = do_cleaning || InstanceKlass::has_previous_versions(); + do_cleaning = do_cleaning && (_should_clean_deallocate_lists || InstanceKlass::has_previous_versions()); +#else + do_cleaning = do_cleaning && _should_clean_deallocate_lists; #endif _safepoint_cleanup_needed = false; // reset return do_cleaning; diff --git a/src/hotspot/share/classfile/classLoaderExt.cpp b/src/hotspot/share/classfile/classLoaderExt.cpp index 31544c6437b..887e8c009ee 100644 --- a/src/hotspot/share/classfile/classLoaderExt.cpp +++ b/src/hotspot/share/classfile/classLoaderExt.cpp @@ -50,6 +50,7 @@ jshort ClassLoaderExt::_app_class_paths_start_index = ClassLoaderExt::max_classpath_index; jshort ClassLoaderExt::_app_module_paths_start_index = ClassLoaderExt::max_classpath_index; +jshort ClassLoaderExt::_max_used_path_index = 0; bool ClassLoaderExt::_has_app_classes = false; bool ClassLoaderExt::_has_platform_classes = false; @@ -242,6 +243,9 @@ void ClassLoaderExt::record_result(const s2 classpath_index, classloader_type = ClassLoader::PLATFORM_LOADER; ClassLoaderExt::set_has_platform_classes(); } + if (classpath_index > ClassLoaderExt::max_used_path_index()) { + ClassLoaderExt::set_max_used_path_index(classpath_index); + } result->set_shared_classpath_index(classpath_index); result->set_class_loader_type(classloader_type); } @@ -294,7 +298,7 @@ InstanceKlass* ClassLoaderExt::load_class(Symbol* name, const char* path, TRAPS) name, loader_data, protection_domain, - NULL, // host_klass + NULL, // unsafe_anonymous_host NULL, // cp_patches THREAD); diff --git a/src/hotspot/share/classfile/classLoaderExt.hpp b/src/hotspot/share/classfile/classLoaderExt.hpp index 6863a69a177..85b39a097ae 100644 --- a/src/hotspot/share/classfile/classLoaderExt.hpp +++ b/src/hotspot/share/classfile/classLoaderExt.hpp @@ -49,6 +49,8 @@ private: static jshort _app_class_paths_start_index; // index of first modular JAR in shared modulepath entry table static jshort _app_module_paths_start_index; + // the largest path index being used during CDS dump time + static jshort _max_used_path_index; static bool _has_app_classes; 
static bool _has_platform_classes; @@ -91,6 +93,12 @@ public: static jshort app_module_paths_start_index() { return _app_module_paths_start_index; } + static jshort max_used_path_index() { return _max_used_path_index; } + + static void set_max_used_path_index(jshort used_index) { + _max_used_path_index = used_index; + } + static void init_paths_start_index(jshort app_start) { _app_class_paths_start_index = app_start; } diff --git a/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp b/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp index 86fdafa50b4..59948ff2f6e 100644 --- a/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp +++ b/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp @@ -128,7 +128,7 @@ public: class LoaderTreeNode : public ResourceObj { - // We walk the CLDG and, for each CLD which is non-anonymous, add + // We walk the CLDG and, for each CLD which is non-unsafe_anonymous, add // a tree node. // To add a node we need its parent node; if the parent node does not yet // exist - because we have not yet encountered the CLD for the parent loader - @@ -219,7 +219,7 @@ class LoaderTreeNode : public ResourceObj { if (print_classes) { if (_classes != NULL) { for (LoadedClassInfo* lci = _classes; lci; lci = lci->_next) { - // Non-anonymous classes should live in the primary CLD of its loader + // Non-unsafe anonymous classes should live in the primary CLD of its loader assert(lci->_cld == _cld, "must be"); branchtracker.print(st); @@ -252,12 +252,12 @@ class LoaderTreeNode : public ResourceObj { for (LoadedClassInfo* lci = _anon_classes; lci; lci = lci->_next) { branchtracker.print(st); if (lci == _anon_classes) { // first iteration - st->print("%*s ", indentation, "Anonymous Classes:"); + st->print("%*s ", indentation, "Unsafe Anonymous Classes:"); } else { st->print("%*s ", indentation, ""); } st->print("%s", lci->_klass->external_name()); - // For anonymous classes, also print CLD if verbose. Should be a different one than the primary CLD. + // For unsafe anonymous classes, also print CLD if verbose. Should be a different one than the primary CLD. assert(lci->_cld != _cld, "must be"); if (verbose) { st->print(" (Loader Data: " PTR_FORMAT ")", p2i(lci->_cld)); @@ -266,7 +266,7 @@ class LoaderTreeNode : public ResourceObj { } branchtracker.print(st); st->print("%*s ", indentation, ""); - st->print_cr("(%u anonymous class%s)", _num_anon_classes, (_num_anon_classes == 1) ? "" : "es"); + st->print_cr("(%u unsafe anonymous class%s)", _num_anon_classes, (_num_anon_classes == 1) ? "" : "es"); // Empty line branchtracker.print(st); @@ -318,14 +318,14 @@ public: _next = info; } - void add_classes(LoadedClassInfo* first_class, int num_classes, bool anonymous) { - LoadedClassInfo** p_list_to_add_to = anonymous ? &_anon_classes : &_classes; + void add_classes(LoadedClassInfo* first_class, int num_classes, bool is_unsafe_anonymous) { + LoadedClassInfo** p_list_to_add_to = is_unsafe_anonymous ? &_anon_classes : &_classes; // Search tail. 
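
The "Search tail" loop in add_classes above is the classic pointer-to-pointer append, which handles the empty list without a special case because the head pointer and every next pointer are updated by the same store. A standalone version with an invented Node type:

    #include <cassert>

    struct Node { int value; Node* next; };

    // Walk a Node** to the first null slot, then plant the new node there.
    void append(Node** list, Node* n) {
      while (*list != nullptr) {
        list = &(*list)->next;
      }
      *list = n;
    }

    int main() {
      Node* head = nullptr;
      Node a = {1, nullptr}, b = {2, nullptr};
      append(&head, &a);   // works on the empty list without branching on head
      append(&head, &b);
      assert(head == &a && head->next == &b);
      return 0;
    }
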
while ((*p_list_to_add_to) != NULL) { p_list_to_add_to = &(*p_list_to_add_to)->_next; } *p_list_to_add_to = first_class; - if (anonymous) { + if (is_unsafe_anonymous) { _num_anon_classes += num_classes; } else { _num_classes += num_classes; @@ -420,7 +420,7 @@ class LoaderInfoScanClosure : public CLDClosure { LoadedClassCollectClosure lccc(cld); const_cast(cld)->classes_do(&lccc); if (lccc._num_classes > 0) { - info->add_classes(lccc._list, lccc._num_classes, cld->is_anonymous()); + info->add_classes(lccc._list, lccc._num_classes, cld->is_unsafe_anonymous()); } } @@ -480,7 +480,7 @@ public: assert(info != NULL, "must be"); // Update CLD in node, but only if this is the primary CLD for this loader. - if (cld->is_anonymous() == false) { + if (cld->is_unsafe_anonymous() == false) { assert(info->cld() == NULL, "there should be only one primary CLD per loader"); info->set_cld(cld); } diff --git a/src/hotspot/share/classfile/classLoaderStats.cpp b/src/hotspot/share/classfile/classLoaderStats.cpp index 7d1ea4ba28c..221eda7759c 100644 --- a/src/hotspot/share/classfile/classLoaderStats.cpp +++ b/src/hotspot/share/classfile/classLoaderStats.cpp @@ -58,7 +58,7 @@ void ClassLoaderStatsClosure::do_cld(ClassLoaderData* cld) { cls = *cls_ptr; } - if (!cld->is_anonymous()) { + if (!cld->is_unsafe_anonymous()) { cls->_cld = cld; } @@ -70,7 +70,7 @@ void ClassLoaderStatsClosure::do_cld(ClassLoaderData* cld) { ClassStatsClosure csc; cld->classes_do(&csc); - if(cld->is_anonymous()) { + if(cld->is_unsafe_anonymous()) { cls->_anon_classes_count += csc._num_classes; } else { cls->_classes_count = csc._num_classes; @@ -79,7 +79,7 @@ void ClassLoaderStatsClosure::do_cld(ClassLoaderData* cld) { ClassLoaderMetaspace* ms = cld->metaspace_or_null(); if (ms != NULL) { - if(cld->is_anonymous()) { + if(cld->is_unsafe_anonymous()) { cls->_anon_chunk_sz += ms->allocated_chunks_bytes(); cls->_anon_block_sz += ms->allocated_blocks_bytes(); } else { diff --git a/src/hotspot/share/classfile/compactHashtable.cpp b/src/hotspot/share/classfile/compactHashtable.cpp index de6bb061ecc..979aae32cd6 100644 --- a/src/hotspot/share/classfile/compactHashtable.cpp +++ b/src/hotspot/share/classfile/compactHashtable.cpp @@ -27,6 +27,7 @@ #include "classfile/compactHashtable.inline.hpp" #include "classfile/javaClasses.hpp" #include "logging/logMessage.hpp" +#include "memory/heapShared.inline.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspaceShared.hpp" #include "oops/compressedOops.inline.hpp" @@ -280,8 +281,9 @@ class CompactHashtable_OopIterator { public: CompactHashtable_OopIterator(OopClosure *cl) : _closure(cl) {} inline void do_value(address base_address, u4 offset) const { - narrowOop o = (narrowOop)offset; - _closure->do_oop(&o); + narrowOop v = (narrowOop)offset; + oop obj = HeapShared::decode_with_archived_oop_encoding_mode(v); + _closure->do_oop(&obj); } }; diff --git a/src/hotspot/share/classfile/compactHashtable.hpp b/src/hotspot/share/classfile/compactHashtable.hpp index 8d4589d62dc..1006380a1eb 100644 --- a/src/hotspot/share/classfile/compactHashtable.hpp +++ b/src/hotspot/share/classfile/compactHashtable.hpp @@ -231,6 +231,10 @@ public: // For reading from/writing to the CDS archive void serialize(SerializeClosure* soc); + + inline bool empty() { + return (_entry_count == 0); + } }; template class CompactHashtable : public SimpleCompactHashtable { diff --git a/src/hotspot/share/classfile/compactHashtable.inline.hpp b/src/hotspot/share/classfile/compactHashtable.inline.hpp index cbfd9d14873..7a5f24e5736 
100644 --- a/src/hotspot/share/classfile/compactHashtable.inline.hpp +++ b/src/hotspot/share/classfile/compactHashtable.inline.hpp @@ -28,7 +28,8 @@ #include "classfile/compactHashtable.hpp" #include "classfile/javaClasses.hpp" #include "memory/allocation.inline.hpp" -#include "oops/compressedOops.inline.hpp" +#include "memory/filemap.hpp" +#include "memory/heapShared.inline.hpp" #include "oops/oop.hpp" template @@ -46,8 +47,8 @@ inline Symbol* CompactHashtable::decode_entry(CompactHashtable inline oop CompactHashtable::decode_entry(CompactHashtable* const t, u4 offset, const char* name, int len) { - narrowOop obj = (narrowOop)offset; - oop string = CompressedOops::decode(obj); + narrowOop v = (narrowOop)offset; + oop string = HeapShared::decode_with_archived_oop_encoding_mode(v); if (java_lang_String::equals(string, (jchar*)name, len)) { return string; } diff --git a/src/hotspot/share/classfile/defaultMethods.cpp b/src/hotspot/share/classfile/defaultMethods.cpp index 172a47c51f2..66ae4ccdd18 100644 --- a/src/hotspot/share/classfile/defaultMethods.cpp +++ b/src/hotspot/share/classfile/defaultMethods.cpp @@ -885,7 +885,7 @@ static void switchover_constant_pool(BytecodeConstantPool* bpool, ConstantPool* cp = bpool->create_constant_pool(CHECK); if (cp != klass->constants()) { // Copy resolved anonymous class into new constant pool. - if (klass->is_anonymous()) { + if (klass->is_unsafe_anonymous()) { cp->klass_at_put(klass->this_class_index(), klass); } klass->class_loader_data()->add_to_deallocate_list(klass->constants()); diff --git a/src/hotspot/share/classfile/dictionary.cpp b/src/hotspot/share/classfile/dictionary.cpp index 4c9cc9be5a4..272a5d0ae0b 100644 --- a/src/hotspot/share/classfile/dictionary.cpp +++ b/src/hotspot/share/classfile/dictionary.cpp @@ -330,13 +330,13 @@ void Dictionary::classes_do(void f(InstanceKlass*, TRAPS), TRAPS) { } // All classes, and their class loaders, including initiating class loaders -void Dictionary::all_entries_do(void f(InstanceKlass*, ClassLoaderData*)) { +void Dictionary::all_entries_do(KlassClosure* closure) { for (int index = 0; index < table_size(); index++) { for (DictionaryEntry* probe = bucket(index); probe != NULL; probe = probe->next()) { InstanceKlass* k = probe->instance_klass(); - f(k, loader_data()); + closure->do_klass(k); } } } @@ -592,8 +592,8 @@ void Dictionary::print_on(outputStream* st) const { ResourceMark rm; assert(loader_data() != NULL, "loader data should not be null"); - st->print_cr("Java dictionary (table_size=%d, classes=%d)", - table_size(), number_of_entries()); + st->print_cr("Java dictionary (table_size=%d, classes=%d, resizable=%s)", + table_size(), number_of_entries(), BOOL_TO_STR(_resizable)); st->print_cr("^ indicates that initiating loader is different from defining loader"); for (int index = 0; index < table_size(); index++) { diff --git a/src/hotspot/share/classfile/dictionary.hpp b/src/hotspot/share/classfile/dictionary.hpp index 6caddf25b09..94f5c8c4a52 100644 --- a/src/hotspot/share/classfile/dictionary.hpp +++ b/src/hotspot/share/classfile/dictionary.hpp @@ -74,7 +74,7 @@ public: void classes_do(void f(InstanceKlass*)); void classes_do(void f(InstanceKlass*, TRAPS), TRAPS); - void all_entries_do(void f(InstanceKlass*, ClassLoaderData*)); + void all_entries_do(KlassClosure* closure); void classes_do(MetaspaceClosure* it); void unlink(); diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp index f3a1a267fe3..887386d26ee 100644 --- 
a/src/hotspot/share/classfile/javaClasses.cpp +++ b/src/hotspot/share/classfile/javaClasses.cpp @@ -50,7 +50,7 @@ #include "oops/symbol.hpp" #include "oops/typeArrayOop.inline.hpp" #include "prims/resolvedMethodTable.hpp" -#include "runtime/fieldDescriptor.hpp" +#include "runtime/fieldDescriptor.inline.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -209,7 +209,7 @@ void java_lang_String::compute_offsets() { } #if INCLUDE_CDS -void java_lang_String::serialize(SerializeClosure* f) { +void java_lang_String::serialize_offsets(SerializeClosure* f) { STRING_FIELDS_DO(FIELD_SERIALIZE_OFFSET); f->do_u4((u4*)&initialized); } @@ -1038,6 +1038,7 @@ void java_lang_Class::archive_basic_type_mirrors(TRAPS) { if (m != NULL) { // Update the field at _array_klass_offset to point to the relocated array klass. oop archived_m = MetaspaceShared::archive_heap_object(m, THREAD); + assert(archived_m != NULL, "sanity"); Klass *ak = (Klass*)(archived_m->metadata_field(_array_klass_offset)); assert(ak != NULL || t == T_VOID, "should not be NULL"); if (ak != NULL) { @@ -1212,7 +1213,7 @@ oop java_lang_Class::process_archived_mirror(Klass* k, oop mirror, bool java_lang_Class::restore_archived_mirror(Klass *k, Handle class_loader, Handle module, Handle protection_domain, TRAPS) { - oop m = MetaspaceShared::materialize_archived_object(k->archived_java_mirror_raw()); + oop m = MetaspaceShared::materialize_archived_object(k->archived_java_mirror_raw_narrow()); if (m == NULL) { return false; @@ -1270,6 +1271,13 @@ int java_lang_Class::oop_size(oop java_class) { return size; } +int java_lang_Class::oop_size_raw(oop java_class) { + assert(_oop_size_offset != 0, "must be set"); + int size = java_class->int_field_raw(_oop_size_offset); + assert(size > 0, "Oop size must be greater than zero, not %d", size); + return size; +} + void java_lang_Class::set_oop_size(HeapWord* java_class, int size) { assert(_oop_size_offset != 0, "must be set"); assert(size > 0, "Oop size must be greater than zero, not %d", size); @@ -1280,6 +1288,12 @@ int java_lang_Class::static_oop_field_count(oop java_class) { assert(_static_oop_field_count_offset != 0, "must be set"); return java_class->int_field(_static_oop_field_count_offset); } + +int java_lang_Class::static_oop_field_count_raw(oop java_class) { + assert(_static_oop_field_count_offset != 0, "must be set"); + return java_class->int_field_raw(_static_oop_field_count_offset); +} + void java_lang_Class::set_static_oop_field_count(oop java_class, int size) { assert(_static_oop_field_count_offset != 0, "must be set"); java_class->int_field_put(_static_oop_field_count_offset, size); @@ -1369,6 +1383,14 @@ Klass* java_lang_Class::as_Klass(oop java_class) { return k; } +Klass* java_lang_Class::as_Klass_raw(oop java_class) { + //%note memory_2 + assert(java_lang_Class::is_instance(java_class), "must be a Class object"); + Klass* k = ((Klass*)java_class->metadata_field_raw(_klass_offset)); + assert(k == NULL || k->is_klass(), "type check"); + return k; +} + void java_lang_Class::set_klass(oop java_class, Klass* klass) { assert(java_lang_Class::is_instance(java_class), "must be a Class object"); @@ -1534,7 +1556,7 @@ void java_lang_Class::compute_offsets() { } #if INCLUDE_CDS -void java_lang_Class::serialize(SerializeClosure* f) { +void java_lang_Class::serialize_offsets(SerializeClosure* f) { f->do_u4((u4*)&offsets_computed); f->do_u4((u4*)&_init_lock_offset); @@ -1608,7 +1630,7 @@ void java_lang_Thread::compute_offsets() 
{ } #if INCLUDE_CDS -void java_lang_Thread::serialize(SerializeClosure* f) { +void java_lang_Thread::serialize_offsets(SerializeClosure* f) { THREAD_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -1860,7 +1882,7 @@ void java_lang_ThreadGroup::compute_offsets() { } #if INCLUDE_CDS -void java_lang_ThreadGroup::serialize(SerializeClosure* f) { +void java_lang_ThreadGroup::serialize_offsets(SerializeClosure* f) { THREADGROUP_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -1878,7 +1900,7 @@ void java_lang_Throwable::compute_offsets() { } #if INCLUDE_CDS -void java_lang_Throwable::serialize(SerializeClosure* f) { +void java_lang_Throwable::serialize_offsets(SerializeClosure* f) { THROWABLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -2654,7 +2676,7 @@ void java_lang_StackFrameInfo::compute_offsets() { } #if INCLUDE_CDS -void java_lang_StackFrameInfo::serialize(SerializeClosure* f) { +void java_lang_StackFrameInfo::serialize_offsets(SerializeClosure* f) { STACKFRAMEINFO_FIELDS_DO(FIELD_SERIALIZE_OFFSET); STACKFRAMEINFO_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET); } @@ -2672,7 +2694,7 @@ void java_lang_LiveStackFrameInfo::compute_offsets() { } #if INCLUDE_CDS -void java_lang_LiveStackFrameInfo::serialize(SerializeClosure* f) { +void java_lang_LiveStackFrameInfo::serialize_offsets(SerializeClosure* f) { LIVESTACKFRAMEINFO_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -2686,7 +2708,7 @@ void java_lang_reflect_AccessibleObject::compute_offsets() { } #if INCLUDE_CDS -void java_lang_reflect_AccessibleObject::serialize(SerializeClosure* f) { +void java_lang_reflect_AccessibleObject::serialize_offsets(SerializeClosure* f) { ACCESSIBLEOBJECT_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -2727,7 +2749,7 @@ void java_lang_reflect_Method::compute_offsets() { } #if INCLUDE_CDS -void java_lang_reflect_Method::serialize(SerializeClosure* f) { +void java_lang_reflect_Method::serialize_offsets(SerializeClosure* f) { METHOD_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -2914,7 +2936,7 @@ void java_lang_reflect_Constructor::compute_offsets() { } #if INCLUDE_CDS -void java_lang_reflect_Constructor::serialize(SerializeClosure* f) { +void java_lang_reflect_Constructor::serialize_offsets(SerializeClosure* f) { CONSTRUCTOR_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -3063,7 +3085,7 @@ void java_lang_reflect_Field::compute_offsets() { } #if INCLUDE_CDS -void java_lang_reflect_Field::serialize(SerializeClosure* f) { +void java_lang_reflect_Field::serialize_offsets(SerializeClosure* f) { FIELD_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -3186,7 +3208,7 @@ void reflect_ConstantPool::compute_offsets() { } #if INCLUDE_CDS -void reflect_ConstantPool::serialize(SerializeClosure* f) { +void reflect_ConstantPool::serialize_offsets(SerializeClosure* f) { CONSTANTPOOL_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -3203,7 +3225,7 @@ void java_lang_reflect_Parameter::compute_offsets() { } #if INCLUDE_CDS -void java_lang_reflect_Parameter::serialize(SerializeClosure* f) { +void java_lang_reflect_Parameter::serialize_offsets(SerializeClosure* f) { PARAMETER_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -3281,7 +3303,7 @@ void java_lang_Module::compute_offsets() { } #if INCLUDE_CDS -void java_lang_Module::serialize(SerializeClosure* f) { +void java_lang_Module::serialize_offsets(SerializeClosure* f) { MODULE_FIELDS_DO(FIELD_SERIALIZE_OFFSET); MODULE_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET); } @@ -3371,7 +3393,7 @@ void reflect_UnsafeStaticFieldAccessorImpl::compute_offsets() { } #if INCLUDE_CDS -void 
reflect_UnsafeStaticFieldAccessorImpl::serialize(SerializeClosure* f) { +void reflect_UnsafeStaticFieldAccessorImpl::serialize_offsets(SerializeClosure* f) { UNSAFESTATICFIELDACCESSORIMPL_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -3543,7 +3565,7 @@ void java_lang_ref_SoftReference::compute_offsets() { } #if INCLUDE_CDS -void java_lang_ref_SoftReference::serialize(SerializeClosure* f) { +void java_lang_ref_SoftReference::serialize_offsets(SerializeClosure* f) { SOFTREFERENCE_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -3584,7 +3606,7 @@ void java_lang_invoke_DirectMethodHandle::compute_offsets() { } #if INCLUDE_CDS -void java_lang_invoke_DirectMethodHandle::serialize(SerializeClosure* f) { +void java_lang_invoke_DirectMethodHandle::serialize_offsets(SerializeClosure* f) { DIRECTMETHODHANDLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -3616,7 +3638,7 @@ void java_lang_invoke_MethodHandle::compute_offsets() { } #if INCLUDE_CDS -void java_lang_invoke_MethodHandle::serialize(SerializeClosure* f) { +void java_lang_invoke_MethodHandle::serialize_offsets(SerializeClosure* f) { METHODHANDLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -3635,7 +3657,7 @@ void java_lang_invoke_MemberName::compute_offsets() { } #if INCLUDE_CDS -void java_lang_invoke_MemberName::serialize(SerializeClosure* f) { +void java_lang_invoke_MemberName::serialize_offsets(SerializeClosure* f) { MEMBERNAME_FIELDS_DO(FIELD_SERIALIZE_OFFSET); MEMBERNAME_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET); } @@ -3648,7 +3670,7 @@ void java_lang_invoke_ResolvedMethodName::compute_offsets() { } #if INCLUDE_CDS -void java_lang_invoke_ResolvedMethodName::serialize(SerializeClosure* f) { +void java_lang_invoke_ResolvedMethodName::serialize_offsets(SerializeClosure* f) { RESOLVEDMETHOD_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET); } #endif @@ -3663,7 +3685,7 @@ void java_lang_invoke_LambdaForm::compute_offsets() { } #if INCLUDE_CDS -void java_lang_invoke_LambdaForm::serialize(SerializeClosure* f) { +void java_lang_invoke_LambdaForm::serialize_offsets(SerializeClosure* f) { LAMBDAFORM_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -3785,7 +3807,7 @@ oop java_lang_invoke_ResolvedMethodName::find_resolved_method(const methodHandle } oop new_resolved_method = k->allocate_instance(CHECK_NULL); new_resolved_method->address_field_put(_vmtarget_offset, (address)m()); - // Add a reference to the loader (actually mirror because anonymous classes will not have + // Add a reference to the loader (actually mirror because unsafe anonymous classes will not have // distinct loaders) to ensure the metadata is kept alive. // This mirror may be different than the one in clazz field. 
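
The serialize to serialize_offsets renames running through javaClasses.cpp make the intent explicit: these functions archive the computed field offsets for CDS and replay them at restore time, rather than serializing any object. A toy rendering of that one-body read/write closure pattern; every type here is invented:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // The same function body either records a value into an archive or reads
    // it back, depending on the closure's direction.
    struct SerializeClosure {
      std::vector<unsigned>* buf;
      std::size_t pos;
      bool writing;
      void do_u4(unsigned* p) {
        if (writing) buf->push_back(*p);
        else         *p = (*buf)[pos++];
      }
    };

    static unsigned value_offset = 12;   // stand-in for a computed field offset

    void serialize_offsets(SerializeClosure* f) {
      f->do_u4(&value_offset);           // one do_u4 per archived offset
    }

    int main() {
      std::vector<unsigned> archive;
      SerializeClosure out{&archive, 0, true};
      serialize_offsets(&out);           // dump time: record the offset

      value_offset = 0;
      SerializeClosure in{&archive, 0, false};
      serialize_offsets(&in);            // restore time: read it back
      assert(value_offset == 12);
      return 0;
    }
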
new_resolved_method->obj_field_put(_vmholder_offset, m->method_holder()->java_mirror()); @@ -3815,7 +3837,7 @@ void java_lang_invoke_MethodType::compute_offsets() { } #if INCLUDE_CDS -void java_lang_invoke_MethodType::serialize(SerializeClosure* f) { +void java_lang_invoke_MethodType::serialize_offsets(SerializeClosure* f) { METHODTYPE_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -3909,7 +3931,7 @@ void java_lang_invoke_CallSite::compute_offsets() { } #if INCLUDE_CDS -void java_lang_invoke_CallSite::serialize(SerializeClosure* f) { +void java_lang_invoke_CallSite::serialize_offsets(SerializeClosure* f) { CALLSITE_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -3931,7 +3953,7 @@ void java_lang_invoke_MethodHandleNatives_CallSiteContext::compute_offsets() { } #if INCLUDE_CDS -void java_lang_invoke_MethodHandleNatives_CallSiteContext::serialize(SerializeClosure* f) { +void java_lang_invoke_MethodHandleNatives_CallSiteContext::serialize_offsets(SerializeClosure* f) { CALLSITECONTEXT_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET); } #endif @@ -3963,7 +3985,7 @@ void java_security_AccessControlContext::compute_offsets() { } #if INCLUDE_CDS -void java_security_AccessControlContext::serialize(SerializeClosure* f) { +void java_security_AccessControlContext::serialize_offsets(SerializeClosure* f) { ACCESSCONTROLCONTEXT_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -4006,6 +4028,11 @@ ClassLoaderData* java_lang_ClassLoader::loader_data(oop loader) { return HeapAccess<>::load_at(loader, _loader_data_offset); } +ClassLoaderData* java_lang_ClassLoader::loader_data_raw(oop loader) { + assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop"); + return RawAccess<>::load_at(loader, _loader_data_offset); +} + ClassLoaderData* java_lang_ClassLoader::cmpxchg_loader_data(ClassLoaderData* new_data, oop loader, ClassLoaderData* expected_data) { assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop"); return HeapAccess<>::atomic_cmpxchg_at(new_data, loader, _loader_data_offset, expected_data); @@ -4029,7 +4056,7 @@ void java_lang_ClassLoader::compute_offsets() { } #if INCLUDE_CDS -void java_lang_ClassLoader::serialize(SerializeClosure* f) { +void java_lang_ClassLoader::serialize_offsets(SerializeClosure* f) { CLASSLOADER_FIELDS_DO(FIELD_SERIALIZE_OFFSET); CLASSLOADER_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET); } @@ -4143,7 +4170,7 @@ void java_lang_System::compute_offsets() { } #if INCLUDE_CDS -void java_lang_System::serialize(SerializeClosure* f) { +void java_lang_System::serialize_offsets(SerializeClosure* f) { SYSTEM_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -4247,15 +4274,7 @@ int java_nio_Buffer::_limit_offset; int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset; int reflect_ConstantPool::_oop_offset; int reflect_UnsafeStaticFieldAccessorImpl::_base_offset; -int jdk_internal_module_ArchivedModuleGraph::_archivedSystemModules_offset; -int jdk_internal_module_ArchivedModuleGraph::_archivedModuleFinder_offset; -int jdk_internal_module_ArchivedModuleGraph::_archivedMainModule_offset; -int jdk_internal_module_ArchivedModuleGraph::_archivedConfiguration_offset; -int java_lang_Integer_IntegerCache::_archivedCache_offset; -int java_lang_module_Configuration::_EMPTY_CONFIGURATION_offset; -int java_util_ImmutableCollections_ListN::_EMPTY_LIST_offset; -int java_util_ImmutableCollections_SetN::_EMPTY_SET_offset; -int java_util_ImmutableCollections_MapN::_EMPTY_MAP_offset; + #define STACKTRACEELEMENT_FIELDS_DO(macro) \ macro(declaringClassObject_offset, 
k, "declaringClassObject", class_signature, false); \ @@ -4274,7 +4293,7 @@ void java_lang_StackTraceElement::compute_offsets() { } #if INCLUDE_CDS -void java_lang_StackTraceElement::serialize(SerializeClosure* f) { +void java_lang_StackTraceElement::serialize_offsets(SerializeClosure* f) { STACKTRACEELEMENT_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -4349,7 +4368,7 @@ void java_lang_AssertionStatusDirectives::compute_offsets() { } #if INCLUDE_CDS -void java_lang_AssertionStatusDirectives::serialize(SerializeClosure* f) { +void java_lang_AssertionStatusDirectives::serialize_offsets(SerializeClosure* f) { ASSERTIONSTATUSDIRECTIVES_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -4390,7 +4409,7 @@ void java_nio_Buffer::compute_offsets() { } #if INCLUDE_CDS -void java_nio_Buffer::serialize(SerializeClosure* f) { +void java_nio_Buffer::serialize_offsets(SerializeClosure* f) { BUFFER_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -4409,7 +4428,7 @@ oop java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj( } #if INCLUDE_CDS -void java_util_concurrent_locks_AbstractOwnableSynchronizer::serialize(SerializeClosure* f) { +void java_util_concurrent_locks_AbstractOwnableSynchronizer::serialize_offsets(SerializeClosure* f) { AOS_FIELDS_DO(FIELD_SERIALIZE_OFFSET); } #endif @@ -4418,99 +4437,6 @@ static int member_offset(int hardcoded_offset) { return (hardcoded_offset * heapOopSize) + instanceOopDesc::base_offset_in_bytes(); } -#define INTEGERCACHE_FIELDS_DO(macro) \ - macro(_archivedCache_offset, k, "archivedCache", java_lang_Integer_array_signature, true) - -void java_lang_Integer_IntegerCache::compute_offsets() { - InstanceKlass* k = SystemDictionary::Integer_IntegerCache_klass(); - assert(k != NULL, "must be loaded"); - INTEGERCACHE_FIELDS_DO(FIELD_COMPUTE_OFFSET); -} - -#if INCLUDE_CDS -void java_lang_Integer_IntegerCache::serialize(SerializeClosure* f) { - INTEGERCACHE_FIELDS_DO(FIELD_SERIALIZE_OFFSET); -} -#endif - -#define ARCHIVEDMODULEGRAPH_FIELDS_DO(macro) \ - macro(_archivedSystemModules_offset, k, "archivedSystemModules", systemModules_signature, true); \ - macro(_archivedModuleFinder_offset, k, "archivedModuleFinder", moduleFinder_signature, true); \ - macro(_archivedMainModule_offset, k, "archivedMainModule", string_signature, true); \ - macro(_archivedConfiguration_offset, k, "archivedConfiguration", configuration_signature, true) - -void jdk_internal_module_ArchivedModuleGraph::compute_offsets() { - InstanceKlass* k = SystemDictionary::ArchivedModuleGraph_klass(); - assert(k != NULL, "must be loaded"); - ARCHIVEDMODULEGRAPH_FIELDS_DO(FIELD_COMPUTE_OFFSET); -} - -#if INCLUDE_CDS -void jdk_internal_module_ArchivedModuleGraph::serialize(SerializeClosure* f) { - ARCHIVEDMODULEGRAPH_FIELDS_DO(FIELD_SERIALIZE_OFFSET); -} -#endif - -#define CONFIGURATION_FIELDS_DO(macro) \ - macro(_EMPTY_CONFIGURATION_offset, k, "EMPTY_CONFIGURATION", configuration_signature, true) - -void java_lang_module_Configuration::compute_offsets() { - InstanceKlass* k = SystemDictionary::Configuration_klass(); - assert(k != NULL, "must be loaded"); - CONFIGURATION_FIELDS_DO(FIELD_COMPUTE_OFFSET); -} - -#if INCLUDE_CDS -void java_lang_module_Configuration::serialize(SerializeClosure* f) { - CONFIGURATION_FIELDS_DO(FIELD_SERIALIZE_OFFSET); -} -#endif - -#define LISTN_FIELDS_DO(macro) \ - macro(_EMPTY_LIST_offset, k, "EMPTY_LIST", list_signature, true) - -void java_util_ImmutableCollections_ListN::compute_offsets() { - InstanceKlass* k = SystemDictionary::ImmutableCollections_ListN_klass(); - 
assert(k != NULL, "must be loaded"); - LISTN_FIELDS_DO(FIELD_COMPUTE_OFFSET); -} - -#if INCLUDE_CDS -void java_util_ImmutableCollections_ListN::serialize(SerializeClosure* f) { - LISTN_FIELDS_DO(FIELD_SERIALIZE_OFFSET); -} -#endif - -#define SETN_FIELDS_DO(macro) \ - macro(_EMPTY_SET_offset, k, "EMPTY_SET", set_signature, true) - -void java_util_ImmutableCollections_SetN::compute_offsets() { - InstanceKlass* k = SystemDictionary::ImmutableCollections_SetN_klass(); - assert(k != NULL, "must be loaded"); - SETN_FIELDS_DO(FIELD_COMPUTE_OFFSET); -} - -#if INCLUDE_CDS -void java_util_ImmutableCollections_SetN::serialize(SerializeClosure* f) { - SETN_FIELDS_DO(FIELD_SERIALIZE_OFFSET); -} -#endif - -#define MAPN_FIELDS_DO(macro) \ - macro(_EMPTY_MAP_offset, k, "EMPTY_MAP", map_signature, true) - -void java_util_ImmutableCollections_MapN::compute_offsets() { - InstanceKlass* k = SystemDictionary::ImmutableCollections_MapN_klass(); - assert(k != NULL, "must be loaded"); - MAPN_FIELDS_DO(FIELD_COMPUTE_OFFSET); -} - -#if INCLUDE_CDS -void java_util_ImmutableCollections_MapN::serialize(SerializeClosure* f) { - MAPN_FIELDS_DO(FIELD_SERIALIZE_OFFSET); -} -#endif - // Compute hard-coded offsets // Invoked before SystemDictionary::initialize, so pre-loaded classes // are not available to determine the offset_of_static_fields. @@ -4527,6 +4453,7 @@ void JavaClasses::compute_hard_coded_offsets() { java_lang_ref_Reference::discovered_offset = member_offset(java_lang_ref_Reference::hc_discovered_offset); } +#define DO_COMPUTE_OFFSETS(k) k::compute_offsets(); // Compute non-hard-coded field offsets of all the classes in this file void JavaClasses::compute_offsets() { @@ -4534,52 +4461,24 @@ void JavaClasses::compute_offsets() { return; // field offsets are loaded from archive } - // java_lang_Class::compute_offsets was called earlier in bootstrap - java_lang_System::compute_offsets(); - java_lang_ClassLoader::compute_offsets(); - java_lang_Throwable::compute_offsets(); - java_lang_Thread::compute_offsets(); - java_lang_ThreadGroup::compute_offsets(); - java_lang_AssertionStatusDirectives::compute_offsets(); - java_lang_ref_SoftReference::compute_offsets(); - java_lang_invoke_MethodHandle::compute_offsets(); - java_lang_invoke_DirectMethodHandle::compute_offsets(); - java_lang_invoke_MemberName::compute_offsets(); - java_lang_invoke_ResolvedMethodName::compute_offsets(); - java_lang_invoke_LambdaForm::compute_offsets(); - java_lang_invoke_MethodType::compute_offsets(); - java_lang_invoke_CallSite::compute_offsets(); - java_lang_invoke_MethodHandleNatives_CallSiteContext::compute_offsets(); - java_security_AccessControlContext::compute_offsets(); - // Initialize reflection classes. The layouts of these classes - // changed with the new reflection implementation in JDK 1.4, and - // since the Universe doesn't know what JDK version it is until this - // point we defer computation of these offsets until now. 
- java_lang_reflect_AccessibleObject::compute_offsets(); - java_lang_reflect_Method::compute_offsets(); - java_lang_reflect_Constructor::compute_offsets(); - java_lang_reflect_Field::compute_offsets(); - java_nio_Buffer::compute_offsets(); - reflect_ConstantPool::compute_offsets(); - reflect_UnsafeStaticFieldAccessorImpl::compute_offsets(); - java_lang_reflect_Parameter::compute_offsets(); - java_lang_Module::compute_offsets(); - java_lang_StackTraceElement::compute_offsets(); - java_lang_StackFrameInfo::compute_offsets(); - java_lang_LiveStackFrameInfo::compute_offsets(); - java_util_concurrent_locks_AbstractOwnableSynchronizer::compute_offsets(); - - java_lang_Integer_IntegerCache::compute_offsets(); - java_lang_module_Configuration::compute_offsets(); - java_util_ImmutableCollections_ListN::compute_offsets(); - java_util_ImmutableCollections_MapN::compute_offsets(); - java_util_ImmutableCollections_SetN::compute_offsets(); - jdk_internal_module_ArchivedModuleGraph::compute_offsets(); + // We have already called the compute_offsets() of the + // BASIC_JAVA_CLASSES_DO_PART1 classes (java_lang_String and java_lang_Class) + // earlier inside SystemDictionary::resolve_preloaded_classes() + BASIC_JAVA_CLASSES_DO_PART2(DO_COMPUTE_OFFSETS); // generated interpreter code wants to know about the offsets we just computed: AbstractAssembler::update_delayed_values(); } +#if INCLUDE_CDS +#define DO_SERIALIZE_OFFSETS(k) k::serialize_offsets(soc); + +void JavaClasses::serialize_offsets(SerializeClosure* soc) { + BASIC_JAVA_CLASSES_DO(DO_SERIALIZE_OFFSETS); +} +#endif + + #ifndef PRODUCT // These functions exist to assert the validity of hard-coded field offsets to guard diff --git a/src/hotspot/share/classfile/javaClasses.hpp b/src/hotspot/share/classfile/javaClasses.hpp index e4cee5ae967..c30ad083566 100644 --- a/src/hotspot/share/classfile/javaClasses.hpp +++ b/src/hotspot/share/classfile/javaClasses.hpp @@ -47,6 +47,46 @@ // correspondingly. The names in the enums must be identical to the actual field // names in order for the verification code to work. 
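The hunk above replaces the long hand-maintained list of per-class compute_offsets() calls with a single expansion of BASIC_JAVA_CLASSES_DO_PART2, and drives serialize_offsets() from BASIC_JAVA_CLASSES_DO the same way; the class-list macros themselves are added to javaClasses.hpp just below. For readers unfamiliar with the X-macro technique this relies on, here is a minimal standalone sketch (all names are illustrative, not HotSpot's):

    // Standalone sketch of the X-macro dispatch pattern used by
    // BASIC_JAVA_CLASSES_DO / DO_COMPUTE_OFFSETS. Illustrative names only.
    #include <cstdio>

    #define DEMO_CLASSES_DO(f) \
      f(alpha)                 \
      f(beta)                  \
      //end

    // Each listed "class" gets the same pair of operations...
    #define DECLARE_OPS(k)                                                       \
      static void k##_compute_offsets()   { std::printf("compute %s\n", #k); }   \
      static void k##_serialize_offsets() { std::printf("serialize %s\n", #k); }
    DEMO_CLASSES_DO(DECLARE_OPS)

    // ...and one list expansion dispatches to all of them.
    #define DO_COMPUTE(k)   k##_compute_offsets();
    #define DO_SERIALIZE(k) k##_serialize_offsets();

    int main() {
      DEMO_CLASSES_DO(DO_COMPUTE)
      DEMO_CLASSES_DO(DO_SERIALIZE)
      return 0;
    }

Adding a class to the list updates every expansion site at once, which is the point of the change: the compute_offsets and serialize_offsets call sites can no longer drift out of sync.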
+#define BASIC_JAVA_CLASSES_DO_PART1(f) \
+  f(java_lang_Class) \
+  f(java_lang_String) \
+  //end
+
+#define BASIC_JAVA_CLASSES_DO_PART2(f) \
+  f(java_lang_System) \
+  f(java_lang_ClassLoader) \
+  f(java_lang_Throwable) \
+  f(java_lang_Thread) \
+  f(java_lang_ThreadGroup) \
+  f(java_lang_AssertionStatusDirectives) \
+  f(java_lang_ref_SoftReference) \
+  f(java_lang_invoke_MethodHandle) \
+  f(java_lang_invoke_DirectMethodHandle) \
+  f(java_lang_invoke_MemberName) \
+  f(java_lang_invoke_ResolvedMethodName) \
+  f(java_lang_invoke_LambdaForm) \
+  f(java_lang_invoke_MethodType) \
+  f(java_lang_invoke_CallSite) \
+  f(java_lang_invoke_MethodHandleNatives_CallSiteContext) \
+  f(java_security_AccessControlContext) \
+  f(java_lang_reflect_AccessibleObject) \
+  f(java_lang_reflect_Method) \
+  f(java_lang_reflect_Constructor) \
+  f(java_lang_reflect_Field) \
+  f(java_nio_Buffer) \
+  f(reflect_ConstantPool) \
+  f(reflect_UnsafeStaticFieldAccessorImpl) \
+  f(java_lang_reflect_Parameter) \
+  f(java_lang_Module) \
+  f(java_lang_StackTraceElement) \
+  f(java_lang_StackFrameInfo) \
+  f(java_lang_LiveStackFrameInfo) \
+  f(java_util_concurrent_locks_AbstractOwnableSynchronizer) \
+  //end
+
+#define BASIC_JAVA_CLASSES_DO(f) \
+        BASIC_JAVA_CLASSES_DO_PART1(f) \
+        BASIC_JAVA_CLASSES_DO_PART2(f)
 
 // Interface to java.lang.String objects
 
@@ -71,7 +111,7 @@ class java_lang_String : AllStatic {
   };
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Instance creation
   static Handle create_from_unicode(jchar* unicode, int len, TRAPS);
@@ -224,7 +264,7 @@ class java_lang_Class : AllStatic {
   static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
 
   // Archiving
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
   static void archive_basic_type_mirrors(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
   static oop archive_mirror(Klass* k, TRAPS) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
   static oop process_archived_mirror(Klass* k, oop mirror, oop archived_mirror, Thread *THREAD)
@@ -237,6 +277,7 @@ class java_lang_Class : AllStatic {
 
   // Conversion
   static Klass* as_Klass(oop java_class);
+  static Klass* as_Klass_raw(oop java_class);
   static void set_klass(oop java_class, Klass* klass);
   static BasicType as_BasicType(oop java_class, Klass** reference_klass = NULL);
   static Symbol* as_signature(oop java_class, bool intern_if_not_found, TRAPS);
@@ -270,8 +311,10 @@ class java_lang_Class : AllStatic {
   static oop module(oop java_class);
 
   static int oop_size(oop java_class);
+  static int oop_size_raw(oop java_class);
   static void set_oop_size(HeapWord* java_class, int size);
   static int static_oop_field_count(oop java_class);
+  static int static_oop_field_count_raw(oop java_class);
   static void set_static_oop_field_count(oop java_class, int size);
 
   static GrowableArray<Klass*>* fixup_mirror_list() {
@@ -317,7 +360,7 @@ class java_lang_Thread : AllStatic {
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Instance creation
   static oop create();
@@ -419,7 +462,7 @@ class java_lang_ThreadGroup : AllStatic {
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // parent ThreadGroup
   static oop parent(oop java_thread_group);
@@
-500,7 +543,7 @@ class java_lang_Throwable: AllStatic { static void print_stack_usage(Handle stream); static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Allocate space for backtrace (created but stack trace not filled in) static void allocate_backtrace(Handle throwable, TRAPS); @@ -531,7 +574,7 @@ class java_lang_reflect_AccessibleObject: AllStatic { static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Accessors static jboolean override(oop reflect); @@ -564,7 +607,7 @@ class java_lang_reflect_Method : public java_lang_reflect_AccessibleObject { static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Allocation static Handle create(TRAPS); @@ -635,7 +678,7 @@ class java_lang_reflect_Constructor : public java_lang_reflect_AccessibleObject static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Allocation static Handle create(TRAPS); @@ -695,7 +738,7 @@ class java_lang_reflect_Field : public java_lang_reflect_AccessibleObject { static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Allocation static Handle create(TRAPS); @@ -752,7 +795,7 @@ class java_lang_reflect_Parameter { static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Allocation static Handle create(TRAPS); @@ -784,7 +827,7 @@ class java_lang_Module { static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Allocation static Handle create(Handle loader, Handle module_name, TRAPS); @@ -815,7 +858,7 @@ class reflect_ConstantPool { static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Allocation static Handle create(TRAPS); @@ -839,7 +882,7 @@ class reflect_UnsafeStaticFieldAccessorImpl { static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; static int base_offset() { return _base_offset; @@ -944,7 +987,7 @@ class java_lang_ref_SoftReference: public java_lang_ref_Reference { static void set_clock(jlong value); static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; }; // Interface to java.lang.invoke.MethodHandle objects @@ -961,7 +1004,7 @@ class java_lang_invoke_MethodHandle: AllStatic { static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Accessors static oop type(oop mh); @@ -992,7 +1035,7 @@ class java_lang_invoke_DirectMethodHandle: AllStatic { static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void 
serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Accessors static oop member(oop mh); @@ -1019,7 +1062,7 @@ class java_lang_invoke_LambdaForm: AllStatic { static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Accessors static oop vmentry(oop lform); @@ -1052,7 +1095,7 @@ class java_lang_invoke_ResolvedMethodName : AllStatic { static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; static int vmtarget_offset_in_bytes() { return _vmtarget_offset; } @@ -1091,7 +1134,7 @@ class java_lang_invoke_MemberName: AllStatic { static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Accessors static oop clazz(oop mname); static void set_clazz(oop mname, oop clazz); @@ -1156,7 +1199,7 @@ class java_lang_invoke_MethodType: AllStatic { static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Accessors static oop rtype(oop mt); static objArrayOop ptypes(oop mt); @@ -1192,7 +1235,7 @@ private: static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Accessors static oop target( oop site); static void set_target( oop site, oop target); @@ -1226,7 +1269,7 @@ private: static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Accessors static DependencyContext vmdependencies(oop context); @@ -1250,7 +1293,7 @@ class java_security_AccessControlContext: AllStatic { static void compute_offsets(); public: - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; static oop create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS); static bool is_authorized(Handle context); @@ -1277,9 +1320,10 @@ class java_lang_ClassLoader : AllStatic { public: static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; static ClassLoaderData* loader_data(oop loader); + static ClassLoaderData* loader_data_raw(oop loader); static ClassLoaderData* cmpxchg_loader_data(ClassLoaderData* new_data, oop loader, ClassLoaderData* expected_data); static oop parent(oop loader); @@ -1330,7 +1374,7 @@ class java_lang_System : AllStatic { static bool has_security_manager(); static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Debugging friend class JavaClasses; @@ -1368,7 +1412,7 @@ class java_lang_StackTraceElement: AllStatic { int version, int bci, Symbol* name, TRAPS); static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Debugging friend class JavaClasses; @@ -1412,7 +1456,7 @@ public: static void set_version(oop info, short value); static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void 
serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; static void to_stack_trace_element(Handle stackFrame, Handle stack_trace_element, TRAPS); @@ -1434,7 +1478,7 @@ class java_lang_LiveStackFrameInfo: AllStatic { static void set_mode(oop info, int value); static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Debugging friend class JavaClasses; @@ -1459,7 +1503,7 @@ class java_lang_AssertionStatusDirectives: AllStatic { static void set_deflt(oop obj, bool val); static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; // Debugging friend class JavaClasses; @@ -1473,7 +1517,7 @@ class java_nio_Buffer: AllStatic { public: static int limit_offset(); static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; }; class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic { @@ -1482,67 +1526,7 @@ class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic { public: static void compute_offsets(); static oop get_owner_threadObj(oop obj); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; -}; - -class java_lang_Integer_IntegerCache: AllStatic { - private: - static int _archivedCache_offset; - public: - static int archivedCache_offset() { return _archivedCache_offset; } - static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; -}; - -class jdk_internal_module_ArchivedModuleGraph: AllStatic { - private: - static int _archivedSystemModules_offset; - static int _archivedModuleFinder_offset; - static int _archivedMainModule_offset; - static int _archivedConfiguration_offset; - public: - static int archivedSystemModules_offset() { return _archivedSystemModules_offset; } - static int archivedModuleFinder_offset() { return _archivedModuleFinder_offset; } - static int archivedMainModule_offset() { return _archivedMainModule_offset; } - static int archivedConfiguration_offset() { return _archivedConfiguration_offset; } - static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; -}; - -class java_lang_module_Configuration: AllStatic { - private: - static int _EMPTY_CONFIGURATION_offset; - public: - static int EMPTY_CONFIGURATION_offset() { return _EMPTY_CONFIGURATION_offset; } - static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; -}; - -class java_util_ImmutableCollections_ListN : AllStatic { - private: - static int _EMPTY_LIST_offset; - public: - static int EMPTY_LIST_offset() { return _EMPTY_LIST_offset; } - static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; -}; - -class java_util_ImmutableCollections_SetN : AllStatic { - private: - static int _EMPTY_SET_offset; - public: - static int EMPTY_SET_offset() { return _EMPTY_SET_offset; } - static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; -}; - -class java_util_ImmutableCollections_MapN : AllStatic { - private: - static int _EMPTY_MAP_offset; - public: - static int EMPTY_MAP_offset() { return _EMPTY_MAP_offset; } - static void compute_offsets(); - static void serialize(SerializeClosure* f) NOT_CDS_RETURN; + static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; }; // Use to declare fields that need to be injected 
into Java classes
@@ -1605,7 +1589,7 @@ class JavaClasses : AllStatic {
   static void compute_hard_coded_offsets();
   static void compute_offsets();
   static void check_offsets() PRODUCT_RETURN;
-
+  static void serialize_offsets(SerializeClosure* soc) NOT_CDS_RETURN;
   static InjectedField* get_injected(Symbol* class_name, int* field_count);
 };
diff --git a/src/hotspot/share/classfile/klassFactory.cpp b/src/hotspot/share/classfile/klassFactory.cpp
index 3614480f16b..c2efeade16f 100644
--- a/src/hotspot/share/classfile/klassFactory.cpp
+++ b/src/hotspot/share/classfile/klassFactory.cpp
@@ -183,7 +183,7 @@ InstanceKlass* KlassFactory::create_from_stream(ClassFileStream* stream,
                                                 Symbol* name,
                                                 ClassLoaderData* loader_data,
                                                 Handle protection_domain,
-                                                const InstanceKlass* host_klass,
+                                                const InstanceKlass* unsafe_anonymous_host,
                                                 GrowableArray<Handle>* cp_patches,
                                                 TRAPS) {
   assert(stream != NULL, "invariant");
@@ -201,7 +201,7 @@ InstanceKlass* KlassFactory::create_from_stream(ClassFileStream* stream,
   THREAD->statistical_info().incr_define_class_count();
 
   // Skip this processing for VM anonymous classes
-  if (host_klass == NULL) {
+  if (unsafe_anonymous_host == NULL) {
     stream = check_class_file_load_hook(stream,
                                         name,
                                         loader_data,
@@ -214,7 +214,7 @@ InstanceKlass* KlassFactory::create_from_stream(ClassFileStream* stream,
                                                       name,
                                                       loader_data,
                                                       protection_domain,
-                                                      host_klass,
+                                                      unsafe_anonymous_host,
                                                       cp_patches,
                                                       ClassFileParser::BROADCAST, // publicity level
                                                       CHECK_NULL);
diff --git a/src/hotspot/share/classfile/klassFactory.hpp b/src/hotspot/share/classfile/klassFactory.hpp
index c08f8b9a119..a864b05e122 100644
--- a/src/hotspot/share/classfile/klassFactory.hpp
+++ b/src/hotspot/share/classfile/klassFactory.hpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -72,7 +72,7 @@ class KlassFactory : AllStatic {
                                            Symbol* name,
                                            ClassLoaderData* loader_data,
                                            Handle protection_domain,
-                                           const InstanceKlass* host_klass,
+                                           const InstanceKlass* unsafe_anonymous_host,
                                            GrowableArray<Handle>* cp_patches,
                                            TRAPS);
 public:
diff --git a/src/hotspot/share/classfile/moduleEntry.hpp b/src/hotspot/share/classfile/moduleEntry.hpp
index ba7954e3acd..9c288a454f0 100644
--- a/src/hotspot/share/classfile/moduleEntry.hpp
+++ b/src/hotspot/share/classfile/moduleEntry.hpp
@@ -110,7 +110,7 @@ public:
   ClassLoaderData* loader_data() const { return _loader_data; }
 
   void set_loader_data(ClassLoaderData* cld) {
-    assert(!cld->is_anonymous(), "Unexpected anonymous class loader data");
+    assert(!cld->is_unsafe_anonymous(), "Unexpected unsafe anonymous class loader data");
     _loader_data = cld;
   }
diff --git a/src/hotspot/share/classfile/resolutionErrors.cpp b/src/hotspot/share/classfile/resolutionErrors.cpp
index 5e0eccdbe28..8aed61c4d35 100644
--- a/src/hotspot/share/classfile/resolutionErrors.cpp
+++ b/src/hotspot/share/classfile/resolutionErrors.cpp
@@ -65,9 +65,10 @@ void ResolutionErrorEntry::set_error(Symbol* e) {
 }
 
 void ResolutionErrorEntry::set_message(Symbol* c) {
-  assert(c != NULL, "must set a value");
   _message = c;
-  _message->increment_refcount();
+  if (_message != NULL) {
+    _message->increment_refcount();
+  }
 }
 
 // create new error entry
@@ -87,7 +88,9 @@ void ResolutionErrorTable::free_entry(ResolutionErrorEntry *entry) {
   // decrement error refcount
   assert(entry->error() != NULL, "error should be set");
   entry->error()->decrement_refcount();
-  entry->message()->decrement_refcount();
+  if (entry->message() != NULL) {
+    entry->message()->decrement_refcount();
+  }
   Hashtable<ConstantPool*, mtClass>::free_entry(entry);
 }
diff --git a/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp b/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp
index b49b2870946..0d85c5475f0 100644
--- a/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp
+++ b/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp
@@ -115,10 +115,15 @@ bool SharedPathsMiscInfo::check() {
     return fail("Corrupted archive file header");
   }
 
+  jshort cur_index = 0;
+  jshort max_cp_index = FileMapInfo::current_info()->header()->max_used_path_index();
+  jshort module_paths_start_index =
+    FileMapInfo::current_info()->header()->app_module_paths_start_index();
   while (_cur_ptr < _end_ptr) {
     jint type;
     const char* path = _cur_ptr;
     _cur_ptr += strlen(path) + 1;
+
     if (!read_jint(&type)) {
       return fail("Corrupted archive file header");
     }
@@ -129,13 +134,19 @@ bool SharedPathsMiscInfo::check() {
       print_path(&ls, type, path);
       ls.cr();
     }
-    if (!check(type, path)) {
-      if (!PrintSharedArchiveAndExit) {
-        return false;
+    // skip checking the class path(s) which was not referenced during CDS dump
+    if ((cur_index <= max_cp_index) || (cur_index >= module_paths_start_index)) {
+      if (!check(type, path)) {
+        if (!PrintSharedArchiveAndExit) {
+          return false;
+        }
+      } else {
+        ClassLoader::trace_class_path("ok");
       }
     } else {
-      ClassLoader::trace_class_path("ok");
+      ClassLoader::trace_class_path("skipped check");
     }
+    cur_index++;
   }
 
   return true;
diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp
index 80c6e56416e..7b54c56d36f 100644
--- a/src/hotspot/share/classfile/stringTable.cpp
+++ b/src/hotspot/share/classfile/stringTable.cpp
@@ -64,9 +64,9 @@
 
 // --------------------------------------------------------------------------
 StringTable* StringTable::_the_table = NULL;
-bool StringTable::_shared_string_mapped = false;
 CompactHashtable<oop, char> StringTable::_shared_table;
-bool StringTable::_alt_hash = false;
+volatile bool StringTable::_shared_string_mapped = false;
+volatile bool StringTable::_alt_hash = false;
 
 static juint murmur_seed = 0;
 
@@ -176,18 +176,18 @@ class StringTableLookupOop : public StackObj {
   }
 };
 
-static size_t ceil_pow_2(uintx val) {
+static size_t ceil_log2(size_t val) {
   size_t ret;
   for (ret = 1; ((size_t)1 << ret) < val; ++ret);
   return ret;
 }
 
 StringTable::StringTable() : _local_table(NULL), _current_size(0), _has_work(0),
-  _needs_rehashing(false), _weak_handles(NULL), _items(0), _uncleaned_items(0) {
+  _needs_rehashing(false), _weak_handles(NULL), _items_count(0), _uncleaned_items_count(0) {
   _weak_handles = new OopStorage("StringTable weak",
                                  StringTableWeakAlloc_lock, StringTableWeakActive_lock);
-  size_t start_size_log_2 = ceil_pow_2(StringTableSize);
+  size_t start_size_log_2 = ceil_log2(StringTableSize);
   _current_size = ((size_t)1) << start_size_log_2;
   log_trace(stringtable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
                          _current_size, start_size_log_2);
@@ -195,32 +195,31 @@ StringTable::StringTable() : _local_table(NULL), _current_size(0), _has_work(0),
 }
 
 size_t StringTable::item_added() {
-  return Atomic::add((size_t)1, &(the_table()->_items));
+  return Atomic::add((size_t)1, &(the_table()->_items_count));
 }
 
-size_t StringTable::add_items_to_clean(size_t ndead) {
-  size_t total = Atomic::add((size_t)ndead, &(the_table()->_uncleaned_items));
+size_t StringTable::add_items_count_to_clean(size_t ndead) {
+  size_t total = Atomic::add((size_t)ndead, &(the_table()->_uncleaned_items_count));
   log_trace(stringtable)(
      "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
-     the_table()->_uncleaned_items, ndead, total);
+     the_table()->_uncleaned_items_count, ndead, total);
   return total;
 }
 
 void StringTable::item_removed() {
-  Atomic::add((size_t)-1, &(the_table()->_items));
+  Atomic::add((size_t)-1, &(the_table()->_items_count));
 }
 
 double StringTable::get_load_factor() {
-  return (_items*1.0)/_current_size;
+  return (double)_items_count/_current_size;
 }
 
 double StringTable::get_dead_factor() {
-  return (_uncleaned_items*1.0)/_current_size;
+  return (double)_uncleaned_items_count/_current_size;
 }
 
-size_t StringTable::table_size(Thread* thread) {
-  return ((size_t)(1)) << _local_table->get_size_log2(thread != NULL ? thread
-                                                                     : Thread::current());
+size_t StringTable::table_size() {
+  return ((size_t)1) << _local_table->get_size_log2(Thread::current());
 }
 
 void StringTable::trigger_concurrent_work() {
@@ -406,7 +405,7 @@ void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f,
 
   // This is the serial case without ParState.
   // Just set the correct number and check for a cleaning phase.
-  the_table()->_uncleaned_items = stiac._count;
+  the_table()->_uncleaned_items_count = stiac._count;
   StringTable::the_table()->check_concurrent_work();
 
   if (processed != NULL) {
@@ -433,7 +432,7 @@ void StringTable::possibly_parallel_unlink(
   _par_state_string->weak_oops_do(&stiac, &dnc);
 
   // Accumulate the dead strings.
-  the_table()->add_items_to_clean(stiac._count);
+  the_table()->add_items_count_to_clean(stiac._count);
 
   *processed = (int) stiac._count_total;
   *removed = (int) stiac._count;
@@ -465,7 +464,7 @@ void StringTable::grow(JavaThread* jt) {
     }
   }
   gt.done(jt);
-  _current_size = table_size(jt);
+  _current_size = table_size();
   log_debug(stringtable)("Grown to size:" SIZE_FORMAT, _current_size);
 }
@@ -843,7 +842,7 @@ void StringTable::write_to_archive() {
   assert(MetaspaceShared::is_heap_object_archiving_allowed(), "must be");
 
   _shared_table.reset();
-  int num_buckets = the_table()->_items / SharedSymbolTableBucketSize;
+  int num_buckets = the_table()->_items_count / SharedSymbolTableBucketSize;
   // calculation of num_buckets can result in zero buckets, we need at least one
   CompactStringTableWriter writer(num_buckets > 1 ? num_buckets : 1,
                                   &MetaspaceShared::stats()->string);
diff --git a/src/hotspot/share/classfile/stringTable.hpp b/src/hotspot/share/classfile/stringTable.hpp
index 5b036b08b01..aee889a4eaa 100644
--- a/src/hotspot/share/classfile/stringTable.hpp
+++ b/src/hotspot/share/classfile/stringTable.hpp
@@ -58,21 +58,22 @@ private:
   static StringTable* _the_table;
   // Shared string table
   static CompactHashtable<oop, char> _shared_table;
-  static bool _shared_string_mapped;
-  static bool _alt_hash;
+  static volatile bool _shared_string_mapped;
+  static volatile bool _alt_hash;
+
 private:
 
-  // Set if one bucket is out of balance due to hash algorithm deficiency
   StringTableHash* _local_table;
   size_t _current_size;
   volatile bool _has_work;
+  // Set if one bucket is out of balance due to hash algorithm deficiency
   volatile bool _needs_rehashing;
 
   OopStorage* _weak_handles;
 
-  volatile size_t _items;
+  volatile size_t _items_count;
   DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
-  volatile size_t _uncleaned_items;
+  volatile size_t _uncleaned_items_count;
   DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
 
   double get_load_factor();
@@ -83,7 +84,7 @@ private:
 
   static size_t item_added();
   static void item_removed();
-  size_t add_items_to_clean(size_t ndead);
+  size_t add_items_count_to_clean(size_t ndead);
 
   StringTable();
 
@@ -100,7 +101,7 @@ private:
 public:
   // The string table
   static StringTable* the_table() { return _the_table; }
-  size_t table_size(Thread* thread = NULL);
+  size_t table_size();
 
   static OopStorage* weak_storage() { return the_table()->_weak_handles; }
 
@@ -116,7 +117,7 @@ private:
   // Must be called before a parallel walk where strings might die.
   static void reset_dead_counter() {
-    the_table()->_uncleaned_items = 0;
+    the_table()->_uncleaned_items_count = 0;
   }
   // After the parallel walk this method must be called to trigger
   // cleaning. Note it might trigger a resize instead.
@@ -127,7 +128,7 @@ private:
   // If GC uses ParState directly it should add the number of cleared
   // strings to this method.
   static void inc_dead_counter(size_t ndead) {
-    the_table()->add_items_to_clean(ndead);
+    the_table()->add_items_count_to_clean(ndead);
   }
 
   // Delete pointers to otherwise-unreachable objects.
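Both this StringTable change and the SymbolTable rewrite below size their table as a power of two and track the exponent rather than the raw size (ceil_log2(), get_size_log2(), _current_size = 1 << log2). Below is a standalone copy of the renamed helper with a small harness; the harness and the sample value are illustrative assumptions, not part of the patch:

    // Standalone copy of the ceil_log2 helper from the stringTable.cpp hunk
    // above, with a tiny demo harness (the harness itself is illustrative).
    #include <cstddef>
    #include <cstdio>

    static size_t ceil_log2(size_t val) {
      // Smallest ret >= 1 such that (1 << ret) >= val.
      size_t ret;
      for (ret = 1; ((size_t)1 << ret) < val; ++ret);
      return ret;
    }

    int main() {
      const size_t requested = 60013;        // e.g. a StringTableSize-style value
      size_t log2 = ceil_log2(requested);    // -> 16, since 2^16 = 65536 >= 60013
      size_t current_size = (size_t)1 << log2;
      std::printf("requested=%zu -> log2=%zu size=%zu\n",
                  requested, log2, current_size);
      return 0;
    }

Because only the exponent is stored, growing the table amounts to bumping the log2 value rather than re-deriving a size, and a requested size is always rounded up to a power of two.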
diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp
index ae4d3a01ebc..642e70aaa30 100644
--- a/src/hotspot/share/classfile/symbolTable.cpp
+++ b/src/hotspot/share/classfile/symbolTable.cpp
@@ -27,46 +27,178 @@
 #include "classfile/compactHashtable.inline.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
 #include "memory/allocation.inline.hpp"
-#include "memory/filemap.hpp"
 #include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/safepointVerifiers.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/timerTrace.hpp"
 #include "services/diagnosticCommand.hpp"
-#include "utilities/hashtable.inline.hpp"
+#include "utilities/concurrentHashTable.inline.hpp"
+#include "utilities/concurrentHashTableTasks.inline.hpp"
+
+// We used to not resize at all, so let's be conservative
+// and not set it too short before we decide to resize,
+// to match previous startup behavior
+#define PREF_AVG_LIST_LEN 8
+// 2^17 (131,072) is max size, which is about 6.5 times as large
+// as the previous table size (used to be 20,011),
+// which never resized
+#define END_SIZE 17
+// If a chain gets to 100 something might be wrong
+#define REHASH_LEN 100
+// We only get a chance to check whether we need
+// to clean infrequently (on class unloading),
+// so if we have even one dead entry then mark table for cleaning
+#define CLEAN_DEAD_HIGH_WATER_MARK 0.0
+
+#define ON_STACK_BUFFER_LENGTH 128
 
 // --------------------------------------------------------------------------
-// the number of buckets a thread claims
-const int ClaimChunkSize = 32;
-
 SymbolTable* SymbolTable::_the_table = NULL;
+CompactHashtable<Symbol*, char> SymbolTable::_shared_table;
+volatile bool SymbolTable::_alt_hash = false;
+volatile bool SymbolTable::_lookup_shared_first = false;
 // Static arena for symbols that are not deallocated
 Arena* SymbolTable::_arena = NULL;
-bool SymbolTable::_needs_rehashing = false;
-bool SymbolTable::_lookup_shared_first = false;
-CompactHashtable<Symbol*, char> SymbolTable::_shared_table;
+static juint murmur_seed = 0;
 
-Symbol* SymbolTable::allocate_symbol(const u1* name, int len, bool c_heap, TRAPS) {
+static inline void log_trace_symboltable_helper(Symbol* sym, const char* msg) {
+#ifndef PRODUCT
+  ResourceMark rm;
+  log_trace(symboltable)("%s [%s]", msg, sym->as_quoted_ascii());
+#endif // PRODUCT
+}
+
+// Pick hashing algorithm.
+static uintx hash_symbol(const char* s, int len, bool useAlt) {
+  return useAlt ?
+ AltHashing::murmur3_32(murmur_seed, (const jbyte*)s, len) : + java_lang_String::hash_code((const jbyte*)s, len); +} + +static uintx hash_shared_symbol(const char* s, int len) { + return java_lang_String::hash_code((const jbyte*)s, len); +} + +class SymbolTableConfig : public SymbolTableHash::BaseConfig { +private: +public: + static uintx get_hash(Symbol* const& value, bool* is_dead) { + *is_dead = (value->refcount() == 0); + if (*is_dead) { + return 0; + } else { + return hash_symbol((const char*)value->bytes(), value->utf8_length(), SymbolTable::_alt_hash); + } + } + // We use default allocation/deallocation but counted + static void* allocate_node(size_t size, Symbol* const& value) { + SymbolTable::item_added(); + return SymbolTableHash::BaseConfig::allocate_node(size, value); + } + static void free_node(void* memory, Symbol* const& value) { + // We get here either because #1 some threads lost a race + // to insert a newly created Symbol, or #2 we are freeing + // a symbol during normal cleanup deletion. + // If #1, then the symbol can be a permanent (refcount==PERM_REFCOUNT), + // or regular newly created one but with refcount==0 (see SymbolTableCreateEntry) + // If #2, then the symbol must have refcount==0 + assert((value->refcount() == PERM_REFCOUNT) || (value->refcount() == 0), + "refcount %d", value->refcount()); + SymbolTable::delete_symbol(value); + SymbolTableHash::BaseConfig::free_node(memory, value); + SymbolTable::item_removed(); + } +}; + +static size_t ceil_log2(size_t value) { + size_t ret; + for (ret = 1; ((size_t)1 << ret) < value; ++ret); + return ret; +} + +SymbolTable::SymbolTable() : + _symbols_removed(0), _symbols_counted(0), _local_table(NULL), + _current_size(0), _has_work(0), _needs_rehashing(false), + _items_count(0), _uncleaned_items_count(0) { + + size_t start_size_log_2 = ceil_log2(SymbolTableSize); + _current_size = ((size_t)1) << start_size_log_2; + log_trace(symboltable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")", + _current_size, start_size_log_2); + _local_table = new SymbolTableHash(start_size_log_2, END_SIZE, REHASH_LEN); +} + +void SymbolTable::delete_symbol(Symbol* sym) { + if (sym->refcount() == PERM_REFCOUNT) { + MutexLockerEx ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena + // Deleting permanent symbol should not occur very often (insert race condition), + // so log it. 
+ log_trace_symboltable_helper(sym, "Freeing permanent symbol"); + if (!arena()->Afree(sym, sym->size())) { + log_trace_symboltable_helper(sym, "Leaked permanent symbol"); + } + } else { + delete sym; + } +} + +void SymbolTable::item_added() { + Atomic::inc(&(SymbolTable::the_table()->_items_count)); +} + +void SymbolTable::set_item_clean_count(size_t ncl) { + Atomic::store(ncl, &(SymbolTable::the_table()->_uncleaned_items_count)); + log_trace(symboltable)("Set uncleaned items:" SIZE_FORMAT, SymbolTable::the_table()->_uncleaned_items_count); +} + +void SymbolTable::mark_item_clean_count() { + if (Atomic::cmpxchg((size_t)1, &(SymbolTable::the_table()->_uncleaned_items_count), (size_t)0) == 0) { // only mark if unset + log_trace(symboltable)("Marked uncleaned items:" SIZE_FORMAT, SymbolTable::the_table()->_uncleaned_items_count); + } +} + +void SymbolTable::item_removed() { + Atomic::inc(&(SymbolTable::the_table()->_symbols_removed)); + Atomic::dec(&(SymbolTable::the_table()->_items_count)); +} + +double SymbolTable::get_load_factor() { + return (double)_items_count/_current_size; +} + +double SymbolTable::get_dead_factor() { + return (double)_uncleaned_items_count/_current_size; +} + +size_t SymbolTable::table_size() { + return ((size_t)1) << _local_table->get_size_log2(Thread::current()); +} + +void SymbolTable::trigger_concurrent_work() { + MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); + SymbolTable::the_table()->_has_work = true; + Service_lock->notify_all(); +} + +Symbol* SymbolTable::allocate_symbol(const char* name, int len, bool c_heap, TRAPS) { assert (len <= Symbol::max_length(), "should be checked by caller"); Symbol* sym; - if (DumpSharedSpaces) { c_heap = false; } if (c_heap) { // refcount starts as 1 - sym = new (len, THREAD) Symbol(name, len, 1); + sym = new (len, THREAD) Symbol((const u1*)name, len, 1); assert(sym != NULL, "new should call vm_exit_out_of_memory if C_HEAP is exhausted"); } else { // Allocate to global arena - sym = new (len, arena(), THREAD) Symbol(name, len, PERM_REFCOUNT); + MutexLockerEx ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena + sym = new (len, arena(), THREAD) Symbol((const u1*)name, len, PERM_REFCOUNT); } return sym; } @@ -80,314 +212,176 @@ void SymbolTable::initialize_symbols(int arena_alloc_size) { } } +class SymbolsDo : StackObj { + SymbolClosure *_cl; +public: + SymbolsDo(SymbolClosure *cl) : _cl(cl) {} + bool operator()(Symbol** value) { + assert(value != NULL, "expected valid value"); + assert(*value != NULL, "value should point to a symbol"); + _cl->do_symbol(value); + return true; + }; +}; + // Call function for all symbols in the symbol table. 
void SymbolTable::symbols_do(SymbolClosure *cl) { // all symbols from shared table _shared_table.symbols_do(cl); // all symbols from the dynamic table - const int n = the_table()->table_size(); - for (int i = 0; i < n; i++) { - for (HashtableEntry* p = the_table()->bucket(i); - p != NULL; - p = p->next()) { - cl->do_symbol(p->literal_addr()); - } + SymbolsDo sd(cl); + if (!SymbolTable::the_table()->_local_table->try_scan(Thread::current(), sd)) { + log_info(stringtable)("symbols_do unavailable at this moment"); } } +class MetaspacePointersDo : StackObj { + MetaspaceClosure *_it; +public: + MetaspacePointersDo(MetaspaceClosure *it) : _it(it) {} + bool operator()(Symbol** value) { + assert(value != NULL, "expected valid value"); + assert(*value != NULL, "value should point to a symbol"); + _it->push(value); + return true; + }; +}; + void SymbolTable::metaspace_pointers_do(MetaspaceClosure* it) { assert(DumpSharedSpaces, "called only during dump time"); - const int n = the_table()->table_size(); - for (int i = 0; i < n; i++) { - for (HashtableEntry* p = the_table()->bucket(i); - p != NULL; - p = p->next()) { - it->push(p->literal_addr()); - } - } + MetaspacePointersDo mpd(it); + SymbolTable::the_table()->_local_table->do_scan(Thread::current(), mpd); } -int SymbolTable::_symbols_removed = 0; -int SymbolTable::_symbols_counted = 0; -volatile int SymbolTable::_parallel_claimed_idx = 0; - -void SymbolTable::buckets_unlink(int start_idx, int end_idx, BucketUnlinkContext* context) { - for (int i = start_idx; i < end_idx; ++i) { - HashtableEntry** p = the_table()->bucket_addr(i); - HashtableEntry* entry = the_table()->bucket(i); - while (entry != NULL) { - // Shared entries are normally at the end of the bucket and if we run into - // a shared entry, then there is nothing more to remove. However, if we - // have rehashed the table, then the shared entries are no longer at the - // end of the bucket. - if (entry->is_shared() && !use_alternate_hashcode()) { - break; - } - Symbol* s = entry->literal(); - context->_num_processed++; - assert(s != NULL, "just checking"); - // If reference count is zero, remove. - if (s->refcount() == 0) { - assert(!entry->is_shared(), "shared entries should be kept live"); - delete s; - *p = entry->next(); - context->free_entry(entry); - } else { - p = entry->next_addr(); - } - // get next entry - entry = (HashtableEntry*)HashtableEntry::make_ptr(*p); - } - } -} - -// Remove unreferenced symbols from the symbol table -// This is done late during GC. 
-void SymbolTable::unlink(int* processed, int* removed) { - BucketUnlinkContext context; - buckets_unlink(0, the_table()->table_size(), &context); - _the_table->bulk_free_entries(&context); - *processed = context._num_processed; - *removed = context._num_removed; - - _symbols_removed = context._num_removed; - _symbols_counted = context._num_processed; -} - -void SymbolTable::possibly_parallel_unlink(int* processed, int* removed) { - const int limit = the_table()->table_size(); - - BucketUnlinkContext context; - for (;;) { - // Grab next set of buckets to scan - int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize; - if (start_idx >= limit) { - // End of table - break; - } - - int end_idx = MIN2(limit, start_idx + ClaimChunkSize); - buckets_unlink(start_idx, end_idx, &context); - } - - _the_table->bulk_free_entries(&context); - *processed = context._num_processed; - *removed = context._num_removed; - - Atomic::add(context._num_processed, &_symbols_counted); - Atomic::add(context._num_removed, &_symbols_removed); -} - -// Create a new table and using alternate hash code, populate the new table -// with the existing strings. Set flag to use the alternate hash code afterwards. -void SymbolTable::rehash_table() { - if (DumpSharedSpaces) { - tty->print_cr("Warning: rehash_table should not be called while dumping archive"); - return; - } - - assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); - // This should never happen with -Xshare:dump but it might in testing mode. - if (DumpSharedSpaces) return; - - // Create a new symbol table - SymbolTable* new_table = new SymbolTable(); - - the_table()->move_to(new_table); - - // Delete the table and buckets (entries are reused in new table). - delete _the_table; - // Don't check if we need rehashing until the table gets unbalanced again. - // Then rehash with a new global seed. - _needs_rehashing = false; - _the_table = new_table; -} - -// Lookup a symbol in a bucket. - -Symbol* SymbolTable::lookup_dynamic(int index, const char* name, +Symbol* SymbolTable::lookup_dynamic(const char* name, int len, unsigned int hash) { - int count = 0; - for (HashtableEntry* e = bucket(index); e != NULL; e = e->next()) { - count++; // count all entries in this bucket, not just ones with same hash - if (e->hash() == hash) { - Symbol* sym = e->literal(); - // Skip checking already dead symbols in the bucket. - if (sym->refcount() == 0) { - count--; // Don't count this symbol towards rehashing. - } else if (sym->equals(name, len)) { - if (sym->try_increment_refcount()) { - // something is referencing this symbol now. - return sym; - } else { - count--; // don't count this symbol. - } - } - } - } - // If the bucket size is too deep check if this hash code is insufficient. - if (count >= rehash_count && !needs_rehashing()) { - _needs_rehashing = check_rehash_table(count); - } - return NULL; + Symbol* sym = SymbolTable::the_table()->do_lookup(name, len, hash); + assert((sym == NULL) || sym->refcount() != 0, "refcount must not be zero"); + return sym; } Symbol* SymbolTable::lookup_shared(const char* name, int len, unsigned int hash) { - if (use_alternate_hashcode()) { - // hash_code parameter may use alternate hashing algorithm but the shared table - // always uses the same original hash code. - hash = hash_shared_symbol(name, len); + if (!_shared_table.empty()) { + if (SymbolTable::_alt_hash) { + // hash_code parameter may use alternate hashing algorithm but the shared table + // always uses the same original hash code. 
+ hash = hash_shared_symbol(name, len); + } + return _shared_table.lookup(name, hash, len); + } else { + return NULL; } - return _shared_table.lookup(name, hash, len); } -Symbol* SymbolTable::lookup(int index, const char* name, +Symbol* SymbolTable::lookup_common(const char* name, int len, unsigned int hash) { Symbol* sym; if (_lookup_shared_first) { sym = lookup_shared(name, len, hash); - if (sym != NULL) { - return sym; + if (sym == NULL) { + _lookup_shared_first = false; + sym = lookup_dynamic(name, len, hash); } - _lookup_shared_first = false; - return lookup_dynamic(index, name, len, hash); } else { - sym = lookup_dynamic(index, name, len, hash); - if (sym != NULL) { - return sym; - } - sym = lookup_shared(name, len, hash); - if (sym != NULL) { - _lookup_shared_first = true; - } - return sym; - } -} - -u4 SymbolTable::encode_shared(Symbol* sym) { - assert(DumpSharedSpaces, "called only during dump time"); - uintx base_address = uintx(MetaspaceShared::shared_rs()->base()); - uintx offset = uintx(sym) - base_address; - assert(offset < 0x7fffffff, "sanity"); - return u4(offset); -} - -Symbol* SymbolTable::decode_shared(u4 offset) { - assert(!DumpSharedSpaces, "called only during runtime"); - uintx base_address = _shared_table.base_address(); - Symbol* sym = (Symbol*)(base_address + offset); - -#ifndef PRODUCT - const char* s = (const char*)sym->bytes(); - int len = sym->utf8_length(); - unsigned int hash = hash_symbol(s, len); - assert(sym == lookup_shared(s, len, hash), "must be shared symbol"); -#endif - - return sym; -} - -// Pick hashing algorithm. -unsigned int SymbolTable::hash_symbol(const char* s, int len) { - return use_alternate_hashcode() ? - AltHashing::murmur3_32(seed(), (const jbyte*)s, len) : - java_lang_String::hash_code((const jbyte*)s, len); -} - -unsigned int SymbolTable::hash_shared_symbol(const char* s, int len) { - return java_lang_String::hash_code((const jbyte*)s, len); -} - - -// We take care not to be blocking while holding the -// SymbolTable_lock. Otherwise, the system might deadlock, since the -// symboltable is used during compilation (VM_thread) The lock free -// synchronization is simplified by the fact that we do not delete -// entries in the symbol table during normal execution (only during -// safepoints). - -Symbol* SymbolTable::lookup(const char* name, int len, TRAPS) { - unsigned int hashValue = hash_symbol(name, len); - int index = the_table()->hash_to_index(hashValue); - - Symbol* s = the_table()->lookup(index, name, len, hashValue); - - // Found - if (s != NULL) return s; - - // Grab SymbolTable_lock first. - MutexLocker ml(SymbolTable_lock, THREAD); - - // Otherwise, add to symbol to table - return the_table()->basic_add(index, (u1*)name, len, hashValue, true, THREAD); -} - -Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) { - char* buffer; - int index, len; - unsigned int hashValue; - char* name; - { - debug_only(NoSafepointVerifier nsv;) - - name = (char*)sym->base() + begin; - len = end - begin; - hashValue = hash_symbol(name, len); - index = the_table()->hash_to_index(hashValue); - Symbol* s = the_table()->lookup(index, name, len, hashValue); - - // Found - if (s != NULL) return s; - } - - // Otherwise, add to symbol to table. Copy to a C string first. 
- char stack_buf[128]; - ResourceMark rm(THREAD); - if (len <= 128) { - buffer = stack_buf; - } else { - buffer = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, len); - } - for (int i=0; ibasic_add(index, (u1*)buffer, len, hashValue, true, THREAD); -} - -Symbol* SymbolTable::lookup_only(const char* name, int len, - unsigned int& hash) { - hash = hash_symbol(name, len); - int index = the_table()->hash_to_index(hash); - - Symbol* s = the_table()->lookup(index, name, len, hash); - return s; -} - -// Look up the address of the literal in the SymbolTable for this Symbol* -// Do not create any new symbols -// Do not increment the reference count to keep this alive -Symbol** SymbolTable::lookup_symbol_addr(Symbol* sym){ - unsigned int hash = hash_symbol((char*)sym->bytes(), sym->utf8_length()); - int index = the_table()->hash_to_index(hash); - - for (HashtableEntry* e = the_table()->bucket(index); e != NULL; e = e->next()) { - if (e->hash() == hash) { - Symbol* literal_sym = e->literal(); - if (sym == literal_sym) { - return e->literal_addr(); + sym = lookup_dynamic(name, len, hash); + if (sym == NULL) { + sym = lookup_shared(name, len, hash); + if (sym != NULL) { + _lookup_shared_first = true; } } } - return NULL; + return sym; +} + +Symbol* SymbolTable::lookup(const char* name, int len, TRAPS) { + unsigned int hash = hash_symbol(name, len, SymbolTable::_alt_hash); + Symbol* sym = SymbolTable::the_table()->lookup_common(name, len, hash); + if (sym == NULL) { + sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, true, CHECK_NULL); + } + assert(sym->refcount() != 0, "lookup should have incremented the count"); + assert(sym->equals(name, len), "symbol must be properly initialized"); + return sym; +} + +Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) { + assert(sym->refcount() != 0, "require a valid symbol"); + const char* name = (const char*)sym->base() + begin; + int len = end - begin; + unsigned int hash = hash_symbol(name, len, SymbolTable::_alt_hash); + Symbol* found = SymbolTable::the_table()->lookup_common(name, len, hash); + if (found == NULL) { + found = SymbolTable::the_table()->do_add_if_needed(name, len, hash, true, THREAD); + } + return found; +} + +class SymbolTableLookup : StackObj { +private: + Thread* _thread; + uintx _hash; + int _len; + const char* _str; +public: + SymbolTableLookup(Thread* thread, const char* key, int len, uintx hash) + : _thread(thread), _hash(hash), _len(len), _str(key) {} + uintx get_hash() const { + return _hash; + } + bool equals(Symbol** value, bool* is_dead) { + assert(value != NULL, "expected valid value"); + assert(*value != NULL, "value should point to a symbol"); + Symbol *sym = *value; + if (sym->equals(_str, _len)) { + if (sym->try_increment_refcount()) { + // something is referencing this symbol now. 
+ return true; + } else { + assert(sym->refcount() == 0, "expected dead symbol"); + *is_dead = true; + return false; + } + } else { + *is_dead = (sym->refcount() == 0); + return false; + } + } +}; + +class SymbolTableGet : public StackObj { + Symbol* _return; +public: + SymbolTableGet() : _return(NULL) {} + void operator()(Symbol** value) { + assert(value != NULL, "expected valid value"); + assert(*value != NULL, "value should point to a symbol"); + _return = *value; + } + Symbol* get_res_sym() { + return _return; + } +}; + +Symbol* SymbolTable::do_lookup(const char* name, int len, uintx hash) { + Thread* thread = Thread::current(); + SymbolTableLookup lookup(thread, name, len, hash); + SymbolTableGet stg; + bool rehash_warning = false; + _local_table->get(thread, lookup, stg, &rehash_warning); + if (rehash_warning) { + _needs_rehashing = true; + } + Symbol* sym = stg.get_res_sym(); + assert((sym == NULL) || sym->refcount() != 0, "found dead symbol"); + return sym; +} + +Symbol* SymbolTable::lookup_only(const char* name, int len, unsigned int& hash) { + hash = hash_symbol(name, len, SymbolTable::_alt_hash); + return SymbolTable::the_table()->lookup_common(name, len, hash); } // Suggestion: Push unicode-based lookup all the way into the hashing @@ -395,14 +389,14 @@ Symbol** SymbolTable::lookup_symbol_addr(Symbol* sym){ // an actual new Symbol* is created. Symbol* SymbolTable::lookup_unicode(const jchar* name, int utf16_length, TRAPS) { int utf8_length = UNICODE::utf8_length((jchar*) name, utf16_length); - char stack_buf[128]; + char stack_buf[ON_STACK_BUFFER_LENGTH]; if (utf8_length < (int) sizeof(stack_buf)) { char* chars = stack_buf; UNICODE::convert_to_utf8(name, utf16_length, chars); return lookup(chars, utf8_length, THREAD); } else { ResourceMark rm(THREAD); - char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);; + char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1); UNICODE::convert_to_utf8(name, utf16_length, chars); return lookup(chars, utf8_length, THREAD); } @@ -411,214 +405,243 @@ Symbol* SymbolTable::lookup_unicode(const jchar* name, int utf16_length, TRAPS) Symbol* SymbolTable::lookup_only_unicode(const jchar* name, int utf16_length, unsigned int& hash) { int utf8_length = UNICODE::utf8_length((jchar*) name, utf16_length); - char stack_buf[128]; + char stack_buf[ON_STACK_BUFFER_LENGTH]; if (utf8_length < (int) sizeof(stack_buf)) { char* chars = stack_buf; UNICODE::convert_to_utf8(name, utf16_length, chars); return lookup_only(chars, utf8_length, hash); } else { ResourceMark rm; - char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);; + char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1); UNICODE::convert_to_utf8(name, utf16_length, chars); return lookup_only(chars, utf8_length, hash); } } void SymbolTable::add(ClassLoaderData* loader_data, const constantPoolHandle& cp, - int names_count, - const char** names, int* lengths, int* cp_indices, - unsigned int* hashValues, TRAPS) { - // Grab SymbolTable_lock first. 
 void SymbolTable::add(ClassLoaderData* loader_data, const constantPoolHandle& cp,
-                      int names_count,
-                      const char** names, int* lengths, int* cp_indices,
-                      unsigned int* hashValues, TRAPS) {
-  // Grab SymbolTable_lock first.
-  MutexLocker ml(SymbolTable_lock, THREAD);
-
-  SymbolTable* table = the_table();
-  bool added = table->basic_add(loader_data, cp, names_count, names, lengths,
-                                cp_indices, hashValues, CHECK);
-  if (!added) {
-    // do it the hard way
-    for (int i=0; i<names_count; i++) {
-      int index = table->hash_to_index(hashValues[i]);
-      bool c_heap = !loader_data->is_the_null_class_loader_data();
-      Symbol* sym = table->basic_add(index, (u1*)names[i], lengths[i], hashValues[i], c_heap, CHECK);
-      cp->symbol_at_put(cp_indices[i], sym);
+                      int names_count, const char** names, int* lengths,
+                      int* cp_indices, unsigned int* hashValues, TRAPS) {
+  bool c_heap = !loader_data->is_the_null_class_loader_data();
+  for (int i = 0; i < names_count; i++) {
+    const char *name = names[i];
+    int len = lengths[i];
+    unsigned int hash = hashValues[i];
+    Symbol* sym = SymbolTable::the_table()->lookup_common(name, len, hash);
+    if (sym == NULL) {
+      sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, c_heap, CHECK);
     }
+    assert(sym->refcount() != 0, "lookup should have incremented the count");
+    cp->symbol_at_put(cp_indices[i], sym);
   }
 }
 
-Symbol* SymbolTable::new_permanent_symbol(const char* name, TRAPS) {
-  unsigned int hash;
-  Symbol* result = SymbolTable::lookup_only((char*)name, (int)strlen(name), hash);
-  if (result != NULL) {
-    return result;
-  }
-  // Grab SymbolTable_lock first.
-  MutexLocker ml(SymbolTable_lock, THREAD);
+class SymbolTableCreateEntry : public StackObj {
+private:
+  Thread*     _thread;
+  const char* _name;
+  int         _len;
+  bool        _heap;
+  Symbol*     _return;
+  Symbol*     _created;
 
-  SymbolTable* table = the_table();
-  int index = table->hash_to_index(hash);
-  return table->basic_add(index, (u1*)name, (int)strlen(name), hash, false, THREAD);
-}
-
-Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
-                               unsigned int hashValue_arg, bool c_heap, TRAPS) {
-  assert(!Universe::heap()->is_in_reserved(name),
-         "proposed name of symbol must be stable");
-
-  // Don't allow symbols to be created which cannot fit in a Symbol*.
-  if (len > Symbol::max_length()) {
-    THROW_MSG_0(vmSymbols::java_lang_InternalError(),
-                "name is too long to represent");
+  void assert_for_name(Symbol* sym, const char* where) const {
+#ifdef ASSERT
+    assert(sym->utf8_length() == _len, "%s [%d,%d]", where, sym->utf8_length(), _len);
+    for (int i = 0; i < _len; i++) {
+      assert(sym->byte_at(i) == (jbyte) _name[i],
+             "%s [%d,%d,%d]", where, i, sym->byte_at(i), _name[i]);
+    }
+#endif
   }
 
-  // Cannot hit a safepoint in this function because the "this" pointer can move.
-  NoSafepointVerifier nsv;
-
-  // Check if the symbol table has been rehashed, if so, need to recalculate
-  // the hash value and index.
-  unsigned int hashValue;
-  int index;
-  if (use_alternate_hashcode()) {
-    hashValue = hash_symbol((const char*)name, len);
-    index = hash_to_index(hashValue);
-  } else {
-    hashValue = hashValue_arg;
-    index = index_arg;
+public:
+  SymbolTableCreateEntry(Thread* thread, const char* name, int len, bool heap)
+  : _thread(thread), _name(name) , _len(len), _heap(heap), _return(NULL) , _created(NULL) {
+    assert(_name != NULL, "expected valid name");
   }
-
-  // Since look-up was done lock-free, we need to check if another
-  // thread beat us in the race to insert the symbol.
-  Symbol* test = lookup(index, (char*)name, len, hashValue);
-  if (test != NULL) {
-    // A race occurred and another thread introduced the symbol.
-    assert(test->refcount() != 0, "lookup should have incremented the count");
-    return test;
+  Symbol* operator()() {
+    _created = SymbolTable::the_table()->allocate_symbol(_name, _len, _heap, _thread);
+    assert(_created != NULL, "expected created symbol");
+    assert_for_name(_created, "operator()()");
+    assert(_created->equals(_name, _len),
+           "symbol must be properly initialized [%p,%d,%d]", _name, _len, (int)_heap);
+    return _created;
   }
+  void operator()(bool inserted, Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    if (!inserted && (_created != NULL)) {
+      // We created our symbol, but someone else inserted
+      // theirs first, so ours will be destroyed.
+      // Since symbols are created with refcount of 1,
+      // we must decrement it here to 0 to delete,
+      // unless it's a permanent one.
+      if (_created->refcount() != PERM_REFCOUNT) {
+        assert(_created->refcount() == 1, "expected newly created symbol");
+        _created->decrement_refcount();
+        assert(_created->refcount() == 0, "expected dead symbol");
+      }
+    }
+    _return = *value;
+    assert_for_name(_return, "operator()");
+  }
+  Symbol* get_new_sym() const {
+    assert_for_name(_return, "get_new_sym");
+    return _return;
+  }
+};
 
-  // Create a new symbol.
-  Symbol* sym = allocate_symbol(name, len, c_heap, CHECK_NULL);
-  assert(sym->equals((char*)name, len), "symbol must be properly initialized");
-
-  HashtableEntry<Symbol*, mtSymbol>* entry = new_entry(hashValue, sym);
-  add_entry(index, entry);
+Symbol* SymbolTable::do_add_if_needed(const char* name, int len, uintx hash, bool heap, TRAPS) {
+  SymbolTableLookup lookup(THREAD, name, len, hash);
+  SymbolTableCreateEntry stce(THREAD, name, len, heap);
+  bool rehash_warning = false;
+  bool clean_hint = false;
+  _local_table->get_insert_lazy(THREAD, lookup, stce, stce, &rehash_warning, &clean_hint);
+  if (rehash_warning) {
+    _needs_rehashing = true;
+  }
+  if (clean_hint) {
+    // we just found out that there is a dead item,
+    // which we were unable to clean right now,
+    // but we have no way of telling whether it's
+    // been previously counted or not, so mark
+    // it only if no other items were found yet
+    mark_item_clean_count();
+    check_concurrent_work();
+  }
+  Symbol* sym = stce.get_new_sym();
+  assert(sym->refcount() != 0, "zero is invalid");
   return sym;
 }
 
-// This version of basic_add adds symbols in batch from the constant pool
-// parsing.
-bool SymbolTable::basic_add(ClassLoaderData* loader_data, const constantPoolHandle& cp,
-                            int names_count,
-                            const char** names, int* lengths,
-                            int* cp_indices, unsigned int* hashValues,
-                            TRAPS) {
-
-  // Check symbol names are not too long.  If any are too long, don't add any.
-  for (int i = 0; i< names_count; i++) {
-    if (lengths[i] > Symbol::max_length()) {
-      THROW_MSG_0(vmSymbols::java_lang_InternalError(),
-                  "name is too long to represent");
-    }
+Symbol* SymbolTable::new_permanent_symbol(const char* name, TRAPS) {
+  unsigned int hash = 0;
+  int len = (int)strlen(name);
+  Symbol* sym = SymbolTable::lookup_only(name, len, hash);
+  if (sym == NULL) {
+    sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, false, CHECK_NULL);
   }
-
-  // Cannot hit a safepoint in this function because the "this" pointer can move.
-  NoSafepointVerifier nsv;
-
-  for (int i=0; i<names_count; i++) {
-    // Check if the symbol table has been rehashed, if so, need to recalculate
-    // the hash value.
-    unsigned int hashValue;
-    if (use_alternate_hashcode()) {
-      hashValue = hash_symbol(names[i], lengths[i]);
-    } else {
-      hashValue = hashValues[i];
-    }
-    // Since look-up was done lock-free, we need to check if another
-    // thread beat us in the race to insert the symbol.
-    int index = hash_to_index(hashValue);
-    Symbol* test = lookup(index, names[i], lengths[i], hashValue);
-    if (test != NULL) {
-      // A race occurred and another thread introduced the symbol, this one
-      // will be dropped and collected. Use test instead.
-      cp->symbol_at_put(cp_indices[i], test);
-      assert(test->refcount() != 0, "lookup should have incremented the count");
-    } else {
-      // Create a new symbol.  The null class loader is never unloaded so these
-      // are allocated specially in a permanent arena.
-      bool c_heap = !loader_data->is_the_null_class_loader_data();
-      Symbol* sym = allocate_symbol((const u1*)names[i], lengths[i], c_heap, CHECK_(false));
-      assert(sym->equals(names[i], lengths[i]), "symbol must be properly initialized");  // why wouldn't it be???
-      HashtableEntry<Symbol*, mtSymbol>* entry = new_entry(hashValue, sym);
-      add_entry(index, entry);
-      cp->symbol_at_put(cp_indices[i], sym);
-    }
+  if (sym->refcount() != PERM_REFCOUNT) {
+    sym->increment_refcount();
+    log_trace_symboltable_helper(sym, "Asked for a permanent symbol, but got a regular one");
   }
-  return true;
+  return sym;
 }
 
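The two-argument operator() in SymbolTableCreateEntry above is the insert-race fixup: each racing thread speculatively allocates a Symbol with refcount 1, and every loser must drop its copy so only the table's winner stays referenced. A reduced sketch of that protocol (illustrative only; TOY_PERM is a stand-in for PERM_REFCOUNT):

struct ToySym {
  int refcount;
};

const int TOY_PERM = 0x7fffffff;  // permanent symbols are never deleted

// 'inserted' tells the loser apart from the winner; 'slot' is the entry
// that actually ended up in the table.
ToySym* resolve_insert_race(bool inserted, ToySym* created, ToySym** slot) {
  if (!inserted && created != nullptr && created->refcount != TOY_PERM) {
    created->refcount--;   // 1 -> 0: the losing copy is now dead
  }
  return *slot;            // always hand back the table's entry
}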
+struct SizeFunc : StackObj {
+  size_t operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    return (*value)->size() * HeapWordSize;
+  };
+};
+
+void SymbolTable::print_table_statistics(outputStream* st,
+                                         const char* table_name) {
+  SizeFunc sz;
+  _local_table->statistics_to(Thread::current(), sz, st, table_name);
+}
+
+// Verification
+class VerifySymbols : StackObj {
+public:
+  bool operator()(Symbol** value) {
+    guarantee(value != NULL, "expected valid value");
+    guarantee(*value != NULL, "value should point to a symbol");
+    Symbol* sym = *value;
+    guarantee(sym->equals((const char*)sym->bytes(), sym->utf8_length()),
+              "symbol must be internally consistent");
+    return true;
+  };
+};
 
 void SymbolTable::verify() {
-  for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
-    for ( ; p != NULL; p = p->next()) {
-      Symbol* s = (Symbol*)(p->literal());
-      guarantee(s != NULL, "symbol is NULL");
-      unsigned int h = hash_symbol((char*)s->bytes(), s->utf8_length());
-      guarantee(p->hash() == h, "broken hash in symbol table entry");
-      guarantee(the_table()->hash_to_index(h) == i,
-                "wrong index in symbol table");
-    }
+  Thread* thr = Thread::current();
+  VerifySymbols vs;
+  if (!SymbolTable::the_table()->_local_table->try_scan(thr, vs)) {
+    log_info(stringtable)("verify unavailable at this moment");
   }
 }
 
+// Dumping
+class DumpSymbol : StackObj {
+  Thread* _thr;
+  outputStream* _st;
+public:
+  DumpSymbol(Thread* thr, outputStream* st) : _thr(thr), _st(st) {}
+  bool operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    Symbol* sym = *value;
+    const char* utf8_string = (const char*)sym->bytes();
+    int utf8_length = sym->utf8_length();
+    _st->print("%d %d: ", utf8_length, sym->refcount());
+    HashtableTextDump::put_utf8(_st, utf8_string, utf8_length);
+    _st->cr();
+    return true;
+  };
+};
+
 void SymbolTable::dump(outputStream* st, bool verbose) {
   if (!verbose) {
-    the_table()->print_table_statistics(st, "SymbolTable");
+    SymbolTable::the_table()->print_table_statistics(st, "SymbolTable");
   } else {
-    st->print_cr("VERSION: 1.0");
-    for (int i = 0; i < the_table()->table_size(); ++i) {
-      HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
-      for ( ; p != NULL; p = p->next()) {
-        Symbol* s = (Symbol*)(p->literal());
-        const char* utf8_string = (const char*)s->bytes();
-        int utf8_length = s->utf8_length();
-        st->print("%d %d: ", utf8_length, s->refcount());
-        HashtableTextDump::put_utf8(st, utf8_string, utf8_length);
-        st->cr();
-      }
+    Thread* thr = Thread::current();
+    ResourceMark rm(thr);
+    st->print_cr("VERSION: 1.1");
+    DumpSymbol ds(thr, st);
+    if (!SymbolTable::the_table()->_local_table->try_scan(thr, ds)) {
+      log_info(symboltable)("dump unavailable at this moment");
     }
   }
 }
 
-void SymbolTable::write_to_archive() {
 #if INCLUDE_CDS
-  _shared_table.reset();
-
-  int num_buckets = the_table()->number_of_entries() /
-                          SharedSymbolTableBucketSize;
-  CompactSymbolTableWriter writer(num_buckets,
-                                  &MetaspaceShared::stats()->symbol);
-  for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
-    for ( ; p != NULL; p = p->next()) {
-      Symbol* s = (Symbol*)(p->literal());
-      unsigned int fixed_hash = hash_shared_symbol((char*)s->bytes(), s->utf8_length());
-      assert(fixed_hash == p->hash(), "must not rehash during dumping");
-      writer.add(fixed_hash, s);
-    }
+struct CopyToArchive : StackObj {
+  CompactSymbolTableWriter* _writer;
+  CopyToArchive(CompactSymbolTableWriter* writer) : _writer(writer) {}
+  bool operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    Symbol* sym = *value;
+    unsigned int fixed_hash = hash_shared_symbol((const char*)sym->bytes(), sym->utf8_length());
+    if (fixed_hash == 0) {
+      return true;
    }
+    assert(fixed_hash == hash_symbol((const char*)sym->bytes(), sym->utf8_length(), false),
+           "must not rehash during dumping");
 
-  writer.dump(&_shared_table);
+    // add to the compact table
+    _writer->add(fixed_hash, sym);
 
-  // Verify table is correct
-  Symbol* sym = vmSymbols::java_lang_Object();
-  const char* name = (const char*)sym->bytes();
-  int len = sym->utf8_length();
-  unsigned int hash = hash_symbol(name, len);
-  assert(sym == _shared_table.lookup(name, hash, len), "sanity");
-#endif
+    return true;
+  }
+};
+
+void SymbolTable::copy_shared_symbol_table(CompactSymbolTableWriter* writer) {
+  CopyToArchive copy(writer);
+  SymbolTable::the_table()->_local_table->do_scan(Thread::current(), copy);
+}
+
+void SymbolTable::write_to_archive() {
+  _shared_table.reset();
+
+  int num_buckets = (int)(SymbolTable::the_table()->_items_count / SharedSymbolTableBucketSize);
+  // calculation of num_buckets can result in zero buckets, we need at least one
+  CompactSymbolTableWriter writer(num_buckets > 1 ? num_buckets : 1,
+                                  &MetaspaceShared::stats()->symbol);
+  copy_shared_symbol_table(&writer);
+  writer.dump(&_shared_table);
+
+  // Verify table is correct
+  Symbol* sym = vmSymbols::java_lang_Object();
+  const char* name = (const char*)sym->bytes();
+  int len = sym->utf8_length();
+  unsigned int hash = hash_symbol(name, len, SymbolTable::_alt_hash);
+  assert(sym == _shared_table.lookup(name, hash, len), "sanity");
 }
 
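write_to_archive() above sizes the compact CDS table from the live item count and clamps the result to at least one bucket. A standalone worked example with hypothetical numbers (the real SharedSymbolTableBucketSize constant lives in the header, its value is assumed here):

#include <cstdio>

int main() {
  long items_count = 30000;                   // hypothetical live symbols
  const int SharedSymbolTableBucketSize = 4;  // assumed for illustration
  int num_buckets = (int)(items_count / SharedSymbolTableBucketSize);
  // the clamp matters for tiny tables where the division yields 0
  printf("buckets = %d\n", num_buckets > 1 ? num_buckets : 1);  // prints 7500
  return 0;
}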
 void SymbolTable::serialize(SerializeClosure* soc) {
-#if INCLUDE_CDS
   _shared_table.set_type(CompactHashtable<Symbol*, char>::_symbol_table);
   _shared_table.serialize(soc);
 
@@ -626,7 +649,201 @@ void SymbolTable::serialize(SerializeClosure* soc) {
   // Sanity.  Make sure we don't use the shared table at dump time
   _shared_table.reset();
 }
-#endif
+}
+#endif //INCLUDE_CDS
+
+// Concurrent work
+void SymbolTable::grow(JavaThread* jt) {
+  SymbolTableHash::GrowTask gt(_local_table);
+  if (!gt.prepare(jt)) {
+    return;
+  }
+  log_trace(symboltable)("Started to grow");
+  {
+    TraceTime timer("Grow", TRACETIME_LOG(Debug, symboltable, perf));
+    while (gt.do_task(jt)) {
+      gt.pause(jt);
+      {
+        ThreadBlockInVM tbivm(jt);
+      }
+      gt.cont(jt);
+    }
+  }
+  gt.done(jt);
+  _current_size = table_size();
+  log_debug(symboltable)("Grown to size:" SIZE_FORMAT, _current_size);
+}
+
+struct SymbolTableDoDelete : StackObj {
+  int _deleted;
+  SymbolTableDoDelete() : _deleted(0) {}
+  void operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    Symbol *sym = *value;
+    assert(sym->refcount() == 0, "refcount");
+    _deleted++;
+  }
+};
+
+struct SymbolTableDeleteCheck : StackObj {
+  int _processed;
+  SymbolTableDeleteCheck() : _processed(0) {}
+  bool operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    _processed++;
+    Symbol *sym = *value;
+    return (sym->refcount() == 0);
+  }
+};
+
+void SymbolTable::clean_dead_entries(JavaThread* jt) {
+  SymbolTableHash::BulkDeleteTask bdt(_local_table);
+  if (!bdt.prepare(jt)) {
+    return;
+  }
+
+  SymbolTableDeleteCheck stdc;
+  SymbolTableDoDelete stdd;
+  {
+    TraceTime timer("Clean", TRACETIME_LOG(Debug, symboltable, perf));
+    while (bdt.do_task(jt, stdc, stdd)) {
+      bdt.pause(jt);
+      {
+        ThreadBlockInVM tbivm(jt);
+      }
+      bdt.cont(jt);
+    }
+    SymbolTable::the_table()->set_item_clean_count(0);
+    bdt.done(jt);
+  }
+
+  Atomic::add((size_t)stdc._processed, &_symbols_counted);
+
+  log_debug(symboltable)("Cleaned " INT32_FORMAT " of " INT32_FORMAT,
+                         stdd._deleted, stdc._processed);
+}
+
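grow() and clean_dead_entries() above share one shape: claim the table-wide task, process one chunk of buckets per do_task() call, and between chunks drop into a safepoint-safe state so the whole operation never blocks the VM. The skeleton, reduced to a sketch with placeholder types (Task and JT are not HotSpot names):

template <typename Task, typename JT>
void run_in_chunks(Task& task, JT* jt) {
  if (!task.prepare(jt)) {
    return;                  // someone else already owns this operation
  }
  while (task.do_task(jt)) { // one chunk of buckets per iteration
    task.pause(jt);
    // In HotSpot this scope is ThreadBlockInVM, which lets a pending
    // safepoint proceed while no chunk is being processed.
    task.cont(jt);
  }
  task.done(jt);
}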
+void SymbolTable::check_concurrent_work() {
+  if (_has_work) {
+    return;
+  }
+  double load_factor = SymbolTable::get_load_factor();
+  double dead_factor = SymbolTable::get_dead_factor();
+  // We should clean/resize if we have more dead than alive,
+  // more items than preferred load factor or
+  // more dead items than water mark.
+  if ((dead_factor > load_factor) ||
+      (load_factor > PREF_AVG_LIST_LEN) ||
+      (dead_factor > CLEAN_DEAD_HIGH_WATER_MARK)) {
+    log_debug(symboltable)("Concurrent work triggered, live factor:%f dead factor:%f",
+                           load_factor, dead_factor);
+    trigger_concurrent_work();
+  }
+}
+
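To make the check_concurrent_work() thresholds above concrete: if get_load_factor() is live items per bucket and get_dead_factor() is dead items per bucket, then with hypothetical counts (the real PREF_AVG_LIST_LEN and CLEAN_DEAD_HIGH_WATER_MARK values are defined elsewhere in this file; the number below is assumed for illustration):

#include <cstdio>

int main() {
  double items = 70000.0, dead = 1200.0, buckets = 32768.0;
  double load_factor = items / buckets;  // ~2.14 live entries per bucket
  double dead_factor = dead / buckets;   // ~0.04 dead entries per bucket
  const double PREF_AVG_LIST_LEN = 2.0;  // threshold assumed for illustration
  // dead_factor < load_factor here, but load_factor exceeds the preferred
  // average list length, so concurrent work (a grow) would be triggered.
  printf("trigger = %d\n",
         dead_factor > load_factor || load_factor > PREF_AVG_LIST_LEN);
  return 0;
}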
+void SymbolTable::concurrent_work(JavaThread* jt) {
+  double load_factor = get_load_factor();
+  log_debug(symboltable, perf)("Concurrent work, live factor: %g", load_factor);
+  // We prefer growing, since that also removes dead items
+  if (load_factor > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached()) {
+    grow(jt);
+  } else {
+    clean_dead_entries(jt);
+  }
+  _has_work = false;
+}
+
+class CountDead : StackObj {
+  int _count;
+public:
+  CountDead() : _count(0) {}
+  bool operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    Symbol* sym = *value;
+    if (sym->refcount() == 0) {
+      _count++;
+    }
+    return true;
+  };
+  int get_dead_count() {
+    return _count;
+  }
+};
+
+void SymbolTable::do_check_concurrent_work() {
+  CountDead counter;
+  if (!SymbolTable::the_table()->_local_table->try_scan(Thread::current(), counter)) {
+    log_info(symboltable)("count dead unavailable at this moment");
+  } else {
+    SymbolTable::the_table()->set_item_clean_count(counter.get_dead_count());
+    SymbolTable::the_table()->check_concurrent_work();
+  }
+}
+
+void SymbolTable::do_concurrent_work(JavaThread* jt) {
+  SymbolTable::the_table()->concurrent_work(jt);
+}
+
+// Rehash
+bool SymbolTable::do_rehash() {
+  if (!_local_table->is_safepoint_safe()) {
+    return false;
+  }
+
+  // We use max size
+  SymbolTableHash* new_table = new SymbolTableHash(END_SIZE, END_SIZE, REHASH_LEN);
+  // Use alt hash from now on
+  _alt_hash = true;
+  if (!_local_table->try_move_nodes_to(Thread::current(), new_table)) {
+    _alt_hash = false;
+    delete new_table;
+    return false;
+  }
+
+  // free old table
+  delete _local_table;
+  _local_table = new_table;
+
+  return true;
+}
+
+void SymbolTable::try_rehash_table() {
+  static bool rehashed = false;
+  log_debug(symboltable)("Table imbalanced, rehashing called.");
+
+  // Grow instead of rehash.
+  if (get_load_factor() > PREF_AVG_LIST_LEN &&
+      !_local_table->is_max_size_reached()) {
+    log_debug(symboltable)("Choosing growing over rehashing.");
+    trigger_concurrent_work();
+    _needs_rehashing = false;
+    return;
+  }
+
+  // Already rehashed.
+  if (rehashed) {
+    log_warning(symboltable)("Rehashing already done, still long lists.");
+    trigger_concurrent_work();
+    _needs_rehashing = false;
+    return;
+  }
+
+  murmur_seed = AltHashing::compute_seed();
+
+  if (do_rehash()) {
+    rehashed = true;
+  } else {
+    log_info(symboltable)("Resizes in progress rehashing skipped.");
+  }
+
+  _needs_rehashing = false;
+}
+
+void SymbolTable::rehash_table() {
+  SymbolTable::the_table()->try_rehash_table();
 }
 
 //---------------------------------------------------------------------------
@@ -634,89 +851,80 @@ void SymbolTable::serialize(SerializeClosure* soc) {
 
 #ifndef PRODUCT
 
-void SymbolTable::print_histogram() {
-  MutexLocker ml(SymbolTable_lock);
-  const int results_length = 100;
-  int counts[results_length];
-  int sizes[results_length];
-  int i,j;
-
-  // initialize results to zero
-  for (j = 0; j < results_length; j++) {
-    counts[j] = 0;
-    sizes[j] = 0;
-  }
-
-  int total_size = 0;
-  int total_count = 0;
-  int total_length = 0;
-  int max_length = 0;
-  int out_of_range_count = 0;
-  int out_of_range_size = 0;
-  for (i = 0; i < the_table()->table_size(); i++) {
-    HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
-    for ( ; p != NULL; p = p->next()) {
-      int size = p->literal()->size();
-      int len = p->literal()->utf8_length();
-      if (len < results_length) {
-        counts[len]++;
-        sizes[len] += size;
-      } else {
-        out_of_range_count++;
-        out_of_range_size += size;
-      }
-      total_count++;
-      total_size += size;
-      total_length += len;
-      max_length = MAX2(max_length, len);
+class HistogramIterator : StackObj {
+public:
+  static const size_t results_length = 100;
+  size_t counts[results_length];
+  size_t sizes[results_length];
+  size_t total_size;
+  size_t total_count;
+  size_t total_length;
+  size_t max_length;
+  size_t out_of_range_count;
+  size_t out_of_range_size;
+  HistogramIterator() : total_size(0), total_count(0), total_length(0),
+                        max_length(0), out_of_range_count(0), out_of_range_size(0) {
+    // initialize results to zero
+    for (size_t i = 0; i < results_length; i++) {
+      counts[i] = 0;
+      sizes[i] = 0;
     }
   }
+  bool operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    Symbol* sym = *value;
+    size_t size = sym->size();
+    size_t len = sym->utf8_length();
+    if (len < results_length) {
+      counts[len]++;
+      sizes[len] += size;
+    } else {
+      out_of_range_count++;
+      out_of_range_size += size;
+    }
+    total_count++;
+    total_size += size;
+    total_length += len;
+    max_length = MAX2(max_length, len);
+
+    return true;
+  };
+};
+
+void SymbolTable::print_histogram() {
+  SymbolTable* st = SymbolTable::the_table();
+  HistogramIterator hi;
+  st->_local_table->do_scan(Thread::current(), hi);
   tty->print_cr("Symbol Table Histogram:");
-  tty->print_cr("  Total number of symbols  %7d", total_count);
-  tty->print_cr("  Total size in memory     %7dK",
-          (total_size*wordSize)/1024);
-  tty->print_cr("  Total counted            %7d", _symbols_counted);
-  tty->print_cr("  Total removed            %7d", _symbols_removed);
-  if (_symbols_counted > 0) {
+  tty->print_cr("  Total number of symbols  " SIZE_FORMAT_W(7), hi.total_count);
+  tty->print_cr("  Total size in memory     " SIZE_FORMAT_W(7) "K",
+                (hi.total_size * wordSize) / 1024);
+  tty->print_cr("  Total counted            " SIZE_FORMAT_W(7), st->_symbols_counted);
+  tty->print_cr("  Total removed            " SIZE_FORMAT_W(7), st->_symbols_removed);
+  if (SymbolTable::the_table()->_symbols_counted > 0) {
     tty->print_cr("  Percent removed          %3.2f",
-          ((float)_symbols_removed/(float)_symbols_counted)* 100);
+                  ((float)st->_symbols_removed / st->_symbols_counted) * 100);
   }
-  tty->print_cr("  Reference counts         %7d", Symbol::_total_count);
-  tty->print_cr("  Symbol arena used        " SIZE_FORMAT_W(7) "K", arena()->used()/1024);
-  tty->print_cr("  Symbol arena size        " SIZE_FORMAT_W(7) "K", arena()->size_in_bytes()/1024);
-  tty->print_cr("  Total symbol length      %7d", total_length);
-  tty->print_cr("  Maximum symbol length    %7d", max_length);
-  tty->print_cr("  Average symbol length    %7.2f", ((float) total_length / (float) total_count));
+  tty->print_cr("  Reference counts         " SIZE_FORMAT_W(7), Symbol::_total_count);
+  tty->print_cr("  Symbol arena used        " SIZE_FORMAT_W(7) "K", arena()->used() / 1024);
+  tty->print_cr("  Symbol arena size        " SIZE_FORMAT_W(7) "K", arena()->size_in_bytes() / 1024);
+  tty->print_cr("  Total symbol length      " SIZE_FORMAT_W(7), hi.total_length);
+  tty->print_cr("  Maximum symbol length    " SIZE_FORMAT_W(7), hi.max_length);
+  tty->print_cr("  Average symbol length    %7.2f", ((float)hi.total_length / hi.total_count));
   tty->print_cr("  Symbol length histogram:");
   tty->print_cr("    %6s %10s %10s", "Length", "#Symbols", "Size");
-  for (i = 0; i < results_length; i++) {
-    if (counts[i] > 0) {
-      tty->print_cr("    %6d %10d %10dK", i, counts[i], (sizes[i]*wordSize)/1024);
-    }
-  }
-  tty->print_cr("  >=%6d %10d %10dK\n", results_length,
-          out_of_range_count, (out_of_range_size*wordSize)/1024);
-}
-
-void SymbolTable::print() {
-  for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<Symbol*, mtSymbol>** p = the_table()->bucket_addr(i);
-    HashtableEntry<Symbol*, mtSymbol>* entry = the_table()->bucket(i);
-    if (entry != NULL) {
-      while (entry != NULL) {
-        tty->print(PTR_FORMAT " ", p2i(entry->literal()));
-        entry->literal()->print();
-        tty->print(" %d", entry->literal()->refcount());
-        p = entry->next_addr();
-        entry = (HashtableEntry<Symbol*, mtSymbol>*)HashtableEntry<Symbol*, mtSymbol>::make_ptr(*p);
-      }
-      tty->cr();
+  for (size_t i = 0; i < hi.results_length; i++) {
+    if (hi.counts[i] > 0) {
+      tty->print_cr("    " SIZE_FORMAT_W(6) " " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) "K",
+                    i, hi.counts[i], (hi.sizes[i] * wordSize) / 1024);
     }
   }
+  tty->print_cr("  >=" SIZE_FORMAT_W(6) " " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) "K\n",
+                hi.results_length, hi.out_of_range_count, (hi.out_of_range_size*wordSize) / 1024);
 }
 
 #endif // PRODUCT
 
-
 // Utility for dumping symbols
 SymboltableDCmd::SymboltableDCmd(outputStream* output, bool heap) :
                                  DCmdWithParser(output, heap),
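The header below keeps the TempNewSymbol handle/body idiom that pairs with the refcounting this patch moves into the lookup path: the handle adopts the reference a lookup took and drops it on scope exit. A self-contained toy of the same idiom (not the HotSpot class):

struct ToyBody {
  int refcount = 1;   // a fresh lookup hands back one reference
};

class ToyHandle {
  ToyBody* _temp;
public:
  explicit ToyHandle(ToyBody* body) : _temp(body) {}  // adopts the reference
  ~ToyHandle() {
    if (_temp != nullptr) {
      _temp->refcount--;  // scope exit drops it, so short-lived queries don't leak
    }
  }
  ToyBody* operator->() const { return _temp; }
};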
diff --git a/src/hotspot/share/classfile/symbolTable.hpp b/src/hotspot/share/classfile/symbolTable.hpp
index bf3c36257da..3c2913d707b 100644
--- a/src/hotspot/share/classfile/symbolTable.hpp
+++ b/src/hotspot/share/classfile/symbolTable.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -26,23 +26,11 @@
 #define SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
 
 #include "memory/allocation.hpp"
+#include "memory/padded.hpp"
 #include "oops/symbol.hpp"
+#include "utilities/concurrentHashTable.hpp"
 #include "utilities/hashtable.hpp"
 
-// The symbol table holds all Symbol*s and corresponding interned strings.
-// Symbol*s and literal strings should be canonicalized.
-//
-// The interned strings are created lazily.
-//
-// It is implemented as an open hash table with a fixed number of buckets.
-//
-// %note:
-//  - symbolTableEntrys are allocated in blocks to reduce the space overhead.
-
-class BoolObjectClosure;
-class outputStream;
-class SerializeClosure;
-
 // TempNewSymbol acts as a handle class in a handle/body idiom and is
 // responsible for proper resource management of the body (which is a Symbol*).
 // The body is resource managed by a reference counting scheme.
@@ -59,7 +47,7 @@ class SerializeClosure;
 class TempNewSymbol : public StackObj {
   Symbol* _temp;
 
- public:
+public:
   TempNewSymbol() : _temp(NULL) {}
 
   // Conversion from a Symbol* to a TempNewSymbol.
@@ -97,35 +85,69 @@ class TempNewSymbol : public StackObj {
 };
 
 template <class T, class N> class CompactHashtable;
+class CompactSymbolTableWriter;
+class SerializeClosure;
 
-class SymbolTable : public RehashableHashtable<Symbol*, mtSymbol> {
+class SymbolTableConfig;
+typedef ConcurrentHashTable<Symbol*, SymbolTableConfig, mtSymbol> SymbolTableHash;
+
+class SymbolTableCreateEntry;
+
+class SymbolTable : public CHeapObj<mtSymbol> {
   friend class VMStructs;
+  friend class Symbol;
   friend class ClassFileParser;
+  friend class SymbolTableConfig;
+  friend class SymbolTableCreateEntry;
 
 private:
+  static void delete_symbol(Symbol* sym);
+  void grow(JavaThread* jt);
+  void clean_dead_entries(JavaThread* jt);
+
   // The symbol table
   static SymbolTable* _the_table;
-
-  // Set if one bucket is out of balance due to hash algorithm deficiency
-  static bool _needs_rehashing;
-  static bool _lookup_shared_first;
+  // Shared symbol table.
+  static CompactHashtable<Symbol*, char> _shared_table;
+  static volatile bool _lookup_shared_first;
+  static volatile bool _alt_hash;
 
   // For statistics
-  static int _symbols_removed;
-  static int _symbols_counted;
+  volatile size_t _symbols_removed;
+  volatile size_t _symbols_counted;
 
-  // shared symbol table.
-  static CompactHashtable<Symbol*, char> _shared_table;
+  SymbolTableHash* _local_table;
+  size_t _current_size;
+  volatile bool _has_work;
+  // Set if one bucket is out of balance due to hash algorithm deficiency
+  volatile bool _needs_rehashing;
 
-  Symbol* allocate_symbol(const u1* name, int len, bool c_heap, TRAPS); // Assumes no characters larger than 0x7F
+  volatile size_t _items_count;
+  volatile size_t _uncleaned_items_count;
+
+  double get_load_factor();
+  double get_dead_factor();
+
+  void check_concurrent_work();
+  void trigger_concurrent_work();
+
+  static void item_added();
+  static void item_removed();
+  static void set_item_clean_count(size_t ncl);
+  static void mark_item_clean_count();
+
+  SymbolTable();
+
+  Symbol* allocate_symbol(const char* name, int len, bool c_heap, TRAPS); // Assumes no characters larger than 0x7F
+  Symbol* do_lookup(const char* name, int len, uintx hash);
+  Symbol* do_add_if_needed(const char* name, int len, uintx hash, bool heap, TRAPS);
 
   // Adding elements
-  Symbol* basic_add(int index, u1* name, int len, unsigned int hashValue,
-                    bool c_heap, TRAPS);
-  bool basic_add(ClassLoaderData* loader_data,
-                 const constantPoolHandle& cp, int names_count,
-                 const char** names, int* lengths, int* cp_indices,
-                 unsigned int* hashValues, TRAPS);
+  static void add(ClassLoaderData* loader_data,
+                  const constantPoolHandle& cp, int names_count,
+                  const char** names, int* lengths, int* cp_indices,
+                  unsigned int* hashValues, TRAPS);
 
   static void new_symbols(ClassLoaderData* loader_data,
                           const constantPoolHandle& cp, int names_count,
@@ -136,15 +158,8 @@ private:
   }
 
   static Symbol* lookup_shared(const char* name, int len, unsigned int hash);
-  Symbol* lookup_dynamic(int index, const char* name, int len, unsigned int hash);
-  Symbol* lookup(int index, const char* name, int len, unsigned int hash);
-
-  SymbolTable()
-    : RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
-
-  SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
-    : RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
-                number_of_entries) {}
+  Symbol* lookup_dynamic(const char* name, int len, unsigned int hash);
+  Symbol* lookup_common(const char* name, int len, unsigned int hash);
 
   // Arena for permanent symbols (null class loader) that are never unloaded
   static Arena*  _arena;
@@ -152,88 +167,45 @@ private:
 
   static void initialize_symbols(int arena_alloc_size = 0);
 
-  static volatile int _parallel_claimed_idx;
+  void concurrent_work(JavaThread* jt);
+  void print_table_statistics(outputStream* st, const char* table_name);
+
+  void try_rehash_table();
+  bool do_rehash();
 
-  typedef SymbolTable::BucketUnlinkContext BucketUnlinkContext;
-  // Release any dead symbols. Unlinked bucket entries are collected in the given
-  // context to be freed later.
-  // This allows multiple threads to work on the table at once.
-  static void buckets_unlink(int start_idx, int end_idx, BucketUnlinkContext* context);
+
 public:
+  // The symbol table
+  static SymbolTable* the_table() { return _the_table; }
+  size_t table_size();
+
   enum {
     symbol_alloc_batch_size = 8,
     // Pick initial size based on java -version size measurements
-    symbol_alloc_arena_size = 360*K
+    symbol_alloc_arena_size = 360*K // TODO (revisit)
   };
 
-  // The symbol table
-  static SymbolTable* the_table() { return _the_table; }
-
-  // Size of one bucket in the string table.  Used when checking for rollover.
-  static uint bucket_size() { return sizeof(HashtableBucket<mtSymbol>); }
-
   static void create_table() {
     assert(_the_table == NULL, "One symbol table allowed.");
     _the_table = new SymbolTable();
     initialize_symbols(symbol_alloc_arena_size);
   }
 
-  static unsigned int hash_symbol(const char* s, int len);
-  static unsigned int hash_shared_symbol(const char* s, int len);
+  static void unlink() {
+    do_check_concurrent_work();
+  }
+  static void do_check_concurrent_work();
+  static void do_concurrent_work(JavaThread* jt);
+  static bool has_work() { return the_table()->_has_work; }
 
+  // Probing
   static Symbol* lookup(const char* name, int len, TRAPS);
   // lookup only, won't add. Also calculate hash.
   static Symbol* lookup_only(const char* name, int len, unsigned int& hash);
-  // Only copy to C string to be added if lookup failed.
+  // adds new symbol if not found
   static Symbol* lookup(const Symbol* sym, int begin, int end, TRAPS);
-
-  static void release(Symbol* sym);
-
-  // Look up the address of the literal in the SymbolTable for this Symbol*
-  static Symbol** lookup_symbol_addr(Symbol* sym);
-
   // jchar (UTF16) version of lookups
   static Symbol* lookup_unicode(const jchar* name, int len, TRAPS);
   static Symbol* lookup_only_unicode(const jchar* name, int len, unsigned int& hash);
-
-  static void add(ClassLoaderData* loader_data,
-                  const constantPoolHandle& cp, int names_count,
-                  const char** names, int* lengths, int* cp_indices,
-                  unsigned int* hashValues, TRAPS);
-
-  // Release any dead symbols
-  static void unlink() {
-    int processed = 0;
-    int removed = 0;
-    unlink(&processed, &removed);
-  }
-  static void unlink(int* processed, int* removed);
-  // Release any dead symbols, possibly parallel version
-  static void possibly_parallel_unlink(int* processed, int* removed);
-
-  // iterate over symbols
-  static void symbols_do(SymbolClosure *cl);
-  static void metaspace_pointers_do(MetaspaceClosure* it);
-
-  // Symbol creation
-  static Symbol* new_symbol(const char* utf8_buffer, int length, TRAPS) {
-    assert(utf8_buffer != NULL, "just checking");
-    return lookup(utf8_buffer, length, THREAD);
-  }
-  static Symbol* new_symbol(const char* name, TRAPS) {
-    return new_symbol(name, (int)strlen(name), THREAD);
-  }
-  static Symbol* new_symbol(const Symbol* sym, int begin, int end, TRAPS) {
-    assert(begin <= end && end <= sym->utf8_length(), "just checking");
-    return lookup(sym, begin, end, THREAD);
-  }
-
-  // Create a symbol in the arena for symbols that are not deleted
-  static Symbol* new_permanent_symbol(const char* name, TRAPS);
-
-  // Symbol lookup
-  static Symbol* lookup(int index, const char* name, int len, TRAPS);
-
   // Needed for preloading classes in signatures when compiling.
   // Returns the symbol is already present in symbol table, otherwise
   // NULL.  NO ALLOCATION IS GUARANTEED!
@@ -246,27 +218,45 @@ public:
     return lookup_only_unicode(name, len, ignore_hash);
   }
 
-  // Histogram
-  static void print_histogram()     PRODUCT_RETURN;
-  static void print()               PRODUCT_RETURN;
+  // Symbol creation
+  static Symbol* new_symbol(const char* utf8_buffer, int length, TRAPS) {
+    assert(utf8_buffer != NULL, "just checking");
+    return lookup(utf8_buffer, length, THREAD);
+  }
+  static Symbol* new_symbol(const char* name, TRAPS) {
+    return new_symbol(name, (int)strlen(name), THREAD);
+  }
+  static Symbol* new_symbol(const Symbol* sym, int begin, int end, TRAPS) {
+    assert(begin <= end && end <= sym->utf8_length(), "just checking");
+    return lookup(sym, begin, end, THREAD);
+  }
+  // Create a symbol in the arena for symbols that are not deleted
+  static Symbol* new_permanent_symbol(const char* name, TRAPS);
 
-  // Debugging
-  static void verify();
-  static void dump(outputStream* st, bool verbose=false);
-  static void read(const char* filename, TRAPS);
+  // Rehash the string table if it gets out of balance
+  static void rehash_table();
+  static bool needs_rehashing()
+    { return SymbolTable::the_table()->_needs_rehashing; }
+
+  // Heap dumper and CDS
+  static void symbols_do(SymbolClosure *cl);
 
   // Sharing
-  static void write_to_archive();
-  static void serialize(SerializeClosure* soc);
-  static u4 encode_shared(Symbol* sym);
-  static Symbol* decode_shared(u4 offset);
+private:
+  static void copy_shared_symbol_table(CompactSymbolTableWriter* ch_table);
+public:
+  static void write_to_archive() NOT_CDS_RETURN;
+  static void serialize(SerializeClosure* soc) NOT_CDS_RETURN;
+  static void metaspace_pointers_do(MetaspaceClosure* it);
 
-  // Rehash the symbol table if it gets out of balance
-  static void rehash_table();
-  static bool needs_rehashing()         { return _needs_rehashing; }
-  // Parallel chunked scanning
-  static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; }
-  static int parallel_claimed_index()        { return _parallel_claimed_idx; }
+  // Jcmd
+  static void dump(outputStream* st, bool verbose=false);
+  // Debugging
+  static void verify();
+  static void read(const char* filename, TRAPS);
+
+  // Histogram
+  static void print_histogram() PRODUCT_RETURN;
 };
 
 #endif // SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
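The _has_work flag and the do_concurrent_work() entry point declared above are meant to be polled from a VM-internal maintenance thread; the wiring itself is outside this hunk, so the following is only a sketch of the intended consumption, not code from this patch:

// Hypothetical polling loop (the real ServiceThread wiring is not shown here):
//
//   while (vm_is_running) {
//     wait_for_wakeup();
//     if (SymbolTable::has_work()) {
//       SymbolTable::do_concurrent_work(jt);  // grow or clean, off-safepoint
//     }
//   }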
diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp
index 9b490359154..6663b909eb7 100644
--- a/src/hotspot/share/classfile/systemDictionary.cpp
+++ b/src/hotspot/share/classfile/systemDictionary.cpp
@@ -988,18 +988,18 @@ InstanceKlass* SystemDictionary::parse_stream(Symbol* class_name,
                                               Handle class_loader,
                                               Handle protection_domain,
                                               ClassFileStream* st,
-                                              const InstanceKlass* host_klass,
+                                              const InstanceKlass* unsafe_anonymous_host,
                                               GrowableArray<Handle>* cp_patches,
                                               TRAPS) {
 
   EventClassLoad class_load_start_event;
 
   ClassLoaderData* loader_data;
-  if (host_klass != NULL) {
-    // Create a new CLD for anonymous class, that uses the same class loader
-    // as the host_klass
-    guarantee(oopDesc::equals(host_klass->class_loader(), class_loader()), "should be the same");
-    loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader);
+  if (unsafe_anonymous_host != NULL) {
+    // Create a new CLD for an unsafe anonymous class, that uses the same class loader
+    // as the unsafe_anonymous_host
+    guarantee(oopDesc::equals(unsafe_anonymous_host->class_loader(), class_loader()), "should be the same");
+    loader_data = ClassLoaderData::unsafe_anonymous_class_loader_data(class_loader);
   } else {
     loader_data = ClassLoaderData::class_loader_data(class_loader());
   }
@@ -1016,12 +1016,12 @@ InstanceKlass* SystemDictionary::parse_stream(Symbol* class_name,
                                                class_name,
                                                loader_data,
                                                protection_domain,
-                                               host_klass,
+                                               unsafe_anonymous_host,
                                                cp_patches,
                                                CHECK_NULL);
 
-  if (host_klass != NULL && k != NULL) {
-    // Anonymous classes must update ClassLoaderData holder (was host_klass loader)
+  if (unsafe_anonymous_host != NULL && k != NULL) {
+    // Unsafe anonymous classes must update ClassLoaderData holder (was unsafe_anonymous_host loader)
     // so that they can be unloaded when the mirror is no longer referenced.
     k->class_loader_data()->initialize_holder(Handle(THREAD, k->java_mirror()));
 
@@ -1056,8 +1056,8 @@ InstanceKlass* SystemDictionary::parse_stream(Symbol* class_name,
       post_class_load_event(&class_load_start_event, k, loader_data);
     }
   }
-  assert(host_klass != NULL || NULL == cp_patches,
-         "cp_patches only found with host_klass");
+  assert(unsafe_anonymous_host != NULL || NULL == cp_patches,
+         "cp_patches only found with unsafe_anonymous_host");
 
   return k;
 }
@@ -1115,7 +1115,7 @@ InstanceKlass* SystemDictionary::resolve_from_stream(Symbol* class_name,
                                                      class_name,
                                                      loader_data,
                                                      protection_domain,
-                                                     NULL, // host_klass
+                                                     NULL, // unsafe_anonymous_host
                                                      NULL, // cp_patches
                                                      CHECK_NULL);
 }
@@ -1160,10 +1160,12 @@ InstanceKlass* SystemDictionary::resolve_from_stream(Symbol* class_name,
 #if INCLUDE_CDS
 void SystemDictionary::set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
                                              int number_of_entries) {
+  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
   assert(length == _shared_dictionary_size * sizeof(HashtableBucket<mtClass>),
          "bad shared dictionary size.");
   _shared_dictionary = new Dictionary(ClassLoaderData::the_null_class_loader_data(),
-                                      _shared_dictionary_size, t, number_of_entries);
+                                      _shared_dictionary_size, t, number_of_entries,
+                                      false /* explicitly set _resizable to false */);
 }
 
 
@@ -1858,10 +1860,19 @@ bool SystemDictionary::do_unloading(GCTimer* gc_timer,
     }
   }
 
+  // TODO: just return if !unloading_occurred.
   if (unloading_occurred) {
-    GCTraceTime(Debug, gc, phases) t("Dictionary", gc_timer);
-    constraints()->purge_loader_constraints();
-    resolution_errors()->purge_resolution_errors();
+    {
+      GCTraceTime(Debug, gc, phases) t("SymbolTable", gc_timer);
+      // Check if there's work to do in the SymbolTable
+      SymbolTable::do_check_concurrent_work();
+    }
+
+    {
+      GCTraceTime(Debug, gc, phases) t("Dictionary", gc_timer);
+      constraints()->purge_loader_constraints();
+      resolution_errors()->purge_resolution_errors();
+    }
   }
 
   {
@@ -1874,7 +1885,7 @@ bool SystemDictionary::do_unloading(GCTimer* gc_timer,
 
   if (do_cleaning) {
     GCTraceTime(Debug, gc, phases) t("ResolvedMethodTable", gc_timer);
-    ResolvedMethodTable::unlink();
+    ResolvedMethodTable::trigger_cleanup();
   }
 
   return unloading_occurred;
@@ -3001,7 +3012,7 @@ class CombineDictionariesClosure : public CLDClosure {
       _master_dictionary(master_dictionary) {}
   void do_cld(ClassLoaderData* cld) {
     ResourceMark rm;
-    if (cld->is_anonymous()) {
+    if (cld->is_unsafe_anonymous()) {
       return;
     }
     if (cld->is_system_class_loader_data() || cld->is_platform_class_loader_data()) {
diff --git a/src/hotspot/share/classfile/systemDictionary.hpp b/src/hotspot/share/classfile/systemDictionary.hpp
index 581add81b26..4c008c8896f 100644
--- a/src/hotspot/share/classfile/systemDictionary.hpp
+++ b/src/hotspot/share/classfile/systemDictionary.hpp
@@ -187,11 +187,6 @@ class OopStorage;
   do_klass(jdk_internal_loader_ClassLoaders_AppClassLoader_klass,      jdk_internal_loader_ClassLoaders_AppClassLoader,      Pre ) \
   do_klass(jdk_internal_loader_ClassLoaders_PlatformClassLoader_klass, jdk_internal_loader_ClassLoaders_PlatformClassLoader, Pre ) \
   do_klass(CodeSource_klass,                 java_security_CodeSource,                 Pre ) \
-  do_klass(Configuration_klass,              java_lang_module_Configuration,           Pre ) \
-  do_klass(ImmutableCollections_ListN_klass, java_util_ImmutableCollections_ListN,     Pre ) \
-  do_klass(ImmutableCollections_MapN_klass,  java_util_ImmutableCollections_MapN,      Pre ) \
-  do_klass(ImmutableCollections_SetN_klass,  java_util_ImmutableCollections_SetN,      Pre ) \
-  do_klass(ArchivedModuleGraph_klass,        jdk_internal_module_ArchivedModuleGraph,  Pre ) \
   \
   do_klass(StackTraceElement_klass,          java_lang_StackTraceElement,              Opt ) \
   \
@@ -215,7 +210,6 @@ class OopStorage;
   do_klass(Byte_klass,                       java_lang_Byte,                           Pre ) \
   do_klass(Short_klass,                      java_lang_Short,                          Pre ) \
   do_klass(Integer_klass,                    java_lang_Integer,                        Pre ) \
-  do_klass(Integer_IntegerCache_klass,       java_lang_Integer_IntegerCache,           Pre ) \
   do_klass(Long_klass,                       java_lang_Long,                           Pre ) \
   \
   /* JVMCI classes.  These are loaded on-demand. */ \
@@ -304,7 +298,7 @@ public:
                                         class_loader,
                                         protection_domain,
                                         st,
-                                        NULL, // host klass
+                                        NULL, // unsafe_anonymous_host
                                         NULL, // cp_patches
                                         THREAD);
   }
@@ -312,7 +306,7 @@ public:
                                      Handle class_loader,
                                      Handle protection_domain,
                                      ClassFileStream* st,
-                                     const InstanceKlass* host_klass,
+                                     const InstanceKlass* unsafe_anonymous_host,
                                      GrowableArray<Handle>* cp_patches,
                                      TRAPS);
 
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp
index 1acb4f0087e..81bf946977b 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp
@@ -755,11 +755,11 @@ bool SystemDictionaryShared::add_verification_constraint(InstanceKlass* k, Symbo
     Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object) {
   assert(DumpSharedSpaces, "called at dump time only");
 
-  // Skip anonymous classes, which are not archived as they are not in
-  // dictionary (see assert_no_anonymoys_classes_in_dictionaries() in
+  // Skip unsafe anonymous classes, which are not archived as they are not in
+  // dictionary (see assert_no_unsafe_anonymous_classes_in_dictionaries() in
   // VM_PopulateDumpSharedSpace::doit()).
-  if (k->class_loader_data()->is_anonymous()) {
-    return true; // anonymous classes are not archived, skip
+  if (k->class_loader_data()->is_unsafe_anonymous()) {
+    return true; // unsafe anonymous classes are not archived, skip
   }
 
   SharedDictionaryEntry* entry = ((SharedDictionary*)(k->class_loader_data()->dictionary()))->find_entry_for(k);
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.hpp b/src/hotspot/share/classfile/systemDictionaryShared.hpp
index 7f2c2336366..39d04b250e6 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.hpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp
@@ -293,9 +293,6 @@ public:
   static void allocate_shared_data_arrays(int size, TRAPS);
   static void oops_do(OopClosure* f);
-  static void roots_oops_do(OopClosure* f) {
-    oops_do(f);
-  }
 
   // Check if sharing is supported for the class loader.
   static bool is_sharing_possible(ClassLoaderData* loader_data);
diff --git a/src/hotspot/share/classfile/verifier.cpp b/src/hotspot/share/classfile/verifier.cpp
index 3e89711c376..6fb33a868da 100644
--- a/src/hotspot/share/classfile/verifier.cpp
+++ b/src/hotspot/share/classfile/verifier.cpp
@@ -2823,20 +2823,20 @@ void ClassVerifier::verify_invoke_instructions(
           current_class()->super()->name()))) {
     bool subtype = false;
     bool have_imr_indirect = cp->tag_at(index).value() == JVM_CONSTANT_InterfaceMethodref;
-    if (!current_class()->is_anonymous()) {
+    if (!current_class()->is_unsafe_anonymous()) {
       subtype = ref_class_type.is_assignable_from(
                  current_type(), this, false, CHECK_VERIFY(this));
     } else {
-      VerificationType host_klass_type =
-        VerificationType::reference_type(current_class()->host_klass()->name());
-      subtype = ref_class_type.is_assignable_from(host_klass_type, this, false, CHECK_VERIFY(this));
+      VerificationType unsafe_anonymous_host_type =
+        VerificationType::reference_type(current_class()->unsafe_anonymous_host()->name());
+      subtype = ref_class_type.is_assignable_from(unsafe_anonymous_host_type, this, false, CHECK_VERIFY(this));
 
       // If invokespecial of IMR, need to recheck for same or
       // direct interface relative to the host class
       have_imr_indirect = (have_imr_indirect &&
                            !is_same_or_direct_interface(
-                             current_class()->host_klass(),
-                             host_klass_type, ref_class_type));
+                             current_class()->unsafe_anonymous_host(),
+                             unsafe_anonymous_host_type, ref_class_type));
     }
     if (!subtype) {
       verify_error(ErrorContext::bad_code(bci),
@@ -2866,15 +2866,15 @@ void ClassVerifier::verify_invoke_instructions(
     } else {   // other methods
       // Ensures that target class is assignable to method class.
       if (opcode == Bytecodes::_invokespecial) {
-        if (!current_class()->is_anonymous()) {
+        if (!current_class()->is_unsafe_anonymous()) {
           current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
         } else {
           // anonymous class invokespecial calls: check if the
-          // objectref is a subtype of the host_klass of the current class
-          // to allow an anonymous class to reference methods in the host_klass
+          // objectref is a subtype of the unsafe_anonymous_host of the current class
+          // to allow an anonymous class to reference methods in the unsafe_anonymous_host
           VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this));
           VerificationType hosttype =
-            VerificationType::reference_type(current_class()->host_klass()->name());
+            VerificationType::reference_type(current_class()->unsafe_anonymous_host()->name());
           bool subtype = hosttype.is_assignable_from(top, this, false, CHECK_VERIFY(this));
           if (!subtype) {
             verify_error( ErrorContext::bad_type(current_frame->offset(),
diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp
index 42f149303f0..201f4977f4e 100644
--- a/src/hotspot/share/classfile/vmSymbols.hpp
+++ b/src/hotspot/share/classfile/vmSymbols.hpp
@@ -124,7 +124,6 @@
   template(getBootClassPathEntryForClass_name,        "getBootClassPathEntryForClass")            \
   template(jdk_internal_vm_PostVMInitHook,            "jdk/internal/vm/PostVMInitHook")           \
   template(sun_net_www_ParseUtil,                     "sun/net/www/ParseUtil")                    \
-  template(jdk_internal_module_ArchivedModuleGraph,   "jdk/internal/module/ArchivedModuleGraph")  \
   \
   template(jdk_internal_loader_ClassLoaders_AppClassLoader,      "jdk/internal/loader/ClassLoaders$AppClassLoader")      \
   template(jdk_internal_loader_ClassLoaders_PlatformClassLoader, "jdk/internal/loader/ClassLoaders$PlatformClassLoader") \
@@ -649,17 +648,7 @@
   JFR_TEMPLATES(template)                                                                         \
   \
   /* cds */                                                                                       \
-  template(configuration_signature,                "Ljava/lang/module/Configuration;")            \
-  template(java_lang_module_Configuration,         "java/lang/module/Configuration")              \
-  template(java_util_ImmutableCollections_ListN,   "java/util/ImmutableCollections$ListN")        \
-  template(java_util_ImmutableCollections_MapN,    "java/util/ImmutableCollections$MapN")         \
-  template(java_util_ImmutableCollections_SetN,    "java/util/ImmutableCollections$SetN")         \
   template(jdk_internal_loader_ClassLoaders,       "jdk/internal/loader/ClassLoaders")            \
-  template(list_signature,                         "Ljava/util/List;")                            \
-  template(map_signature,                          "Ljava/util/Map;")                             \
-  template(moduleFinder_signature,                 "Ljava/lang/module/ModuleFinder;")             \
-  template(set_signature,                          "Ljava/util/Set;")                             \
-  template(systemModules_signature,                "Ljdk/internal/module/SystemModules;")         \
   template(toFileURL_name,                         "toFileURL")                                   \
   template(toFileURL_signature,                    "(Ljava/lang/String;)Ljava/net/URL;")          \
   template(url_void_signature,                     "(Ljava/net/URL;)V")                           \
diff --git a/src/hotspot/share/code/compiledMethod.hpp b/src/hotspot/share/code/compiledMethod.hpp
index 6291c6804b1..552ec270241 100644
--- a/src/hotspot/share/code/compiledMethod.hpp
+++ b/src/hotspot/share/code/compiledMethod.hpp
@@ -202,7 +202,7 @@ public:
   virtual address verified_entry_point() const = 0;
   virtual void log_identity(xmlStream* log) const = 0;
-  virtual void log_state_change() const = 0;
+  virtual void log_state_change(oop cause = NULL) const = 0;
   virtual bool make_not_used() = 0;
   virtual bool make_not_entrant() = 0;
   virtual bool make_entrant() = 0;
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index d092915b90f..bb601f54038 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -422,7 +422,7 @@ void nmethod::init_defaults() {
 #if INCLUDE_JVMCI
   _jvmci_installed_code   = NULL;
   _speculation_log        = NULL;
-  _jvmci_installed_code_triggers_unloading = false;
+  _jvmci_installed_code_triggers_invalidation = false;
 #endif
 }
 
@@ -690,9 +690,9 @@ nmethod::nmethod(
     _speculation_log = speculation_log;
     oop obj = JNIHandles::resolve(installed_code);
     if (obj == NULL || (obj->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(obj))) {
-      _jvmci_installed_code_triggers_unloading = false;
+      _jvmci_installed_code_triggers_invalidation = false;
     } else {
-      _jvmci_installed_code_triggers_unloading = true;
+      _jvmci_installed_code_triggers_invalidation = true;
     }
 
     if (compiler->is_jvmci()) {
@@ -786,6 +786,13 @@ void nmethod::log_identity(xmlStream* log) const {
   if (TieredCompilation) {
     log->print(" level='%d'", comp_level());
   }
+#if INCLUDE_JVMCI
+    char buffer[O_BUFLEN];
+    char* jvmci_name = jvmci_installed_code_name(buffer, O_BUFLEN);
+    if (jvmci_name != NULL) {
+      log->print(" jvmci_installed_code_name='%s'", jvmci_name);
+    }
+#endif
 }
 
 
@@ -1083,7 +1090,7 @@ void nmethod::make_unloaded(oop cause) {
   _state = unloaded;
 
   // Log the unloading.
-  log_state_change();
+  log_state_change(cause);
 
 #if INCLUDE_JVMCI
   // The method can only be unloaded after the pointer to the installed code
@@ -1107,7 +1114,7 @@ void nmethod::invalidate_osr_method() {
   }
 }
 
-void nmethod::log_state_change() const {
+void nmethod::log_state_change(oop cause) const {
   if (LogCompilation) {
     if (xtty != NULL) {
       ttyLocker ttyl;  // keep the following output all in one block
@@ -1120,6 +1127,9 @@ void nmethod::log_state_change() const {
                        (_state == zombie ? " zombie='1'" : ""));
     }
     log_identity(xtty);
+    if (cause != NULL) {
+      xtty->print(" cause='%s'", cause->klass()->external_name());
+    }
     xtty->stamp();
     xtty->end_elem();
   }
@@ -1150,7 +1160,8 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
   nmethodLocker nml(this);
   methodHandle the_method(method());
-  NoSafepointVerifier nsv;
+  // This can be called while the system is already at a safepoint which is ok
+  NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());
 
   // during patching, depending on the nmethod state we must notify the GC that
   // code has been unloaded, unregistering it. We cannot do this right while
@@ -1507,13 +1518,12 @@ bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_aliv
 bool nmethod::do_unloading_jvmci() {
   if (_jvmci_installed_code != NULL) {
     if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
-      if (_jvmci_installed_code_triggers_unloading) {
-        // jweak reference processing has already cleared the referent
-        make_unloaded(NULL);
-        return true;
-      } else {
-        clear_jvmci_installed_code();
+      if (_jvmci_installed_code_triggers_invalidation) {
+        // The reference to the installed code has been dropped so invalidate
+        // this nmethod and allow the sweeper to reclaim it.
+        make_not_entrant();
       }
+      clear_jvmci_installed_code();
     }
   }
   return false;
@@ -2948,7 +2958,7 @@ oop nmethod::speculation_log() {
   return  JNIHandles::resolve(_speculation_log);
 }
 
-char* nmethod::jvmci_installed_code_name(char* buf, size_t buflen) {
+char* nmethod::jvmci_installed_code_name(char* buf, size_t buflen) const {
   if (!this->is_compiled_by_jvmci()) {
     return NULL;
   }
diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp
index a33688d893f..664a10ecb14 100644
--- a/src/hotspot/share/code/nmethod.hpp
+++ b/src/hotspot/share/code/nmethod.hpp
@@ -78,7 +78,7 @@ class nmethod : public CompiledMethod {
   // That is, installed code other than a "default"
   // HotSpotNMethod causes nmethod unloading.
   // This field is ignored once _jvmci_installed_code is NULL.
-  bool _jvmci_installed_code_triggers_unloading;
+  bool _jvmci_installed_code_triggers_invalidation;
 #endif
 
   // To support simple linked-list chaining of nmethods:
@@ -456,7 +456,7 @@ public:
   // Copies the value of the name field in the InstalledCode
   // object (if any) associated with this nmethod into buf.
   // Returns the value of buf if it was updated otherwise NULL.
-  char* jvmci_installed_code_name(char* buf, size_t buflen);
+  char* jvmci_installed_code_name(char* buf, size_t buflen) const;
 
   // Updates the state of the InstalledCode (if any) associated with
   // this nmethod based on the current value of _state.
@@ -486,7 +486,7 @@ public:
 protected:
   virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive);
 #if INCLUDE_JVMCI
-  // See comment for _jvmci_installed_code_triggers_unloading field.
+  // See comment for _jvmci_installed_code_triggers_invalidation field.
   // Returns whether this nmethod was unloaded.
   virtual bool do_unloading_jvmci();
 #endif
@@ -555,7 +555,7 @@ public:
   // Logging
   void log_identity(xmlStream* log) const;
   void log_new_nmethod() const;
-  void log_state_change() const;
+  void log_state_change(oop cause = NULL) const;
 
   // Prints block-level comments, including nmethod specific block labels:
   virtual void print_block_comment(outputStream* stream, address block_begin) const {
diff --git a/src/hotspot/share/gc/g1/collectionSetChooser.cpp b/src/hotspot/share/gc/g1/collectionSetChooser.cpp
index 9d2c90b546c..1a85db83d8a 100644
--- a/src/hotspot/share/gc/g1/collectionSetChooser.cpp
+++ b/src/hotspot/share/gc/g1/collectionSetChooser.cpp
@@ -243,7 +243,7 @@ public:
       // sets for old regions.
       r->rem_set()->clear(true /* only_cardset */);
     } else {
-      assert(!r->is_old() || !r->rem_set()->is_tracked(),
+      assert(r->is_archive() || !r->is_old() || !r->rem_set()->is_tracked(),
             "Missed to clear unused remembered set of region %u (%s) that is %s",
             r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str());
     }
diff --git a/src/hotspot/share/gc/g1/dirtyCardQueue.hpp b/src/hotspot/share/gc/g1/dirtyCardQueue.hpp
index 58a72aae375..57fac9f88dd 100644
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.hpp
+++ b/src/hotspot/share/gc/g1/dirtyCardQueue.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_GC_G1_DIRTYCARDQUEUE_HPP
 #define SHARE_VM_GC_G1_DIRTYCARDQUEUE_HPP
 
-#include "gc/g1/ptrQueue.hpp"
+#include "gc/shared/ptrQueue.hpp"
 #include "memory/allocation.hpp"
 
 class FreeIdSet;
diff --git a/src/hotspot/share/gc/g1/g1Allocator.cpp b/src/hotspot/share/gc/g1/g1Allocator.cpp
index b07518ec7a2..a838abf3b91 100644
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp
@@ -361,7 +361,7 @@ bool G1ArchiveAllocator::alloc_new_region() {
     hr->set_closed_archive();
   }
   _g1h->g1_policy()->remset_tracker()->update_at_allocate(hr);
-  _g1h->old_set_add(hr);
+  _g1h->archive_set_add(hr);
   _g1h->hr_printer()->alloc(hr);
   _allocated_regions.append(hr);
   _allocation_region = hr;
diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.cpp b/src/hotspot/share/gc/g1/g1BarrierSet.cpp
index 70bd581b925..3e6564dac2e 100644
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp
@@ -27,9 +27,10 @@
 #include "gc/g1/g1BarrierSetAssembler.hpp"
 #include "gc/g1/g1CardTable.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1SATBMarkQueueSet.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegion.hpp"
-#include "gc/g1/satbMarkQueue.hpp"
+#include "gc/shared/satbMarkQueue.hpp"
 #include "logging/log.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/compressedOops.inline.hpp"
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index c9e57121cf0..8a1f719a705 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "classfile/metadataOnStackMark.hpp"
 #include "classfile/stringTable.hpp"
-#include "classfile/symbolTable.hpp"
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "gc/g1/g1Allocator.inline.hpp"
@@ -71,6 +70,7 @@
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/oopStorageParState.hpp"
+#include "gc/shared/parallelCleaning.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/referenceProcessor.inline.hpp"
@@ -84,7 +84,6 @@
 #include "oops/access.inline.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "prims/resolvedMethodTable.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/flags/flagSetting.hpp"
 #include "runtime/handles.inline.hpp"
@@ -645,7 +644,7 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
       curr_region->set_closed_archive();
     }
     _hr_printer.alloc(curr_region);
-    _old_set.add(curr_region);
+    _archive_set.add(curr_region);
     HeapWord* top;
     HeapRegion* next_region;
     if (curr_region != last_region) {
@@ -802,7 +801,7 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
     guarantee(curr_region->is_archive(),
              "Expected archive region at index %u", curr_region->hrm_index());
     uint curr_index = curr_region->hrm_index();
-    _old_set.remove(curr_region);
+    _archive_set.remove(curr_region);
     curr_region->set_free();
     curr_region->set_top(curr_region->bottom());
     if (curr_region != last_region) {
@@ -1127,7 +1126,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
       soft_ref_policy()->should_clear_all_soft_refs();
 
-  G1FullCollector collector(this, &_full_gc_memory_manager, explicit_gc, do_clear_all_soft_refs);
+  G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
 
   collector.prepare_collection();
@@ -1407,6 +1406,68 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
   _verifier->verify_region_sets_optional();
 }
 
+class OldRegionSetChecker : public HeapRegionSetChecker {
+public:
+  void check_mt_safety() {
+    // Master Old Set MT safety protocol:
+    // (a) If we're at a safepoint, operations on the master old set
+    // should be invoked:
+    // - by the VM thread (which will serialize them), or
+    // - by the GC workers while holding the FreeList_lock, if we're
+    //   at a safepoint for an evacuation pause (this lock is taken
+    //   anyway when an GC alloc region is retired so that a new one
+    //   is allocated from the free list), or
+    // - by the GC workers while holding the OldSets_lock, if we're at a
+    //   safepoint for a cleanup pause.
+    // (b) If we're not at a safepoint, operations on the master old set
+    // should be invoked while holding the Heap_lock.
+ + if (SafepointSynchronize::is_at_safepoint()) { + guarantee(Thread::current()->is_VM_thread() || + FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(), + "master old set MT safety protocol at a safepoint"); + } else { + guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint"); + } + } + bool is_correct_type(HeapRegion* hr) { return hr->is_old(); } + const char* get_description() { return "Old Regions"; } +}; + +class ArchiveRegionSetChecker : public HeapRegionSetChecker { +public: + void check_mt_safety() { + guarantee(!Universe::is_fully_initialized() || SafepointSynchronize::is_at_safepoint(), + "May only change archive regions during initialization or safepoint."); + } + bool is_correct_type(HeapRegion* hr) { return hr->is_archive(); } + const char* get_description() { return "Archive Regions"; } +}; + +class HumongousRegionSetChecker : public HeapRegionSetChecker { +public: + void check_mt_safety() { + // Humongous Set MT safety protocol: + // (a) If we're at a safepoint, operations on the master humongous + // set should be invoked by either the VM thread (which will + // serialize them) or by the GC workers while holding the + // OldSets_lock. + // (b) If we're not at a safepoint, operations on the master + // humongous set should be invoked while holding the Heap_lock. + + if (SafepointSynchronize::is_at_safepoint()) { + guarantee(Thread::current()->is_VM_thread() || + OldSets_lock->owned_by_self(), + "master humongous set MT safety protocol at a safepoint"); + } else { + guarantee(Heap_lock->owned_by_self(), + "master humongous set MT safety protocol outside a safepoint"); + } + } + bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); } + const char* get_description() { return "Humongous Regions"; } +}; + G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) : CollectedHeap(), _young_gen_sampling_thread(NULL), @@ -1414,13 +1475,9 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) : _collector_policy(collector_policy), _card_table(NULL), _soft_ref_policy(), - _memory_manager("G1 Young Generation", "end of minor GC"), - _full_gc_memory_manager("G1 Old Generation", "end of major GC"), - _eden_pool(NULL), - _survivor_pool(NULL), - _old_pool(NULL), - _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()), - _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()), + _old_set("Old Region Set", new OldRegionSetChecker()), + _archive_set("Archive Region Set", new ArchiveRegionSetChecker()), + _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()), _bot(NULL), _listener(), _hrm(), @@ -1747,20 +1804,6 @@ jint G1CollectedHeap::initialize() { return JNI_OK; } -void G1CollectedHeap::initialize_serviceability() { - _eden_pool = new G1EdenPool(this); - _survivor_pool = new G1SurvivorPool(this); - _old_pool = new G1OldGenPool(this); - - _full_gc_memory_manager.add_pool(_eden_pool); - _full_gc_memory_manager.add_pool(_survivor_pool); - _full_gc_memory_manager.add_pool(_old_pool); - - _memory_manager.add_pool(_eden_pool); - _memory_manager.add_pool(_survivor_pool); - _memory_manager.add_pool(_old_pool, false /* always_affected_by_gc */); -} - void G1CollectedHeap::stop() { // Stop all concurrent threads. We do this to make sure these threads // do not continue to execute and access resources (e.g. 
logging) @@ -2857,9 +2900,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { active_workers = workers()->update_active_workers(active_workers); log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers()); - TraceCollectorStats tcs(g1mm()->incremental_collection_counters()); - TraceMemoryManagerStats tms(&_memory_manager, gc_cause(), - collector_state()->yc_type() == Mixed /* allMemoryPoolsAffected */); + G1MonitoringScope ms(g1mm(), + false /* full_gc */, + collector_state()->yc_type() == Mixed /* all_memory_pools_affected */); G1HeapTransition heap_transition(this); size_t heap_used_bytes_before_gc = used(); @@ -3256,402 +3299,26 @@ void G1CollectedHeap::print_termination_stats(uint worker_id, undo_waste * HeapWordSize / K); } -class G1StringAndSymbolCleaningTask : public AbstractGangTask { -private: - BoolObjectClosure* _is_alive; - G1StringDedupUnlinkOrOopsDoClosure _dedup_closure; - OopStorage::ParState _par_state_string; - - int _initial_string_table_size; - int _initial_symbol_table_size; - - bool _process_strings; - int _strings_processed; - int _strings_removed; - - bool _process_symbols; - int _symbols_processed; - int _symbols_removed; - - bool _process_string_dedup; - -public: - G1StringAndSymbolCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool process_string_dedup) : - AbstractGangTask("String/Symbol Unlinking"), - _is_alive(is_alive), - _dedup_closure(is_alive, NULL, false), - _par_state_string(StringTable::weak_storage()), - _process_strings(process_strings), _strings_processed(0), _strings_removed(0), - _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0), - _process_string_dedup(process_string_dedup) { - - _initial_string_table_size = (int) StringTable::the_table()->table_size(); - _initial_symbol_table_size = SymbolTable::the_table()->table_size(); - if (process_symbols) { - SymbolTable::clear_parallel_claimed_index(); - } - if (process_strings) { - StringTable::reset_dead_counter(); - } - } - - ~G1StringAndSymbolCleaningTask() { - guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size, - "claim value %d after unlink less than initial symbol table size %d", - SymbolTable::parallel_claimed_index(), _initial_symbol_table_size); - - log_info(gc, stringtable)( - "Cleaned string and symbol table, " - "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, " - "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed", - strings_processed(), strings_removed(), - symbols_processed(), symbols_removed()); - if (_process_strings) { - StringTable::finish_dead_counter(); - } - } - - void work(uint worker_id) { - int strings_processed = 0; - int strings_removed = 0; - int symbols_processed = 0; - int symbols_removed = 0; - if (_process_strings) { - StringTable::possibly_parallel_unlink(&_par_state_string, _is_alive, &strings_processed, &strings_removed); - Atomic::add(strings_processed, &_strings_processed); - Atomic::add(strings_removed, &_strings_removed); - } - if (_process_symbols) { - SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed); - Atomic::add(symbols_processed, &_symbols_processed); - Atomic::add(symbols_removed, &_symbols_removed); - } - if (_process_string_dedup) { - G1StringDedup::parallel_unlink(&_dedup_closure, worker_id); - } - } - - size_t strings_processed() const { return (size_t)_strings_processed; } - size_t strings_removed() const { return 
(size_t)_strings_removed; } - - size_t symbols_processed() const { return (size_t)_symbols_processed; } - size_t symbols_removed() const { return (size_t)_symbols_removed; } -}; - -class G1CodeCacheUnloadingTask { -private: - static Monitor* _lock; - - BoolObjectClosure* const _is_alive; - const bool _unloading_occurred; - const uint _num_workers; - - // Variables used to claim nmethods. - CompiledMethod* _first_nmethod; - CompiledMethod* volatile _claimed_nmethod; - - // The list of nmethods that need to be processed by the second pass. - CompiledMethod* volatile _postponed_list; - volatile uint _num_entered_barrier; - - public: - G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) : - _is_alive(is_alive), - _unloading_occurred(unloading_occurred), - _num_workers(num_workers), - _first_nmethod(NULL), - _claimed_nmethod(NULL), - _postponed_list(NULL), - _num_entered_barrier(0) - { - CompiledMethod::increase_unloading_clock(); - // Get first alive nmethod - CompiledMethodIterator iter = CompiledMethodIterator(); - if(iter.next_alive()) { - _first_nmethod = iter.method(); - } - _claimed_nmethod = _first_nmethod; - } - - ~G1CodeCacheUnloadingTask() { - CodeCache::verify_clean_inline_caches(); - - CodeCache::set_needs_cache_clean(false); - guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be"); - - CodeCache::verify_icholder_relocations(); - } - - private: - void add_to_postponed_list(CompiledMethod* nm) { - CompiledMethod* old; - do { - old = _postponed_list; - nm->set_unloading_next(old); - } while (Atomic::cmpxchg(nm, &_postponed_list, old) != old); - } - - void clean_nmethod(CompiledMethod* nm) { - bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred); - - if (postponed) { - // This nmethod referred to an nmethod that has not been cleaned/unloaded yet. - add_to_postponed_list(nm); - } - - // Mark that this nmethod has been cleaned/unloaded. - // After this call, it will be safe to ask if this nmethod was unloaded or not. - nm->set_unloading_clock(CompiledMethod::global_unloading_clock()); - } - - void clean_nmethod_postponed(CompiledMethod* nm) { - nm->do_unloading_parallel_postponed(); - } - - static const int MaxClaimNmethods = 16; - - void claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) { - CompiledMethod* first; - CompiledMethodIterator last; - - do { - *num_claimed_nmethods = 0; - - first = _claimed_nmethod; - last = CompiledMethodIterator(first); - - if (first != NULL) { - - for (int i = 0; i < MaxClaimNmethods; i++) { - if (!last.next_alive()) { - break; - } - claimed_nmethods[i] = last.method(); - (*num_claimed_nmethods)++; - } - } - - } while (Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first); - } - - CompiledMethod* claim_postponed_nmethod() { - CompiledMethod* claim; - CompiledMethod* next; - - do { - claim = _postponed_list; - if (claim == NULL) { - return NULL; - } - - next = claim->unloading_next(); - - } while (Atomic::cmpxchg(next, &_postponed_list, claim) != claim); - - return claim; - } - - public: - // Mark that we're done with the first pass of nmethod cleaning. - void barrier_mark(uint worker_id) { - MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag); - _num_entered_barrier++; - if (_num_entered_barrier == _num_workers) { - ml.notify_all(); - } - } - - // See if we have to wait for the other workers to - // finish their first-pass nmethod cleaning work. 
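The postponed-nmethod list being deleted above is an intrusive lock-free stack: push and pop both loop on Atomic::cmpxchg until they install their update. The same shape in standalone C++ with std::atomic (a sketch only; a general-purpose stack like this would need an ABA mitigation, which the HotSpot code sidesteps because the pushing pass and the popping pass are separated by a barrier):

    #include <atomic>
    #include <cassert>

    // Simplified stand-in for CompiledMethod with its intrusive "next" link.
    struct Method {
      Method* next = nullptr;
    };

    // Lock-free intrusive stack in the style of the postponed list: push and
    // pop retry a compare-and-swap until they win the race.
    class PostponedList {
      std::atomic<Method*> _head{nullptr};
    public:
      void push(Method* m) {
        Method* old = _head.load();
        do {
          m->next = old;                      // link to the current head
        } while (!_head.compare_exchange_weak(old, m));
      }
      Method* pop() {
        Method* claim = _head.load();
        while (claim != nullptr &&
               !_head.compare_exchange_weak(claim, claim->next)) {
          // claim was refreshed with the current head; retry.
        }
        return claim;
      }
    };

    int main() {
      PostponedList list;
      Method a, b;
      list.push(&a);
      list.push(&b);
      assert(list.pop() == &b);   // LIFO order
      assert(list.pop() == &a);
      assert(list.pop() == nullptr);
      return 0;
    }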
- void barrier_wait(uint worker_id) { - if (_num_entered_barrier < _num_workers) { - MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag); - while (_num_entered_barrier < _num_workers) { - ml.wait(Mutex::_no_safepoint_check_flag, 0, false); - } - } - } - - // Cleaning and unloading of nmethods. Some work has to be postponed - // to the second pass, when we know which nmethods survive. - void work_first_pass(uint worker_id) { - // The first nmethods is claimed by the first worker. - if (worker_id == 0 && _first_nmethod != NULL) { - clean_nmethod(_first_nmethod); - _first_nmethod = NULL; - } - - int num_claimed_nmethods; - CompiledMethod* claimed_nmethods[MaxClaimNmethods]; - - while (true) { - claim_nmethods(claimed_nmethods, &num_claimed_nmethods); - - if (num_claimed_nmethods == 0) { - break; - } - - for (int i = 0; i < num_claimed_nmethods; i++) { - clean_nmethod(claimed_nmethods[i]); - } - } - } - - void work_second_pass(uint worker_id) { - CompiledMethod* nm; - // Take care of postponed nmethods. - while ((nm = claim_postponed_nmethod()) != NULL) { - clean_nmethod_postponed(nm); - } - } -}; - -Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock", false, Monitor::_safepoint_check_never); - -class G1KlassCleaningTask : public StackObj { - volatile int _clean_klass_tree_claimed; - ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator; - - public: - G1KlassCleaningTask() : - _clean_klass_tree_claimed(0), - _klass_iterator() { - } - - private: - bool claim_clean_klass_tree_task() { - if (_clean_klass_tree_claimed) { - return false; - } - - return Atomic::cmpxchg(1, &_clean_klass_tree_claimed, 0) == 0; - } - - InstanceKlass* claim_next_klass() { - Klass* klass; - do { - klass =_klass_iterator.next_klass(); - } while (klass != NULL && !klass->is_instance_klass()); - - // this can be null so don't call InstanceKlass::cast - return static_cast(klass); - } - -public: - - void clean_klass(InstanceKlass* ik) { - ik->clean_weak_instanceklass_links(); - } - - void work() { - ResourceMark rm; - - // One worker will clean the subklass/sibling klass tree. - if (claim_clean_klass_tree_task()) { - Klass::clean_subklass_tree(); - } - - // All workers will help cleaning the classes, - InstanceKlass* klass; - while ((klass = claim_next_klass()) != NULL) { - clean_klass(klass); - } - } -}; - -class G1ResolvedMethodCleaningTask : public StackObj { - volatile int _resolved_method_task_claimed; -public: - G1ResolvedMethodCleaningTask() : - _resolved_method_task_claimed(0) {} - - bool claim_resolved_method_task() { - if (_resolved_method_task_claimed) { - return false; - } - return Atomic::cmpxchg(1, &_resolved_method_task_claimed, 0) == 0; - } - - // These aren't big, one thread can do it all. - void work() { - if (claim_resolved_method_task()) { - ResolvedMethodTable::unlink(); - } - } -}; - - -// To minimize the remark pause times, the tasks below are done in parallel. -class G1ParallelCleaningTask : public AbstractGangTask { -private: - bool _unloading_occurred; - G1StringAndSymbolCleaningTask _string_symbol_task; - G1CodeCacheUnloadingTask _code_cache_task; - G1KlassCleaningTask _klass_cleaning_task; - G1ResolvedMethodCleaningTask _resolved_method_cleaning_task; - -public: - // The constructor is run in the VMThread. 
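G1KlassCleaningTask and G1ResolvedMethodCleaningTask above both use the same one-shot claim idiom: every worker races a 0 -> 1 compare-and-swap and only the winner executes the serial subtask. A standalone sketch of the idiom (the names are hypothetical):

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    // One-shot claim flag: many workers race, exactly one wins.
    class OneShotClaim {
      std::atomic<int> _claimed{0};
    public:
      bool try_claim() {
        int expected = 0;
        return _claimed.compare_exchange_strong(expected, 1);
      }
    };

    int main() {
      OneShotClaim claim;
      std::atomic<int> winners{0};
      std::vector<std::thread> workers;
      for (int i = 0; i < 8; i++) {
        workers.emplace_back([&] {
          if (claim.try_claim()) {
            winners++;                 // only one worker gets here
          }
        });
      }
      for (std::thread& t : workers) {
        t.join();
      }
      std::printf("winners: %d\n", winners.load());   // always prints 1
      return 0;
    }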
- G1ParallelCleaningTask(BoolObjectClosure* is_alive, uint num_workers, bool unloading_occurred) : - AbstractGangTask("Parallel Cleaning"), - _unloading_occurred(unloading_occurred), - _string_symbol_task(is_alive, true, true, G1StringDedup::is_enabled()), - _code_cache_task(num_workers, is_alive, unloading_occurred), - _klass_cleaning_task(), - _resolved_method_cleaning_task() { - } - - // The parallel work done by all worker threads. - void work(uint worker_id) { - // Do first pass of code cache cleaning. - _code_cache_task.work_first_pass(worker_id); - - // Let the threads mark that the first pass is done. - _code_cache_task.barrier_mark(worker_id); - - // Clean the Strings and Symbols. - _string_symbol_task.work(worker_id); - - // Clean unreferenced things in the ResolvedMethodTable - _resolved_method_cleaning_task.work(); - - // Wait for all workers to finish the first code cache cleaning pass. - _code_cache_task.barrier_wait(worker_id); - - // Do the second code cache cleaning work, which realize on - // the liveness information gathered during the first pass. - _code_cache_task.work_second_pass(worker_id); - - // Clean all klasses that were not unloaded. - // The weak metadata in klass doesn't need to be - // processed if there was no unloading. - if (_unloading_occurred) { - _klass_cleaning_task.work(); - } - } -}; - - void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive, bool class_unloading_occurred) { uint n_workers = workers()->active_workers(); - G1ParallelCleaningTask g1_unlink_task(is_alive, n_workers, class_unloading_occurred); + G1StringDedupUnlinkOrOopsDoClosure dedup_closure(is_alive, NULL, false); + ParallelCleaningTask g1_unlink_task(is_alive, &dedup_closure, n_workers, class_unloading_occurred); workers()->run_task(&g1_unlink_task); } void G1CollectedHeap::partial_cleaning(BoolObjectClosure* is_alive, bool process_strings, - bool process_symbols, bool process_string_dedup) { - if (!process_strings && !process_symbols && !process_string_dedup) { + if (!process_strings && !process_string_dedup) { // Nothing to clean. return; } - G1StringAndSymbolCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols, process_string_dedup); + G1StringDedupUnlinkOrOopsDoClosure dedup_closure(is_alive, NULL, false); + StringCleaningTask g1_unlink_task(is_alive, process_string_dedup ? &dedup_closure : NULL, process_strings); workers()->run_task(&g1_unlink_task); - } class G1RedirtyLoggedCardsTask : public AbstractGangTask { @@ -4045,7 +3712,7 @@ void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_in process_discovered_references(per_thread_states); // FIXME - // CM's reference processing also cleans up the string and symbol tables. + // CM's reference processing also cleans up the string table. // Should we do that here also? We could, but it is a serial operation // and could significantly increase the pause time. @@ -4650,7 +4317,6 @@ bool G1CollectedHeap::check_young_list_empty() { #endif // ASSERT class TearDownRegionSetsClosure : public HeapRegionClosure { -private: HeapRegionSet *_old_set; public: @@ -4663,9 +4329,9 @@ public: r->uninstall_surv_rate_group(); } else { // We ignore free regions, we'll empty the free list afterwards. - // We ignore humongous regions, we're not tearing down the - // humongous regions set. - assert(r->is_free() || r->is_humongous(), + // We ignore humongous and archive regions, we're not tearing down these + // sets. 
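The work() method removed above is a two-phase pipeline: all workers must finish the first code-cache pass before any worker starts the second, because the second pass consumes liveness data produced by the first. barrier_mark and barrier_wait implement that rendezvous on a Monitor; below is a hedged standalone equivalent built on std::mutex and std::condition_variable:

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    // Minimal arrive-and-wait rendezvous: workers announce the end of pass
    // one, then block until everyone has.
    class PassBarrier {
      std::mutex _lock;
      std::condition_variable _cv;
      const unsigned _num_workers;
      unsigned _entered = 0;
    public:
      explicit PassBarrier(unsigned n) : _num_workers(n) {}
      void arrive_and_wait() {
        std::unique_lock<std::mutex> ml(_lock);
        if (++_entered == _num_workers) {
          _cv.notify_all();            // last worker releases the rest
        } else {
          _cv.wait(ml, [&] { return _entered == _num_workers; });
        }
      }
    };

    int main() {
      const unsigned n = 4;
      PassBarrier barrier(n);
      std::vector<std::thread> workers;
      for (unsigned id = 0; id < n; id++) {
        workers.emplace_back([&barrier, id] {
          // ... first cleaning pass for worker `id` ...
          barrier.arrive_and_wait();
          // ... second pass: safe to rely on all first-pass results ...
          std::printf("worker %u ran both passes\n", id);
        });
      }
      for (std::thread& t : workers) {
        t.join();
      }
      return 0;
    }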
+ assert(r->is_archive() || r->is_free() || r->is_humongous(), "it cannot be another type"); } return false; @@ -4708,14 +4374,17 @@ void G1CollectedHeap::set_used(size_t bytes) { class RebuildRegionSetsClosure : public HeapRegionClosure { private: - bool _free_list_only; - HeapRegionSet* _old_set; - HeapRegionManager* _hrm; - size_t _total_used; + bool _free_list_only; + + HeapRegionSet* _old_set; + HeapRegionManager* _hrm; + + size_t _total_used; public: RebuildRegionSetsClosure(bool free_list_only, - HeapRegionSet* old_set, HeapRegionManager* hrm) : + HeapRegionSet* old_set, + HeapRegionManager* hrm) : _free_list_only(free_list_only), _old_set(old_set), _hrm(hrm), _total_used(0) { assert(_hrm->num_free_regions() == 0, "pre-condition"); @@ -4733,11 +4402,11 @@ public: _hrm->insert_into_free_list(r); } else if (!_free_list_only) { - if (r->is_humongous()) { - // We ignore humongous regions. We left the humongous set unchanged. + if (r->is_archive() || r->is_humongous()) { + // We ignore archive and humongous regions. We left these sets unchanged. } else { assert(r->is_young() || r->is_free() || r->is_old(), "invariant"); - // We now move all (non-humongous, non-old) regions to old gen, and register them as such. + // We now move all (non-humongous, non-old, non-archive) regions to old gen, and register them as such. r->move_to_old(); _old_set->add(r); } @@ -4811,7 +4480,7 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region, _hr_printer.retire(alloc_region); // We update the eden sizes here, when the region is retired, // instead of when it's allocated, since this is the point that its - // used space has been recored in _summary_bytes_used. + // used space has been recorded in _summary_bytes_used. g1mm()->update_eden_size(); } @@ -4862,7 +4531,7 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region, alloc_region->note_end_of_copying(during_im); g1_policy()->record_bytes_copied_during_gc(allocated_bytes); if (dest.is_old()) { - _old_set.add(alloc_region); + old_set_add(alloc_region); } _hr_printer.retire(alloc_region); } @@ -4987,17 +4656,14 @@ void G1CollectedHeap::rebuild_strong_code_roots() { CodeCache::blobs_do(&blob_cl); } +void G1CollectedHeap::initialize_serviceability() { + _g1mm->initialize_serviceability(); +} + GrowableArray G1CollectedHeap::memory_managers() { - GrowableArray memory_managers(2); - memory_managers.append(&_memory_manager); - memory_managers.append(&_full_gc_memory_manager); - return memory_managers; + return _g1mm->memory_managers(); } GrowableArray G1CollectedHeap::memory_pools() { - GrowableArray memory_pools(3); - memory_pools.append(_eden_pool); - memory_pools.append(_survivor_pool); - memory_pools.append(_old_pool); - return memory_pools; + return _g1mm->memory_pools(); } diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp index adbbf913cfc..02de477faec 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp @@ -51,7 +51,6 @@ #include "gc/shared/preservedMarks.hpp" #include "gc/shared/softRefPolicy.hpp" #include "memory/memRegion.hpp" -#include "services/memoryManager.hpp" #include "utilities/stack.hpp" // A "G1CollectedHeap" is an implementation of a java heap for HotSpot. 
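RebuildRegionSetsClosure above now has three buckets instead of two: free regions go back on the free list, archive and humongous regions are left in their untouched sets, and everything else is moved to old. A simplified standalone model of that classification, not the HotSpot closure itself:

    #include <cstdio>
    #include <vector>

    // Simplified region model -- not the HotSpot types.
    enum class RegionType { Free, Young, Old, Humongous, Archive };

    struct Region {
      RegionType type;
    };

    // Mirrors the rebuild after a full GC: free regions return to the free
    // list, archive and humongous regions keep their sets, every other
    // region becomes old.
    void rebuild(const std::vector<Region*>& all,
                 std::vector<Region*>& free_list,
                 std::vector<Region*>& old_set) {
      for (Region* r : all) {
        if (r->type == RegionType::Free) {
          free_list.push_back(r);
        } else if (r->type == RegionType::Archive ||
                   r->type == RegionType::Humongous) {
          // Left alone: these sets were never torn down.
        } else {
          r->type = RegionType::Old;   // survivors of a full GC become old
          old_set.push_back(r);
        }
      }
    }

    int main() {
      Region regions[] = {{RegionType::Free}, {RegionType::Young},
                          {RegionType::Archive}, {RegionType::Humongous}};
      std::vector<Region*> all = {&regions[0], &regions[1],
                                  &regions[2], &regions[3]};
      std::vector<Region*> free_list, old_set;
      rebuild(all, free_list, old_set);
      std::printf("free=%zu old=%zu\n", free_list.size(), old_set.size());
      return 0;
    }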
@@ -67,6 +66,7 @@ class G1ParScanThreadState; class G1ParScanThreadStateSet; class G1ParScanThreadState; class MemoryPool; +class MemoryManager; class ObjectClosure; class SpaceClosure; class CompactibleSpaceClosure; @@ -160,23 +160,13 @@ private: SoftRefPolicy _soft_ref_policy; - GCMemoryManager _memory_manager; - GCMemoryManager _full_gc_memory_manager; - - MemoryPool* _eden_pool; - MemoryPool* _survivor_pool; - MemoryPool* _old_pool; - static size_t _humongous_object_threshold_in_words; - // It keeps track of the old regions. + // These sets keep track of old, archive and humongous regions respectively. HeapRegionSet _old_set; - - // It keeps track of the humongous regions. + HeapRegionSet _archive_set; HeapRegionSet _humongous_set; - virtual void initialize_serviceability(); - void eagerly_reclaim_humongous_regions(); // Start a new incremental collection set for the next pause. void start_new_collection_set(); @@ -970,6 +960,7 @@ public: virtual SoftRefPolicy* soft_ref_policy(); + virtual void initialize_serviceability(); virtual GrowableArray memory_managers(); virtual GrowableArray memory_pools(); @@ -1046,8 +1037,10 @@ public: inline void old_set_add(HeapRegion* hr); inline void old_set_remove(HeapRegion* hr); + inline void archive_set_add(HeapRegion* hr); + size_t non_young_capacity_bytes() { - return (_old_set.length() + _humongous_set.length()) * HeapRegion::GrainBytes; + return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes; } // Determine whether the given region is one that we are using as an @@ -1232,20 +1225,11 @@ public: const G1SurvivorRegions* survivor() const { return &_survivor; } - uint survivor_regions_count() const { - return _survivor.length(); - } - - uint eden_regions_count() const { - return _eden.length(); - } - - uint young_regions_count() const { - return _eden.length() + _survivor.length(); - } - + uint eden_regions_count() const { return _eden.length(); } + uint survivor_regions_count() const { return _survivor.length(); } + uint young_regions_count() const { return _eden.length() + _survivor.length(); } uint old_regions_count() const { return _old_set.length(); } - + uint archive_regions_count() const { return _archive_set.length(); } uint humongous_regions_count() const { return _humongous_set.length(); } #ifdef ASSERT @@ -1324,9 +1308,8 @@ public: // Partial cleaning used when class unloading is disabled. // Let the caller choose what structures to clean out: // - StringTable - // - SymbolTable // - StringDeduplication structures - void partial_cleaning(BoolObjectClosure* is_alive, bool unlink_strings, bool unlink_symbols, bool unlink_string_dedup); + void partial_cleaning(BoolObjectClosure* is_alive, bool unlink_strings, bool unlink_string_dedup); // Complete cleaning used when class unloading is enabled. // Cleans out all structures handled by partial_cleaning and also the CodeCache. diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp index 30000fd5b34..f8e5887ef6b 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp @@ -92,6 +92,10 @@ inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) { _old_set.remove(hr); } +inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) { + _archive_set.add(hr); +} + // It dirties the cards that cover the block so that the post // write barrier never queues anything when updating objects on this // block. 
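non_young_capacity_bytes() above now sums three region counts. A small worked example of the arithmetic, assuming a hypothetical 4M region size (G1 derives HeapRegion::GrainBytes from ergonomics at startup, so the constant here is illustrative only):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t grain_bytes = 4u * 1024 * 1024;   // assumed region size
      const size_t old_regions = 100;
      const size_t archive_regions = 3;
      const size_t humongous_regions = 7;

      size_t non_young_capacity =
          (old_regions + archive_regions + humongous_regions) * grain_bytes;

      std::printf("non-young capacity: %zu MB\n",
                  non_young_capacity / (1024 * 1024));   // prints 440 MB
      return 0;
    }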
It is assumed (and in fact we assert) that the block diff --git a/src/hotspot/share/gc/g1/g1CollectionSet.cpp b/src/hotspot/share/gc/g1/g1CollectionSet.cpp index 22246f4043f..f5e40e21ad2 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp +++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp @@ -328,10 +328,10 @@ bool G1CollectionSet::verify_young_ages() { return cl.valid(); } -class G1PrintCollectionSetClosure : public HeapRegionClosure { +class G1PrintCollectionSetDetailClosure : public HeapRegionClosure { outputStream* _st; public: - G1PrintCollectionSetClosure(outputStream* st) : HeapRegionClosure(), _st(st) { } + G1PrintCollectionSetDetailClosure(outputStream* st) : HeapRegionClosure(), _st(st) { } virtual bool do_heap_region(HeapRegion* r) { assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index()); @@ -347,7 +347,7 @@ public: void G1CollectionSet::print(outputStream* st) { st->print_cr("\nCollection_set:"); - G1PrintCollectionSetClosure cl(st); + G1PrintCollectionSetDetailClosure cl(st); iterate(&cl); } #endif // !PRODUCT diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp index 053c77d9670..15ef41f0c7e 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp @@ -24,7 +24,6 @@ #include "precompiled.hpp" #include "classfile/metadataOnStackMark.hpp" -#include "classfile/symbolTable.hpp" #include "code/codeCache.hpp" #include "gc/g1/g1BarrierSet.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" @@ -1578,8 +1577,8 @@ void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) { // Is alive closure. G1CMIsAliveClosure g1_is_alive(_g1h); - // Inner scope to exclude the cleaning of the string and symbol - // tables from the displayed time. + // Inner scope to exclude the cleaning of the string table + // from the displayed time. { GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm); @@ -1673,16 +1672,16 @@ void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) { WeakProcessor::weak_oops_do(&g1_is_alive, &do_nothing_cl); } - // Unload Klasses, String, Symbols, Code Cache, etc. + // Unload Klasses, String, Code Cache, etc. if (ClassUnloadingWithConcurrentMark) { GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm); bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm, false /* Defer cleaning */); _g1h->complete_cleaning(&g1_is_alive, purged_classes); } else { GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm); - // No need to clean string table and symbol table as they are treated as strong roots when + // No need to clean string table as it is treated as strong roots when // class unloading is disabled. 
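The weak_refs_work branch above encodes the cleanup policy after remark: with ClassUnloadingWithConcurrentMark the heavy complete_cleaning runs; otherwise the string table is treated as strong roots and only the string deduplication structures may need unlinking. A toy dispatch mirroring that control flow (the free functions are stand-ins, not the G1CollectedHeap entry points):

    #include <cstdio>

    // Hypothetical stand-ins for the heap's cleaning entry points.
    void complete_cleaning() {
      std::printf("unlink strings, unload code cache and klasses\n");
    }

    void partial_cleaning(bool unlink_strings, bool unlink_string_dedup) {
      if (!unlink_strings && !unlink_string_dedup) {
        return;                        // nothing to clean
      }
      std::printf("clean strings=%d dedup=%d\n",
                  unlink_strings, unlink_string_dedup);
    }

    // Full cleaning only when classes may be unloaded; otherwise string
    // table entries stayed strongly reachable, so skip them.
    void after_remark(bool class_unloading, bool dedup_enabled) {
      if (class_unloading) {
        complete_cleaning();
      } else {
        partial_cleaning(/*unlink_strings=*/false,
                         /*unlink_string_dedup=*/dedup_enabled);
      }
    }

    int main() {
      after_remark(/*class_unloading=*/false, /*dedup_enabled=*/true);
      return 0;
    }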
- _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled()); + _g1h->partial_cleaning(&g1_is_alive, false, G1StringDedup::is_enabled()); } } diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp index 4a969c511b8..46d28bd4707 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp @@ -52,7 +52,7 @@ inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) { return false; } assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj)); - return _g1h->heap_region_containing(obj)->is_old_or_humongous(); + return _g1h->heap_region_containing(obj)->is_old_or_humongous_or_archive(); } inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, oop const obj, size_t const obj_size) { diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp index 4362ee87e30..4bedf9f642a 100644 --- a/src/hotspot/share/gc/g1/g1FullCollector.cpp +++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp @@ -103,9 +103,9 @@ uint G1FullCollector::calc_active_workers() { return worker_count; } -G1FullCollector::G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft_refs) : +G1FullCollector::G1FullCollector(G1CollectedHeap* heap, bool explicit_gc, bool clear_soft_refs) : _heap(heap), - _scope(memory_manager, explicit_gc, clear_soft_refs), + _scope(heap->g1mm(), explicit_gc, clear_soft_refs), _num_workers(calc_active_workers()), _oop_queue_set(_num_workers), _array_queue_set(_num_workers), @@ -226,8 +226,8 @@ void G1FullCollector::phase1_mark_live_objects() { _heap->complete_cleaning(&_is_alive, purged_class); } else { GCTraceTime(Debug, gc, phases) debug("Phase 1: String and Symbol Tables Cleanup", scope()->timer()); - // If no class unloading just clean out strings and symbols. - _heap->partial_cleaning(&_is_alive, true, true, G1StringDedup::is_enabled()); + // If no class unloading just clean out strings. + _heap->partial_cleaning(&_is_alive, true, G1StringDedup::is_enabled()); } scope()->tracer()->report_object_count_after_gc(&_is_alive); diff --git a/src/hotspot/share/gc/g1/g1FullCollector.hpp b/src/hotspot/share/gc/g1/g1FullCollector.hpp index 0b97abeea57..9c21f65fc21 100644 --- a/src/hotspot/share/gc/g1/g1FullCollector.hpp +++ b/src/hotspot/share/gc/g1/g1FullCollector.hpp @@ -72,7 +72,7 @@ class G1FullCollector : StackObj { ReferenceProcessorSubjectToDiscoveryMutator _is_subject_mutator; public: - G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft_refs); + G1FullCollector(G1CollectedHeap* heap, bool explicit_gc, bool clear_soft_refs); ~G1FullCollector(); void prepare_collection(); diff --git a/src/hotspot/share/gc/g1/g1FullGCScope.cpp b/src/hotspot/share/gc/g1/g1FullGCScope.cpp index 9986c0016b9..ab2cc19d56f 100644 --- a/src/hotspot/share/gc/g1/g1FullGCScope.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCScope.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "gc/g1/g1FullGCScope.hpp" -G1FullGCScope::G1FullGCScope(GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft) : +G1FullGCScope::G1FullGCScope(G1MonitoringSupport* monitoring_support, bool explicit_gc, bool clear_soft) : _rm(), _explicit_gc(explicit_gc), _g1h(G1CollectedHeap::heap()), @@ -36,8 +36,7 @@ G1FullGCScope::G1FullGCScope(GCMemoryManager* memory_manager, bool explicit_gc, _active(), _cpu_time(), _soft_refs(clear_soft, _g1h->soft_ref_policy()), - _collector_stats(_g1h->g1mm()->full_collection_counters()), - _memory_stats(memory_manager, _g1h->gc_cause()), + _monitoring_scope(monitoring_support, true /* full_gc */, true /* all_memory_pools_affected */), _heap_transition(_g1h) { _timer.register_gc_start(); _tracer.report_gc_start(_g1h->gc_cause(), _timer.gc_start()); diff --git a/src/hotspot/share/gc/g1/g1FullGCScope.hpp b/src/hotspot/share/gc/g1/g1FullGCScope.hpp index 850ee0aea0f..4842a836ce9 100644 --- a/src/hotspot/share/gc/g1/g1FullGCScope.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCScope.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,6 @@ #include "gc/g1/g1CollectedHeap.hpp" #include "gc/g1/g1HeapTransition.hpp" -#include "gc/shared/collectorCounters.hpp" #include "gc/shared/gcId.hpp" #include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTraceTime.hpp" @@ -51,12 +50,11 @@ class G1FullGCScope : public StackObj { IsGCActiveMark _active; GCTraceCPUTime _cpu_time; ClearedAllSoftRefs _soft_refs; - TraceCollectorStats _collector_stats; - TraceMemoryManagerStats _memory_stats; + G1MonitoringScope _monitoring_scope; G1HeapTransition _heap_transition; public: - G1FullGCScope(GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft); + G1FullGCScope(G1MonitoringSupport* monitoring_support, bool explicit_gc, bool clear_soft); ~G1FullGCScope(); bool is_explicit_gc(); diff --git a/src/hotspot/share/gc/g1/g1HeapRegionTraceType.hpp b/src/hotspot/share/gc/g1/g1HeapRegionTraceType.hpp index 7d8ebbcb95c..b727e81fe8a 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegionTraceType.hpp +++ b/src/hotspot/share/gc/g1/g1HeapRegionTraceType.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,6 @@ class G1HeapRegionTraceType : AllStatic { StartsHumongous, ContinuesHumongous, Old, - Pinned, OpenArchive, ClosedArchive, G1HeapRegionTypeEndSentinel @@ -51,7 +50,6 @@ class G1HeapRegionTraceType : AllStatic { case StartsHumongous: return "Starts Humongous"; case ContinuesHumongous: return "Continues Humongous"; case Old: return "Old"; - case Pinned: return "Pinned"; case OpenArchive: return "OpenArchive"; case ClosedArchive: return "ClosedArchive"; default: ShouldNotReachHere(); return NULL; diff --git a/src/hotspot/share/gc/g1/g1HeapTransition.cpp b/src/hotspot/share/gc/g1/g1HeapTransition.cpp index 907289f28df..2a1e695eb4c 100644 --- a/src/hotspot/share/gc/g1/g1HeapTransition.cpp +++ b/src/hotspot/share/gc/g1/g1HeapTransition.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ G1HeapTransition::Data::Data(G1CollectedHeap* g1_heap) { _eden_length = g1_heap->eden_regions_count(); _survivor_length = g1_heap->survivor_regions_count(); _old_length = g1_heap->old_regions_count(); + _archive_length = g1_heap->archive_regions_count(); _humongous_length = g1_heap->humongous_regions_count(); _metaspace_used_bytes = MetaspaceUtils::used_bytes(); } @@ -43,16 +44,19 @@ struct DetailedUsage : public StackObj { size_t _eden_used; size_t _survivor_used; size_t _old_used; + size_t _archive_used; size_t _humongous_used; size_t _eden_region_count; size_t _survivor_region_count; size_t _old_region_count; + size_t _archive_region_count; size_t _humongous_region_count; DetailedUsage() : - _eden_used(0), _survivor_used(0), _old_used(0), _humongous_used(0), - _eden_region_count(0), _survivor_region_count(0), _old_region_count(0), _humongous_region_count(0) {} + _eden_used(0), _survivor_used(0), _old_used(0), _archive_used(0), _humongous_used(0), + _eden_region_count(0), _survivor_region_count(0), _old_region_count(0), + _archive_region_count(0), _humongous_region_count(0) {} }; class DetailedUsageClosure: public HeapRegionClosure { @@ -62,6 +66,9 @@ public: if (r->is_old()) { _usage._old_used += r->used(); _usage._old_region_count++; + } else if (r->is_archive()) { + _usage._archive_used += r->used(); + _usage._archive_region_count++; } else if (r->is_survivor()) { _usage._survivor_used += r->used(); _usage._survivor_region_count++; @@ -94,6 +101,8 @@ void G1HeapTransition::print() { after._survivor_length, usage._survivor_region_count); assert(usage._old_region_count == after._old_length, "Expected old to be " SIZE_FORMAT " but was " SIZE_FORMAT, after._old_length, usage._old_region_count); + assert(usage._archive_region_count == after._archive_length, "Expected archive to be " SIZE_FORMAT " but was " SIZE_FORMAT, + after._archive_length, usage._archive_region_count); assert(usage._humongous_region_count == after._humongous_length, "Expected humongous to be " SIZE_FORMAT " but was " SIZE_FORMAT, after._humongous_length, usage._humongous_region_count); } @@ -112,6 +121,11 @@ void G1HeapTransition::print() { log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K", usage._old_used / K, ((after._old_length * HeapRegion::GrainBytes) - usage._old_used) / K); + log_info(gc, heap)("Archive regions: " SIZE_FORMAT "->" SIZE_FORMAT, + 
_before._archive_length, after._archive_length); + log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K", + usage._archive_used / K, ((after._archive_length * HeapRegion::GrainBytes) - usage._archive_used) / K); + log_info(gc, heap)("Humongous regions: " SIZE_FORMAT "->" SIZE_FORMAT, _before._humongous_length, after._humongous_length); log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K", diff --git a/src/hotspot/share/gc/g1/g1HeapTransition.hpp b/src/hotspot/share/gc/g1/g1HeapTransition.hpp index 97db5076997..9aa927f0e11 100644 --- a/src/hotspot/share/gc/g1/g1HeapTransition.hpp +++ b/src/hotspot/share/gc/g1/g1HeapTransition.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,6 +34,7 @@ class G1HeapTransition { size_t _eden_length; size_t _survivor_length; size_t _old_length; + size_t _archive_length; size_t _humongous_length; size_t _metaspace_used_bytes; diff --git a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp index d29c8a09984..7c19509698c 100644 --- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp +++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp @@ -488,19 +488,22 @@ void G1HeapVerifier::verify(VerifyOption vo) { class VerifyRegionListsClosure : public HeapRegionClosure { private: HeapRegionSet* _old_set; + HeapRegionSet* _archive_set; HeapRegionSet* _humongous_set; - HeapRegionManager* _hrm; + HeapRegionManager* _hrm; public: uint _old_count; + uint _archive_count; uint _humongous_count; uint _free_count; VerifyRegionListsClosure(HeapRegionSet* old_set, + HeapRegionSet* archive_set, HeapRegionSet* humongous_set, HeapRegionManager* hrm) : - _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm), - _old_count(), _humongous_count(), _free_count(){ } + _old_set(old_set), _archive_set(archive_set), _humongous_set(humongous_set), _hrm(hrm), + _old_count(), _archive_count(), _humongous_count(), _free_count(){ } bool do_heap_region(HeapRegion* hr) { if (hr->is_young()) { @@ -511,6 +514,9 @@ public: } else if (hr->is_empty()) { assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index()); _free_count++; + } else if (hr->is_archive()) { + assert(hr->containing_set() == _archive_set, "Heap region %u is archive but not in the archive set.", hr->hrm_index()); + _archive_count++; } else if (hr->is_old()) { assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index()); _old_count++; @@ -523,8 +529,9 @@ public: return false; } - void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) { + void verify_counts(HeapRegionSet* old_set, HeapRegionSet* archive_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) { guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count); + guarantee(archive_set->length() == _archive_count, "Archive set count mismatch. Expected %u, actual %u.", archive_set->length(), _archive_count); guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count); guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. 
Expected %u, actual %u.", free_list->num_free_regions(), _free_count); } @@ -539,9 +546,9 @@ void G1HeapVerifier::verify_region_sets() { // Finally, make sure that the region accounting in the lists is // consistent with what we see in the heap. - VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm); + VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm); _g1h->heap_region_iterate(&cl); - cl.verify_counts(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm); + cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm); } void G1HeapVerifier::prepare_for_verify() { @@ -755,6 +762,11 @@ class G1CheckCSetFastTableClosure : public HeapRegionClosure { return true; } if (cset_state.is_in_cset()) { + if (hr->is_archive()) { + log_error(gc, verify)("## is_archive in collection set for region %u", i); + _failures = true; + return true; + } if (hr->is_young() != (cset_state.is_young())) { log_error(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u", hr->is_young(), cset_state.value(), i); diff --git a/src/hotspot/share/gc/g1/g1MemoryPool.cpp b/src/hotspot/share/gc/g1/g1MemoryPool.cpp index 2a5944a9fa4..d53f046ab80 100644 --- a/src/hotspot/share/gc/g1/g1MemoryPool.cpp +++ b/src/hotspot/share/gc/g1/g1MemoryPool.cpp @@ -40,50 +40,41 @@ G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h, assert(UseG1GC, "sanity"); } -G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) : +G1EdenPool::G1EdenPool(G1CollectedHeap* g1h, size_t initial_size) : G1MemoryPoolSuper(g1h, "G1 Eden Space", - g1h->g1mm()->eden_space_committed(), /* init_size */ - _undefined_max, + initial_size, + MemoryUsage::undefined_size(), false /* support_usage_threshold */) { } MemoryUsage G1EdenPool::get_memory_usage() { - size_t initial_sz = initial_size(); - size_t max_sz = max_size(); - size_t used = used_in_bytes(); size_t committed = _g1mm->eden_space_committed(); - return MemoryUsage(initial_sz, used, committed, max_sz); + return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size()); } -G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h) : +G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h, size_t initial_size) : G1MemoryPoolSuper(g1h, "G1 Survivor Space", - g1h->g1mm()->survivor_space_committed(), /* init_size */ - _undefined_max, + initial_size, + MemoryUsage::undefined_size(), false /* support_usage_threshold */) { } MemoryUsage G1SurvivorPool::get_memory_usage() { - size_t initial_sz = initial_size(); - size_t max_sz = max_size(); - size_t used = used_in_bytes(); size_t committed = _g1mm->survivor_space_committed(); - return MemoryUsage(initial_sz, used, committed, max_sz); + return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size()); } -G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h) : +G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h, size_t initial_size, size_t max_size) : G1MemoryPoolSuper(g1h, "G1 Old Gen", - g1h->g1mm()->old_space_committed(), /* init_size */ - g1h->g1mm()->old_gen_max(), + initial_size, + max_size, true /* support_usage_threshold */) { } MemoryUsage G1OldGenPool::get_memory_usage() { - size_t initial_sz = initial_size(); - size_t max_sz = max_size(); - size_t used = used_in_bytes(); - size_t committed = _g1mm->old_space_committed(); + size_t committed = _g1mm->old_gen_committed(); - return MemoryUsage(initial_sz, used, committed, max_sz); + return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size()); } diff --git 
a/src/hotspot/share/gc/g1/g1MemoryPool.hpp b/src/hotspot/share/gc/g1/g1MemoryPool.hpp index a6771c3fd79..ffdd092df71 100644 --- a/src/hotspot/share/gc/g1/g1MemoryPool.hpp +++ b/src/hotspot/share/gc/g1/g1MemoryPool.hpp @@ -53,7 +53,6 @@ class G1CollectedHeap; // (G1EdenPool, G1SurvivorPool, G1OldGenPool). class G1MemoryPoolSuper : public CollectedMemoryPool { protected: - const static size_t _undefined_max = (size_t) -1; G1MonitoringSupport* _g1mm; // Would only be called from subclasses. @@ -67,42 +66,30 @@ protected: // Memory pool that represents the G1 eden. class G1EdenPool : public G1MemoryPoolSuper { public: - G1EdenPool(G1CollectedHeap* g1h); + G1EdenPool(G1CollectedHeap* g1h, size_t initial_size); + + size_t used_in_bytes() { return _g1mm->eden_space_used(); } - size_t used_in_bytes() { - return _g1mm->eden_space_used(); - } - size_t max_size() const { - return _undefined_max; - } MemoryUsage get_memory_usage(); }; // Memory pool that represents the G1 survivor. class G1SurvivorPool : public G1MemoryPoolSuper { public: - G1SurvivorPool(G1CollectedHeap* g1h); + G1SurvivorPool(G1CollectedHeap* g1h, size_t initial_size); + + size_t used_in_bytes() { return _g1mm->survivor_space_used(); } - size_t used_in_bytes() { - return _g1mm->survivor_space_used(); - } - size_t max_size() const { - return _undefined_max; - } MemoryUsage get_memory_usage(); }; // Memory pool that represents the G1 old gen. class G1OldGenPool : public G1MemoryPoolSuper { public: - G1OldGenPool(G1CollectedHeap* g1h); + G1OldGenPool(G1CollectedHeap* g1h, size_t initial_size, size_t max_size); + + size_t used_in_bytes() { return _g1mm->old_gen_used(); } - size_t used_in_bytes() { - return _g1mm->old_space_used(); - } - size_t max_size() const { - return _g1mm->old_gen_max(); - } MemoryUsage get_memory_usage(); }; diff --git a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp index 2ccb33ff6a6..1258477bb65 100644 --- a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp +++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp @@ -26,83 +26,95 @@ #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1MonitoringSupport.hpp" #include "gc/g1/g1Policy.hpp" -#include "gc/shared/collectorCounters.hpp" +#include "gc/g1/g1MemoryPool.hpp" #include "gc/shared/hSpaceCounters.hpp" #include "memory/metaspaceCounters.hpp" +#include "services/memoryPool.hpp" -G1GenerationCounters::G1GenerationCounters(G1MonitoringSupport* g1mm, - const char* name, - int ordinal, int spaces, - size_t min_capacity, - size_t max_capacity, - size_t curr_capacity) +class G1GenerationCounters : public GenerationCounters { +protected: + G1MonitoringSupport* _g1mm; + +public: + G1GenerationCounters(G1MonitoringSupport* g1mm, + const char* name, int ordinal, int spaces, + size_t min_capacity, size_t max_capacity, + size_t curr_capacity) : GenerationCounters(name, ordinal, spaces, min_capacity, max_capacity, curr_capacity), _g1mm(g1mm) { } +}; -// We pad the capacity three times given that the young generation -// contains three spaces (eden and two survivors). -G1YoungGenerationCounters::G1YoungGenerationCounters(G1MonitoringSupport* g1mm, - const char* name) +class G1YoungGenerationCounters : public G1GenerationCounters { +public: + // We pad the capacity three times given that the young generation + // contains three spaces (eden and two survivors). 
+ G1YoungGenerationCounters(G1MonitoringSupport* g1mm, const char* name, size_t max_size) : G1GenerationCounters(g1mm, name, 0 /* ordinal */, 3 /* spaces */, - G1MonitoringSupport::pad_capacity(0, 3) /* min_capacity */, - G1MonitoringSupport::pad_capacity(g1mm->young_gen_max(), 3), - G1MonitoringSupport::pad_capacity(0, 3) /* curr_capacity */) { - if (UsePerfData) { - update_all(); + G1MonitoringSupport::pad_capacity(0, 3) /* min_capacity */, + G1MonitoringSupport::pad_capacity(max_size, 3), + G1MonitoringSupport::pad_capacity(0, 3) /* curr_capacity */) { + if (UsePerfData) { + update_all(); + } } -} -G1OldGenerationCounters::G1OldGenerationCounters(G1MonitoringSupport* g1mm, - const char* name) + virtual void update_all() { + size_t committed = + G1MonitoringSupport::pad_capacity(_g1mm->young_gen_committed(), 3); + _current_size->set_value(committed); + } +}; + +class G1OldGenerationCounters : public G1GenerationCounters { +public: + G1OldGenerationCounters(G1MonitoringSupport* g1mm, const char* name, size_t max_size) : G1GenerationCounters(g1mm, name, 1 /* ordinal */, 1 /* spaces */, - G1MonitoringSupport::pad_capacity(0) /* min_capacity */, - G1MonitoringSupport::pad_capacity(g1mm->old_gen_max()), - G1MonitoringSupport::pad_capacity(0) /* curr_capacity */) { - if (UsePerfData) { - update_all(); + G1MonitoringSupport::pad_capacity(0) /* min_capacity */, + G1MonitoringSupport::pad_capacity(max_size), + G1MonitoringSupport::pad_capacity(0) /* curr_capacity */) { + if (UsePerfData) { + update_all(); + } } -} -void G1YoungGenerationCounters::update_all() { - size_t committed = - G1MonitoringSupport::pad_capacity(_g1mm->young_gen_committed(), 3); - _current_size->set_value(committed); -} - -void G1OldGenerationCounters::update_all() { - size_t committed = - G1MonitoringSupport::pad_capacity(_g1mm->old_gen_committed()); - _current_size->set_value(committed); -} + virtual void update_all() { + size_t committed = + G1MonitoringSupport::pad_capacity(_g1mm->old_gen_committed()); + _current_size->set_value(committed); + } +}; G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) : _g1h(g1h), + _incremental_memory_manager("G1 Young Generation", "end of minor GC"), + _full_gc_memory_manager("G1 Old Generation", "end of major GC"), + _eden_space_pool(NULL), + _survivor_space_pool(NULL), + _old_gen_pool(NULL), _incremental_collection_counters(NULL), _full_collection_counters(NULL), _conc_collection_counters(NULL), - _young_collection_counters(NULL), - _old_collection_counters(NULL), + _young_gen_counters(NULL), + _old_gen_counters(NULL), _old_space_counters(NULL), - _eden_counters(NULL), - _from_counters(NULL), - _to_counters(NULL), + _eden_space_counters(NULL), + _from_space_counters(NULL), + _to_space_counters(NULL), - _overall_reserved(0), _overall_committed(0), _overall_used(0), - _young_region_num(0), _young_gen_committed(0), - _eden_committed(0), - _eden_used(0), - _survivor_committed(0), - _survivor_used(0), - _old_committed(0), - _old_used(0) { + _old_gen_committed(0), + + _eden_space_committed(0), + _eden_space_used(0), + _survivor_space_committed(0), + _survivor_space_used(0), + _old_gen_used(0) { - _overall_reserved = g1h->max_capacity(); recalculate_sizes(); - // Counters for GC collections + // Counters for garbage collections // // name "collector.0". In a generational collector this would be the // young generation collection. 
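The refactoring above folds the counter subclasses into the .cpp file and makes update_all() a virtual that publishes the generation's current committed size. A stripped-down sketch of that pattern, with a plain field standing in for the PerfVariable:

    #include <cstddef>
    #include <cstdio>

    // The perf counter is modeled by a plain field; only the override
    // pattern is the point here.
    class GenerationCounters {
    protected:
      size_t _current_size = 0;
    public:
      virtual ~GenerationCounters() {}
      virtual void update_all() = 0;     // publish the committed size
      size_t current_size() const { return _current_size; }
    };

    class YoungGenerationCounters : public GenerationCounters {
      size_t (*_committed_supplier)();   // supplies young-gen committed bytes
    public:
      explicit YoungGenerationCounters(size_t (*supplier)())
        : _committed_supplier(supplier) {}
      virtual void update_all() {
        _current_size = _committed_supplier();
      }
    };

    static size_t fake_young_committed() {
      return 64u * 1024 * 1024;
    }

    int main() {
      YoungGenerationCounters young(&fake_young_committed);
      young.update_all();
      std::printf("young committed: %zu bytes\n", young.current_size());
      return 0;
    }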
@@ -117,77 +129,96 @@ G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) : _conc_collection_counters = new CollectorCounters("G1 stop-the-world phases", 2); - // timer sampling for all counters supporting sampling only update the - // used value. See the take_sample() method. G1 requires both used and - // capacity updated so sampling is not currently used. It might - // be sufficient to update all counters in take_sample() even though - // take_sample() only returns "used". When sampling was used, there - // were some anomolous values emitted which may have been the consequence - // of not updating all values simultaneously (i.e., see the calculation done - // in eden_space_used(), is it possible that the values used to - // calculate either eden_used or survivor_used are being updated by - // the collector when the sample is being done?). - const bool sampled = false; - // "Generation" and "Space" counters. // // name "generation.1" This is logically the old generation in // generational GC terms. The "1, 1" parameters are for // the n-th generation (=1) with 1 space. // Counters are created from minCapacity, maxCapacity, and capacity - _old_collection_counters = new G1OldGenerationCounters(this, "old"); + _old_gen_counters = new G1OldGenerationCounters(this, "old", _g1h->max_capacity()); // name "generation.1.space.0" // Counters are created from maxCapacity, capacity, initCapacity, // and used. - _old_space_counters = new HSpaceCounters(_old_collection_counters->name_space(), + _old_space_counters = new HSpaceCounters(_old_gen_counters->name_space(), "space", 0 /* ordinal */, - pad_capacity(overall_reserved()) /* max_capacity */, - pad_capacity(old_space_committed()) /* init_capacity */); + pad_capacity(g1h->max_capacity()) /* max_capacity */, + pad_capacity(_old_gen_committed) /* init_capacity */); // Young collection set // name "generation.0". This is logically the young generation. // The "0, 3" are parameters for the n-th generation (=0) with 3 spaces. // See _old_collection_counters for additional counters - _young_collection_counters = new G1YoungGenerationCounters(this, "young"); + _young_gen_counters = new G1YoungGenerationCounters(this, "young", _g1h->max_capacity()); - const char* young_collection_name_space = _young_collection_counters->name_space(); + const char* young_collection_name_space = _young_gen_counters->name_space(); // name "generation.0.space.0" // See _old_space_counters for additional counters - _eden_counters = new HSpaceCounters(young_collection_name_space, + _eden_space_counters = new HSpaceCounters(young_collection_name_space, "eden", 0 /* ordinal */, - pad_capacity(overall_reserved()) /* max_capacity */, - pad_capacity(eden_space_committed()) /* init_capacity */); + pad_capacity(g1h->max_capacity()) /* max_capacity */, + pad_capacity(_eden_space_committed) /* init_capacity */); // name "generation.0.space.1" // See _old_space_counters for additional counters // Set the arguments to indicate that this survivor space is not used. - _from_counters = new HSpaceCounters(young_collection_name_space, + _from_space_counters = new HSpaceCounters(young_collection_name_space, "s0", 1 /* ordinal */, pad_capacity(0) /* max_capacity */, pad_capacity(0) /* init_capacity */); + // Given that this survivor space is not used, we update it here + // once to reflect that its used space is 0 so that we don't have to + // worry about updating it again later. 
+ _from_space_counters->update_used(0); // name "generation.0.space.2" // See _old_space_counters for additional counters - _to_counters = new HSpaceCounters(young_collection_name_space, + _to_space_counters = new HSpaceCounters(young_collection_name_space, "s1", 2 /* ordinal */, - pad_capacity(overall_reserved()) /* max_capacity */, - pad_capacity(survivor_space_committed()) /* init_capacity */); + pad_capacity(g1h->max_capacity()) /* max_capacity */, + pad_capacity(_survivor_space_committed) /* init_capacity */); +} - if (UsePerfData) { - // Given that this survivor space is not used, we update it here - // once to reflect that its used space is 0 so that we don't have to - // worry about updating it again later. - _from_counters->update_used(0); - } +G1MonitoringSupport::~G1MonitoringSupport() { + delete _eden_space_pool; + delete _survivor_space_pool; + delete _old_gen_pool; +} + +void G1MonitoringSupport::initialize_serviceability() { + _eden_space_pool = new G1EdenPool(_g1h, _eden_space_committed); + _survivor_space_pool = new G1SurvivorPool(_g1h, _survivor_space_committed); + _old_gen_pool = new G1OldGenPool(_g1h, _old_gen_committed, _g1h->max_capacity()); + + _full_gc_memory_manager.add_pool(_eden_space_pool); + _full_gc_memory_manager.add_pool(_survivor_space_pool); + _full_gc_memory_manager.add_pool(_old_gen_pool); + + _incremental_memory_manager.add_pool(_eden_space_pool); + _incremental_memory_manager.add_pool(_survivor_space_pool); + _incremental_memory_manager.add_pool(_old_gen_pool, false /* always_affected_by_gc */); +} + +GrowableArray G1MonitoringSupport::memory_managers() { + GrowableArray memory_managers(2); + memory_managers.append(&_incremental_memory_manager); + memory_managers.append(&_full_gc_memory_manager); + return memory_managers; +} + +GrowableArray G1MonitoringSupport::memory_pools() { + GrowableArray memory_pools(3); + memory_pools.append(_eden_space_pool); + memory_pools.append(_survivor_space_pool); + memory_pools.append(_old_gen_pool); + return memory_pools; } void G1MonitoringSupport::recalculate_sizes() { - // Recalculate all the sizes from scratch. We assume that this is - // called at a point where no concurrent updates to the various - // values we read here are possible (i.e., at a STW phase at the end - // of a GC). + assert_heap_locked_or_at_safepoint(true); + + // Recalculate all the sizes from scratch. uint young_list_length = _g1h->young_regions_count(); uint survivor_list_length = _g1h->survivor_regions_count(); @@ -200,14 +231,13 @@ void G1MonitoringSupport::recalculate_sizes() { uint eden_list_max_length = young_list_max_length - survivor_list_length; _overall_used = _g1h->used_unlocked(); - _eden_used = (size_t) eden_list_length * HeapRegion::GrainBytes; - _survivor_used = (size_t) survivor_list_length * HeapRegion::GrainBytes; - _young_region_num = young_list_length; - _old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used); + _eden_space_used = (size_t) eden_list_length * HeapRegion::GrainBytes; + _survivor_space_used = (size_t) survivor_list_length * HeapRegion::GrainBytes; + _old_gen_used = subtract_up_to_zero(_overall_used, _eden_space_used + _survivor_space_used); // First calculate the committed sizes that can be calculated independently. 
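initialize_serviceability(), memory_managers() and memory_pools() above move pool ownership from the heap into G1MonitoringSupport. A miniature of that wiring with toy types (the real code additionally registers the old pool with the incremental manager as not always affected by young GCs):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Toy types -- not the MemoryPool/GCMemoryManager service classes.
    struct MemoryPool {
      std::string name;
    };

    struct MemoryManager {
      std::string name;
      std::vector<MemoryPool*> pools;
      void add_pool(MemoryPool* p) { pools.push_back(p); }
    };

    // The monitoring support object owns the pools and both managers and is
    // the single place that wires them together.
    class MonitoringSupport {
      MemoryPool _eden{"G1 Eden Space"};
      MemoryPool _survivor{"G1 Survivor Space"};
      MemoryPool _old{"G1 Old Gen"};
      MemoryManager _incremental{"G1 Young Generation", {}};
      MemoryManager _full{"G1 Old Generation", {}};
    public:
      void initialize_serviceability() {
        MemoryPool* all[] = {&_eden, &_survivor, &_old};
        for (MemoryPool* p : all) {
          _full.add_pool(p);
          _incremental.add_pool(p);
        }
      }
      std::vector<MemoryManager*> memory_managers() {
        return {&_incremental, &_full};
      }
      std::vector<MemoryPool*> memory_pools() {
        return {&_eden, &_survivor, &_old};
      }
    };

    int main() {
      MonitoringSupport g1mm;
      g1mm.initialize_serviceability();
      std::printf("managers=%zu pools=%zu\n",
                  g1mm.memory_managers().size(), g1mm.memory_pools().size());
      return 0;
    }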
- _survivor_committed = _survivor_used; - _old_committed = HeapRegion::align_up_to_region_byte_size(_old_used); + _survivor_space_committed = _survivor_space_used; + _old_gen_committed = HeapRegion::align_up_to_region_byte_size(_old_gen_used); // Next, start with the overall committed size. _overall_committed = _g1h->capacity(); @@ -215,70 +245,64 @@ void G1MonitoringSupport::recalculate_sizes() { // Remove the committed size we have calculated so far (for the // survivor and old space). - assert(committed >= (_survivor_committed + _old_committed), "sanity"); - committed -= _survivor_committed + _old_committed; + assert(committed >= (_survivor_space_committed + _old_gen_committed), "sanity"); + committed -= _survivor_space_committed + _old_gen_committed; // Next, calculate and remove the committed size for the eden. - _eden_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes; + _eden_space_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes; // Somewhat defensive: be robust in case there are inaccuracies in // the calculations - _eden_committed = MIN2(_eden_committed, committed); - committed -= _eden_committed; + _eden_space_committed = MIN2(_eden_space_committed, committed); + committed -= _eden_space_committed; // Finally, give the rest to the old space... - _old_committed += committed; + _old_gen_committed += committed; // ..and calculate the young gen committed. - _young_gen_committed = _eden_committed + _survivor_committed; + _young_gen_committed = _eden_space_committed + _survivor_space_committed; assert(_overall_committed == - (_eden_committed + _survivor_committed + _old_committed), + (_eden_space_committed + _survivor_space_committed + _old_gen_committed), "the committed sizes should add up"); // Somewhat defensive: cap the eden used size to make sure it // never exceeds the committed size. - _eden_used = MIN2(_eden_used, _eden_committed); + _eden_space_used = MIN2(_eden_space_used, _eden_space_committed); // _survivor_committed and _old_committed are calculated in terms of // the corresponding _*_used value, so the next two conditions // should hold. - assert(_survivor_used <= _survivor_committed, "post-condition"); - assert(_old_used <= _old_committed, "post-condition"); -} - -void G1MonitoringSupport::recalculate_eden_size() { - // When a new eden region is allocated, only the eden_used size is - // affected (since we have recalculated everything else at the last GC). - - uint young_region_num = _g1h->young_regions_count(); - if (young_region_num > _young_region_num) { - uint diff = young_region_num - _young_region_num; - _eden_used += (size_t) diff * HeapRegion::GrainBytes; - // Somewhat defensive: cap the eden used size to make sure it - // never exceeds the committed size. 
- _eden_used = MIN2(_eden_used, _eden_committed); - _young_region_num = young_region_num; - } + assert(_survivor_space_used <= _survivor_space_committed, "post-condition"); + assert(_old_gen_used <= _old_gen_committed, "post-condition"); } void G1MonitoringSupport::update_sizes() { recalculate_sizes(); if (UsePerfData) { - eden_counters()->update_capacity(pad_capacity(eden_space_committed())); - eden_counters()->update_used(eden_space_used()); - // only the to survivor space (s1) is active, so we don't need to - // update the counters for the from survivor space (s0) - to_counters()->update_capacity(pad_capacity(survivor_space_committed())); - to_counters()->update_used(survivor_space_used()); - old_space_counters()->update_capacity(pad_capacity(old_space_committed())); - old_space_counters()->update_used(old_space_used()); - old_collection_counters()->update_all(); - young_collection_counters()->update_all(); + _eden_space_counters->update_capacity(pad_capacity(_eden_space_committed)); + _eden_space_counters->update_used(_eden_space_used); + // only the "to" survivor space is active, so we don't need to + // update the counters for the "from" survivor space + _to_space_counters->update_capacity(pad_capacity(_survivor_space_committed)); + _to_space_counters->update_used(_survivor_space_used); + _old_space_counters->update_capacity(pad_capacity(_old_gen_committed)); + _old_space_counters->update_used(_old_gen_used); + + _young_gen_counters->update_all(); + _old_gen_counters->update_all(); + MetaspaceCounters::update_performance_counters(); CompressedClassSpaceCounters::update_performance_counters(); } } void G1MonitoringSupport::update_eden_size() { - recalculate_eden_size(); + // Recalculate everything - this is fast enough. + recalculate_sizes(); if (UsePerfData) { - eden_counters()->update_used(eden_space_used()); + _eden_space_counters->update_used(_eden_space_used); } } + +G1MonitoringScope::G1MonitoringScope(G1MonitoringSupport* g1mm, bool full_gc, bool all_memory_pools_affected) : + _tcs(full_gc ? g1mm->_full_collection_counters : g1mm->_incremental_collection_counters), + _tms(full_gc ? &g1mm->_full_gc_memory_manager : &g1mm->_incremental_memory_manager, + G1CollectedHeap::heap()->gc_cause(), all_memory_pools_affected) { +} diff --git a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp index 7458c0a1744..bad38617de6 100644 --- a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp +++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp @@ -25,11 +25,15 @@ #ifndef SHARE_VM_GC_G1_G1MONITORINGSUPPORT_HPP #define SHARE_VM_GC_G1_G1MONITORINGSUPPORT_HPP +#include "gc/shared/collectorCounters.hpp" #include "gc/shared/generationCounters.hpp" +#include "services/memoryManager.hpp" +#include "services/memoryService.hpp" class CollectorCounters; class G1CollectedHeap; class HSpaceCounters; +class MemoryPool; // Class for monitoring logical spaces in G1. It provides data for // both G1's jstat counters as well as G1's memory pools. 
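The glue that publishes the serviceability entry points added in the .cpp above (initialize_serviceability(), memory_managers(), memory_pools()) to java.lang.management lives in the heap and MemoryService code, which is not part of this excerpt. A minimal sketch of the expected wiring, assuming G1CollectedHeap simply delegates to its _g1mm member:

  GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
    // Delegate to the monitoring support object built in initialize_serviceability().
    return _g1mm->memory_managers();
  }

  GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
    return _g1mm->memory_pools();
  }

From these lists MemoryService builds the MemoryManagerMXBean and MemoryPoolMXBean views that tools such as JConsole query.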
@@ -116,9 +120,18 @@ class HSpaceCounters; class G1MonitoringSupport : public CHeapObj<mtGC> { friend class VMStructs; + friend class G1MonitoringScope; G1CollectedHeap* _g1h; + // java.lang.management MemoryManager and MemoryPool support + GCMemoryManager _incremental_memory_manager; + GCMemoryManager _full_gc_memory_manager; + + MemoryPool* _eden_space_pool; + MemoryPool* _survivor_space_pool; + MemoryPool* _old_gen_pool; + // jstat performance counters // incremental collections both young and mixed CollectorCounters* _incremental_collection_counters; @@ -129,37 +142,36 @@ class G1MonitoringSupport : public CHeapObj<mtGC> { // young collection set counters. The _eden_counters, // _from_counters, and _to_counters are associated with // this "generational" counter. - GenerationCounters* _young_collection_counters; + GenerationCounters* _young_gen_counters; // old collection set counters. The _old_space_counters // below are associated with this "generational" counter. - GenerationCounters* _old_collection_counters; + GenerationCounters* _old_gen_counters; // Counters for the capacity and used for // the whole heap HSpaceCounters* _old_space_counters; // the young collection - HSpaceCounters* _eden_counters; + HSpaceCounters* _eden_space_counters; // the survivor collection (only one, _to_counters, is actively used) - HSpaceCounters* _from_counters; - HSpaceCounters* _to_counters; + HSpaceCounters* _from_space_counters; + HSpaceCounters* _to_space_counters; // When it's appropriate to recalculate the various sizes (at the // end of a GC, when a new eden region is allocated, etc.) we store // them here so that we can easily report them when needed and not // have to recalculate them every time. - size_t _overall_reserved; size_t _overall_committed; size_t _overall_used; - uint _young_region_num; size_t _young_gen_committed; - size_t _eden_committed; - size_t _eden_used; - size_t _survivor_committed; - size_t _survivor_used; + size_t _old_gen_committed; - size_t _old_committed; - size_t _old_used; + size_t _eden_space_committed; + size_t _eden_space_used; + size_t _survivor_space_committed; + size_t _survivor_space_used; + + size_t _old_gen_used; // It returns x - y if x > y, 0 otherwise. // As described in the comment above, some of the inputs to the @@ -178,11 +190,16 @@ class G1MonitoringSupport : public CHeapObj<mtGC> { // Recalculate all the sizes. void recalculate_sizes(); - // Recalculate only what's necessary when a new eden region is allocated. + void recalculate_eden_size(); - public: +public: G1MonitoringSupport(G1CollectedHeap* g1h); + ~G1MonitoringSupport(); + + void initialize_serviceability(); + GrowableArray<GCMemoryManager*> memory_managers(); + GrowableArray<MemoryPool*> memory_pools(); // Unfortunately, the jstat tool assumes that no space has 0 // capacity. In our case, given that each space is logical, it's @@ -202,73 +219,35 @@ // Recalculate all the sizes from scratch and update all the jstat // counters accordingly. void update_sizes(); - // Recalculate only what's necessary when a new eden region is - // allocated and update any jstat counters that need to be updated.
+ void update_eden_size(); - CollectorCounters* incremental_collection_counters() { - return _incremental_collection_counters; - } - CollectorCounters* full_collection_counters() { - return _full_collection_counters; - } CollectorCounters* conc_collection_counters() { return _conc_collection_counters; } - GenerationCounters* young_collection_counters() { - return _young_collection_counters; - } - GenerationCounters* old_collection_counters() { - return _old_collection_counters; - } - HSpaceCounters* old_space_counters() { return _old_space_counters; } - HSpaceCounters* eden_counters() { return _eden_counters; } - HSpaceCounters* from_counters() { return _from_counters; } - HSpaceCounters* to_counters() { return _to_counters; } // Monitoring support used by // MemoryService // jstat counters // Tracing - size_t overall_reserved() { return _overall_reserved; } - size_t overall_committed() { return _overall_committed; } - size_t overall_used() { return _overall_used; } + size_t young_gen_committed() { return _young_gen_committed; } - size_t young_gen_committed() { return _young_gen_committed; } - size_t young_gen_max() { return overall_reserved(); } - size_t eden_space_committed() { return _eden_committed; } - size_t eden_space_used() { return _eden_used; } - size_t survivor_space_committed() { return _survivor_committed; } - size_t survivor_space_used() { return _survivor_used; } + size_t eden_space_committed() { return _eden_space_committed; } + size_t eden_space_used() { return _eden_space_used; } + size_t survivor_space_committed() { return _survivor_space_committed; } + size_t survivor_space_used() { return _survivor_space_used; } - size_t old_gen_committed() { return old_space_committed(); } - size_t old_gen_max() { return overall_reserved(); } - size_t old_space_committed() { return _old_committed; } - size_t old_space_used() { return _old_used; } + size_t old_gen_committed() { return _old_gen_committed; } + size_t old_gen_used() { return _old_gen_used; } }; -class G1GenerationCounters: public GenerationCounters { -protected: - G1MonitoringSupport* _g1mm; - +// Scope object for java.lang.management support. 
+class G1MonitoringScope : public StackObj { + TraceCollectorStats _tcs; + TraceMemoryManagerStats _tms; public: - G1GenerationCounters(G1MonitoringSupport* g1mm, - const char* name, int ordinal, int spaces, - size_t min_capacity, size_t max_capacity, - size_t curr_capacity); -}; - -class G1YoungGenerationCounters: public G1GenerationCounters { -public: - G1YoungGenerationCounters(G1MonitoringSupport* g1mm, const char* name); - virtual void update_all(); -}; - -class G1OldGenerationCounters: public G1GenerationCounters { -public: - G1OldGenerationCounters(G1MonitoringSupport* g1mm, const char* name); - virtual void update_all(); + G1MonitoringScope(G1MonitoringSupport* g1mm, bool full_gc, bool all_memory_pools_affected); }; #endif // SHARE_VM_GC_G1_G1MONITORINGSUPPORT_HPP diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp index 4cd457ef89e..92117092fbb 100644 --- a/src/hotspot/share/gc/g1/g1RemSet.cpp +++ b/src/hotspot/share/gc/g1/g1RemSet.cpp @@ -132,7 +132,7 @@ private: virtual bool do_heap_region(HeapRegion* r) { uint hrm_index = r->hrm_index(); - if (!r->in_collection_set() && r->is_old_or_humongous()) { + if (!r->in_collection_set() && r->is_old_or_humongous_or_archive()) { _scan_top[hrm_index] = r->top(); } else { _scan_top[hrm_index] = r->bottom(); @@ -571,7 +571,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr, // In the normal (non-stale) case, the synchronization between the // enqueueing of the card and processing it here will have ensured // we see the up-to-date region type here. - if (!r->is_old_or_humongous()) { + if (!r->is_old_or_humongous_or_archive()) { return; } @@ -600,7 +600,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr, // Check whether the region formerly in the cache should be // ignored, as discussed earlier for the original card. The // region could have been freed while in the cache. - if (!r->is_old_or_humongous()) { + if (!r->is_old_or_humongous_or_archive()) { return; } } // Else we still have the original card. diff --git a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp index 42ea06fe6ff..8f65f81ac6a 100644 --- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp +++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp @@ -226,6 +226,7 @@ private: RegionTypeCounter _humongous; RegionTypeCounter _free; RegionTypeCounter _old; + RegionTypeCounter _archive; RegionTypeCounter _all; size_t _max_rs_mem_sz; @@ -248,7 +249,7 @@ private: public: HRRSStatsIter() : _young("Young"), _humongous("Humongous"), - _free("Free"), _old("Old"), _all("All"), + _free("Free"), _old("Old"), _archive("Archive"), _all("All"), _max_rs_mem_sz(0), _max_rs_mem_sz_region(NULL), _max_code_root_mem_sz(0), _max_code_root_mem_sz_region(NULL) {} @@ -280,6 +281,8 @@ public: current = &_humongous; } else if (r->is_old()) { current = &_old; + } else if (r->is_archive()) { + current = &_archive; } else { ShouldNotReachHere(); } @@ -290,7 +293,7 @@ public: } void print_summary_on(outputStream* out) { - RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, NULL }; + RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, &_archive, NULL }; out->print_cr(" Current rem set statistics"); out->print_cr(" Total per region rem sets sizes = " SIZE_FORMAT "%s." 
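As a usage sketch, a collection pause would be bracketed by the new scope object roughly as follows (a hypothetical call site; the G1CollectedHeap pause code is not part of this excerpt, and g1mm stands for the heap's G1MonitoringSupport):

  {
    // Records collector counters plus memory manager/pool before/after
    // states for the duration of the pause.
    G1MonitoringScope ms(g1mm, false /* full_gc */, true /* all_memory_pools_affected */);
    // ... evacuate the collection set ...
  }

The embedded TraceCollectorStats and TraceMemoryManagerStats members mark the begin and end of the collection against either the incremental or the full-GC memory manager, depending on the full_gc flag.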
diff --git a/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp index 7c92f7c4fdc..575eae7a5ee 100644 --- a/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp +++ b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp @@ -141,8 +141,9 @@ bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_by void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) { assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); - if (r->is_old_or_humongous()) { + if (r->is_old_or_humongous_or_archive()) { if (r->rem_set()->is_updating()) { + assert(!r->is_archive(), "Archive region %u with remembered set", r->hrm_index()); r->rem_set()->set_state_complete(); } G1CollectedHeap* g1h = G1CollectedHeap::heap(); diff --git a/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp index 9ea99ebf05b..b3d244cfb04 100644 --- a/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp +++ b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp @@ -27,7 +27,7 @@ #include "gc/g1/g1SATBMarkQueueSet.hpp" #include "gc/g1/g1ThreadLocalData.hpp" #include "gc/g1/heapRegion.hpp" -#include "gc/g1/satbMarkQueue.hpp" +#include "gc/shared/satbMarkQueue.hpp" #include "oops/oop.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" diff --git a/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp index 4c311e851a5..963ef0f3af4 100644 --- a/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp +++ b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp @@ -25,7 +25,7 @@ #ifndef SHARE_VM_GC_G1_G1SATBMARKQUEUE_HPP #define SHARE_VM_GC_G1_G1SATBMARKQUEUE_HPP -#include "gc/g1/satbMarkQueue.hpp" +#include "gc/shared/satbMarkQueue.hpp" class G1CollectedHeap; class JavaThread; diff --git a/src/hotspot/share/gc/g1/g1ThreadLocalData.hpp b/src/hotspot/share/gc/g1/g1ThreadLocalData.hpp index c241a290c28..878e71df40e 100644 --- a/src/hotspot/share/gc/g1/g1ThreadLocalData.hpp +++ b/src/hotspot/share/gc/g1/g1ThreadLocalData.hpp @@ -26,7 +26,7 @@ #include "gc/g1/dirtyCardQueue.hpp" #include "gc/g1/g1BarrierSet.hpp" -#include "gc/g1/satbMarkQueue.hpp" +#include "gc/shared/satbMarkQueue.hpp" #include "runtime/thread.hpp" #include "utilities/debug.hpp" #include "utilities/sizes.hpp" diff --git a/src/hotspot/share/gc/g1/heapRegion.hpp b/src/hotspot/share/gc/g1/heapRegion.hpp index 01d3c4d8758..5973e4c70e3 100644 --- a/src/hotspot/share/gc/g1/heapRegion.hpp +++ b/src/hotspot/share/gc/g1/heapRegion.hpp @@ -426,6 +426,8 @@ class HeapRegion: public G1ContiguousSpace { bool is_old_or_humongous() const { return _type.is_old_or_humongous(); } + bool is_old_or_humongous_or_archive() const { return _type.is_old_or_humongous_or_archive(); } + // A pinned region contains objects which are not moved by garbage collections. // Humongous regions and archive regions are pinned. 
bool is_pinned() const { return _type.is_pinned(); } diff --git a/src/hotspot/share/gc/g1/heapRegion.inline.hpp b/src/hotspot/share/gc/g1/heapRegion.inline.hpp index 604afd82175..a37ae507cfa 100644 --- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp +++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp @@ -350,7 +350,7 @@ bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr, if (is_humongous()) { return do_oops_on_card_in_humongous(mr, cl, g1h); } - assert(is_old(), "precondition"); + assert(is_old() || is_archive(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str()); // Because mr has been trimmed to what's been allocated in this // region, the parts of the heap that are examined here are always diff --git a/src/hotspot/share/gc/g1/heapRegionManager.cpp b/src/hotspot/share/gc/g1/heapRegionManager.cpp index 3108bce980a..bd1d573f88b 100644 --- a/src/hotspot/share/gc/g1/heapRegionManager.cpp +++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,41 @@ #include "gc/g1/heapRegionSet.inline.hpp" #include "memory/allocation.hpp" +class MasterFreeRegionListChecker : public HeapRegionSetChecker { +public: + void check_mt_safety() { + // Master Free List MT safety protocol: + // (a) If we're at a safepoint, operations on the master free list + // should be invoked by either the VM thread (which will serialize + // them) or by the GC workers while holding the + // FreeList_lock. + // (b) If we're not at a safepoint, operations on the master free + // list should be invoked while holding the Heap_lock. + + if (SafepointSynchronize::is_at_safepoint()) { + guarantee(Thread::current()->is_VM_thread() || + FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint"); + } else { + guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint"); + } + } + bool is_correct_type(HeapRegion* hr) { return hr->is_free(); } + const char* get_description() { return "Free Regions"; } +}; + +HeapRegionManager::HeapRegionManager() : + _regions(), _heap_mapper(NULL), + _prev_bitmap_mapper(NULL), + _next_bitmap_mapper(NULL), + _bot_mapper(NULL), + _cardtable_mapper(NULL), + _card_counts_mapper(NULL), + _free_list("Free list", new MasterFreeRegionListChecker()), + _available_map(mtGC), + _num_committed(0), + _allocated_heapregions_length(0) +{ } + void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage, G1RegionToSpaceMapper* prev_bitmap, G1RegionToSpaceMapper* next_bitmap, diff --git a/src/hotspot/share/gc/g1/heapRegionManager.hpp b/src/hotspot/share/gc/g1/heapRegionManager.hpp index 11a692e1e90..ecbfa0cd9c1 100644 --- a/src/hotspot/share/gc/g1/heapRegionManager.hpp +++ b/src/hotspot/share/gc/g1/heapRegionManager.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -128,14 +128,7 @@ public: public: // Empty constructor, we'll initialize it with the initialize() method. 
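  // The constructor body now lives in heapRegionManager.cpp, next to the MasterFreeRegionListChecker it instantiates for _free_list.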
- HeapRegionManager() : - _regions(), _heap_mapper(NULL), - _prev_bitmap_mapper(NULL), _next_bitmap_mapper(NULL), _bot_mapper(NULL), - _cardtable_mapper(NULL), _card_counts_mapper(NULL), - _free_list("Free list", new MasterFreeRegionListMtSafeChecker()), - _available_map(mtGC), _num_committed(0), - _allocated_heapregions_length(0) - { } + HeapRegionManager(); void initialize(G1RegionToSpaceMapper* heap_storage, G1RegionToSpaceMapper* prev_bitmap, diff --git a/src/hotspot/share/gc/g1/heapRegionSet.cpp b/src/hotspot/share/gc/g1/heapRegionSet.cpp index fe7d81dfccf..c296a53400c 100644 --- a/src/hotspot/share/gc/g1/heapRegionSet.cpp +++ b/src/hotspot/share/gc/g1/heapRegionSet.cpp @@ -33,8 +33,8 @@ uint FreeRegionList::_unrealistically_long_length = 0; void HeapRegionSetBase::verify_region(HeapRegion* hr) { assert(hr->containing_set() == this, "Inconsistent containing set for %u", hr->hrm_index()); assert(!hr->is_young(), "Adding young region %u", hr->hrm_index()); // currently we don't use these sets for young regions - assert(hr->is_humongous() == regions_humongous(), "Wrong humongous state for region %u and set %s", hr->hrm_index(), name()); - assert(hr->is_free() == regions_free(), "Wrong free state for region %u and set %s", hr->hrm_index(), name()); + assert(_checker == NULL || _checker->is_correct_type(hr), "Wrong type of region %u (%s) and set %s", + hr->hrm_index(), hr->get_type_str(), name()); assert(!hr->is_free() || hr->is_empty(), "Free region %u is not empty for set %s", hr->hrm_index(), name()); assert(!hr->is_empty() || hr->is_free() || hr->is_archive(), "Empty region %u is not free or archive for set %s", hr->hrm_index(), name()); @@ -75,21 +75,14 @@ void HeapRegionSetBase::verify_end() { void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) { out->cr(); out->print_cr("Set: %s (" PTR_FORMAT ")", name(), p2i(this)); - out->print_cr(" Region Assumptions"); - out->print_cr(" humongous : %s", BOOL_TO_STR(regions_humongous())); - out->print_cr(" free : %s", BOOL_TO_STR(regions_free())); - out->print_cr(" Attributes"); - out->print_cr(" length : %14u", length()); + out->print_cr(" Region Type : %s", _checker->get_description()); + out->print_cr(" Length : %14u", length()); } -HeapRegionSetBase::HeapRegionSetBase(const char* name, bool humongous, bool free, HRSMtSafeChecker* mt_safety_checker) - : _is_humongous(humongous), - _is_free(free), - _mt_safety_checker(mt_safety_checker), - _length(0), - _name(name), - _verify_in_progress(false) -{ } +HeapRegionSetBase::HeapRegionSetBase(const char* name, HeapRegionSetChecker* checker) + : _checker(checker), _length(0), _name(name), _verify_in_progress(false) +{ +} void FreeRegionList::set_unrealistically_long_length(uint len) { guarantee(_unrealistically_long_length == 0, "should only be set once"); @@ -295,73 +288,3 @@ void FreeRegionList::verify_list() { guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next"); guarantee(length() == count, "%s count mismatch. Expected %u, actual %u.", name(), length(), count); } - -// Note on the check_mt_safety() methods below: -// -// Verification of the "master" heap region sets / lists that are -// maintained by G1CollectedHeap is always done during a STW pause and -// by the VM thread at the start / end of the pause. The standard -// verification methods all assert check_mt_safety(). This is -// important as it ensures that verification is done without -// concurrent updates taking place at the same time. 
It follows, that, -// for the "master" heap region sets / lists, the check_mt_safety() -// method should include the VM thread / STW case. - -void MasterFreeRegionListMtSafeChecker::check() { - // Master Free List MT safety protocol: - // (a) If we're at a safepoint, operations on the master free list - // should be invoked by either the VM thread (which will serialize - // them) or by the GC workers while holding the - // FreeList_lock. - // (b) If we're not at a safepoint, operations on the master free - // list should be invoked while holding the Heap_lock. - - if (SafepointSynchronize::is_at_safepoint()) { - guarantee(Thread::current()->is_VM_thread() || - FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint"); - } else { - guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint"); - } -} - -void OldRegionSetMtSafeChecker::check() { - // Master Old Set MT safety protocol: - // (a) If we're at a safepoint, operations on the master old set - // should be invoked: - // - by the VM thread (which will serialize them), or - // - by the GC workers while holding the FreeList_lock, if we're - // at a safepoint for an evacuation pause (this lock is taken - // anyway when an GC alloc region is retired so that a new one - // is allocated from the free list), or - // - by the GC workers while holding the OldSets_lock, if we're at a - // safepoint for a cleanup pause. - // (b) If we're not at a safepoint, operations on the master old set - // should be invoked while holding the Heap_lock. - - if (SafepointSynchronize::is_at_safepoint()) { - guarantee(Thread::current()->is_VM_thread() - || FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(), - "master old set MT safety protocol at a safepoint"); - } else { - guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint"); - } -} - -void HumongousRegionSetMtSafeChecker::check() { - // Humongous Set MT safety protocol: - // (a) If we're at a safepoint, operations on the master humongous - // set should be invoked by either the VM thread (which will - // serialize them) or by the GC workers while holding the - // OldSets_lock. - // (b) If we're not at a safepoint, operations on the master - // humongous set should be invoked while holding the Heap_lock. - - if (SafepointSynchronize::is_at_safepoint()) { - guarantee(Thread::current()->is_VM_thread() || - OldSets_lock->owned_by_self(), - "master humongous set MT safety protocol at a safepoint"); - } else { - guarantee(Heap_lock->owned_by_self(), - "master humongous set MT safety protocol outside a safepoint"); - } -} diff --git a/src/hotspot/share/gc/g1/heapRegionSet.hpp b/src/hotspot/share/gc/g1/heapRegionSet.hpp index 0c960576651..b693918df46 100644 --- a/src/hotspot/share/gc/g1/heapRegionSet.hpp +++ b/src/hotspot/share/gc/g1/heapRegionSet.hpp @@ -47,15 +47,18 @@ } while (0) -class HRSMtSafeChecker : public CHeapObj<mtGC> { +// Interface collecting various instance specific verification methods of +// HeapRegionSets. +class HeapRegionSetChecker : public CHeapObj<mtGC> { public: - virtual void check() = 0; + // Verify MT safety for this HeapRegionSet. + virtual void check_mt_safety() = 0; + // Returns true if the given HeapRegion is of the correct type for this HeapRegionSet. + virtual bool is_correct_type(HeapRegion* hr) = 0; + // Returns a description of the type of regions this HeapRegionSet contains.
+ virtual const char* get_description() = 0; }; -class MasterFreeRegionListMtSafeChecker : public HRSMtSafeChecker { public: void check(); }; -class HumongousRegionSetMtSafeChecker : public HRSMtSafeChecker { public: void check(); }; -class OldRegionSetMtSafeChecker : public HRSMtSafeChecker { public: void check(); }; - // Base class for all the classes that represent heap region sets. It // contains the basic attributes that each set needs to maintain // (e.g., length, region num, used bytes sum) plus any shared @@ -63,10 +66,8 @@ class OldRegionSetMtSafeChecker : public HRSMtSafeChecker { public: v class HeapRegionSetBase { friend class VMStructs; -private: - bool _is_humongous; - bool _is_free; - HRSMtSafeChecker* _mt_safety_checker; + + HeapRegionSetChecker* _checker; protected: // The number of regions in to the set. @@ -80,21 +81,13 @@ protected: // added to / removed from a set are consistent. void verify_region(HeapRegion* hr) PRODUCT_RETURN; - // Indicates whether all regions in the set should be humongous or - // not. Only used during verification. - bool regions_humongous() { return _is_humongous; } - - // Indicates whether all regions in the set should be free or - // not. Only used during verification. - bool regions_free() { return _is_free; } - void check_mt_safety() { - if (_mt_safety_checker != NULL) { - _mt_safety_checker->check(); + if (_checker != NULL) { + _checker->check_mt_safety(); } } - HeapRegionSetBase(const char* name, bool humongous, bool free, HRSMtSafeChecker* mt_safety_checker); + HeapRegionSetBase(const char* name, HeapRegionSetChecker* verifier); public: const char* name() { return _name; } @@ -121,15 +114,6 @@ public: virtual void print_on(outputStream* out, bool print_contents = false); }; -#define hrs_assert_sets_match(_set1_, _set2_) \ - do { \ - assert(((_set1_)->regions_humongous() == (_set2_)->regions_humongous()) && \ - ((_set1_)->regions_free() == (_set2_)->regions_free()), \ - "the contents of set %s and set %s should match", \ - (_set1_)->name(), \ - (_set2_)->name()); \ - } while (0) - // This class represents heap region sets whose members are not // explicitly tracked. It's helpful to group regions using such sets // so that we can reason about all the region groups in the heap using @@ -137,8 +121,9 @@ public: class HeapRegionSet : public HeapRegionSetBase { public: - HeapRegionSet(const char* name, bool humongous, HRSMtSafeChecker* mt_safety_checker): - HeapRegionSetBase(name, humongous, false /* free */, mt_safety_checker) { } + HeapRegionSet(const char* name, HeapRegionSetChecker* checker): + HeapRegionSetBase(name, checker) { + } void bulk_remove(const uint removed) { _length -= removed; @@ -173,8 +158,8 @@ protected: virtual void clear(); public: - FreeRegionList(const char* name, HRSMtSafeChecker* mt_safety_checker = NULL): - HeapRegionSetBase(name, false /* humongous */, true /* empty */, mt_safety_checker) { + FreeRegionList(const char* name, HeapRegionSetChecker* checker = NULL): + HeapRegionSetBase(name, checker) { clear(); } diff --git a/src/hotspot/share/gc/g1/heapRegionType.hpp b/src/hotspot/share/gc/g1/heapRegionType.hpp index 12259984b26..ed6f92f4768 100644 --- a/src/hotspot/share/gc/g1/heapRegionType.hpp +++ b/src/hotspot/share/gc/g1/heapRegionType.hpp @@ -86,8 +86,8 @@ private: // Objects within these regions are allowed to have references to objects // contained in any other kind of regions. 
ArchiveMask = 32, - OpenArchiveTag = ArchiveMask | PinnedMask | OldMask, - ClosedArchiveTag = ArchiveMask | PinnedMask | OldMask + 1 + OpenArchiveTag = ArchiveMask | PinnedMask, + ClosedArchiveTag = ArchiveMask | PinnedMask + 1 } Tag; volatile Tag _tag; @@ -139,6 +139,8 @@ public: bool is_old_or_humongous() const { return (get() & (OldMask | HumongousMask)) != 0; } + bool is_old_or_humongous_or_archive() const { return (get() & (OldMask | HumongousMask | ArchiveMask)) != 0; } + // is_pinned regions may be archive or humongous bool is_pinned() const { return (get() & PinnedMask) != 0; } diff --git a/src/hotspot/share/gc/g1/vmStructs_g1.hpp b/src/hotspot/share/gc/g1/vmStructs_g1.hpp index aabadf2a0c1..7f1055ef0db 100644 --- a/src/hotspot/share/gc/g1/vmStructs_g1.hpp +++ b/src/hotspot/share/gc/g1/vmStructs_g1.hpp @@ -56,14 +56,15 @@ nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \ nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \ nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \ + nonstatic_field(G1CollectedHeap, _archive_set, HeapRegionSetBase) \ nonstatic_field(G1CollectedHeap, _humongous_set, HeapRegionSetBase) \ \ - nonstatic_field(G1MonitoringSupport, _eden_committed, size_t) \ - nonstatic_field(G1MonitoringSupport, _eden_used, size_t) \ - nonstatic_field(G1MonitoringSupport, _survivor_committed, size_t) \ - nonstatic_field(G1MonitoringSupport, _survivor_used, size_t) \ - nonstatic_field(G1MonitoringSupport, _old_committed, size_t) \ - nonstatic_field(G1MonitoringSupport, _old_used, size_t) \ + nonstatic_field(G1MonitoringSupport, _eden_space_committed, size_t) \ + nonstatic_field(G1MonitoringSupport, _eden_space_used, size_t) \ + nonstatic_field(G1MonitoringSupport, _survivor_space_committed, size_t) \ + nonstatic_field(G1MonitoringSupport, _survivor_space_used, size_t) \ + nonstatic_field(G1MonitoringSupport, _old_gen_committed, size_t) \ + nonstatic_field(G1MonitoringSupport, _old_gen_used, size_t) \ \ nonstatic_field(HeapRegionSetBase, _length, uint) \ \ diff --git a/src/hotspot/share/gc/parallel/pcTasks.cpp b/src/hotspot/share/gc/parallel/pcTasks.cpp index 60d2ad9dc2b..eb128cd3d55 100644 --- a/src/hotspot/share/gc/parallel/pcTasks.cpp +++ b/src/hotspot/share/gc/parallel/pcTasks.cpp @@ -107,8 +107,10 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) { SystemDictionary::oops_do(&mark_and_push_closure); break; - case class_loader_data: - ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, true); + case class_loader_data: { + CLDToOopClosure cld_closure(&mark_and_push_closure); + ClassLoaderDataGraph::always_strong_cld_do(&cld_closure); + } break; case code_cache: diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.cpp b/src/hotspot/share/gc/parallel/psCompactionManager.cpp index 27642bc51cf..00bf6f6dacd 100644 --- a/src/hotspot/share/gc/parallel/psCompactionManager.cpp +++ b/src/hotspot/share/gc/parallel/psCompactionManager.cpp @@ -153,14 +153,14 @@ void InstanceMirrorKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* // Follow the klass field in the mirror. Klass* klass = java_lang_Class::as_Klass(obj); if (klass != NULL) { - // An anonymous class doesn't have its own class loader, so the call - // to follow_klass will mark and push its java mirror instead of the - // class loader. When handling the java mirror for an anonymous class - // we need to make sure its class loader data is claimed, this is done - // by calling follow_class_loader explicitly. 
For non-anonymous classes - // the call to follow_class_loader is made when the class loader itself - // is handled. - if (klass->is_instance_klass() && InstanceKlass::cast(klass)->is_anonymous()) { + // An unsafe anonymous class doesn't have its own class loader, + // so the call to follow_klass will mark and push its java mirror instead of the + // class loader. When handling the java mirror for an unsafe anonymous + // class we need to make sure its class loader data is claimed, this is done + // by calling follow_class_loader explicitly. For non-anonymous classes the + // call to follow_class_loader is made when the class loader itself is handled. + if (klass->is_instance_klass() && + InstanceKlass::cast(klass)->is_unsafe_anonymous()) { cm->follow_class_loader(klass->class_loader_data()); } else { cm->follow_klass(klass); diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp index 073a452af46..bb973b7c32e 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -2188,7 +2188,8 @@ void PSParallelCompact::adjust_roots(ParCompactionManager* cm) { Management::oops_do(&oop_closure); JvmtiExport::oops_do(&oop_closure); SystemDictionary::oops_do(&oop_closure); - ClassLoaderDataGraph::oops_do(&oop_closure, true); + CLDToOopClosure cld_closure(&oop_closure); + ClassLoaderDataGraph::cld_do(&cld_closure); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) diff --git a/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp b/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp index abcc53b7637..27fa49567d7 100644 --- a/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp +++ b/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp @@ -90,6 +90,13 @@ void BarrierSetC1::load_at(LIRAccess& access, LIR_Opr result) { load_at_resolved(access, result); } +void BarrierSetC1::load(LIRAccess& access, LIR_Opr result) { + DecoratorSet decorators = access.decorators(); + bool in_heap = (decorators & IN_HEAP) != 0; + assert(!in_heap, "consider using load_at"); + load_at_resolved(access, result); +} + LIR_Opr BarrierSetC1::atomic_cmpxchg_at(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) { DecoratorSet decorators = access.decorators(); bool in_heap = (decorators & IN_HEAP) != 0; @@ -159,13 +166,16 @@ void BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) { bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP(); bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0; bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0; + bool in_native = (decorators & IN_NATIVE) != 0; if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile) { __ membar(); } LIR_PatchCode patch_code = needs_patching ? 
lir_patch_normal : lir_patch_none; - if (is_volatile && !needs_patching) { + if (in_native) { + __ move_wide(access.resolved_addr()->as_address_ptr(), result); + } else if (is_volatile && !needs_patching) { gen->volatile_field_load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info()); } else { __ load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info(), patch_code); @@ -324,3 +334,7 @@ void BarrierSetC1::generate_referent_check(LIRAccess& access, LabelObj* cont) { } } } + +LIR_Opr BarrierSetC1::resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj) { + return obj; +} diff --git a/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp b/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp index d1522c1cef7..41e494cb61f 100644 --- a/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp +++ b/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp @@ -127,12 +127,15 @@ protected: public: virtual void store_at(LIRAccess& access, LIR_Opr value); virtual void load_at(LIRAccess& access, LIR_Opr result); + virtual void load(LIRAccess& access, LIR_Opr result); virtual LIR_Opr atomic_cmpxchg_at(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value); virtual LIR_Opr atomic_xchg_at(LIRAccess& access, LIRItem& value); virtual LIR_Opr atomic_add_at(LIRAccess& access, LIRItem& value); + virtual LIR_Opr resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj); + virtual void generate_c1_runtime_stubs(BufferBlob* buffer_blob) {} }; diff --git a/src/hotspot/share/gc/shared/c1/modRefBarrierSetC1.cpp b/src/hotspot/share/gc/shared/c1/modRefBarrierSetC1.cpp index 0cb2951f455..de3a26c1ed5 100644 --- a/src/hotspot/share/gc/shared/c1/modRefBarrierSetC1.cpp +++ b/src/hotspot/share/gc/shared/c1/modRefBarrierSetC1.cpp @@ -86,7 +86,7 @@ LIR_Opr ModRefBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& LIR_Opr ModRefBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) { DecoratorSet decorators = access.decorators(); bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0; - bool is_write = (decorators & C1_WRITE_ACCESS) != 0; + bool is_write = (decorators & ACCESS_WRITE) != 0; bool is_array = (decorators & IS_ARRAY) != 0; bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0; bool precise = is_array || on_anonymous; diff --git a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp index 717a14157e5..0c8bfa7f7df 100644 --- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp +++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp @@ -104,14 +104,18 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) con bool pinned = (decorators & C2_PINNED_LOAD) != 0; bool in_native = (decorators & IN_NATIVE) != 0; - assert(!in_native, "not supported yet"); MemNode::MemOrd mo = access.mem_node_mo(); LoadNode::ControlDependency dep = pinned ? LoadNode::Pinned : LoadNode::DependsOnlyOnTest; Node* control = control_dependent ? 
kit->control() : NULL; - Node* load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo, - dep, requires_atomic_access, unaligned, mismatched); + Node* load; + if (in_native) { + load = kit->make_load(control, adr, val_type, access.type(), mo); + } else { + load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo, + dep, requires_atomic_access, unaligned, mismatched); + } access.set_raw_access(load); return load; @@ -119,10 +123,11 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) con class C2AccessFence: public StackObj { C2Access& _access; + Node* _leading_membar; public: C2AccessFence(C2Access& access) : - _access(access) { + _access(access), _leading_membar(NULL) { GraphKit* kit = access.kit(); DecoratorSet decorators = access.decorators(); @@ -139,12 +144,12 @@ public: // into actual barriers on most machines, but we still need rest of // compiler to respect ordering. if (is_release) { - kit->insert_mem_bar(Op_MemBarRelease); + _leading_membar = kit->insert_mem_bar(Op_MemBarRelease); } else if (is_volatile) { if (support_IRIW_for_not_multiple_copy_atomic_cpu) { - kit->insert_mem_bar(Op_MemBarVolatile); + _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile); } else { - kit->insert_mem_bar(Op_MemBarRelease); + _leading_membar = kit->insert_mem_bar(Op_MemBarRelease); } } } else if (is_write) { @@ -152,7 +157,7 @@ public: // floating down past the volatile write. Also prevents commoning // another volatile read. if (is_volatile || is_release) { - kit->insert_mem_bar(Op_MemBarRelease); + _leading_membar = kit->insert_mem_bar(Op_MemBarRelease); } } else { // Memory barrier to prevent normal and 'unsafe' accesses from @@ -161,7 +166,7 @@ public: // so there's no problems making a strong assert about mixing users // of safe & unsafe memory. if (is_volatile && support_IRIW_for_not_multiple_copy_atomic_cpu) { - kit->insert_mem_bar(Op_MemBarVolatile); + _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile); } } @@ -196,20 +201,30 @@ public: if (is_atomic) { if (is_acquire || is_volatile) { - kit->insert_mem_bar(Op_MemBarAcquire); + Node* n = _access.raw_access(); + Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n); + if (_leading_membar != NULL) { + MemBarNode::set_load_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar()); + } } } else if (is_write) { // If not multiple copy atomic, we do the MemBarVolatile before the load. 
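+ // The trailing membar node is kept so it can be paired with the leading release barrier via MemBarNode::set_store_pair() below.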
if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) { - kit->insert_mem_bar(Op_MemBarVolatile); // Use fat membar + Node* n = _access.raw_access(); + Node* mb = kit->insert_mem_bar(Op_MemBarVolatile, n); // Use fat membar + if (_leading_membar != NULL) { + MemBarNode::set_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar()); + } } } else { if (is_volatile || is_acquire) { - kit->insert_mem_bar(Op_MemBarAcquire, _access.raw_access()); + Node* n = _access.raw_access(); + assert(_leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected"); + Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n); + mb->as_MemBar()->set_trailing_load(); } } } - }; Node* BarrierSetC2::store_at(C2Access& access, C2AccessValue& val) const { diff --git a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp index 6ea28d55073..982fabefa8c 100644 --- a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp +++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp @@ -194,6 +194,7 @@ public: virtual bool array_copy_requires_gc_barriers(BasicType type) const { return false; } // Support for GC barriers emitted during parsing + virtual bool has_load_barriers() const { return false; } virtual bool is_gc_barrier_node(Node* node) const { return false; } virtual Node* step_over_gc_barrier(Node* c) const { return c; } diff --git a/src/hotspot/share/gc/shared/oopStorage.cpp b/src/hotspot/share/gc/shared/oopStorage.cpp index 91729f2af78..ac66a13375c 100644 --- a/src/hotspot/share/gc/shared/oopStorage.cpp +++ b/src/hotspot/share/gc/shared/oopStorage.cpp @@ -43,7 +43,6 @@ #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" #include "utilities/ostream.hpp" -#include "utilities/spinYield.hpp" OopStorage::AllocationListEntry::AllocationListEntry() : _prev(NULL), _next(NULL) {} @@ -495,48 +494,6 @@ bool OopStorage::expand_active_array() { return true; } -OopStorage::ProtectActive::ProtectActive() : _enter(0), _exit() {} - -// Begin read-side critical section. -uint OopStorage::ProtectActive::read_enter() { - return Atomic::add(2u, &_enter); -} - -// End read-side critical section. -void OopStorage::ProtectActive::read_exit(uint enter_value) { - Atomic::add(2u, &_exit[enter_value & 1]); -} - -// Wait until all readers that entered the critical section before -// synchronization have exited that critical section. -void OopStorage::ProtectActive::write_synchronize() { - SpinYield spinner; - // Determine old and new exit counters, based on bit0 of the - // on-entry _enter counter. - uint value = OrderAccess::load_acquire(&_enter); - volatile uint* new_ptr = &_exit[(value + 1) & 1]; - // Atomically change the in-use exit counter to the new counter, by - // adding 1 to the _enter counter (flipping bit0 between 0 and 1) - // and initializing the new exit counter to that enter value. Note: - // The new exit counter is not being used by read operations until - // this change succeeds. - uint old; - do { - old = value; - *new_ptr = ++value; - value = Atomic::cmpxchg(value, &_enter, old); - } while (old != value); - // Readers that entered the critical section before we changed the - // selected exit counter will use the old exit counter. Readers - // entering after the change will use the new exit counter. Wait - // for all the critical sections started before the change to - // complete, e.g. for the value of old_ptr to catch up with old. 
- volatile uint* old_ptr = &_exit[old & 1]; - while (old != OrderAccess::load_acquire(old_ptr)) { - spinner.wait(); - } -} - // Make new_array the _active_array. Increments new_array's refcount // to account for the new reference. The assignment is atomic wrto // obtain_active_array; once this function returns, it is safe for the @@ -548,7 +505,10 @@ void OopStorage::replace_active_array(ActiveArray* new_array) { // Install new_array, ensuring its initialization is complete first. OrderAccess::release_store(&_active_array, new_array); // Wait for any readers that could read the old array from _active_array. - _protect_active.write_synchronize(); + // Can't use GlobalCounter here, because this is called from allocate(), + // which may be called in the scope of a GlobalCounter critical section + // when inserting a StringTable entry. + _protect_active.synchronize(); // All obtain critical sections that could see the old array have // completed, having incremented the refcount of the old array. The // caller can now safely relinquish the old array. @@ -560,10 +520,9 @@ void OopStorage::replace_active_array(ActiveArray* new_array) { // _active_array. The caller must relinquish the array when done // using it. OopStorage::ActiveArray* OopStorage::obtain_active_array() const { - uint enter_value = _protect_active.read_enter(); + SingleWriterSynchronizer::CriticalSection cs(&_protect_active); ActiveArray* result = OrderAccess::load_acquire(&_active_array); result->increment_refcount(); - _protect_active.read_exit(enter_value); return result; } diff --git a/src/hotspot/share/gc/shared/oopStorage.hpp b/src/hotspot/share/gc/shared/oopStorage.hpp index f13abcb190d..67216180839 100644 --- a/src/hotspot/share/gc/shared/oopStorage.hpp +++ b/src/hotspot/share/gc/shared/oopStorage.hpp @@ -29,6 +29,7 @@ #include "oops/oop.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" +#include "utilities/singleWriterSynchronizer.hpp" class Mutex; class outputStream; @@ -203,19 +204,6 @@ NOT_AIX( private: ) void unlink(const Block& block); }; - // RCU-inspired protection of access to _active_array. - class ProtectActive { - volatile uint _enter; - volatile uint _exit[2]; - - public: - ProtectActive(); - - uint read_enter(); - void read_exit(uint enter_value); - void write_synchronize(); - }; - private: const char* _name; ActiveArray* _active_array; @@ -229,7 +217,7 @@ private: volatile size_t _allocation_count; // Protection for _active_array. - mutable ProtectActive _protect_active; + mutable SingleWriterSynchronizer _protect_active; // mutable because this gets set even for const iteration. mutable bool _concurrent_iteration_active; diff --git a/src/hotspot/share/gc/shared/parallelCleaning.cpp b/src/hotspot/share/gc/shared/parallelCleaning.cpp new file mode 100644 index 00000000000..8bd9e667c78 --- /dev/null +++ b/src/hotspot/share/gc/shared/parallelCleaning.cpp @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "classfile/symbolTable.hpp" +#include "classfile/stringTable.hpp" +#include "code/codeCache.hpp" +#include "gc/shared/parallelCleaning.hpp" +#include "memory/resourceArea.hpp" +#include "logging/log.hpp" + +StringCleaningTask::StringCleaningTask(BoolObjectClosure* is_alive, StringDedupUnlinkOrOopsDoClosure* dedup_closure, bool process_strings) : + AbstractGangTask("String Unlinking"), + _is_alive(is_alive), + _dedup_closure(dedup_closure), + _par_state_string(StringTable::weak_storage()), + _initial_string_table_size((int) StringTable::the_table()->table_size()), + _process_strings(process_strings), _strings_processed(0), _strings_removed(0) { + + if (process_strings) { + StringTable::reset_dead_counter(); + } +} + +StringCleaningTask::~StringCleaningTask() { + log_info(gc, stringtable)( + "Cleaned string table, " + "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed", + strings_processed(), strings_removed()); + if (_process_strings) { + StringTable::finish_dead_counter(); + } +} + +void StringCleaningTask::work(uint worker_id) { + int strings_processed = 0; + int strings_removed = 0; + if (_process_strings) { + StringTable::possibly_parallel_unlink(&_par_state_string, _is_alive, &strings_processed, &strings_removed); + Atomic::add(strings_processed, &_strings_processed); + Atomic::add(strings_removed, &_strings_removed); + } + if (_dedup_closure != NULL) { + StringDedup::parallel_unlink(_dedup_closure, worker_id); + } +} + +CodeCacheUnloadingTask::CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) : + _is_alive(is_alive), + _unloading_occurred(unloading_occurred), + _num_workers(num_workers), + _first_nmethod(NULL), + _claimed_nmethod(NULL), + _postponed_list(NULL), + _num_entered_barrier(0) { + CompiledMethod::increase_unloading_clock(); + // Get first alive nmethod + CompiledMethodIterator iter = CompiledMethodIterator(); + if(iter.next_alive()) { + _first_nmethod = iter.method(); + } + _claimed_nmethod = _first_nmethod; +} + +CodeCacheUnloadingTask::~CodeCacheUnloadingTask() { + CodeCache::verify_clean_inline_caches(); + + CodeCache::set_needs_cache_clean(false); + guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be"); + + CodeCache::verify_icholder_relocations(); +} + +Monitor* CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock", false, Monitor::_safepoint_check_never); + +void CodeCacheUnloadingTask::add_to_postponed_list(CompiledMethod* nm) { + CompiledMethod* old; + do { + old = _postponed_list; + nm->set_unloading_next(old); + } while (Atomic::cmpxchg(nm, &_postponed_list, old) != old); +} + +void CodeCacheUnloadingTask::clean_nmethod(CompiledMethod* nm) { + bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred); + + if (postponed) { + // This nmethod referred to an nmethod that has not been cleaned/unloaded yet. 
+ add_to_postponed_list(nm); + } + + // Mark that this nmethod has been cleaned/unloaded. + // After this call, it will be safe to ask if this nmethod was unloaded or not. + nm->set_unloading_clock(CompiledMethod::global_unloading_clock()); +} + +void CodeCacheUnloadingTask::clean_nmethod_postponed(CompiledMethod* nm) { + nm->do_unloading_parallel_postponed(); +} + +void CodeCacheUnloadingTask::claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) { + CompiledMethod* first; + CompiledMethodIterator last; + + do { + *num_claimed_nmethods = 0; + + first = _claimed_nmethod; + last = CompiledMethodIterator(first); + + if (first != NULL) { + + for (int i = 0; i < MaxClaimNmethods; i++) { + if (!last.next_alive()) { + break; + } + claimed_nmethods[i] = last.method(); + (*num_claimed_nmethods)++; + } + } + + } while (Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first); +} + +CompiledMethod* CodeCacheUnloadingTask::claim_postponed_nmethod() { + CompiledMethod* claim; + CompiledMethod* next; + + do { + claim = _postponed_list; + if (claim == NULL) { + return NULL; + } + + next = claim->unloading_next(); + + } while (Atomic::cmpxchg(next, &_postponed_list, claim) != claim); + + return claim; +} + +void CodeCacheUnloadingTask::barrier_mark(uint worker_id) { + MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag); + _num_entered_barrier++; + if (_num_entered_barrier == _num_workers) { + ml.notify_all(); + } +} + +void CodeCacheUnloadingTask::barrier_wait(uint worker_id) { + if (_num_entered_barrier < _num_workers) { + MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag); + while (_num_entered_barrier < _num_workers) { + ml.wait(Mutex::_no_safepoint_check_flag, 0, false); + } + } +} + +void CodeCacheUnloadingTask::work_first_pass(uint worker_id) { + // The first nmethod is claimed by the first worker. + if (worker_id == 0 && _first_nmethod != NULL) { + clean_nmethod(_first_nmethod); + _first_nmethod = NULL; + } + + int num_claimed_nmethods; + CompiledMethod* claimed_nmethods[MaxClaimNmethods]; + + while (true) { + claim_nmethods(claimed_nmethods, &num_claimed_nmethods); + + if (num_claimed_nmethods == 0) { + break; + } + + for (int i = 0; i < num_claimed_nmethods; i++) { + clean_nmethod(claimed_nmethods[i]); + } + } +} + +void CodeCacheUnloadingTask::work_second_pass(uint worker_id) { + CompiledMethod* nm; + // Take care of postponed nmethods. + while ((nm = claim_postponed_nmethod()) != NULL) { + clean_nmethod_postponed(nm); + } +} + +KlassCleaningTask::KlassCleaningTask() : + _clean_klass_tree_claimed(0), + _klass_iterator() { +} + +bool KlassCleaningTask::claim_clean_klass_tree_task() { + if (_clean_klass_tree_claimed) { + return false; + } + + return Atomic::cmpxchg(1, &_clean_klass_tree_claimed, 0) == 0; +} + +InstanceKlass* KlassCleaningTask::claim_next_klass() { + Klass* klass; + do { + klass = _klass_iterator.next_klass(); + } while (klass != NULL && !klass->is_instance_klass()); + + // this can be null so don't call InstanceKlass::cast + return static_cast<InstanceKlass*>(klass); +} + +void KlassCleaningTask::work() { + ResourceMark rm; + + // One worker will clean the subklass/sibling klass tree.
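+ // Exactly one worker wins the cmpxchg in claim_clean_klass_tree_task() and performs this step.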
+ if (claim_clean_klass_tree_task()) { + Klass::clean_subklass_tree(); + } + + // All workers will help clean the classes. + InstanceKlass* klass; + while ((klass = claim_next_klass()) != NULL) { + clean_klass(klass); + } +} + +ParallelCleaningTask::ParallelCleaningTask(BoolObjectClosure* is_alive, + StringDedupUnlinkOrOopsDoClosure* dedup_closure, uint num_workers, bool unloading_occurred) : + AbstractGangTask("Parallel Cleaning"), + _unloading_occurred(unloading_occurred), + _string_task(is_alive, StringDedup::is_enabled() ? dedup_closure : NULL, true), + _code_cache_task(num_workers, is_alive, unloading_occurred), + _klass_cleaning_task() { +} + +// The parallel work done by all worker threads. +void ParallelCleaningTask::work(uint worker_id) { + // Do first pass of code cache cleaning. + _code_cache_task.work_first_pass(worker_id); + + // Let the threads mark that the first pass is done. + _code_cache_task.barrier_mark(worker_id); + + // Clean the Strings and Symbols. + _string_task.work(worker_id); + + // Wait for all workers to finish the first code cache cleaning pass. + _code_cache_task.barrier_wait(worker_id); + + // Do the second code cache cleaning work, which relies on + // the liveness information gathered during the first pass. + _code_cache_task.work_second_pass(worker_id); + + // Clean all klasses that were not unloaded. + // The weak metadata in klass doesn't need to be + // processed if there was no unloading. + if (_unloading_occurred) { + _klass_cleaning_task.work(); + } +} diff --git a/src/hotspot/share/gc/shared/parallelCleaning.hpp b/src/hotspot/share/gc/shared/parallelCleaning.hpp new file mode 100644 index 00000000000..4e59149b370 --- /dev/null +++ b/src/hotspot/share/gc/shared/parallelCleaning.hpp @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_PARALLELCLEANING_HPP
+#define SHARE_VM_GC_SHARED_PARALLELCLEANING_HPP
+
+#include "gc/shared/oopStorageParState.hpp"
+#include "gc/shared/stringdedup/stringDedup.hpp"
+#include "gc/shared/workgroup.hpp"
+
+class ParallelCleaningTask;
+
+class StringCleaningTask : public AbstractGangTask {
+private:
+  BoolObjectClosure* _is_alive;
+  StringDedupUnlinkOrOopsDoClosure* const _dedup_closure;
+
+  OopStorage::ParState<false, false> _par_state_string;
+
+  int _initial_string_table_size;
+
+  bool _process_strings;
+  int _strings_processed;
+  int _strings_removed;
+
+public:
+  StringCleaningTask(BoolObjectClosure* is_alive, StringDedupUnlinkOrOopsDoClosure* dedup_closure, bool process_strings);
+  ~StringCleaningTask();
+
+  void work(uint worker_id);
+
+  size_t strings_processed() const { return (size_t)_strings_processed; }
+  size_t strings_removed() const { return (size_t)_strings_removed; }
+};
+
+class CodeCacheUnloadingTask {
+private:
+  static Monitor* _lock;
+
+  BoolObjectClosure* const _is_alive;
+  const bool _unloading_occurred;
+  const uint _num_workers;
+
+  // Variables used to claim nmethods.
+  CompiledMethod* _first_nmethod;
+  CompiledMethod* volatile _claimed_nmethod;
+
+  // The list of nmethods that need to be processed by the second pass.
+  CompiledMethod* volatile _postponed_list;
+  volatile uint _num_entered_barrier;
+
+public:
+  CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred);
+  ~CodeCacheUnloadingTask();
+
+private:
+  void add_to_postponed_list(CompiledMethod* nm);
+  void clean_nmethod(CompiledMethod* nm);
+  void clean_nmethod_postponed(CompiledMethod* nm);
+
+  static const int MaxClaimNmethods = 16;
+
+  void claim_nmethods(CompiledMethod** claimed_nmethods, int* num_claimed_nmethods);
+  CompiledMethod* claim_postponed_nmethod();
+public:
+  // Mark that we're done with the first pass of nmethod cleaning.
+  void barrier_mark(uint worker_id);
+
+  // See if we have to wait for the other workers to
+  // finish their first-pass nmethod cleaning work.
+  void barrier_wait(uint worker_id);
+
+  // Cleaning and unloading of nmethods. Some work has to be postponed
+  // to the second pass, when we know which nmethods survive.
+  void work_first_pass(uint worker_id);
+  void work_second_pass(uint worker_id);
+};
+
+
+class KlassCleaningTask : public StackObj {
+  volatile int _clean_klass_tree_claimed;
+  ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
+
+public:
+  KlassCleaningTask();
+
+private:
+  bool claim_clean_klass_tree_task();
+  InstanceKlass* claim_next_klass();
+
+public:
+
+  void clean_klass(InstanceKlass* ik) {
+    ik->clean_weak_instanceklass_links();
+  }
+
+  void work();
+};
+
+// To minimize the remark pause times, the tasks below are done in parallel.
+class ParallelCleaningTask : public AbstractGangTask {
+private:
+  bool _unloading_occurred;
+  StringCleaningTask _string_task;
+  CodeCacheUnloadingTask _code_cache_task;
+  KlassCleaningTask _klass_cleaning_task;
+
+public:
+  // The constructor is run in the VMThread.
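+  // A minimal usage sketch (illustrative assumptions: the caller owns a
+  // WorkGang* named "workers" plus the two closures; these names are not
+  // part of this change):
+  //
+  //   ParallelCleaningTask task(&is_alive, &dedup_closure,
+  //                             workers->active_workers(),
+  //                             purged_classes);
+  //   workers->run_task(&task);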
+  ParallelCleaningTask(BoolObjectClosure* is_alive, StringDedupUnlinkOrOopsDoClosure* dedup_closure,
+                       uint num_workers, bool unloading_occurred);
+
+  void work(uint worker_id);
+};
+
+#endif // SHARE_VM_GC_SHARED_PARALLELCLEANING_HPP
diff --git a/src/hotspot/share/gc/g1/ptrQueue.cpp b/src/hotspot/share/gc/shared/ptrQueue.cpp
similarity index 99%
rename from src/hotspot/share/gc/g1/ptrQueue.cpp
rename to src/hotspot/share/gc/shared/ptrQueue.cpp
index 75d9ae83c2a..b6b6f030bf9 100644
--- a/src/hotspot/share/gc/g1/ptrQueue.cpp
+++ b/src/hotspot/share/gc/shared/ptrQueue.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/g1/ptrQueue.hpp"
+#include "gc/shared/ptrQueue.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/mutex.hpp"
diff --git a/src/hotspot/share/gc/g1/ptrQueue.hpp b/src/hotspot/share/gc/shared/ptrQueue.hpp
similarity index 99%
rename from src/hotspot/share/gc/g1/ptrQueue.hpp
rename to src/hotspot/share/gc/shared/ptrQueue.hpp
index e449906828c..a93c9191250 100644
--- a/src/hotspot/share/gc/g1/ptrQueue.hpp
+++ b/src/hotspot/share/gc/shared/ptrQueue.hpp
@@ -22,8 +22,8 @@
  *
  */
 
-#ifndef SHARE_VM_GC_G1_PTRQUEUE_HPP
-#define SHARE_VM_GC_G1_PTRQUEUE_HPP
+#ifndef SHARE_GC_SHARED_PTRQUEUE_HPP
+#define SHARE_GC_SHARED_PTRQUEUE_HPP
 
 #include "utilities/align.hpp"
 #include "utilities/sizes.hpp"
@@ -368,4 +368,4 @@ public:
   void notify_if_necessary();
 };
 
-#endif // SHARE_VM_GC_G1_PTRQUEUE_HPP
+#endif // SHARE_GC_SHARED_PTRQUEUE_HPP
diff --git a/src/hotspot/share/gc/g1/satbMarkQueue.cpp b/src/hotspot/share/gc/shared/satbMarkQueue.cpp
similarity index 99%
rename from src/hotspot/share/gc/g1/satbMarkQueue.cpp
rename to src/hotspot/share/gc/shared/satbMarkQueue.cpp
index b4d96a611ff..4ae624abe2f 100644
--- a/src/hotspot/share/gc/g1/satbMarkQueue.cpp
+++ b/src/hotspot/share/gc/shared/satbMarkQueue.cpp
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/g1/satbMarkQueue.hpp"
+#include "gc/shared/satbMarkQueue.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
diff --git a/src/hotspot/share/gc/g1/satbMarkQueue.hpp b/src/hotspot/share/gc/shared/satbMarkQueue.hpp
similarity index 97%
rename from src/hotspot/share/gc/g1/satbMarkQueue.hpp
rename to src/hotspot/share/gc/shared/satbMarkQueue.hpp
index d23b7417fb6..829e26ed2d7 100644
--- a/src/hotspot/share/gc/g1/satbMarkQueue.hpp
+++ b/src/hotspot/share/gc/shared/satbMarkQueue.hpp
@@ -22,10 +22,10 @@
  *
  */
 
-#ifndef SHARE_VM_GC_G1_SATBMARKQUEUE_HPP
-#define SHARE_VM_GC_G1_SATBMARKQUEUE_HPP
+#ifndef SHARE_GC_SHARED_SATBMARKQUEUE_HPP
+#define SHARE_GC_SHARED_SATBMARKQUEUE_HPP
 
-#include "gc/g1/ptrQueue.hpp"
+#include "gc/shared/ptrQueue.hpp"
 #include "memory/allocation.hpp"
 
 class JavaThread;
@@ -186,4 +186,4 @@ inline void SATBMarkQueue::apply_filter(Filter filter_out) {
   this->set_index(dst - buf);
 }
 
-#endif // SHARE_VM_GC_G1_SATBMARKQUEUE_HPP
+#endif // SHARE_GC_SHARED_SATBMARKQUEUE_HPP
diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupThread.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupThread.cpp
index 427bf05c92c..637f8f862d7 100644
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupThread.cpp
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupThread.cpp
@@ -59,8 +59,8 @@ class StringDedupSharedClosure: public OopClosure {
 public:
   StringDedupSharedClosure(StringDedupStat* stat) : _stat(stat) {}
 
-  virtual void do_oop(oop* p) { ShouldNotReachHere(); }
-  virtual void do_oop(narrowOop* p) {
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+  virtual void do_oop(oop* p) {
     oop java_string = RawAccess<>::oop_load(p);
     StringDedupTable::deduplicate(java_string, _stat);
   }
diff --git a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
index a1b43005cf0..c627238882d 100644
--- a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
@@ -190,8 +190,8 @@ static void pre_load_barrier(LIRAccess& access) {
   // Downgrade access to MO_UNORDERED
   decorators = (decorators & ~MO_DECORATOR_MASK) | MO_UNORDERED;
 
-  // Remove C1_WRITE_ACCESS
-  decorators = (decorators & ~C1_WRITE_ACCESS);
+  // Remove ACCESS_WRITE
+  decorators = (decorators & ~ACCESS_WRITE);
 
   // Generate synthetic load at
   access.gen()->access_load_at(decorators,
diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
index ec3954eba59..73817624fda 100644
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
@@ -68,7 +68,26 @@ ZBarrierSetC2State* ZBarrierSetC2::state() const {
 }
 
 bool ZBarrierSetC2::is_gc_barrier_node(Node* node) const {
-  return node->is_LoadBarrier();
+  // 1. This step follows potential oop projections of a load barrier before expansion
+  if (node->is_Proj()) {
+    node = node->in(0);
+  }
+
+  // 2. This step checks for unexpanded load barriers
+  if (node->is_LoadBarrier()) {
+    return true;
+  }
+
+  // 3. This step checks for the phi corresponding to an optimized load barrier expansion
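+  //    (LoadBarrierSlowReg/LoadBarrierWeakSlowReg nodes are only created by
+  //    barrier expansion, so a phi over one identifies an expanded barrier.)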
+  if (node->is_Phi()) {
+    PhiNode* phi = node->as_Phi();
+    Node* n = phi->in(1);
+    if (n != NULL && (n->is_LoadBarrierSlowReg() || n->is_LoadBarrierWeakSlowReg())) {
+      return true;
+    }
+  }
+
+  return false;
 }
 
 void ZBarrierSetC2::register_potential_barrier_node(Node* node) const {
@@ -109,7 +128,7 @@ void ZBarrierSetC2::find_dominating_barriers(PhaseIterGVN& igvn) {
   ZBarrierSetC2State* s = bs->state();
   if (s->load_barrier_count() >= 2) {
     Compile::TracePhase tp("idealLoop", &C->timers[Phase::_t_idealLoop]);
-    PhaseIdealLoop ideal_loop(igvn, true, false, true);
+    PhaseIdealLoop ideal_loop(igvn, LoopOptsLastRound);
     if (C->major_progress()) C->print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
   }
 }
@@ -637,7 +656,10 @@ Node* ZBarrierSetC2::load_barrier(GraphKit* kit, Node* val, Node* adr, bool weak
     if (barrier == transformed_barrier) {
       kit->set_control(gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control)));
     }
-    return gvn.transform(new ProjNode(transformed_barrier, LoadBarrierNode::Oop));
+    Node* result = gvn.transform(new ProjNode(transformed_barrier, LoadBarrierNode::Oop));
+    assert(is_gc_barrier_node(result), "sanity");
+    assert(step_over_gc_barrier(result) == val, "sanity");
+    return result;
   } else {
     return val;
   }
@@ -963,6 +985,9 @@ void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBa
   traverse(preceding_barrier_node, result_region, result_phi, -1);
 #endif
 
+  assert(is_gc_barrier_node(result_phi), "sanity");
+  assert(step_over_gc_barrier(result_phi) == in_val, "sanity");
+
   return;
 }
 
@@ -1376,6 +1401,32 @@ void ZBarrierSetC2::loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node,
   }
 }
 
+Node* ZBarrierSetC2::step_over_gc_barrier(Node* c) const {
+  Node* node = c;
+
+  // 1. This step follows potential oop projections of a load barrier before expansion
+  if (node->is_Proj()) {
+    node = node->in(0);
+  }
+
+  // 2. This step checks for unexpanded load barriers
+  if (node->is_LoadBarrier()) {
+    return node->in(LoadBarrierNode::Oop);
+  }
+
+  // 3. This step checks for the phi corresponding to an optimized load barrier expansion
+  if (node->is_Phi()) {
+    PhiNode* phi = node->as_Phi();
+    Node* n = phi->in(1);
+    if (n != NULL && (n->is_LoadBarrierSlowReg() || n->is_LoadBarrierWeakSlowReg())) {
+      assert(c == node, "projections from step 1 should only be seen before macro expansion");
+      return phi->in(2);
+    }
+  }
+
+  return c;
+}
+
 // == Verification ==
 
 #ifdef ASSERT
diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp
index 165ae69d75a..d74b680b348 100644
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp
@@ -101,7 +101,9 @@ public:
                          const TypePtr* t,
                          MemOrd mo,
                          ControlDependency control_dependency = DependsOnlyOnTest)
-    : LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
+    : LoadPNode(c, mem, adr, at, t, mo, control_dependency) {
+    init_class_id(Class_LoadBarrierSlowReg);
+  }
 
   virtual const char * name() {
     return "LoadBarrierSlowRegNode";
@@ -123,7 +125,9 @@ public:
                          const TypePtr* t,
                          MemOrd mo,
                          ControlDependency control_dependency = DependsOnlyOnTest)
-    : LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
+    : LoadPNode(c, mem, adr, at, t, mo, control_dependency) {
+    init_class_id(Class_LoadBarrierWeakSlowReg);
+  }
 
   virtual const char * name() {
     return "LoadBarrierWeakSlowRegNode";
@@ -182,6 +186,7 @@ public:
                              bool oop_reload_allowed = true) const;
 
   virtual void* create_barrier_state(Arena* comp_arena) const;
+  virtual bool has_load_barriers() const { return true; }
   virtual bool is_gc_barrier_node(Node* node) const;
   virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
   virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful) const;
@@ -190,7 +195,7 @@ public:
   virtual void register_potential_barrier_node(Node* node) const;
   virtual void unregister_potential_barrier_node(Node* node) const;
   virtual bool array_copy_requires_gc_barriers(BasicType type) const { return true; }
-  virtual Node* step_over_gc_barrier(Node* c) const { return c; }
+  virtual Node* step_over_gc_barrier(Node* c) const;
 
   // If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
   // expanded later, then now is the time to do so.
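  // For ZGC these kept nodes are the not-yet-expanded load barriers
  // (see expand_loadbarrier_optimized earlier in this change).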
  virtual bool expand_macro_nodes(PhaseMacroExpand* macro) const;
diff --git a/src/hotspot/share/gc/z/zBarrierSet.cpp b/src/hotspot/share/gc/z/zBarrierSet.cpp
index 9329bb8c300..26cfb411e7e 100644
--- a/src/hotspot/share/gc/z/zBarrierSet.cpp
+++ b/src/hotspot/share/gc/z/zBarrierSet.cpp
@@ -22,23 +22,27 @@
  */
 
 #include "precompiled.hpp"
-#ifdef COMPILER1
-#include "gc/z/c1/zBarrierSetC1.hpp"
-#endif
-#ifdef COMPILER2
-#include "gc/z/c2/zBarrierSetC2.hpp"
-#endif
 #include "gc/z/zBarrierSet.hpp"
 #include "gc/z/zBarrierSetAssembler.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zThreadLocalData.hpp"
 #include "runtime/thread.hpp"
+#include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "gc/z/c1/zBarrierSetC1.hpp"
+#endif
+#ifdef COMPILER2
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#endif
+
+class ZBarrierSetC1;
+class ZBarrierSetC2;
 
 ZBarrierSet::ZBarrierSet() :
     BarrierSet(make_barrier_set_assembler<ZBarrierSetAssembler>(),
-               COMPILER1_PRESENT( make_barrier_set_c1<ZBarrierSetC1>() ) NOT_COMPILER1(NULL),
-               COMPILER2_PRESENT( make_barrier_set_c2<ZBarrierSetC2>() ) NOT_COMPILER2(NULL),
+               make_barrier_set_c1<ZBarrierSetC1>(),
+               make_barrier_set_c2<ZBarrierSetC2>(),
                BarrierSet::FakeRtti(BarrierSet::ZBarrierSet)) {}
 
 ZBarrierSetAssembler* ZBarrierSet::assembler() {
diff --git a/src/hotspot/share/gc/z/zRelocationSetSelector.cpp b/src/hotspot/share/gc/z/zRelocationSetSelector.cpp
index 222529f14ce..84b3e0cc351 100644
--- a/src/hotspot/share/gc/z/zRelocationSetSelector.cpp
+++ b/src/hotspot/share/gc/z/zRelocationSetSelector.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,34 +63,34 @@ void ZRelocationSetSelectorGroup::semi_sort() {
   const size_t partition_size_shift = exact_log2(partition_size);
   const size_t npages = _registered_pages.size();
 
-  size_t partition_slots[npartitions];
-  size_t partition_finger[npartitions];
+  // Partition slots/fingers
+  size_t partitions[npartitions];
 
   // Allocate destination array
   _sorted_pages = REALLOC_C_HEAP_ARRAY(const ZPage*, _sorted_pages, npages, mtGC);
   debug_only(memset(_sorted_pages, 0, npages * sizeof(ZPage*)));
 
   // Calculate partition slots
-  memset(partition_slots, 0, sizeof(partition_slots));
+  memset(partitions, 0, sizeof(partitions));
 
   ZArrayIterator<const ZPage*> iter1(&_registered_pages);
   for (const ZPage* page; iter1.next(&page);) {
     const size_t index = page->live_bytes() >> partition_size_shift;
-    partition_slots[index]++;
+    partitions[index]++;
   }
 
-  // Calculate accumulated partition slots and fingers
-  size_t prev_partition_slots = 0;
+  // Calculate partition fingers
+  size_t finger = 0;
   for (size_t i = 0; i < npartitions; i++) {
-    partition_slots[i] += prev_partition_slots;
-    partition_finger[i] = prev_partition_slots;
-    prev_partition_slots = partition_slots[i];
+    const size_t slots = partitions[i];
+    partitions[i] = finger;
+    finger += slots;
   }
 
   // Sort pages into partitions
   ZArrayIterator<const ZPage*> iter2(&_registered_pages);
   for (const ZPage* page; iter2.next(&page);) {
     const size_t index = page->live_bytes() >> partition_size_shift;
-    const size_t finger = partition_finger[index]++;
+    const size_t finger = partitions[index]++;
     assert(_sorted_pages[finger] == NULL, "Invalid finger");
     _sorted_pages[finger] = page;
   }
diff --git a/src/hotspot/share/gc/z/zRootsIterator.cpp b/src/hotspot/share/gc/z/zRootsIterator.cpp
index 4117f6e448b..58493c212c0 100644
--- a/src/hotspot/share/gc/z/zRootsIterator.cpp
+++ b/src/hotspot/share/gc/z/zRootsIterator.cpp
@@ -24,7 +24,6 @@
 #include "precompiled.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/stringTable.hpp"
-#include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "compiler/oopMap.hpp"
@@ -74,7 +73,6 @@ static const ZStatSubPhase ZSubPhasePauseWeakRootsVMWeakHandles("Pause Weak Root
 static const ZStatSubPhase ZSubPhasePauseWeakRootsJNIWeakHandles("Pause Weak Roots JNIWeakHandles");
 static const ZStatSubPhase ZSubPhasePauseWeakRootsJVMTIWeakExport("Pause Weak Roots JVMTIWeakExport");
 static const ZStatSubPhase ZSubPhasePauseWeakRootsJFRWeak("Pause Weak Roots JFRWeak");
-static const ZStatSubPhase ZSubPhasePauseWeakRootsSymbolTable("Pause Weak Roots SymbolTable");
 static const ZStatSubPhase ZSubPhasePauseWeakRootsStringTable("Pause Weak Roots StringTable");
 
 static const ZStatSubPhase ZSubPhaseConcurrentWeakRoots("Concurrent Weak Roots");
@@ -302,11 +300,9 @@ ZWeakRootsIterator::ZWeakRootsIterator() :
     _jfr_weak(this),
     _vm_weak_handles(this),
     _jni_weak_handles(this),
-    _symbol_table(this),
     _string_table(this) {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
   ZStatTimer timer(ZSubPhasePauseWeakRootsSetup);
-  SymbolTable::clear_parallel_claimed_index();
   StringTable::reset_dead_counter();
 }
 
@@ -337,12 +333,6 @@ void ZWeakRootsIterator::do_jfr_weak(BoolObjectClosure* is_alive, OopClosure* cl
 #endif
 }
 
-void ZWeakRootsIterator::do_symbol_table(BoolObjectClosure* is_alive, OopClosure* cl) {
-  ZStatTimer timer(ZSubPhasePauseWeakRootsSymbolTable);
-  int dummy;
-  SymbolTable::possibly_parallel_unlink(&dummy, &dummy);
-}
-
 class ZStringTableDeadCounterBoolObjectClosure : public BoolObjectClosure {
 private:
   BoolObjectClosure* const _cl;
@@ -375,9 +365,6 @@ void ZWeakRootsIterator::do_string_table(BoolObjectClosure* is_alive, OopClosure
 void ZWeakRootsIterator::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* cl) {
   ZStatTimer timer(ZSubPhasePauseWeakRoots);
-  if (ZSymbolTableUnloading) {
-    _symbol_table.weak_oops_do(is_alive, cl);
-  }
   if (ZWeakRoots) {
     _jvmti_weak_export.weak_oops_do(is_alive, cl);
     _jfr_weak.weak_oops_do(is_alive, cl);
diff --git a/src/hotspot/share/gc/z/zRootsIterator.hpp b/src/hotspot/share/gc/z/zRootsIterator.hpp
index f3bc1b8ab9d..03c9577f59b 100644
--- a/src/hotspot/share/gc/z/zRootsIterator.hpp
+++ b/src/hotspot/share/gc/z/zRootsIterator.hpp
@@ -130,14 +130,12 @@ private:
   void do_jni_weak_handles(BoolObjectClosure* is_alive, OopClosure* cl);
   void do_jvmti_weak_export(BoolObjectClosure* is_alive, OopClosure* cl);
   void do_jfr_weak(BoolObjectClosure* is_alive, OopClosure* cl);
-  void do_symbol_table(BoolObjectClosure* is_alive, OopClosure* cl);
   void do_string_table(BoolObjectClosure* is_alive, OopClosure* cl);
 
   ZSerialWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jvmti_weak_export>  _jvmti_weak_export;
   ZSerialWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jfr_weak>           _jfr_weak;
   ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_vm_weak_handles>  _vm_weak_handles;
   ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jni_weak_handles> _jni_weak_handles;
-  ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_symbol_table>     _symbol_table;
   ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_string_table>     _string_table;
 
 public:
diff --git a/src/hotspot/share/gc/z/z_globals.hpp b/src/hotspot/share/gc/z/z_globals.hpp
index 5754b13728d..40d1202feb2 100644
--- a/src/hotspot/share/gc/z/z_globals.hpp
+++ b/src/hotspot/share/gc/z/z_globals.hpp
@@ -79,9 +79,6 @@
   diagnostic(bool, ZVerifyForwarding, false,                                \
           "Verify forwarding tables")                                       \
                                                                             \
-  diagnostic(bool, ZSymbolTableUnloading, false,                            \
-          "Unload unused VM symbols")                                       \
-                                                                            \
   diagnostic(bool, ZWeakRoots, true,                                        \
           "Treat JNI WeakGlobalRefs and StringTable as weak roots")         \
                                                                             \
 
diff --git a/src/hotspot/share/include/cds.h b/src/hotspot/share/include/cds.h
new file mode 100644
index 00000000000..d55bedba10c
--- /dev/null
+++ b/src/hotspot/share/include/cds.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_INCLUDE_CDS_H
+#define SHARE_INCLUDE_CDS_H
+
+// This file declares the CDS data structures that are used by the HotSpot Serviceability Agent
+// (see C sources inside src/jdk.hotspot.agent).
+//
+// We should use only standard C types. Do not use custom types such as bool, intx,
+// etc., to avoid introducing unnecessary dependencies to other HotSpot type declarations.
+//
+// Also, this is a C header file. Do not use C++ here.
+
+#define NUM_CDS_REGIONS 9
+#define CDS_ARCHIVE_MAGIC 0xf00baba2
+#define CURRENT_CDS_ARCHIVE_VERSION 5
+#define INVALID_CDS_ARCHIVE_VERSION -1
+
+struct CDSFileMapRegion {
+  int     _crc;           // crc checksum of the current space
+  size_t  _file_offset;   // sizeof(this) rounded to vm page size
+  union {
+    char*  _base;         // copy-on-write base address
+    size_t _offset;       // offset from the compressed oop encoding base, only used
+                          // by archive heap space
+  } _addr;
+  size_t  _used;          // for setting space top on read
+  int     _read_only;     // read only space?
+  int     _allow_exec;    // executable code in space?
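+                          // (int rather than bool: this header must stick to
+                          //  standard C types, per the note above)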
+  void*   _oopmap;        // bitmap for relocating embedded oops
+  size_t  _oopmap_size_in_bits;
+};
+
+struct CDSFileMapHeaderBase {
+  unsigned int _magic;    // identify file type
+  int          _crc;      // header crc checksum
+  int          _version;  // must be CURRENT_CDS_ARCHIVE_VERSION
+  struct CDSFileMapRegion _space[NUM_CDS_REGIONS];
+};
+
+typedef struct CDSFileMapHeaderBase CDSFileMapHeaderBase;
+
+#endif // SHARE_INCLUDE_CDS_H
diff --git a/src/hotspot/share/interpreter/interpreterRuntime.cpp b/src/hotspot/share/interpreter/interpreterRuntime.cpp
index 5c8c720e648..c6d71b6b5f5 100644
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp
@@ -52,7 +52,7 @@
 #include "runtime/biasedLocking.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/deoptimization.hpp"
-#include "runtime/fieldDescriptor.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/icache.hpp"
@@ -924,11 +924,11 @@ void InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code byte
            info.call_kind() == CallInfo::vtable_call, "");
   }
 #endif
-  // Get sender or sender's host_klass, and only set cpCache entry to resolved if
+  // Get sender or sender's unsafe_anonymous_host, and only set cpCache entry to resolved if
   // it is not an interface. The receiver for invokespecial calls within interface
   // methods must be checked for every call.
   InstanceKlass* sender = pool->pool_holder();
-  sender = sender->has_host_klass() ? sender->host_klass() : sender;
+  sender = sender->is_unsafe_anonymous() ? sender->unsafe_anonymous_host() : sender;
 
   switch (info.call_kind()) {
   case CallInfo::direct_call:
diff --git a/src/hotspot/share/interpreter/linkResolver.cpp b/src/hotspot/share/interpreter/linkResolver.cpp
index 2808269b27a..ab17cb4e560 100644
--- a/src/hotspot/share/interpreter/linkResolver.cpp
+++ b/src/hotspot/share/interpreter/linkResolver.cpp
@@ -48,7 +48,7 @@
 #include "prims/methodHandles.hpp"
 #include "prims/nativeLookup.hpp"
 #include "runtime/compilationPolicy.hpp"
-#include "runtime/fieldDescriptor.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/reflection.hpp"
@@ -1167,9 +1167,9 @@ methodHandle LinkResolver::linktime_resolve_special_method(const LinkInfo& link_
   Klass* current_klass = link_info.current_klass();
   if (current_klass != NULL && resolved_klass->is_interface()) {
     InstanceKlass* ck = InstanceKlass::cast(current_klass);
-    InstanceKlass *klass_to_check = !ck->is_anonymous() ?
+    InstanceKlass *klass_to_check = !ck->is_unsafe_anonymous() ?
                                     ck :
-                                    InstanceKlass::cast(ck->host_klass());
+                                    InstanceKlass::cast(ck->unsafe_anonymous_host());
     // Disable verification for the dynamically-generated reflection bytecodes.
     bool is_reflect = klass_to_check->is_subclass_of(
                         SystemDictionary::reflect_MagicAccessorImpl_klass());
@@ -1260,7 +1260,7 @@ void LinkResolver::runtime_resolve_special_method(CallInfo& result,
     // The verifier also checks that the receiver is a subtype of the sender, if the sender is
     // a class. If the sender is an interface, the check has to be performed at runtime.
     InstanceKlass* sender = InstanceKlass::cast(current_klass);
-    sender = sender->is_anonymous() ? sender->host_klass() : sender;
+    sender = sender->is_unsafe_anonymous() ? sender->unsafe_anonymous_host() : sender;
     if (sender->is_interface() && recv.not_null()) {
       Klass* receiver_klass = recv->klass();
       if (!receiver_klass->is_subtype_of(sender)) {
diff --git a/src/hotspot/share/interpreter/rewriter.cpp b/src/hotspot/share/interpreter/rewriter.cpp
index c3b6190fae1..5e7f27b81ca 100644
--- a/src/hotspot/share/interpreter/rewriter.cpp
+++ b/src/hotspot/share/interpreter/rewriter.cpp
@@ -30,6 +30,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/generateOopMap.hpp"
 #include "prims/methodHandles.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/handles.inline.hpp"
 
 // Computes a CPC map (new_index -> original_index) for constant pool entries
diff --git a/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp b/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp
index c22c4d6ed6b..26c65e26849 100644
--- a/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp
+++ b/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp
@@ -39,7 +39,7 @@
 #include "oops/objArrayKlass.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "runtime/handles.inline.hpp"
-#include "runtime/fieldDescriptor.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/synchronizer.hpp"
diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleDescription.cpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleDescription.cpp
index 916d36f4e6a..3fcd55a4548 100644
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleDescription.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleDescription.cpp
@@ -29,6 +29,7 @@
 #include "jfr/leakprofiler/checkpoint/objectSampleDescription.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/ostream.hpp"
 
@@ -161,10 +162,10 @@ void ObjectSampleDescription::write_class_name() {
 
   if (k->is_instance_klass()) {
     const InstanceKlass* ik = InstanceKlass::cast(k);
-    if (ik->is_anonymous()) {
+    if (ik->is_unsafe_anonymous()) {
       return;
     }
-    assert(!ik->is_anonymous(), "invariant");
+    assert(!ik->is_unsafe_anonymous(), "invariant");
     const Symbol* name = ik->name();
     if (name != NULL) {
       write_text("Class Name: ");
diff --git a/src/hotspot/share/jfr/metadata/metadata.xml b/src/hotspot/share/jfr/metadata/metadata.xml
index a82498a2bcf..4b8628e7481 100644
--- a/src/hotspot/share/jfr/metadata/metadata.xml
+++ b/src/hotspot/share/jfr/metadata/metadata.xml
@@ -200,7 +200,7 @@
-
+
@@ -208,7 +208,7 @@
-
+
@@ -682,11 +682,11 @@
-
-
-
+
+
+
diff --git a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp
index 9f4e6b47c84..c47c6541840 100644
--- a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp
+++ b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp
@@ -476,9 +476,9 @@ public:
     event.set_classCount(cls->_classes_count);
     event.set_chunkSize(cls->_chunk_sz);
     event.set_blockSize(cls->_block_sz);
-    event.set_anonymousClassCount(cls->_anon_classes_count);
-    event.set_anonymousChunkSize(cls->_anon_chunk_sz);
-    event.set_anonymousBlockSize(cls->_anon_block_sz);
+    event.set_unsafeAnonymousClassCount(cls->_anon_classes_count);
+    event.set_unsafeAnonymousChunkSize(cls->_anon_chunk_sz);
+    event.set_unsafeAnonymousBlockSize(cls->_anon_block_sz);
     event.commit();
     return true;
   }
diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp
index 336d0d0cd3b..e66f0fe2d99 100644
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp
@@ -77,7 +77,7 @@ static traceid package_id(KlassPtr klass) {
 
 static traceid cld_id(CldPtr cld) {
   assert(cld != NULL, "invariant");
-  return cld->is_anonymous() ? 0 : TRACE_ID(cld);
+  return cld->is_unsafe_anonymous() ? 0 : TRACE_ID(cld);
 }
 
 static void tag_leakp_klass_artifacts(KlassPtr k, bool class_unload) {
@@ -92,7 +92,7 @@ static void tag_leakp_klass_artifacts(KlassPtr k, bool class_unload) {
   }
   CldPtr cld = k->class_loader_data();
   assert(cld != NULL, "invariant");
-  if (!cld->is_anonymous()) {
+  if (!cld->is_unsafe_anonymous()) {
     tag_leakp_artifact(cld, class_unload);
   }
 }
@@ -230,7 +230,7 @@ typedef JfrArtifactWriterHost ModuleWriter;
 int write__artifact__classloader(JfrCheckpointWriter* writer, JfrArtifactSet* artifacts, const void* c) {
   assert(c != NULL, "invariant");
   CldPtr cld = (CldPtr)c;
-  assert(!cld->is_anonymous(), "invariant");
+  assert(!cld->is_unsafe_anonymous(), "invariant");
   const traceid cld_id = TRACE_ID(cld);
   // class loader type
   const Klass* class_loader_klass = cld->class_loader_klass();
@@ -301,9 +301,9 @@ int write__artifact__klass__symbol(JfrCheckpointWriter* writer, JfrArtifactSet*
   assert(artifacts != NULL, "invariant");
   assert(k != NULL, "invariant");
   const InstanceKlass* const ik = (const InstanceKlass*)k;
-  if (ik->is_anonymous()) {
+  if (ik->is_unsafe_anonymous()) {
     CStringEntryPtr entry =
-      artifacts->map_cstring(JfrSymbolId::anonymous_klass_name_hash_code(ik));
+      artifacts->map_cstring(JfrSymbolId::unsafe_anonymous_klass_name_hash_code(ik));
     assert(entry != NULL, "invariant");
     return write__artifact__cstring__entry__(writer, entry);
   }
@@ -358,7 +358,7 @@ class KlassSymbolWriterImpl {
     }
     CldPtr cld = klass->class_loader_data();
     assert(cld != NULL, "invariant");
-    if (!cld->is_anonymous()) {
+    if (!cld->is_unsafe_anonymous()) {
       count += class_loader_symbols(cld);
    }
     if (_method_used_predicate(klass)) {
@@ -374,9 +374,9 @@ int KlassSymbolWriterImpl::klass_symbols(KlassPtr klass) {
   assert(klass != NULL, "invariant");
   assert(_predicate(klass), "invariant");
   const InstanceKlass* const ik = (const InstanceKlass*)klass;
-  if (ik->is_anonymous()) {
+  if (ik->is_unsafe_anonymous()) {
     CStringEntryPtr entry =
-      this->_artifacts->map_cstring(JfrSymbolId::anonymous_klass_name_hash_code(ik));
+      this->_artifacts->map_cstring(JfrSymbolId::unsafe_anonymous_klass_name_hash_code(ik));
     assert(entry != NULL, "invariant");
    return _unique_predicate(entry->id()) ? write__artifact__cstring__entry__(this->_writer, entry) : 0;
   }
@@ -432,7 +432,7 @@ int KlassSymbolWriterImpl::module_symbols(ModPtr module) {
 template