diff --git a/.hgtags b/.hgtags
index 23e77d2c4dc..a5b334556c9 100644
--- a/.hgtags
+++ b/.hgtags
@@ -638,3 +638,4 @@ f143729ca00ec14a98ea5c7f73acba88da97746e jdk-15+23
58833044988772ca06c97ab2f142474a8627af80 jdk-15+25
90b266a84c06f1b3dc0ed8767856793e8c1c357e jdk-15+25
+0a32396f7a690015d22ca3328ac441a358295d90 jdk-15+26
diff --git a/make/CompileToolsJdk.gmk b/make/CompileToolsJdk.gmk
index 367b60887a1..a671f934998 100644
--- a/make/CompileToolsJdk.gmk
+++ b/make/CompileToolsJdk.gmk
@@ -103,8 +103,8 @@ ifeq ($(ENABLE_PANDOC), true)
SOURCE_FILES := $(TOPDIR)/make/scripts/pandoc-troff-manpage-filter.sh.template, \
OUTPUT_FILE := $(PANDOC_TROFF_MANPAGE_FILTER), \
REPLACEMENTS := \
- @@JJS@@ => $(JJS) ; \
- @@TOPDIR@@ => $(TOPDIR) ; \
+ @@JAVA_SMALL@@ => $(JAVA_SMALL) ; \
+ @@BUILDTOOLS_OUTPUTDIR@@ => $(BUILDTOOLS_OUTPUTDIR) ; \
))
# Created script must be made executable
@@ -126,8 +126,8 @@ ifeq ($(ENABLE_PANDOC), true)
SOURCE_FILES := $(TOPDIR)/make/scripts/pandoc-html-manpage-filter.sh.template, \
OUTPUT_FILE := $(PANDOC_HTML_MANPAGE_FILTER), \
REPLACEMENTS := \
- @@JJS@@ => $(JJS) ; \
- @@TOPDIR@@ => $(TOPDIR) ; \
+ @@JAVA_SMALL@@ => $(JAVA_SMALL) ; \
+ @@BUILDTOOLS_OUTPUTDIR@@ => $(BUILDTOOLS_OUTPUTDIR) ; \
))
# Created script must be made executable
diff --git a/make/Docs.gmk b/make/Docs.gmk
index 00361780d84..898144f6e0a 100644
--- a/make/Docs.gmk
+++ b/make/Docs.gmk
@@ -610,9 +610,9 @@ ifeq ($(ENABLE_PANDOC), true)
# PANDOC_HTML_MANPAGE_FILTER, a wrapper around
# PANDOC_HTML_MANPAGE_FILTER_JAVASCRIPT. This is created by buildtools-jdk.
- # We should also depend on the source javascript filter
- PANDOC_HTML_MANPAGE_FILTER_JAVASCRIPT := \
- $(TOPDIR)/make/scripts/pandoc-html-manpage-filter.js
+ # We should also depend on the source code for the filter
+ PANDOC_HTML_MANPAGE_FILTER_SOURCE := $(call FindFiles, \
+ $(TOPDIR)/make/jdk/src/classes/build/tools/pandocfilter)
$(foreach m, $(ALL_MODULES), \
$(eval MAN_$m := $(call FindModuleManDirs, $m)) \
@@ -632,7 +632,7 @@ ifeq ($(ENABLE_PANDOC), true)
OPTIONS := --toc -V include-before='$(SPECS_TOP)' -V include-after='$(SPECS_BOTTOM_1)', \
POST_PROCESS := $(TOOL_FIXUPPANDOC), \
EXTRA_DEPS := $(PANDOC_HTML_MANPAGE_FILTER) \
- $(PANDOC_HTML_MANPAGE_FILTER_JAVASCRIPT), \
+ $(PANDOC_HTML_MANPAGE_FILTER_SOURCE), \
)) \
$(eval JDK_SPECS_TARGETS += $($($m_$f_NAME))) \
) \
diff --git a/make/GenerateLinkOptData.gmk b/make/GenerateLinkOptData.gmk
index bc8d4f21e43..dd92bddc560 100644
--- a/make/GenerateLinkOptData.gmk
+++ b/make/GenerateLinkOptData.gmk
@@ -69,10 +69,12 @@ $(CLASSLIST_FILE): $(INTERIM_IMAGE_DIR)/bin/java$(EXE_SUFFIX) $(CLASSLIST_JAR)
-Duser.language=en -Duser.country=US \
-cp $(SUPPORT_OUTPUTDIR)/classlist.jar \
build.tools.classlist.HelloClasslist $(LOG_DEBUG)
- $(GREP) -v HelloClasslist $@.raw > $(INTERIM_IMAGE_DIR)/lib/classlist
+ $(GREP) -v HelloClasslist $@.raw > $@.interim
$(FIXPATH) $(INTERIM_IMAGE_DIR)/bin/java -Xshare:dump \
+ -XX:SharedClassListFile=$@.interim -XX:SharedArchiveFile=$@.jsa \
-Xmx128M -Xms128M $(LOG_INFO)
- $(FIXPATH) $(INTERIM_IMAGE_DIR)/bin/java -XX:DumpLoadedClassList=$@.raw \
+ $(FIXPATH) $(INTERIM_IMAGE_DIR)/bin/java -XX:DumpLoadedClassList=$@.raw.2 \
+ -XX:SharedClassListFile=$@.interim -XX:SharedArchiveFile=$@.jsa \
-Djava.lang.invoke.MethodHandle.TRACE_RESOLVE=true \
-Duser.language=en -Duser.country=US \
--module-path $(SUPPORT_OUTPUTDIR)/classlist.jar \
@@ -86,7 +88,7 @@ $(CLASSLIST_FILE): $(INTERIM_IMAGE_DIR)/bin/java$(EXE_SUFFIX) $(CLASSLIST_JAR)
$(CAT) $(LINK_OPT_DIR)/stderr $(JLI_TRACE_FILE) ; \
exit $$exitcode \
)
- $(GREP) -v HelloClasslist $@.raw > $@
+ $(GREP) -v HelloClasslist $@.raw.2 > $@
# The jli trace is created by the same recipe as classlist. By declaring these
# dependencies, make will correctly rebuild both jli trace and classlist
diff --git a/make/InitSupport.gmk b/make/InitSupport.gmk
index 9820579e53f..6dbc963ba8c 100644
--- a/make/InitSupport.gmk
+++ b/make/InitSupport.gmk
@@ -372,6 +372,10 @@ else # $(HAS_SPEC)=true
else ifeq ($$(wildcard $$(COMPARE_BUILD_PATCH)), )
$$(error Patch file $$(COMPARE_BUILD_PATCH) does not exist)
endif
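+      # Verify up front that the patch can be applied cleanly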
+ PATCH_DRY_RUN := $$(shell cd $$(topdir) && $$(PATCH) --dry-run -p1 < $$(COMPARE_BUILD_PATCH) > /dev/null 2>&1 || $$(ECHO) FAILED)
+ ifeq ($$(PATCH_DRY_RUN), FAILED)
+ $$(error Patch file $$(COMPARE_BUILD_PATCH) does not apply cleanly)
+ endif
endif
ifneq ($$(COMPARE_BUILD_FAIL), true)
COMPARE_BUILD_IGNORE_RESULT := || true
diff --git a/make/RunTests.gmk b/make/RunTests.gmk
index ccd9632ad4f..805de4dd785 100644
--- a/make/RunTests.gmk
+++ b/make/RunTests.gmk
@@ -687,13 +687,15 @@ define SetupRunMicroTestBody
$1_MICRO_BASIC_OPTIONS += -rff $$($1_TEST_RESULTS_DIR)/jmh-result.$(MICRO_RESULTS_FORMAT)
endif
+ # Set library path for native dependencies
+ $1_JMH_JVM_ARGS := -Djava.library.path=$$(TEST_IMAGE_DIR)/micro/native
+
ifneq ($$(MICRO_VM_OPTIONS)$$(MICRO_JAVA_OPTIONS), )
- JMH_JVM_ARGS := $$(MICRO_VM_OPTIONS) $$(MICRO_JAVA_OPTIONS)
- # Set library path for native dependencies
- JMH_JVM_ARGS += -Djava.library.path=$$(TEST_IMAGE_DIR)/micro/native
- $1_MICRO_VM_OPTIONS := -jvmArgs $(call ShellQuote,$$(JMH_JVM_ARGS))
+ $1_JMH_JVM_ARGS += $$(MICRO_VM_OPTIONS) $$(MICRO_JAVA_OPTIONS)
endif
+ $1_MICRO_VM_OPTIONS := -jvmArgs $(call ShellQuote,$$($1_JMH_JVM_ARGS))
+
ifneq ($$(MICRO_ITER), )
$1_MICRO_ITER := -i $$(MICRO_ITER)
endif
diff --git a/make/autoconf/boot-jdk.m4 b/make/autoconf/boot-jdk.m4
index 593e9df4236..c16cee8266c 100644
--- a/make/autoconf/boot-jdk.m4
+++ b/make/autoconf/boot-jdk.m4
@@ -381,22 +381,6 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK],
BOOTJDK_USE_LOCAL_CDS=false
AC_MSG_RESULT([no, -XX:SharedArchiveFile not supported])
fi
-
- # Check for jjs in bootjdk
- UTIL_SETUP_TOOL(JJS,
- [
- AC_MSG_CHECKING([for jjs in Boot JDK])
- JJS=$BOOT_JDK/bin/jjs
- if test ! -x $JJS; then
- AC_MSG_RESULT(not found)
- JJS=""
- AC_MSG_NOTICE([Cannot use pandoc without jjs])
- ENABLE_PANDOC=false
- else
- AC_MSG_RESULT(ok)
- fi
- AC_SUBST(JJS)
- ])
])
AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
diff --git a/make/autoconf/flags-cflags.m4 b/make/autoconf/flags-cflags.m4
index da397aa8456..f5545caf10d 100644
--- a/make/autoconf/flags-cflags.m4
+++ b/make/autoconf/flags-cflags.m4
@@ -239,21 +239,11 @@ AC_DEFUN([FLAGS_SETUP_OPTIMIZATION],
C_O_FLAG_NONE="${C_O_FLAG_NONE} ${DISABLE_FORTIFY_CFLAGS}"
fi
elif test "x$TOOLCHAIN_TYPE" = xclang; then
- if test "x$OPENJDK_TARGET_OS" = xmacosx; then
- # On MacOSX we optimize for size, something
- # we should do for all platforms?
- C_O_FLAG_HIGHEST_JVM="-Os"
- C_O_FLAG_HIGHEST="-Os"
- C_O_FLAG_HI="-Os"
- C_O_FLAG_NORM="-Os"
- C_O_FLAG_DEBUG_JVM=""
- else
- C_O_FLAG_HIGHEST_JVM="-O3"
- C_O_FLAG_HIGHEST="-O3"
- C_O_FLAG_HI="-O3"
- C_O_FLAG_NORM="-O2"
- C_O_FLAG_DEBUG_JVM="-O0"
- fi
+ C_O_FLAG_HIGHEST_JVM="-O3"
+ C_O_FLAG_HIGHEST="-O3"
+ C_O_FLAG_HI="-O3"
+ C_O_FLAG_NORM="-O2"
+ C_O_FLAG_DEBUG_JVM="-O0"
C_O_FLAG_SIZE="-Os"
C_O_FLAG_DEBUG="-O0"
C_O_FLAG_NONE="-O0"
diff --git a/make/autoconf/spec.gmk.in b/make/autoconf/spec.gmk.in
index ed4e860e01c..7b7cf98f157 100644
--- a/make/autoconf/spec.gmk.in
+++ b/make/autoconf/spec.gmk.in
@@ -625,7 +625,6 @@ JAR_CMD:=@JAR@
JLINK_CMD := @JLINK@
JMOD_CMD := @JMOD@
JARSIGNER_CMD:=@JARSIGNER@
-JJS_CMD:=@JJS@
# These variables are meant to be used. They are defined with = instead of := to make
# it possible to override only the *_CMD variables.
JAVA=@FIXPATH@ $(JAVA_CMD) $(JAVA_FLAGS_BIG) $(JAVA_FLAGS)
@@ -637,7 +636,6 @@ JAR=@FIXPATH@ $(JAR_CMD)
JLINK = @FIXPATH@ $(JLINK_CMD)
JMOD = @FIXPATH@ $(JMOD_CMD) $(JAVA_TOOL_FLAGS_SMALL)
JARSIGNER=@FIXPATH@ $(JARSIGNER_CMD)
-JJS=@FIXPATH@ $(JJS_CMD) $(JAVA_TOOL_FLAGS_SMALL)
BUILD_JAVA_FLAGS := @BOOTCYCLE_JVM_ARGS_BIG@
BUILD_JAVA=@FIXPATH@ $(BUILD_JDK)/bin/java $(BUILD_JAVA_FLAGS)
@@ -650,6 +648,7 @@ INTERIM_LANGTOOLS_ADD_EXPORTS := \
--add-exports java.base/sun.reflect.annotation=jdk.compiler.interim \
--add-exports java.base/jdk.internal.jmod=jdk.compiler.interim \
--add-exports java.base/jdk.internal.misc=jdk.compiler.interim \
+ --add-exports java.base/sun.invoke.util=jdk.compiler.interim \
#
INTERIM_LANGTOOLS_MODULES_COMMA := $(strip $(subst $(SPACE),$(COMMA),$(strip \
$(INTERIM_LANGTOOLS_MODULES))))
diff --git a/make/common/modules/LauncherCommon.gmk b/make/common/modules/LauncherCommon.gmk
index 8620d48d895..5aa8bce79bb 100644
--- a/make/common/modules/LauncherCommon.gmk
+++ b/make/common/modules/LauncherCommon.gmk
@@ -199,9 +199,9 @@ ifeq ($(call isTargetOsType, unix), true)
# PANDOC_TROFF_MANPAGE_FILTER, a wrapper around
# PANDOC_TROFF_MANPAGE_FILTER_JAVASCRIPT. This is created by buildtools-jdk.
- # We should also depend on the source javascript filter
- PANDOC_TROFF_MANPAGE_FILTER_JAVASCRIPT := \
- $(TOPDIR)/make/scripts/pandoc-troff-manpage-filter.js
+ # We should also depend on the source code for the filter
+ PANDOC_TROFF_MANPAGE_FILTER_SOURCE := $(call FindFiles, \
+ $(TOPDIR)/make/jdk/src/classes/build/tools/pandocfilter)
# The norm in man pages is to display code literals as bold, but pandoc
# "correctly" converts these constructs (encoded in markdown using `...`
@@ -231,7 +231,7 @@ ifeq ($(call isTargetOsType, unix), true)
@@VERSION_SHORT@@ => $(VERSION_SHORT) ; \
@@VERSION_SPECIFICATION@@ => $(VERSION_SPECIFICATION), \
EXTRA_DEPS := $(PANDOC_TROFF_MANPAGE_FILTER) \
- $(PANDOC_TROFF_MANPAGE_FILTER_JAVASCRIPT), \
+ $(PANDOC_TROFF_MANPAGE_FILTER_SOURCE), \
))
TARGETS += $(BUILD_MAN_PAGES)
diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js
index a070909f397..4a586cb4842 100644
--- a/make/conf/jib-profiles.js
+++ b/make/conf/jib-profiles.js
@@ -604,10 +604,6 @@ var getJibProfilesProfiles = function (input, common, data) {
dependencies: [ name + ".jdk" ],
configure_args: [
"--with-boot-jdk=" + input.get(name + ".jdk", "home_path"),
- // Full docs do not currently work with bootcycle build
- // since Nashorn was removed. This negates the
- // --enable-full-docs from the main profile.
- "--enable-full-docs=auto",
]
}
profiles[bootcyclePrebuiltName] = concatObjects(profiles[name],
@@ -688,7 +684,7 @@ var getJibProfilesProfiles = function (input, common, data) {
local: "bundles/\\(jdk.*doc-api-spec.tar.gz\\)",
remote: [
"bundles/common/jdk-" + data.version + "_doc-api-spec.tar.gz",
- "bundles/linux-x64/\\1"
+ "bundles/common/\\1"
],
},
}
@@ -765,7 +761,7 @@ var getJibProfilesProfiles = function (input, common, data) {
profiles[cmpBaselineName].make_args = [ "COMPARE_BUILD=CONF=" ];
profiles[cmpBaselineName].configure_args = concat(
profiles[cmpBaselineName].configure_args,
- "--with-hotspot-build-time=n/a",
+ "--with-hotspot-build-time=n/a",
"--disable-precompiled-headers");
// Do not inherit artifact definitions from base profile
delete profiles[cmpBaselineName].artifacts;
diff --git a/make/data/jdwp/jdwp.spec b/make/data/jdwp/jdwp.spec
index 48d1aab3c15..8a4f0bac95f 100644
--- a/make/data/jdwp/jdwp.spec
+++ b/make/data/jdwp/jdwp.spec
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -393,11 +393,14 @@ JDWP "Java(tm) Debug Wire Protocol"
(boolean canRedefineClasses
"Can the VM redefine classes?")
(boolean canAddMethod
- "Can the VM add methods when redefining "
- "classes?")
+ "Can the VM add methods when redefining classes? "
+                 "<p>@Deprecated(since=\"15\") A JVM TI based JDWP back-end "
+ "will never set this capability to true.")
(boolean canUnrestrictedlyRedefineClasses
"Can the VM redefine classes "
- "in ways that are normally restricted?")
+ "in ways that are normally restricted?"
+                 "<p>@Deprecated(since=\"15\") A JVM TI based JDWP back-end "
+ "will never set this capability to true.")
(boolean canPopFrames
"Can the VM pop stack frames?")
(boolean canUseInstanceFilters
@@ -467,6 +470,7 @@ JDWP "Java(tm) Debug Wire Protocol"
"
"
"Requires canRedefineClasses capability - see "
"CapabilitiesNew. "
+    "<p>@Deprecated(since=\"15\") "
"In addition to the canRedefineClasses capability, the target VM must "
"have the canAddMethod capability to add methods when redefining classes, "
"or the canUnrestrictedlyRedefineClasses capability to redefine classes in ways "
@@ -3160,8 +3164,8 @@ JDWP "Java(tm) Debug Wire Protocol"
"canUnrestrictedlyRedefineClasses is false.")
(Constant CLASS_ATTRIBUTE_CHANGE_NOT_IMPLEMENTED
=72 "The new class version has a different NestHost, "
- "NestMembers, or Record class attribute and "
- "canUnrestrictedlyRedefineClasses is false.")
+ "NestMembers, PermittedSubclasses, or Record class attribute "
+ "and canUnrestrictedlyRedefineClasses is false.")
(Constant NOT_IMPLEMENTED =99 "The functionality is not implemented in "
"this virtual machine.")
(Constant NULL_POINTER =100 "Invalid pointer.")
diff --git a/make/hotspot/symbols/symbols-unix b/make/hotspot/symbols/symbols-unix
index 5c17fd8f3ce..986723a1c9b 100644
--- a/make/hotspot/symbols/symbols-unix
+++ b/make/hotspot/symbols/symbols-unix
@@ -119,6 +119,7 @@ JVM_GetMethodTypeAnnotations
JVM_GetNanoTimeAdjustment
JVM_GetNestHost
JVM_GetNestMembers
+JVM_GetPermittedSubclasses
JVM_GetPrimitiveArrayElement
JVM_GetProperties
JVM_GetProtectionDomain
diff --git a/make/jdk/src/classes/build/tools/classlist/HelloClasslist.java b/make/jdk/src/classes/build/tools/classlist/HelloClasslist.java
index 8e5dbe73d41..e5a1d044676 100644
--- a/make/jdk/src/classes/build/tools/classlist/HelloClasslist.java
+++ b/make/jdk/src/classes/build/tools/classlist/HelloClasslist.java
@@ -74,31 +74,40 @@ public class HelloClasslist {
.forEach(System.out::println);
// Common concatenation patterns
- String SS = String.valueOf(args.length) + String.valueOf(args.length);
- String CS = "string" + String.valueOf(args.length);
- String SC = String.valueOf(args.length) + "string";
- String SCS = String.valueOf(args.length) + "string" + String.valueOf(args.length);
- String CSS = "string" + String.valueOf(args.length) + String.valueOf(args.length);
- String CSC = "string" + String.valueOf(args.length) + "string";
- String SSC = String.valueOf(args.length) + String.valueOf(args.length) + "string";
- String CSCS = "string" + String.valueOf(args.length) + "string" + String.valueOf(args.length);
- String SCSC = String.valueOf(args.length) + "string" + String.valueOf(args.length) + "string";
- String CSCSC = "string" + String.valueOf(args.length) + "string" + String.valueOf(args.length) + "string";
- String SCSCS = String.valueOf(args.length) + "string" + String.valueOf(args.length) + "string" + String.valueOf(args.length);
- String CI = "string" + args.length;
- String IC = args.length + "string";
- String SI = String.valueOf(args.length) + args.length;
- String IS = args.length + String.valueOf(args.length);
- String CIS = "string" + args.length + String.valueOf(args.length);
- String CSCI = "string" + String.valueOf(args.length) + "string" + args.length;
- String CIC = "string" + args.length + "string";
- String CICI = "string" + args.length + "string" + args.length;
- String CJ = "string" + System.currentTimeMillis();
- String JC = System.currentTimeMillis() + "string";
- String CD = "string" + (args.length/2.0);
- String CJC = "string" + System.currentTimeMillis() + "string";
- String CJCJ = "string" + System.currentTimeMillis() + "string" + System.currentTimeMillis();
- String CJCJC = "string" + System.currentTimeMillis() + "string" + System.currentTimeMillis() + "string";
+ int i = args.length;
+ String s = String.valueOf(i);
+
+ String SS = s + s;
+ String CS = "string" + s;
+ String SC = s + "string";
+ String SCS = s + "string" + s;
+ String CSS = "string" + s + s;
+ String CSC = "string" + s + "string";
+ String SSC = s + s + "string";
+ String CSCS = "string" + s + "string" + s;
+ String SCSC = s + "string" + s + "string";
+ String CSCSC = "string" + s + "string" + s + "string";
+ String SCSCS = s + "string" + s + "string" + s;
+ String SSCSS = s + s + "string" + s + s;
+ String SSSSS = s + s + s + s + s;
+
+ String CI = "string" + i;
+ String IC = i + "string";
+ String SI = s + i;
+ String IS = i + s;
+ String CIS = "string" + i + s;
+ String CSCI = "string" + s + "string" + i;
+ String CIC = "string" + i + "string";
+ String CICI = "string" + i + "string" + i;
+
+ long l = System.currentTimeMillis();
+ String CJ = "string" + l;
+ String JC = l + "string";
+ String CJC = "string" + l + "string";
+ String CJCJ = "string" + l + "string" + l;
+ String CJCJC = "string" + l + "string" + l + "string";
+ double d = i / 2.0;
+ String CD = "string" + d;
String newDate = DateTimeFormatter.ISO_LOCAL_DATE_TIME.format(
LocalDateTime.now(ZoneId.of("GMT")));
diff --git a/make/jdk/src/classes/build/tools/pandocfilter/PandocFilter.java b/make/jdk/src/classes/build/tools/pandocfilter/PandocFilter.java
new file mode 100644
index 00000000000..64eeaaa36df
--- /dev/null
+++ b/make/jdk/src/classes/build/tools/pandocfilter/PandocFilter.java
@@ -0,0 +1,108 @@
+package build.tools.pandocfilter;
+
+import build.tools.pandocfilter.json.JSON;
+import build.tools.pandocfilter.json.JSONArray;
+import build.tools.pandocfilter.json.JSONObject;
+import build.tools.pandocfilter.json.JSONString;
+import build.tools.pandocfilter.json.JSONValue;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.InputStreamReader;
+import java.util.Map;
+
+public class PandocFilter {
+ /**
+ * Traverse a tree of pandoc format objects, calling callback on each
+ * element, and replacing it if callback returns a new object.
+ *
+ * Inspired by the walk method in
+ * https://github.com/jgm/pandocfilters/blob/master/pandocfilters.py
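+     *
+     * The callback may return null to keep the original node, a JSONArray
+     * whose elements are spliced into the enclosing array, or a single
+     * replacement node.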
+ */
+ public JSONValue traverse(JSONValue obj, Callback callback, boolean deep) {
+ if (obj instanceof JSONArray) {
+ JSONArray array = (JSONArray) obj;
+
+ JSONArray processed_array = new JSONArray();
+ for (JSONValue elem : array) {
+ if (elem instanceof JSONObject && elem.contains("t")) {
+ JSONValue replacement = callback.invoke(elem.get("t").asString(), elem.contains("c") ? elem.get("c") : new JSONArray());
+ if (replacement == null) {
+ // no replacement object returned, use original
+ processed_array.add(traverse(elem, callback, deep));
+ } else if (replacement instanceof JSONArray) {
+ // array of objects returned, splice all elements into array
+ JSONArray replacement_array = (JSONArray) replacement;
+ for (JSONValue repl_elem : replacement_array) {
+ processed_array.add(traverse(repl_elem, callback, deep));
+ }
+ } else {
+ // replacement object given, traverse it
+ processed_array.add(traverse(replacement, callback, deep));
+ }
+ } else {
+ processed_array.add(traverse(elem, callback, deep));
+ }
+ }
+ return processed_array;
+ } else if (obj instanceof JSONObject) {
+ if (deep && obj.contains("t")) {
+ JSONValue replacement = callback.invoke(obj.get("t").asString(), obj.contains("c") ? obj.get("c") : new JSONArray());
+ if (replacement != null) {
+ return replacement;
+ }
+            }
+            JSONObject obj_obj = (JSONObject) obj;
+ var processed_obj = new JSONObject();
+ for (String key : obj_obj.keys()) {
+ processed_obj.put(key, traverse(obj_obj.get(key), callback, deep));
+ }
+ return processed_obj;
+ } else {
+ return obj;
+ }
+ }
+
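+    /**
+     * Create a pandoc AST node: a JSON object with a "t" (type) field and,
+     * when content is given, a "c" (content) field.
+     */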
+ public JSONValue createPandocNode(String type, JSONValue content) {
+ if (content == null) {
+ return new JSONObject(Map.of(
+ "t", new JSONString(type)));
+ } else {
+ return new JSONObject(Map.of(
+ "t", new JSONString(type),
+ "c", content));
+ }
+ }
+
+ public JSONValue createPandocNode(String type) {
+ return createPandocNode(type, null);
+ }
+
+ /*
+ * Helper constructors to create pandoc format objects
+ */
+ public JSONValue createSpace() {
+ return createPandocNode("Space");
+ }
+
+ public JSONValue createStr(String string) {
+ return createPandocNode("Str", new JSONString(string));
+ }
+
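+    /**
+     * Read a pandoc AST as JSON, from the file named by the first argument
+     * if present, otherwise from standard input.
+     */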
+ public static JSONValue loadJson(String[] args) throws FileNotFoundException {
+        StringBuilder input = new StringBuilder();
+        InputStreamReader reader;
+        if (args.length > 0) {
+            reader = new FileReader(args[0]);
+        } else {
+            reader = new InputStreamReader(System.in);
+        }
+ new BufferedReader(reader).lines().forEach(line -> input.append(line));
+
+ return JSON.parse(input.toString());
+ }
+
+ public interface Callback {
+ JSONValue invoke(String type, JSONValue value);
+ }
+}
diff --git a/make/jdk/src/classes/build/tools/pandocfilter/PandocManPageHtmlFilter.java b/make/jdk/src/classes/build/tools/pandocfilter/PandocManPageHtmlFilter.java
new file mode 100644
index 00000000000..2a4165c9bc3
--- /dev/null
+++ b/make/jdk/src/classes/build/tools/pandocfilter/PandocManPageHtmlFilter.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package build.tools.pandocfilter;
+
+import build.tools.pandocfilter.json.JSONArray;
+import build.tools.pandocfilter.json.JSONObject;
+import build.tools.pandocfilter.json.JSONValue;
+
+import java.io.FileNotFoundException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class PandocManPageHtmlFilter extends PandocFilter {
+
+ private JSONValue MetaInlines(JSONValue value) {
+ return createPandocNode("MetaInlines", value);
+ }
+
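+    /**
+     * Callback that rewrites a man page title of the form NAME(1) into
+     * "The name Command".
+     */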
+ private JSONValue changeTitle(String type, JSONValue value) {
+ if (type.equals("MetaInlines")) {
+ String subType = value.get(0).get("t").asString();
+ String subContent = value.get(0).get("c").asString();
+ if (subType.equals("Str")) {
+ Pattern pattern = Pattern.compile("^([A-Z0-9]+)\\([0-9]+\\)$");
+ Matcher matcher = pattern.matcher(subContent);
+ if (matcher.find()) {
+ String commandName = matcher.group(1).toLowerCase();
+ return MetaInlines(new JSONArray(
+ createStr("The"), createSpace(),
+ createStr(commandName),
+ createSpace(), createStr("Command")));
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Main function
+ */
+ public static void main(String[] args) throws FileNotFoundException {
+ JSONValue json = loadJson(args);
+
+ PandocManPageHtmlFilter filter = new PandocManPageHtmlFilter();
+
+ JSONValue meta = json.get("meta");
+ if (meta != null && meta instanceof JSONObject) {
+ JSONObject metaobj = (JSONObject) meta;
+ metaobj.remove("date");
+ JSONValue title = meta.get("title");
+ if (title != null) {
+ metaobj.put("title", filter.traverse(title, filter::changeTitle, true));
+ }
+ }
+
+ System.out.println(json);
+ }
+}
diff --git a/make/jdk/src/classes/build/tools/pandocfilter/PandocManPageTroffFilter.java b/make/jdk/src/classes/build/tools/pandocfilter/PandocManPageTroffFilter.java
new file mode 100644
index 00000000000..e7bbc1346f5
--- /dev/null
+++ b/make/jdk/src/classes/build/tools/pandocfilter/PandocManPageTroffFilter.java
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package build.tools.pandocfilter;
+
+import build.tools.pandocfilter.json.JSONArray;
+import build.tools.pandocfilter.json.JSONValue;
+
+import java.io.FileNotFoundException;
+
+public class PandocManPageTroffFilter extends PandocFilter {
+
+ private JSONValue createStrong(JSONValue value) {
+ return createPandocNode("Strong", value);
+ }
+
+ private JSONValue createHeader(JSONValue value) {
+ return createPandocNode("Header", value);
+ }
+
+ /**
+ * Callback to change all Str texts to upper case
+ */
+ private JSONValue uppercase(String type, JSONValue value) {
+ if (type.equals("Str")) {
+ return createStr(value.asString().toUpperCase());
+ }
+ return null;
+ }
+
+ /**
+ * Main callback function that performs our man page AST rewrites
+ */
+ private JSONValue manpageFilter(String type, JSONValue value) {
+ // If it is a header, decrease the heading level by one, and
+ // if it is a level 1 header, convert it to upper case.
+ if (type.equals("Header")) {
+ JSONArray array = value.asArray();
+ int level = array.get(0).asInt();
+ array.set(0, JSONValue.from(level - 1));
+ if (value.asArray().get(0).asInt() == 1) {
+ return createHeader(traverse(value, this::uppercase, false));
+ }
+ }
+
+        // Man pages do not have superscript. We use it for footnotes, so
+ // enclose in [...] for best representation.
+ if (type.equals("Superscript")) {
+ return new JSONArray(createStr("["), value, createStr("]"));
+ }
+
+ // If it is a link, put the link name in bold. If it is an external
+ // link, put it in brackets. Otherwise, it is either an internal link
+ // (like "#next-heading"), or a relative link to another man page
+ // (like "java.html"), so remove it for man pages.
+ if (type.equals("Link")) {
+ JSONValue target = value.asArray().get(2).asArray().get(0);
+ String targetStr = target.asString();
+ if (targetStr.startsWith("https:") || targetStr.startsWith("http:")) {
+ return new JSONArray(createStrong(value.asArray().get(1)), createSpace(), createStr("[" + targetStr + "]"));
+ } else {
+ return createStrong(value.asArray().get(1));
+ }
+ }
+
+ return null;
+ }
+
+ /**
+ * Main function
+ */
+ public static void main(String[] args) throws FileNotFoundException {
+ JSONValue json = loadJson(args);
+        PandocManPageTroffFilter filter = new PandocManPageTroffFilter();
+
+ JSONValue transformed_json = filter.traverse(json, filter::manpageFilter, false);
+
+ System.out.println(transformed_json);
+ }
+}
diff --git a/make/jdk/src/classes/build/tools/pandocfilter/json/JSON.java b/make/jdk/src/classes/build/tools/pandocfilter/json/JSON.java
new file mode 100644
index 00000000000..54c5027b7e8
--- /dev/null
+++ b/make/jdk/src/classes/build/tools/pandocfilter/json/JSON.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package build.tools.pandocfilter.json;
+
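+/**
+ * Static convenience methods for creating and parsing JSON values.
+ */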
+public class JSON {
+ public static JSONValue parse(String s) {
+ return new JSONParser().parse(s);
+ }
+
+ public static JSONValue of(int i) {
+ return JSONValue.from(i);
+ }
+
+ public static JSONValue of(long l) {
+ return JSONValue.from(l);
+ }
+
+ public static JSONValue of(double d) {
+ return JSONValue.from(d);
+ }
+
+ public static JSONValue of(boolean b) {
+ return JSONValue.from(b);
+ }
+
+ public static JSONValue of(String s) {
+ return JSONValue.from(s);
+ }
+
+ public static JSONValue of() {
+ return JSONValue.fromNull();
+ }
+
+ public static JSONArray array() {
+ return new JSONArray();
+ }
+
+ public static JSONObject object() {
+ return new JSONObject();
+ }
+}
diff --git a/make/jdk/src/classes/build/tools/pandocfilter/json/JSONArray.java b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONArray.java
new file mode 100644
index 00000000000..05262d89bd5
--- /dev/null
+++ b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONArray.java
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package build.tools.pandocfilter.json;
+
+import java.util.*;
+import java.util.stream.Stream;
+
+public class JSONArray implements JSONValue, Iterable<JSONValue> {
+    private final List<JSONValue> values;
+
+ public JSONArray() {
+        this.values = new ArrayList<JSONValue>();
+ }
+
+ public JSONArray(JSONValue[] array) {
+        this.values = new ArrayList<JSONValue>(array.length);
+ for (var v : array) {
+ values.add(v);
+ }
+ }
+
+ private void append(JSONValue value) {
+ if (value instanceof JSONArray) {
+ for (var v : value.asArray()) {
+ append(v);
+ }
+ } else {
+ this.values.add(value);
+ }
+ }
+
+ public JSONArray(JSONValue value, JSONValue... values) {
+        this.values = new ArrayList<JSONValue>(values.length + 1);
+ append(value);
+ for (var v : values) {
+ append(v);
+ }
+ }
+
+    public JSONArray(List<JSONValue> values) {
+        this.values = new ArrayList<JSONValue>(values);
+ }
+
+ @Override
+ public boolean isArray() {
+ return true;
+ }
+
+ @Override
+ public JSONArray asArray() {
+ return this;
+ }
+
+ public JSONArray set(int i, boolean value) {
+ values.set(i, JSON.of(value));
+ return this;
+ }
+
+ public JSONArray set(int i, int value) {
+ values.set(i, JSON.of(value));
+ return this;
+ }
+
+ public JSONArray set(int i, long value) {
+ values.set(i, JSON.of(value));
+ return this;
+ }
+
+ public JSONArray set(int i, String value) {
+ values.set(i, JSON.of(value));
+ return this;
+ }
+
+ public JSONArray set(int i, double value) {
+ values.set(i, JSON.of(value));
+ return this;
+ }
+
+ public JSONArray set(int i, JSONValue value) {
+ values.set(i, value);
+ return this;
+ }
+
+ public JSONArray setNull(int i) {
+ values.set(i, JSON.of());
+ return this;
+ }
+
+ public JSONArray add(boolean value) {
+ values.add(JSON.of(value));
+ return this;
+ }
+
+ public JSONArray add(int value) {
+ values.add(JSON.of(value));
+ return this;
+ }
+
+ public JSONArray add(long value) {
+ values.add(JSON.of(value));
+ return this;
+ }
+
+ public JSONArray add(String value) {
+ values.add(JSON.of(value));
+ return this;
+ }
+
+ public JSONArray add(double value) {
+ values.add(JSON.of(value));
+ return this;
+ }
+
+ public JSONArray add(JSONValue value) {
+ values.add(value);
+ return this;
+ }
+
+ public JSONArray addNull() {
+ values.add(JSON.of());
+ return this;
+ }
+
+ public JSONValue get(int i) {
+ return values.get(i);
+ }
+
+ public int size() {
+ return values.size();
+ }
+
+ @Override
+ public String toString() {
+ var builder = new StringBuilder();
+
+ builder.append("[");
+ for (var i = 0; i < size(); i++) {
+ builder.append(get(i).toString());
+ if (i != (size() - 1)) {
+ builder.append(",");
+ }
+ }
+ builder.append("]");
+ return builder.toString();
+ }
+
+ @Override
+    public Stream<JSONValue> stream() {
+ return values.stream();
+ }
+
+ @Override
+    public Iterator<JSONValue> iterator() {
+ return values.iterator();
+ }
+}
diff --git a/make/jdk/src/classes/build/tools/pandocfilter/json/JSONBoolean.java b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONBoolean.java
new file mode 100644
index 00000000000..dfade3656db
--- /dev/null
+++ b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONBoolean.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package build.tools.pandocfilter.json;
+
+public class JSONBoolean implements JSONValue {
+ private boolean value;
+
+ public JSONBoolean(boolean value) {
+ this.value = value;
+ }
+
+ @Override
+ public boolean isBoolean() {
+ return true;
+ }
+
+ @Override
+ public boolean asBoolean() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return value ? "true" : "false";
+ }
+}
diff --git a/make/jdk/src/classes/build/tools/pandocfilter/json/JSONDecimal.java b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONDecimal.java
new file mode 100644
index 00000000000..f9d165f1d69
--- /dev/null
+++ b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONDecimal.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package build.tools.pandocfilter.json;
+
+public class JSONDecimal implements JSONValue {
+ private double value;
+
+ public JSONDecimal(double value) {
+ this.value = value;
+ }
+
+ @Override
+ public boolean isDouble() {
+ return true;
+ }
+
+ @Override
+ public double asDouble() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return Double.toString(value);
+ }
+}
diff --git a/src/utils/reorder/tests/LoadFrame.java b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONNull.java
similarity index 62%
rename from src/utils/reorder/tests/LoadFrame.java
rename to make/jdk/src/classes/build/tools/pandocfilter/json/JSONNull.java
index c4c8124d2f2..03e03452e67 100644
--- a/src/utils/reorder/tests/LoadFrame.java
+++ b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONNull.java
@@ -1,12 +1,10 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@@ -22,19 +20,34 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
+package build.tools.pandocfilter.json;
+public class JSONNull implements JSONValue {
+ public JSONNull() {
+ }
-import java.awt.Frame;
+ @Override
+ public boolean isNull() {
+ return true;
+ }
-public class LoadFrame {
+ @Override
+ public String asString() {
+ return null;
+ }
- public static void main(String[] args) {
- new Frame().show();
- // This starts a thread which never exits - so we suicide.
- try {
- Thread.sleep(5000);
- } catch (Exception e) {
- }
- System.exit(0);
+ @Override
+ public JSONArray asArray() {
+ return null;
+ }
+
+ @Override
+ public JSONObject asObject() {
+ return null;
+ }
+
+ @Override
+ public String toString() {
+ return "null";
}
}
diff --git a/src/utils/reorder/tests/JHello.java b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONNumber.java
similarity index 56%
rename from src/utils/reorder/tests/JHello.java
rename to make/jdk/src/classes/build/tools/pandocfilter/json/JSONNumber.java
index 5c97d7073d0..e5f9a0075c7 100644
--- a/src/utils/reorder/tests/JHello.java
+++ b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONNumber.java
@@ -1,12 +1,10 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@@ -22,27 +20,41 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
+package build.tools.pandocfilter.json;
+public class JSONNumber implements JSONValue {
+ private long value;
-import java.awt.*;
-import java.io.PrintStream;
-import javax.swing.*;
-
-public class JHello extends JFrame {
-
- JHello() {
- JLabel jlabel = new JLabel("Hello");
- jlabel.setFont(new Font("Monospaced", 0, 144));
- getContentPane().add(jlabel);
- pack();
+ public JSONNumber(int value) {
+ this.value = value;
}
- public static void main(String args[]) {
- new JHello().show();
- try {
- Thread.sleep(10000);
- } catch (Exception e) {
- }
- System.exit(0);
+ public JSONNumber(long value) {
+ this.value = value;
+ }
+
+ @Override
+ public boolean isInt() {
+ return true;
+ }
+
+ @Override
+ public boolean isLong() {
+ return true;
+ }
+
+ @Override
+ public int asInt() {
+ return Math.toIntExact(value);
+ }
+
+ @Override
+ public long asLong() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return Long.toString(value);
}
}
diff --git a/make/jdk/src/classes/build/tools/pandocfilter/json/JSONObject.java b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONObject.java
new file mode 100644
index 00000000000..962356d5a79
--- /dev/null
+++ b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONObject.java
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package build.tools.pandocfilter.json;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+public class JSONObject implements JSONValue {
+ public static class Field {
+ private final String name;
+ private final JSONValue value;
+
+ private Field(String name, JSONValue value) {
+ this.name = name;
+ this.value = value;
+ }
+
+ public String name() {
+ return name;
+ }
+
+ public JSONValue value() {
+ return value;
+ }
+ }
+
+    private final Map<String, JSONValue> value;
+
+ public JSONObject() {
+        this.value = new HashMap<String, JSONValue>();
+ }
+
+    public JSONObject(Map<String, JSONValue> map) {
+        this.value = new HashMap<String, JSONValue>(map);
+ }
+
+ @Override
+ public boolean isObject() {
+ return true;
+ }
+
+ @Override
+ public JSONObject asObject() {
+ return this;
+ }
+
+ public JSONObject put(String k, boolean v) {
+ value.put(k, JSON.of(v));
+ return this;
+ }
+
+ public JSONObject put(String k, int v) {
+ value.put(k, JSON.of(v));
+ return this;
+ }
+
+ public JSONObject put(String k, long v) {
+ value.put(k, JSON.of(v));
+ return this;
+ }
+
+ public JSONObject put(String k, String v) {
+ value.put(k, JSON.of(v));
+ return this;
+ }
+
+ public JSONObject put(String k, double v) {
+ value.put(k, JSON.of(v));
+ return this;
+ }
+
+ public JSONObject put(String k, JSONArray v) {
+ value.put(k, v);
+ return this;
+ }
+
+ public JSONObject put(String k, JSONObject v) {
+ value.put(k, v);
+ return this;
+ }
+
+ public JSONObject put(String k, JSONValue v) {
+ value.put(k, v);
+ return this;
+ }
+
+ public JSONObject putNull(String k) {
+ value.put(k, JSON.of());
+ return this;
+ }
+
+ public JSONValue remove(String k) {
+ return value.remove(k);
+ }
+
+ public JSONValue get(String k) {
+ return value.get(k);
+ }
+
+    public List<Field> fields() {
+ return value.entrySet()
+ .stream()
+ .map(e -> new Field(e.getKey(), e.getValue()))
+ .collect(Collectors.toList());
+ }
+
+ public boolean contains(String field) {
+ return value.containsKey(field);
+ }
+
+    public Set<String> keys() {
+ return value.keySet();
+ }
+
+ @Override
+ public String toString() {
+ var builder = new StringBuilder();
+ builder.append("{");
+ for (var key : value.keySet()) {
+ builder.append("\"");
+ builder.append(key);
+ builder.append("\":");
+ builder.append(value.get(key).toString());
+ builder.append(",");
+ }
+
+ var end = builder.length() - 1;
+ if (builder.charAt(end) == ',') {
+ builder.deleteCharAt(end);
+ }
+
+ builder.append("}");
+ return builder.toString();
+ }
+}
diff --git a/make/jdk/src/classes/build/tools/pandocfilter/json/JSONParser.java b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONParser.java
new file mode 100644
index 00000000000..d6ed9d4022b
--- /dev/null
+++ b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONParser.java
@@ -0,0 +1,391 @@
+/*
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package build.tools.pandocfilter.json;
+
+import java.util.*;
+
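+/**
+ * A simple recursive descent parser for JSON text, producing JSONValue trees.
+ */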
+class JSONParser {
+ private int pos = 0;
+ private String input;
+
+ JSONParser() {
+ }
+
+ private IllegalStateException failure(String message) {
+ return new IllegalStateException(String.format("[%d]: %s : %s", pos, message, input));
+ }
+
+ private char current() {
+ return input.charAt(pos);
+ }
+
+ private void advance() {
+ pos++;
+ }
+
+ private boolean hasInput() {
+ return pos < input.length();
+ }
+
+ private void expectMoreInput(String message) {
+ if (!hasInput()) {
+ throw failure(message);
+ }
+ }
+
+ private char next(String message) {
+ advance();
+ if (!hasInput()) {
+ throw failure(message);
+ }
+ return current();
+ }
+
+
+ private void expect(char c) {
+ var msg = String.format("Expected character %c", c);
+
+ var n = next(msg);
+ if (n != c) {
+ throw failure(msg);
+ }
+ }
+
+ private void assume(char c, String message) {
+ expectMoreInput(message);
+ if (current() != c) {
+ throw failure(message);
+ }
+ }
+
+ private JSONBoolean parseBoolean() {
+ if (current() == 't') {
+ expect('r');
+ expect('u');
+ expect('e');
+ advance();
+ return new JSONBoolean(true);
+ }
+
+ if (current() == 'f') {
+ expect('a');
+ expect('l');
+ expect('s');
+ expect('e');
+ advance();
+ return new JSONBoolean(false);
+ }
+
+ throw failure("a boolean can only be 'true' or 'false'");
+ }
+
+ private JSONValue parseNumber() {
+ var isInteger = true;
+ var builder = new StringBuilder();
+
+ if (current() == '-') {
+ builder.append(current());
+ advance();
+ expectMoreInput("a number cannot consist of only '-'");
+ }
+
+ if (current() == '0') {
+ builder.append(current());
+ advance();
+
+ if (hasInput() && current() == '.') {
+ isInteger = false;
+ builder.append(current());
+ advance();
+
+ expectMoreInput("a number cannot end with '.'");
+
+ if (!isDigit(current())) {
+ throw failure("must be at least one digit after '.'");
+ }
+
+ while (hasInput() && isDigit(current())) {
+ builder.append(current());
+ advance();
+ }
+ }
+ } else {
+ while (hasInput() && isDigit(current())) {
+ builder.append(current());
+ advance();
+ }
+
+ if (hasInput() && current() == '.') {
+ isInteger = false;
+ builder.append(current());
+ advance();
+
+ expectMoreInput("a number cannot end with '.'");
+
+ if (!isDigit(current())) {
+ throw failure("must be at least one digit after '.'");
+ }
+
+ while (hasInput() && isDigit(current())) {
+ builder.append(current());
+ advance();
+ }
+ }
+ }
+
+ if (hasInput() && (current() == 'e' || current() == 'E')) {
+ isInteger = false;
+
+ builder.append(current());
+ advance();
+ expectMoreInput("a number cannot end with 'e' or 'E'");
+
+ if (current() == '+' || current() == '-') {
+ builder.append(current());
+ advance();
+ }
+
+ if (!isDigit(current())) {
+ throw failure("a digit must follow {'e','E'}{'+','-'}");
+ }
+
+ while (hasInput() && isDigit(current())) {
+ builder.append(current());
+ advance();
+ }
+ }
+
+ var value = builder.toString();
+ return isInteger ? new JSONNumber(Long.parseLong(value)) :
+ new JSONDecimal(Double.parseDouble(value));
+
+ }
+
+ private JSONString parseString() {
+ var missingEndChar = "string is not terminated with '\"'";
+ var builder = new StringBuilder();
+ for (var c = next(missingEndChar); c != '"'; c = next(missingEndChar)) {
+ if (c == '\\') {
+ var n = next(missingEndChar);
+ switch (n) {
+ case '"':
+ builder.append("\"");
+ break;
+ case '\\':
+ builder.append("\\");
+ break;
+ case '/':
+ builder.append("/");
+ break;
+ case 'b':
+ builder.append("\b");
+ break;
+ case 'f':
+ builder.append("\f");
+ break;
+ case 'n':
+ builder.append("\n");
+ break;
+ case 'r':
+ builder.append("\r");
+ break;
+ case 't':
+ builder.append("\t");
+ break;
+ case 'u':
+ var u1 = next(missingEndChar);
+ var u2 = next(missingEndChar);
+ var u3 = next(missingEndChar);
+ var u4 = next(missingEndChar);
+ var cp = Integer.parseInt(String.format("%c%c%c%c", u1, u2, u3, u4), 16);
+ builder.append(new String(new int[]{cp}, 0, 1));
+ break;
+ default:
+ throw failure(String.format("Unexpected escaped character '%c'", n));
+ }
+ } else {
+ builder.append(c);
+ }
+ }
+
+ advance(); // step beyond closing "
+ return new JSONString(builder.toString());
+ }
+
+ private JSONArray parseArray() {
+ var error = "array is not terminated with ']'";
+        var list = new ArrayList<JSONValue>();
+
+ advance(); // step beyond opening '['
+ consumeWhitespace();
+ expectMoreInput(error);
+
+ while (current() != ']') {
+ var val = parseValue();
+ list.add(val);
+
+ expectMoreInput(error);
+ if (current() == ',') {
+ advance();
+ }
+ expectMoreInput(error);
+ }
+
+ advance(); // step beyond closing ']'
+ return new JSONArray(list.toArray(new JSONValue[0]));
+ }
+
+ public JSONNull parseNull() {
+ expect('u');
+ expect('l');
+ expect('l');
+ advance();
+ return new JSONNull();
+ }
+
+ public JSONObject parseObject() {
+ var error = "object is not terminated with '}'";
+        var map = new HashMap<String, JSONValue>();
+
+ advance(); // step beyond opening '{'
+ consumeWhitespace();
+ expectMoreInput(error);
+
+ while (current() != '}') {
+ var key = parseValue();
+ if (!(key instanceof JSONString)) {
+                throw failure("a field must be of type string");
+ }
+
+ if (!hasInput() || current() != ':') {
+ throw failure("a field must be followed by ':'");
+ }
+ advance(); // skip ':'
+
+ var val = parseValue();
+ map.put(key.asString(), val);
+
+ expectMoreInput(error);
+ if (current() == ',') {
+ advance();
+ }
+ expectMoreInput(error);
+ }
+
+ advance(); // step beyond '}'
+ return new JSONObject(map);
+ }
+
+ private boolean isDigit(char c) {
+ return c == '0' ||
+ c == '1' ||
+ c == '2' ||
+ c == '3' ||
+ c == '4' ||
+ c == '5' ||
+ c == '6' ||
+ c == '7' ||
+ c == '8' ||
+ c == '9';
+ }
+
+ private boolean isStartOfNumber(char c) {
+ return isDigit(c) || c == '-';
+ }
+
+ private boolean isStartOfString(char c) {
+ return c == '"';
+ }
+
+ private boolean isStartOfBoolean(char c) {
+ return c == 't' || c == 'f';
+ }
+
+ private boolean isStartOfArray(char c) {
+ return c == '[';
+ }
+
+ private boolean isStartOfNull(char c) {
+ return c == 'n';
+ }
+
+ private boolean isWhitespace(char c) {
+ return c == '\r' ||
+ c == '\n' ||
+ c == '\t' ||
+ c == ' ';
+ }
+
+ private boolean isStartOfObject(char c) {
+ return c == '{';
+ }
+
+ private void consumeWhitespace() {
+ while (hasInput() && isWhitespace(current())) {
+ advance();
+ }
+ }
+
+ public JSONValue parseValue() {
+ JSONValue ret = null;
+
+ consumeWhitespace();
+ if (hasInput()) {
+ var c = current();
+
+ if (isStartOfNumber(c)) {
+ ret = parseNumber();
+ } else if (isStartOfString(c)) {
+ ret = parseString();
+ } else if (isStartOfBoolean(c)) {
+ ret = parseBoolean();
+ } else if (isStartOfArray(c)) {
+ ret = parseArray();
+ } else if (isStartOfNull(c)) {
+ ret = parseNull();
+ } else if (isStartOfObject(c)) {
+ ret = parseObject();
+ } else {
+ throw failure("not a valid start of a JSON value");
+ }
+ }
+ consumeWhitespace();
+
+ return ret;
+ }
+
+ public JSONValue parse(String s) {
+ if (s == null || s.equals("")) {
+ return null;
+ }
+
+ pos = 0;
+ input = s;
+
+ var result = parseValue();
+ if (hasInput()) {
+ throw failure("can only have one top-level JSON value");
+ }
+ return result;
+ }
+}
diff --git a/make/jdk/src/classes/build/tools/pandocfilter/json/JSONString.java b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONString.java
new file mode 100644
index 00000000000..97ca646232f
--- /dev/null
+++ b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONString.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package build.tools.pandocfilter.json;
+
+public class JSONString implements JSONValue {
+ private String value;
+
+ public JSONString(String value) {
+ this.value = value;
+ }
+
+ @Override
+ public boolean isString() {
+ return true;
+ }
+
+ @Override
+ public String asString() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ var builder = new StringBuilder();
+ builder.append("\"");
+
+ for (var i = 0; i < value.length(); i++) {
+ var c = value.charAt(i);
+
+ switch (c) {
+ case '"':
+ builder.append("\\\"");
+ break;
+ case '\\':
+ builder.append("\\\\");
+ break;
+ case '/':
+ builder.append("\\/");
+ break;
+ case '\b':
+ builder.append("\\b");
+ break;
+ case '\f':
+ builder.append("\\f");
+ break;
+ case '\n':
+ builder.append("\\n");
+ break;
+ case '\r':
+ builder.append("\\r");
+ break;
+ case '\t':
+ builder.append("\\t");
+ break;
+ default:
+ builder.append(c);
+ break;
+ }
+ }
+
+ builder.append("\"");
+ return builder.toString();
+ }
+}
diff --git a/make/jdk/src/classes/build/tools/pandocfilter/json/JSONValue.java b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONValue.java
new file mode 100644
index 00000000000..7d1d83b5c8d
--- /dev/null
+++ b/make/jdk/src/classes/build/tools/pandocfilter/json/JSONValue.java
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package build.tools.pandocfilter.json;
+
+import java.util.stream.Stream;
+import java.util.List;
+
+public interface JSONValue {
+ default int asInt() {
+ throw new IllegalStateException("Unsupported conversion to int");
+ }
+
+ default long asLong() {
+ throw new IllegalStateException("Unsupported conversion to long");
+ }
+
+ default double asDouble() {
+ throw new IllegalStateException("Unsupported conversion to double");
+ }
+
+ default String asString() {
+ throw new IllegalStateException("Unsupported conversion to String");
+ }
+
+ default boolean asBoolean() {
+ throw new IllegalStateException("Unsupported conversion to boolean");
+ }
+
+ default JSONArray asArray() {
+ throw new IllegalStateException("Unsupported conversion to array");
+ }
+
+ default JSONObject asObject() {
+ throw new IllegalStateException("Unsupported conversion to object");
+ }
+
+ default boolean isInt() {
+ return false;
+ }
+
+ default boolean isLong() {
+ return false;
+ }
+
+ default boolean isDouble() {
+ return false;
+ }
+
+ default boolean isString() {
+ return false;
+ }
+
+ default boolean isBoolean() {
+ return false;
+ }
+
+ default boolean isArray() {
+ return false;
+ }
+
+ default boolean isObject() {
+ return false;
+ }
+
+ default boolean isNull() {
+ return false;
+ }
+
+    default List<JSONObject.Field> fields() {
+ return asObject().fields();
+ }
+
+ default boolean contains(String key) {
+ return asObject().contains(key);
+ }
+
+ default JSONValue get(String key) {
+ return asObject().get(key);
+ }
+
+ default JSONValue get(int i) {
+ return asArray().get(i);
+ }
+
+    default Stream<JSONValue> stream() {
+ return Stream.of(this);
+ }
+
+ static JSONValue from(int i) {
+ return new JSONNumber(i);
+ }
+
+ static JSONValue from(long l) {
+ return new JSONNumber(l);
+ }
+
+ static JSONValue from(double d) {
+ return new JSONDecimal(d);
+ }
+
+ static JSONValue from(boolean b) {
+ return new JSONBoolean(b);
+ }
+
+ static JSONValue from(String s) {
+ return new JSONString(s);
+ }
+
+ static JSONValue fromNull() {
+ return new JSONNull();
+ }
+}
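
The interface defaults every is*/as* member so that concrete types override only what they support, while the static from() overloads pick the concrete class. A small sketch of the intended usage pattern (JSONNumber reporting isInt() for int-constructed values is an assumption based on the factories above):

    import build.tools.pandocfilter.json.JSONValue;

    public class ValueDemo {
        public static void main(String[] args) {
            JSONValue n = JSONValue.from(42);
            if (n.isInt()) {
                System.out.println(n.asInt() + 1);   // 43
            }
            // Mismatched accessors fail fast rather than coerce:
            // JSONValue.from("text").asInt() -> IllegalStateException
        }
    }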
diff --git a/make/scripts/pandoc-html-manpage-filter.js b/make/scripts/pandoc-html-manpage-filter.js
deleted file mode 100644
index a7c671a078c..00000000000
--- a/make/scripts/pandoc-html-manpage-filter.js
+++ /dev/null
@@ -1,125 +0,0 @@
-//
-// Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-
-//
-// Traverse a tree of pandoc format objects, calling callback on each
-// element, and replacing it if callback returns a new object.
-//
-// Inspired by the walk method in
-// https://github.com/jgm/pandocfilters/blob/master/pandocfilters.py
-//
-function traverse(obj, callback) {
- if (Array.isArray(obj)) {
- var processed_array = [];
- obj.forEach(function(elem) {
- if (elem === Object(elem) && elem.t) {
- var replacement = callback(elem.t, elem.c || []);
- if (!replacement) {
- // no replacement object returned, use original
- processed_array.push(traverse(elem, callback));
- } else if (Array.isArray(replacement)) {
- // array of objects returned, splice all elements into array
- replacement.forEach(function(repl_elem) {
- processed_array.push(traverse(repl_elem, callback));
- })
- } else {
- // replacement object given, traverse it
- processed_array.push(traverse(replacement, callback));
- }
- } else {
- processed_array.push(traverse(elem, callback));
- }
- })
- return processed_array;
- } else if (obj === Object(obj)) {
- if (obj.t) {
- var replacement = callback(obj.t, obj.c || []);
- if (replacement) {
- return replacement;
- }
- }
- var processed_obj = {};
- Object.keys(obj).forEach(function(key) {
- processed_obj[key] = traverse(obj[key], callback);
- })
- return processed_obj;
- } else {
- return obj;
- }
-}
-
-//
-// Helper constructors to create pandoc format objects
-//
-function Space() {
- return { 't': 'Space' };
-}
-
-function Str(value) {
- return { 't': 'Str', 'c': value };
-}
-
-function MetaInlines(value) {
- return { 't': 'MetaInlines', 'c': value };
-}
-
-function change_title(type, value) {
- if (type === 'MetaInlines') {
- if (value[0].t === 'Str') {
- var match = value[0].c.match(/^([A-Z0-9]+)\([0-9]+\)$/);
- if (match) {
- return MetaInlines([
- Str("The"), Space(),
- Str(match[1].toLowerCase()),
- Space(), Str("Command")
- ]);
- }
- }
- }
-}
-
-//
-// Main function
-//
-function main() {
- var input = "";
- while (line = readLine()) {
- input = input.concat(line);
- }
-
- var json = JSON.parse(input);
-
- var meta = json.meta;
- if (meta) {
- meta.date = undefined;
- var title = meta.title;
- if (meta.title) {
- meta.title = traverse(meta.title, change_title);
- }
- }
-
- print(JSON.stringify(json));
-}
-
-// ... and execute it
-main();
diff --git a/make/scripts/pandoc-html-manpage-filter.sh.template b/make/scripts/pandoc-html-manpage-filter.sh.template
index 9f906eaa389..6f9612513c0 100644
--- a/make/scripts/pandoc-html-manpage-filter.sh.template
+++ b/make/scripts/pandoc-html-manpage-filter.sh.template
@@ -22,7 +22,7 @@
# questions.
#
-# Simple wrapper script to call Nashorn with the javascript pandoc filter
+# Simple wrapper script to call Java with the pandoc filter
-@@JJS@@ -scripting \
- "@@TOPDIR@@/make/scripts/pandoc-html-manpage-filter.js" 2> /dev/null
+@@JAVA_SMALL@@ -cp @@BUILDTOOLS_OUTPUTDIR@@/jdk_tools_classes \
+ build.tools.pandocfilter.PandocManPageHtmlFilter
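
The deleted Nashorn filter rewrote a man-page title such as "JAVA(1)" into "The java Command" while walking the pandoc AST. The Java replacement invoked above (build.tools.pandocfilter.PandocManPageHtmlFilter, not shown in this hunk) performs the same rewrite on top of the json classes added earlier. A minimal sketch of just the title transformation, reusing the regular expression from the removed change_title() callback:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class TitleDemo {
        // Same pattern as the removed change_title() JavaScript callback.
        private static final Pattern MAN_TITLE = Pattern.compile("^([A-Z0-9]+)\\([0-9]+\\)$");

        static String changeTitle(String title) {
            Matcher m = MAN_TITLE.matcher(title);
            return m.matches() ? "The " + m.group(1).toLowerCase() + " Command" : title;
        }

        public static void main(String[] args) {
            System.out.println(changeTitle("JAVA(1)"));   // The java Command
        }
    }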
diff --git a/make/scripts/pandoc-troff-manpage-filter.js b/make/scripts/pandoc-troff-manpage-filter.js
deleted file mode 100644
index d1b20630a48..00000000000
--- a/make/scripts/pandoc-troff-manpage-filter.js
+++ /dev/null
@@ -1,142 +0,0 @@
-//
-// Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-
-//
-// Traverse a tree of pandoc format objects, calling callback on each
-// element, and replacing it if callback returns a new object.
-//
-// Inspired by the walk method in
-// https://github.com/jgm/pandocfilters/blob/master/pandocfilters.py
-//
-function traverse(obj, callback) {
- if (Array.isArray(obj)) {
- var processed_array = [];
- obj.forEach(function(elem) {
- if (elem === Object(elem) && elem.t) {
- var replacement = callback(elem.t, elem.c || []);
- if (!replacement) {
- // no replacement object returned, use original
- processed_array.push(traverse(elem, callback));
- } else if (Array.isArray(replacement)) {
- // array of objects returned, splice all elements into array
- replacement.forEach(function(repl_elem) {
- processed_array.push(traverse(repl_elem, callback));
- })
- } else {
- // replacement object given, traverse it
- processed_array.push(traverse(replacement, callback));
- }
- } else {
- processed_array.push(traverse(elem, callback));
- }
- })
- return processed_array;
- } else if (obj === Object(obj)) {
- var processed_obj = {};
- Object.keys(obj).forEach(function(key) {
- processed_obj[key] = traverse(obj[key], callback);
- })
- return processed_obj;
- } else {
- return obj;
- }
-}
-
-//
-// Helper constructors to create pandoc format objects
-//
-function Space() {
- return { 't': 'Space', 'c': [] };
-}
-
-function Str(value) {
- return { 't': 'Str', 'c': value };
-}
-
-function Strong(value) {
- return { 't': 'Strong', 'c': value };
-}
-
-function Header(value) {
- return { 't': 'Header', 'c': value };
-}
-
-//
-// Callback to change all Str texts to upper case
-//
-function uppercase(type, value) {
- if (type === 'Str') {
- return Str(value.toUpperCase());
- }
-}
-
-//
-// Main callback function that performs our man page AST rewrites
-//
-function manpage_filter(type, value) {
- // If it is a header, decrease the heading level by one, and
- // if it is a level 1 header, convert it to upper case.
- if (type === 'Header') {
- value[0] = Math.max(1, value[0] - 1);
- if (value[0] == 1) {
- return Header(traverse(value, uppercase));
- }
- }
-
- // Man pages does not have superscript. We use it for footnotes, so
- // enclose in [...] for best representation.
- if (type === 'Superscript') {
- return [ Str('['), value[0], Str(']') ];
- }
-
- // If it is a link, put the link name in bold. If it is an external
- // link, put it in brackets. Otherwise, it is either an internal link
- // (like "#next-heading"), or a relative link to another man page
- // (like "java.html"), so remove it for man pages.
- if (type === 'Link') {
- var target = value[2][0];
- if (target.match(/^http[s]?:/)) {
- return [ Strong(value[1]), Space(), Str('[' + target + ']') ];
- } else {
- return Strong(value[1]);
- }
- }
-}
-
-//
-// Main function
-//
-function main() {
- var input = "";
- while (line = readLine()) {
- input = input.concat(line);
- }
- var json = JSON.parse(input);
-
- var transformed_json = traverse(json, manpage_filter);
-
- print(JSON.stringify(transformed_json));
-}
-
-// ... and execute it
-main();
diff --git a/make/scripts/pandoc-troff-manpage-filter.sh.template b/make/scripts/pandoc-troff-manpage-filter.sh.template
index c4f9999ed6c..b2aaec0c043 100644
--- a/make/scripts/pandoc-troff-manpage-filter.sh.template
+++ b/make/scripts/pandoc-troff-manpage-filter.sh.template
@@ -22,7 +22,7 @@
# questions.
#
-# Simple wrapper script to call Nashorn with the javascript pandoc filter
+# Simple wrapper script to call Java with the pandoc filter
-@@JJS@@ -scripting \
- "@@TOPDIR@@/make/scripts/pandoc-troff-manpage-filter.js" 2> /dev/null
+@@JAVA_SMALL@@ -cp @@BUILDTOOLS_OUTPUTDIR@@/jdk_tools_classes \
+ build.tools.pandocfilter.PandocManPageTroffFilter
diff --git a/make/src/classes/build/tools/jfr/GenerateJfrFiles.java b/make/src/classes/build/tools/jfr/GenerateJfrFiles.java
index 9d76fb0594b..eb39d2487ce 100644
--- a/make/src/classes/build/tools/jfr/GenerateJfrFiles.java
+++ b/make/src/classes/build/tools/jfr/GenerateJfrFiles.java
@@ -404,7 +404,8 @@ public class GenerateJfrFiles {
out.write(" jlong cutoff_ticks;");
out.write(" u1 stacktrace;");
out.write(" u1 enabled;");
- out.write(" u1 pad[6]; // Because GCC on linux ia32 at least tries to pack this.");
+ out.write(" u1 large;");
+ out.write(" u1 pad[5]; // Because GCC on linux ia32 at least tries to pack this.");
out.write("};");
out.write("");
out.write("union JfrNativeSettings {");
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index 48300821c87..a00e3a68ac4 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -13108,6 +13108,40 @@ instruct negD_reg_reg(vRegD dst, vRegD src) %{
ins_pipe(fp_uop_d);
%}
+instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
+%{
+ match(Set dst (AbsI src));
+
+ effect(KILL cr);
+ ins_cost(INSN_COST * 2);
+ format %{ "cmpw $src, zr\n\t"
+ "cnegw $dst, $src, Assembler::LT\t# int abs"
+ %}
+
+ ins_encode %{
+ __ cmpw(as_Register($src$$reg), zr);
+ __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
+%{
+ match(Set dst (AbsL src));
+
+ effect(KILL cr);
+ ins_cost(INSN_COST * 2);
+ format %{ "cmp $src, zr\n\t"
+ "cneg $dst, $src, Assembler::LT\t# long abs"
+ %}
+
+ ins_encode %{
+ __ cmp(as_Register($src$$reg), zr);
+ __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
instruct absF_reg(vRegF dst, vRegF src) %{
match(Set dst (AbsF src));
@@ -16998,6 +17032,91 @@ instruct vsqrt2D(vecX dst, vecX src)
// --------------------------------- ABS --------------------------------------
+instruct vabs8B(vecD dst, vecD src)
+%{
+ predicate(n->as_Vector()->length() == 4 ||
+ n->as_Vector()->length() == 8);
+ match(Set dst (AbsVB src));
+ ins_cost(INSN_COST);
+ format %{ "abs $dst, $src\t# vector (8B)" %}
+ ins_encode %{
+ __ absr(as_FloatRegister($dst$$reg), __ T8B, as_FloatRegister($src$$reg));
+ %}
+ ins_pipe(vlogical64);
+%}
+
+instruct vabs16B(vecX dst, vecX src)
+%{
+ predicate(n->as_Vector()->length() == 16);
+ match(Set dst (AbsVB src));
+ ins_cost(INSN_COST);
+ format %{ "abs $dst, $src\t# vector (16B)" %}
+ ins_encode %{
+ __ absr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($src$$reg));
+ %}
+ ins_pipe(vlogical128);
+%}
+
+instruct vabs4S(vecD dst, vecD src)
+%{
+ predicate(n->as_Vector()->length() == 4);
+ match(Set dst (AbsVS src));
+ ins_cost(INSN_COST);
+ format %{ "abs $dst, $src\t# vector (4H)" %}
+ ins_encode %{
+ __ absr(as_FloatRegister($dst$$reg), __ T4H, as_FloatRegister($src$$reg));
+ %}
+ ins_pipe(vlogical64);
+%}
+
+instruct vabs8S(vecX dst, vecX src)
+%{
+ predicate(n->as_Vector()->length() == 8);
+ match(Set dst (AbsVS src));
+ ins_cost(INSN_COST);
+ format %{ "abs $dst, $src\t# vector (8H)" %}
+ ins_encode %{
+ __ absr(as_FloatRegister($dst$$reg), __ T8H, as_FloatRegister($src$$reg));
+ %}
+ ins_pipe(vlogical128);
+%}
+
+instruct vabs2I(vecD dst, vecD src)
+%{
+ predicate(n->as_Vector()->length() == 2);
+ match(Set dst (AbsVI src));
+ ins_cost(INSN_COST);
+ format %{ "abs $dst, $src\t# vector (2S)" %}
+ ins_encode %{
+ __ absr(as_FloatRegister($dst$$reg), __ T2S, as_FloatRegister($src$$reg));
+ %}
+ ins_pipe(vlogical64);
+%}
+
+instruct vabs4I(vecX dst, vecX src)
+%{
+ predicate(n->as_Vector()->length() == 4);
+ match(Set dst (AbsVI src));
+ ins_cost(INSN_COST);
+ format %{ "abs $dst, $src\t# vector (4S)" %}
+ ins_encode %{
+ __ absr(as_FloatRegister($dst$$reg), __ T4S, as_FloatRegister($src$$reg));
+ %}
+ ins_pipe(vlogical128);
+%}
+
+instruct vabs2L(vecX dst, vecX src)
+%{
+ predicate(n->as_Vector()->length() == 2);
+ match(Set dst (AbsVL src));
+ ins_cost(INSN_COST);
+ format %{ "abs $dst, $src\t# vector (2D)" %}
+ ins_encode %{
+ __ absr(as_FloatRegister($dst$$reg), __ T2D, as_FloatRegister($src$$reg));
+ %}
+ ins_pipe(vlogical128);
+%}
+
instruct vabs2F(vecD dst, vecD src)
%{
predicate(n->as_Vector()->length() == 2);
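
The scalar absI_reg/absL_reg rules lower Math.abs to a compare plus conditional negate (cmpw/cnegw), and the new vabs* rules let C2's auto-vectorizer emit the NEON abs instruction over whole arrays. A sketch of Java code that would exercise both shapes (whether the loop actually vectorizes depends on C2 heuristics and the target):

    // Scalar Math.abs maps to the new AbsI/AbsL match rules (cmp + cneg);
    // the loop is a candidate for AbsVI vectorization (abs Vd.4S, Vn.4S).
    public class AbsDemo {
        static void absAll(int[] a) {
            for (int i = 0; i < a.length; i++) {
                a[i] = Math.abs(a[i]);
            }
        }

        public static void main(String[] args) {
            int[] data = {-3, 7, -11, 5};
            absAll(data);
            System.out.println(java.util.Arrays.toString(data)); // [3, 7, 11, 5]
        }
    }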
diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
index 1b8a4c96790..ae947557298 100644
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
@@ -2278,12 +2278,12 @@ public:
rf(Vn, 5), rf(Vd, 0); \
}
- INSN(absr, 0, 0b100000101110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
+ INSN(absr, 0, 0b100000101110, 3); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
INSN(negr, 1, 0b100000101110, 3); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
INSN(notr, 1, 0b100000010110, 0); // accepted arrangements: T8B, T16B
INSN(addv, 0, 0b110001101110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
- INSN(cls, 0, 0b100000010010, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
- INSN(clz, 1, 0b100000010010, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
+ INSN(cls, 0, 0b100000010010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
+ INSN(clz, 1, 0b100000010010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(cnt, 0, 0b100000010110, 0); // accepted arrangements: T8B, T16B
INSN(uaddlp, 1, 0b100000001010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
INSN(uaddlv, 1, 0b110000001110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
diff --git a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
index 43598039050..0eb7ca917c3 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
@@ -459,7 +459,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
if (need_zero_check) {
CodeEmitInfo* info = state_for(x);
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
- __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
+ __ branch(lir_cond_equal, new DivByZeroStub(info));
}
rlock_result(x);
@@ -534,7 +534,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
if (need_zero_check) {
CodeEmitInfo* info = state_for(x);
__ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
- __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
+ __ branch(lir_cond_equal, new DivByZeroStub(info));
}
LIR_Opr ill = LIR_OprFact::illegalOpr;
@@ -1384,9 +1384,9 @@ void LIRGenerator::do_If(If* x) {
profile_branch(x, cond);
move_to_phi(x->state());
if (x->x()->type()->is_float_kind()) {
- __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
+ __ branch(lir_cond(cond), x->tsux(), x->usux());
} else {
- __ branch(lir_cond(cond), right->type(), x->tsux());
+ __ branch(lir_cond(cond), x->tsux());
}
assert(x->default_sux() == x->fsux(), "wrong destination above");
__ jump(x->default_sux());
diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad
index f6d47638723..2a929d0ae1c 100644
--- a/src/hotspot/cpu/arm/arm.ad
+++ b/src/hotspot/cpu/arm/arm.ad
@@ -8963,7 +8963,7 @@ instruct ShouldNotReachHere( )
format %{ "ShouldNotReachHere" %}
ins_encode %{
if (is_reachable()) {
- __ udf(0xdead);
+ __ stop(_halt_reason);
}
%}
ins_pipe(tail_call);
diff --git a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
index 4dc1f81c63e..3d8aaff9677 100644
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
@@ -390,7 +390,7 @@ void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LI
LabelObj* L_already_dirty = new LabelObj();
__ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTable::dirty_card_val()));
- __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
+ __ branch(lir_cond_equal, L_already_dirty->label());
set_card(tmp, card_addr);
__ branch_destination(L_already_dirty->label());
} else {
@@ -539,7 +539,7 @@ void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
void LIRGenerator::make_div_by_zero_check(LIR_Opr right_arg, BasicType type, CodeEmitInfo* info) {
assert(right_arg->is_register(), "must be");
__ cmp(lir_cond_equal, right_arg, make_constant(type, 0));
- __ branch(lir_cond_equal, type, new DivByZeroStub(info));
+ __ branch(lir_cond_equal, new DivByZeroStub(info));
}
@@ -1227,7 +1227,7 @@ void LIRGenerator::do_soft_float_compare(If* x) {
LIR_OprFact::intConst(0) : LIR_OprFact::intConst(1));
profile_branch(x, cond);
move_to_phi(x->state());
- __ branch(lir_cond_equal, T_INT, x->tsux());
+ __ branch(lir_cond_equal, x->tsux());
}
#endif // __SOFTFP__
@@ -1285,9 +1285,9 @@ void LIRGenerator::do_If(If* x) {
profile_branch(x, cond);
move_to_phi(x->state());
if (x->x()->type()->is_float_kind()) {
- __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
+ __ branch(lir_cond(cond), x->tsux(), x->usux());
} else {
- __ branch(lir_cond(cond), right->type(), x->tsux());
+ __ branch(lir_cond(cond), x->tsux());
}
assert(x->default_sux() == x->fsux(), "wrong destination above");
__ jump(x->default_sux());
diff --git a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
index e61bdcb5140..d42a14d9dc2 100644
--- a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
@@ -440,7 +440,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
if (divisor->is_register()) {
CodeEmitInfo* null_check_info = state_for(x);
__ cmp(lir_cond_equal, divisor, LIR_OprFact::longConst(0));
- __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(null_check_info));
+ __ branch(lir_cond_equal, new DivByZeroStub(null_check_info));
} else {
jlong const_divisor = divisor->as_constant_ptr()->as_jlong();
if (const_divisor == 0) {
@@ -494,7 +494,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
if (divisor->is_register()) {
CodeEmitInfo* null_check_info = state_for(x);
__ cmp(lir_cond_equal, divisor, LIR_OprFact::intConst(0));
- __ branch(lir_cond_equal, T_INT, new DivByZeroStub(null_check_info));
+ __ branch(lir_cond_equal, new DivByZeroStub(null_check_info));
} else {
jint const_divisor = divisor->as_constant_ptr()->as_jint();
if (const_divisor == 0) {
@@ -1171,9 +1171,9 @@ void LIRGenerator::do_If(If* x) {
profile_branch(x, cond);
move_to_phi(x->state());
if (x->x()->type()->is_float_kind()) {
- __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
+ __ branch(lir_cond(cond), x->tsux(), x->usux());
} else {
- __ branch(lir_cond(cond), right->type(), x->tsux());
+ __ branch(lir_cond(cond), x->tsux());
}
assert(x->default_sux() == x->fsux(), "wrong destination above");
__ jump(x->default_sux());
diff --git a/src/hotspot/cpu/ppc/disassembler_ppc.cpp b/src/hotspot/cpu/ppc/disassembler_ppc.cpp
index 9c6ea38acb8..1c43819b6b8 100644
--- a/src/hotspot/cpu/ppc/disassembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/disassembler_ppc.cpp
@@ -192,7 +192,10 @@ void Disassembler::annotate(address here, outputStream* st) {
st->fill_to(aligned_pos + tabspacing);
st->print(";trap: ic miss check");
} else if ((stop_type = MacroAssembler::tdi_get_si16(instruction, Assembler::traptoUnconditional, 0)) != -1) {
+ bool msg_present = (stop_type & MacroAssembler::stop_msg_present);
+ stop_type = (stop_type &~ MacroAssembler::stop_msg_present);
+ const char **detail_msg_ptr = (const char**)(here + 4);
st->fill_to(aligned_pos + tabspacing);
- st->print(";trap: stop type %d", stop_type);
+ st->print(";trap: stop type %d: %s", stop_type, msg_present ? *detail_msg_ptr : "no details provided");
}
}
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
index c63117e303e..6139d51ee5f 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
@@ -4432,17 +4432,21 @@ void MacroAssembler::verify_oop_addr(RegisterOrConstant offs, Register base, con
// Call a C-function that prints output.
void MacroAssembler::stop(int type, const char* msg) {
+ bool msg_present = (msg != NULL);
+
#ifndef PRODUCT
- block_comment(err_msg("stop(type %d): %s {", type, msg));
+ block_comment(err_msg("stop(type %d): %s {", type, msg_present ? msg : "null"));
#else
block_comment("stop {");
#endif
- if (type != stop_shouldnotreachhere) {
- // Use R0 to pass msg. "shouldnotreachhere" preserves R0.
- load_const_optimized(R0, (void*)msg);
+ if (msg_present) {
+ type |= stop_msg_present;
}
tdi_unchecked(traptoUnconditional, 0/*reg 0*/, type);
+ if (msg_present) {
+ emit_int64((uintptr_t)msg);
+ }
block_comment("} stop;");
}
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
index 6ae44b22aa6..d51a7d8aa0e 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
@@ -901,17 +901,18 @@ class MacroAssembler: public Assembler {
public:
enum {
- stop_stop = 0,
- stop_untested = 1,
- stop_unimplemented = 2,
- stop_shouldnotreachhere = 3
+ stop_stop = 0,
+ stop_untested = 1,
+ stop_unimplemented = 2,
+ stop_shouldnotreachhere = 3,
+ stop_msg_present = -0x8000
};
// Prints msg, dumps registers and stops execution.
- void stop (const char* msg = NULL) { stop(stop_stop, msg ); }
- void untested (const char* msg = NULL) { stop(stop_untested, msg ); }
- void unimplemented(const char* msg = NULL) { stop(stop_unimplemented, msg ); }
- void should_not_reach_here() { stop(stop_shouldnotreachhere, NULL); }
+ void stop (const char* msg = NULL) { stop(stop_stop, msg); }
+ void untested (const char* msg = NULL) { stop(stop_untested, msg); }
+ void unimplemented (const char* msg = NULL) { stop(stop_unimplemented, msg); }
+ void should_not_reach_here(const char* msg = NULL) { stop(stop_shouldnotreachhere, msg); }
void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
};
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index c5034a48c84..6c72ac5c7c3 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -15196,7 +15196,7 @@ instruct ShouldNotReachHere() %{
ins_encode %{
if (is_reachable()) {
// TODO: PPC port $archOpcode(ppc64Opcode_tdi);
- __ should_not_reach_here();
+ __ stop(_halt_reason);
}
%}
ins_pipe(pipe_class_default);
diff --git a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp
index 8ff4b3a5152..3f3c00afb73 100644
--- a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp
@@ -385,7 +385,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
if (!ImplicitDiv0Checks) {
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
- __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
+ __ branch(lir_cond_equal, new DivByZeroStub(info));
// Idiv/irem cannot trap (passing info would generate an assertion).
info = NULL;
}
@@ -461,7 +461,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
if (!ImplicitDiv0Checks) {
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
- __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
+ __ branch(lir_cond_equal, new DivByZeroStub(info));
// Idiv/irem cannot trap (passing info would generate an assertion).
info = NULL;
}
@@ -988,9 +988,9 @@ void LIRGenerator::do_If (If* x) {
profile_branch(x, cond);
move_to_phi(x->state());
if (x->x()->type()->is_float_kind()) {
- __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
+ __ branch(lir_cond(cond), x->tsux(), x->usux());
} else {
- __ branch(lir_cond(cond), right->type(), x->tsux());
+ __ branch(lir_cond(cond), x->tsux());
}
assert(x->default_sux() == x->fsux(), "wrong destination above");
__ jump(x->default_sux());
diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad
index 69d234fe583..fa4422f435e 100644
--- a/src/hotspot/cpu/s390/s390.ad
+++ b/src/hotspot/cpu/s390/s390.ad
@@ -9889,7 +9889,7 @@ instruct ShouldNotReachHere() %{
format %{ "ILLTRAP; ShouldNotReachHere" %}
ins_encode %{
if (is_reachable()) {
- __ z_illtrap();
+ __ stop(_halt_reason);
}
%}
ins_pipe(pipe_class_dummy);
diff --git a/src/hotspot/cpu/x86/assembler_x86.cpp b/src/hotspot/cpu/x86/assembler_x86.cpp
index e8c5f951878..1cb1df54968 100644
--- a/src/hotspot/cpu/x86/assembler_x86.cpp
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp
@@ -6241,6 +6241,17 @@ void Assembler::vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, Address
emit_int8(imm8);
}
+void Assembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) {
+ assert(VM_Version::supports_evex(), "requires EVEX support");
+ assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
+ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+ emit_int8(0x25);
+ emit_int8((unsigned char)(0xC0 | encode));
+ emit_int8(imm8);
+}
+
// vinserti forms
void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
@@ -6693,6 +6704,21 @@ void Assembler::vpbroadcastq(XMMRegister dst, Address src, int vector_len) {
emit_int8(0x59);
emit_operand(dst, src);
}
+
+void Assembler::evbroadcasti32x4(XMMRegister dst, Address src, int vector_len) {
+ assert(vector_len != Assembler::AVX_128bit, "");
+ assert(VM_Version::supports_avx512dq(), "");
+ assert(dst != xnoreg, "sanity");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+ attributes.set_rex_vex_w_reverted();
+ attributes.set_address_attributes(/* tuple_type */ EVEX_T2, /* input_size_in_bits */ EVEX_64bit);
+ // swap src<->dst for encoding
+ vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8(0x5A);
+ emit_operand(dst, src);
+}
+
void Assembler::evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len) {
assert(vector_len != Assembler::AVX_128bit, "");
assert(VM_Version::supports_avx512dq(), "");
@@ -7587,6 +7613,15 @@ void Assembler::cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop
emit_int24((unsigned char)0xC2, (0xC0 | encode), (0xF & cop));
}
+void Assembler::blendvpb(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
+ assert(VM_Version::supports_avx(), "");
+ assert(vector_len <= AVX_256bit, "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+ int src2_enc = src2->encoding();
+ emit_int24(0x4C, (0xC0 | encode), (0xF0 & src2_enc << 4));
+}
+
void Assembler::blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
assert(VM_Version::supports_avx(), "");
assert(vector_len <= AVX_256bit, "");
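
vpternlogq computes an arbitrary three-input boolean function: for each bit position, the three source bits form a 3-bit index into the 8-bit immediate truth table. The CRC kernel below passes imm8 = 0x96, which is exactly three-way XOR (hence its "xor ABC" comments). A small check of that identity:

    // Verifies that the vpternlogq truth table 0x96 encodes a ^ b ^ c:
    // bit i of the result is imm8[(a<<2) | (b<<1) | c] for source bits a, b, c.
    public class TernlogDemo {
        public static void main(String[] args) {
            int imm8 = 0x96;                       // truth table used by the CRC folding code
            for (int bits = 0; bits < 8; bits++) { // all (a,b,c) combinations
                int a = (bits >> 2) & 1, b = (bits >> 1) & 1, c = bits & 1;
                int fromTable = (imm8 >> bits) & 1;
                System.out.printf("a=%d b=%d c=%d -> %d (xor=%d)%n",
                                  a, b, c, fromTable, a ^ b ^ c);
            }
        }
    }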
diff --git a/src/hotspot/cpu/x86/assembler_x86.hpp b/src/hotspot/cpu/x86/assembler_x86.hpp
index d7c548381b0..01d92f896f3 100644
--- a/src/hotspot/cpu/x86/assembler_x86.hpp
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp
@@ -2201,6 +2201,7 @@ private:
// Ternary logic instruction.
void vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len);
void vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, Address src3, int vector_len);
+ void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len);
// vinserti forms
void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
@@ -2245,6 +2246,7 @@ private:
void vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len);
void vpbroadcastq(XMMRegister dst, Address src, int vector_len);
+ void evbroadcasti32x4(XMMRegister dst, Address src, int vector_len);
void evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len);
void evbroadcasti64x2(XMMRegister dst, Address src, int vector_len);
@@ -2274,6 +2276,7 @@ private:
void vzeroupper();
// AVX support for vectorized conditional move (float/double). The following two instructions used only coupled.
+ void blendvpb(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);
void cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
void blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);
void cmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
diff --git a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
index 6dcd0f280e4..f8cb7bb70e6 100644
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
@@ -481,7 +481,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
__ move(right.result(), cc->at(0));
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
- __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
+ __ branch(lir_cond_equal, new DivByZeroStub(info));
address entry = NULL;
switch (x->op()) {
@@ -565,7 +565,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
if (!ImplicitDiv0Checks) {
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
- __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
+ __ branch(lir_cond_equal, new DivByZeroStub(info));
// Idiv/irem cannot trap (passing info would generate an assertion).
info = NULL;
}
@@ -1503,9 +1503,9 @@ void LIRGenerator::do_If(If* x) {
profile_branch(x, cond);
move_to_phi(x->state());
if (x->x()->type()->is_float_kind()) {
- __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
+ __ branch(lir_cond(cond), x->tsux(), x->usux());
} else {
- __ branch(lir_cond(cond), right->type(), x->tsux());
+ __ branch(lir_cond(cond), x->tsux());
}
assert(x->default_sux() == x->fsux(), "wrong destination above");
__ jump(x->default_sux());
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index b5753410fd4..522ddf71817 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -6479,16 +6479,6 @@ void MacroAssembler::update_byte_crc32(Register crc, Register val, Register tabl
xorl(crc, Address(table, val, Address::times_4, 0));
}
-/**
-* Fold four 128-bit data chunks
-*/
-void MacroAssembler::fold_128bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
- evpclmulhdq(xtmp, xK, xcrc, Assembler::AVX_512bit); // [123:64]
- evpclmulldq(xcrc, xK, xcrc, Assembler::AVX_512bit); // [63:0]
- evpxorq(xcrc, xcrc, Address(buf, offset), Assembler::AVX_512bit /* vector_len */);
- evpxorq(xcrc, xcrc, xtmp, Assembler::AVX_512bit /* vector_len */);
-}
-
/**
* Fold 128-bit data chunk
*/
@@ -6692,6 +6682,372 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Regi
}
#ifdef _LP64
+// Helper function for AVX 512 CRC32
+// Fold 512-bit data chunks
+void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf,
+ Register pos, int offset) {
+ evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit);
+ evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64]
+ evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0]
+ evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */);
+ evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */);
+}
+
+// Helper function for AVX 512 CRC32
+// Compute CRC32 for < 256B buffers
+void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
+ Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
+ Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) {
+
+ Label L_less_than_32, L_exact_16_left, L_less_than_16_left;
+ Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left;
+ Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2;
+
+ // check if there is enough buffer to be able to fold 16B at a time
+ cmpl(len, 32);
+ jcc(Assembler::less, L_less_than_32);
+
+ // if there is, load the constants
+ movdqu(xmm10, Address(key, 1 * 16)); //rk1 and rk2 in xmm10
+ movdl(xmm0, crc); // get the initial crc value
+ movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
+ pxor(xmm7, xmm0);
+
+ // update the buffer pointer
+ addl(pos, 16);
+  // update the counter. Subtract 32 instead of 16 to save one instruction from the loop
+ subl(len, 32);
+ jmp(L_16B_reduction_loop);
+
+ bind(L_less_than_32);
+  // mov initial crc to the return value. This is necessary for zero-length buffers.
+ movl(rax, crc);
+ testl(len, len);
+ jcc(Assembler::equal, L_cleanup);
+
+ movdl(xmm0, crc); //get the initial crc value
+
+ cmpl(len, 16);
+ jcc(Assembler::equal, L_exact_16_left);
+ jcc(Assembler::less, L_less_than_16_left);
+
+ movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext
+ pxor(xmm7, xmm0); //xor the initial crc value
+ addl(pos, 16);
+ subl(len, 16);
+ movdqu(xmm10, Address(key, 1 * 16)); // rk1 and rk2 in xmm10
+ jmp(L_get_last_two_xmms);
+
+ bind(L_less_than_16_left);
+  // use stack space to load data less than 16 bytes; zero out the 16B in memory first.
+ pxor(xmm1, xmm1);
+ movptr(tmp1, rsp);
+ movdqu(Address(tmp1, 0 * 16), xmm1);
+
+ cmpl(len, 4);
+ jcc(Assembler::less, L_only_less_than_4);
+
+  // back up the counter value
+ movl(tmp2, len);
+ cmpl(len, 8);
+ jcc(Assembler::less, L_less_than_8_left);
+
+ //load 8 Bytes
+ movq(rax, Address(buf, pos, Address::times_1, 0 * 16));
+ movq(Address(tmp1, 0 * 16), rax);
+ addptr(tmp1, 8);
+ subl(len, 8);
+ addl(pos, 8);
+
+ bind(L_less_than_8_left);
+ cmpl(len, 4);
+ jcc(Assembler::less, L_less_than_4_left);
+
+ //load 4 Bytes
+ movl(rax, Address(buf, pos, Address::times_1, 0));
+ movl(Address(tmp1, 0 * 16), rax);
+ addptr(tmp1, 4);
+ subl(len, 4);
+ addl(pos, 4);
+
+ bind(L_less_than_4_left);
+ cmpl(len, 2);
+ jcc(Assembler::less, L_less_than_2_left);
+
+ // load 2 Bytes
+ movw(rax, Address(buf, pos, Address::times_1, 0));
+ movl(Address(tmp1, 0 * 16), rax);
+ addptr(tmp1, 2);
+ subl(len, 2);
+ addl(pos, 2);
+
+ bind(L_less_than_2_left);
+ cmpl(len, 1);
+ jcc(Assembler::less, L_zero_left);
+
+ // load 1 Byte
+ movb(rax, Address(buf, pos, Address::times_1, 0));
+ movb(Address(tmp1, 0 * 16), rax);
+
+ bind(L_zero_left);
+ movdqu(xmm7, Address(rsp, 0));
+ pxor(xmm7, xmm0); //xor the initial crc value
+
+ lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
+ movdqu(xmm0, Address(rax, tmp2));
+ pshufb(xmm7, xmm0);
+ jmp(L_128_done);
+
+ bind(L_exact_16_left);
+ movdqu(xmm7, Address(buf, pos, Address::times_1, 0));
+ pxor(xmm7, xmm0); //xor the initial crc value
+ jmp(L_128_done);
+
+ bind(L_only_less_than_4);
+ cmpl(len, 3);
+ jcc(Assembler::less, L_only_less_than_3);
+
+ // load 3 Bytes
+ movb(rax, Address(buf, pos, Address::times_1, 0));
+ movb(Address(tmp1, 0), rax);
+
+ movb(rax, Address(buf, pos, Address::times_1, 1));
+ movb(Address(tmp1, 1), rax);
+
+ movb(rax, Address(buf, pos, Address::times_1, 2));
+ movb(Address(tmp1, 2), rax);
+
+ movdqu(xmm7, Address(rsp, 0));
+ pxor(xmm7, xmm0); //xor the initial crc value
+
+ pslldq(xmm7, 0x5);
+ jmp(L_barrett);
+ bind(L_only_less_than_3);
+ cmpl(len, 2);
+ jcc(Assembler::less, L_only_less_than_2);
+
+ // load 2 Bytes
+ movb(rax, Address(buf, pos, Address::times_1, 0));
+ movb(Address(tmp1, 0), rax);
+
+ movb(rax, Address(buf, pos, Address::times_1, 1));
+ movb(Address(tmp1, 1), rax);
+
+ movdqu(xmm7, Address(rsp, 0));
+ pxor(xmm7, xmm0); //xor the initial crc value
+
+ pslldq(xmm7, 0x6);
+ jmp(L_barrett);
+
+ bind(L_only_less_than_2);
+ //load 1 Byte
+ movb(rax, Address(buf, pos, Address::times_1, 0));
+ movb(Address(tmp1, 0), rax);
+
+ movdqu(xmm7, Address(rsp, 0));
+ pxor(xmm7, xmm0); //xor the initial crc value
+
+ pslldq(xmm7, 0x7);
+}
+
+/**
+* Compute CRC32 using AVX512 instructions
+* @param crc  register containing existing CRC (32-bit)
+* @param buf  register pointing to input byte buffer (byte*)
+* @param len  register containing number of bytes
+* @param tmp1 scratch register
+* @param tmp2 scratch register
+* @return rax result register
+*/
+void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register key, Register tmp1, Register tmp2) {
+ assert_different_registers(crc, buf, len, key, tmp1, tmp2, rax);
+
+ Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
+ Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
+ Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop;
+ Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop;
+ Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup;
+
+ const Register pos = r12;
+ push(r12);
+ subptr(rsp, 16 * 2 + 8);
+
+  // For EVEX with VL and BW, provide a standard mask; VL = 128 will guide the merge
+  // context for the registers used, where all instructions below are using 128-bit mode.
+  // On EVEX without VL and BW, these instructions will all be AVX.
+ lea(key, ExternalAddress(StubRoutines::x86::crc_table_avx512_addr()));
+ notl(crc);
+ movl(pos, 0);
+
+ // check if smaller than 256B
+ cmpl(len, 256);
+ jcc(Assembler::less, L_less_than_256);
+
+ // load the initial crc value
+ movdl(xmm10, crc);
+
+ // receive the initial 64B data, xor the initial crc value
+ evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
+ evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit);
+ evbroadcasti32x4(xmm10, Address(key, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4
+
+ subl(len, 256);
+ cmpl(len, 256);
+ jcc(Assembler::less, L_fold_128_B_loop);
+
+ evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
+ evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
+ evbroadcasti32x4(xmm16, Address(key, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2
+ subl(len, 256);
+
+ bind(L_fold_256_B_loop);
+ addl(pos, 256);
+ fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64);
+ fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64);
+ fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64);
+ fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64);
+
+ subl(len, 256);
+ jcc(Assembler::greaterEqual, L_fold_256_B_loop);
+
+ // Fold 256 into 128
+ addl(pos, 256);
+ evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit);
+ evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit);
+ vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC
+
+ evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit);
+ evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit);
+ vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC
+
+ evmovdquq(xmm0, xmm7, Assembler::AVX_512bit);
+ evmovdquq(xmm4, xmm8, Assembler::AVX_512bit);
+
+ addl(len, 128);
+ jmp(L_fold_128_B_register);
+
+  // at this section of the code, there are 128 * x + y (0 <= y < 128) bytes of buffer. The fold_128_B_loop
+  // loop will fold 128B at a time until we have 128 + y bytes of buffer
+
+  // fold 128B at a time. This section of the code folds 8 xmm registers in parallel
+ bind(L_fold_128_B_loop);
+ addl(pos, 128);
+ fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64);
+ fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64);
+
+ subl(len, 128);
+ jcc(Assembler::greaterEqual, L_fold_128_B_loop);
+
+ addl(pos, 128);
+
+ // at this point, the buffer pointer is pointing at the last y Bytes of the buffer, where 0 <= y < 128
+  // the 128B of folded data is in 8 of the xmm registers: xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
+ bind(L_fold_128_B_register);
+ evmovdquq(xmm16, Address(key, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16
+ evmovdquq(xmm11, Address(key, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0
+ evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit);
+ evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit);
+ // save last that has no multiplicand
+ vextracti64x2(xmm7, xmm4, 3);
+
+ evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit);
+ evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit);
+ // Needed later in reduction loop
+ movdqu(xmm10, Address(key, 1 * 16));
+ vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC
+ vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC
+
+ // Swap 1,0,3,2 - 01 00 11 10
+ evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit);
+ evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit);
+ vextracti128(xmm5, xmm8, 1);
+ evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit);
+
+ // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop
+ // instead of a cmp instruction, we use the negative flag with the jl instruction
+ addl(len, 128 - 16);
+ jcc(Assembler::less, L_final_reduction_for_128);
+
+ bind(L_16B_reduction_loop);
+ vpclmulqdq(xmm8, xmm7, xmm10, 0x1);
+ vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
+ vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
+ movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16));
+ vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
+ addl(pos, 16);
+ subl(len, 16);
+ jcc(Assembler::greaterEqual, L_16B_reduction_loop);
+
+ bind(L_final_reduction_for_128);
+ addl(len, 16);
+ jcc(Assembler::equal, L_128_done);
+
+ bind(L_get_last_two_xmms);
+ movdqu(xmm2, xmm7);
+ addl(pos, len);
+ movdqu(xmm1, Address(buf, pos, Address::times_1, -16));
+ subl(pos, len);
+
+ // get rid of the extra data that was loaded before
+ // load the shift constant
+ lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr()));
+ movdqu(xmm0, Address(rax, len));
+ addl(rax, len);
+
+ vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
+  // Change mask to 512
+ vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2);
+ vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit);
+
+ blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit);
+ vpclmulqdq(xmm8, xmm7, xmm10, 0x1);
+ vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
+ vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit);
+ vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit);
+
+ bind(L_128_done);
+ // compute crc of a 128-bit value
+ movdqu(xmm10, Address(key, 3 * 16));
+ movdqu(xmm0, xmm7);
+
+ // 64b fold
+ vpclmulqdq(xmm7, xmm7, xmm10, 0x0);
+ vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit);
+ vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
+
+ // 32b fold
+ movdqu(xmm0, xmm7);
+ vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit);
+ vpclmulqdq(xmm7, xmm7, xmm10, 0x10);
+ vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit);
+ jmp(L_barrett);
+
+ bind(L_less_than_256);
+ kernel_crc32_avx512_256B(crc, buf, len, key, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup);
+
+ //barrett reduction
+ bind(L_barrett);
+ vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2);
+ movdqu(xmm1, xmm7);
+ movdqu(xmm2, xmm7);
+ movdqu(xmm10, Address(key, 4 * 16));
+
+ pclmulqdq(xmm7, xmm10, 0x0);
+ pxor(xmm7, xmm2);
+ vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2);
+ movdqu(xmm2, xmm7);
+ pclmulqdq(xmm7, xmm10, 0x10);
+ pxor(xmm7, xmm2);
+ pxor(xmm7, xmm1);
+ pextrd(crc, xmm7, 2);
+
+ bind(L_cleanup);
+ notl(crc); // ~c
+ addptr(rsp, 16 * 2 + 8);
+ pop(r12);
+}
+
// S. Gueron / Information Processing Letters 112 (2012) 184
// Algorithm 4: Computing carry-less multiplication using a precomputed lookup table.
// Input: A 32 bit value B = [byte3, byte2, byte1, byte0].
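
None of this changes the Java-level API; the stub generator change below simply routes java.util.zip.CRC32.updateBytes to the AVX512 kernel when VPCLMULQDQ, AVX512BW and AVX512VL are all present. The usual usage just gets faster on buffers of 256 bytes or more:

    import java.util.zip.CRC32;

    public class Crc32Demo {
        public static void main(String[] args) {
            byte[] data = new byte[1 << 20];      // 1 MiB; large buffers hit the 256B+ fast path
            for (int i = 0; i < data.length; i++) {
                data[i] = (byte) i;
            }
            CRC32 crc = new CRC32();
            crc.update(data, 0, data.length);     // intrinsified as updateBytes
            System.out.printf("crc32 = %08x%n", crc.getValue());
        }
    }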
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
index 58b9e1f77bb..a939e7794d4 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
@@ -1658,6 +1658,15 @@ public:
// CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
void update_byte_crc32(Register crc, Register val, Register table);
void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
+
+
+#ifdef _LP64
+ void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
+ void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
+ Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
+ Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
+#endif // _LP64
+
// CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
// Note on a naming convention:
// Prefix w = register only used on a Westmere+ architecture
@@ -1694,10 +1703,13 @@ public:
// Fold 128-bit data chunk
void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
+#ifdef _LP64
+ // Fold 512-bit data chunk
+ void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
+#endif // _LP64
// Fold 8-bit data
void fold_8bit_crc32(Register crc, Register table, Register tmp);
void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
- void fold_128bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
// Compress char[] array to byte[].
void char_array_compress(Register src, Register dst, Register len,
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
index 9de5886755a..b1a98598128 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
@@ -5325,13 +5325,20 @@ address generate_avx_ghash_processBlocks() {
const Register buf = c_rarg1; // source java byte array address
const Register len = c_rarg2; // length
const Register table = c_rarg3; // crc_table address (reuse register)
- const Register tmp = r11;
- assert_different_registers(crc, buf, len, table, tmp, rax);
+ const Register tmp1 = r11;
+ const Register tmp2 = r10;
+ assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax);
BLOCK_COMMENT("Entry:");
__ enter(); // required for proper stackwalking of RuntimeStub frame
- __ kernel_crc32(crc, buf, len, table, tmp);
+ if (VM_Version::supports_sse4_1() && VM_Version::supports_avx512_vpclmulqdq() &&
+ VM_Version::supports_avx512bw() &&
+ VM_Version::supports_avx512vl()) {
+ __ kernel_crc32_avx512(crc, buf, len, table, tmp1, tmp2);
+ } else {
+ __ kernel_crc32(crc, buf, len, table, tmp1);
+ }
__ movl(rax, crc);
__ vzeroupper();
diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.cpp b/src/hotspot/cpu/x86/stubRoutines_x86.cpp
index 4749a276d5f..5d93d118e7b 100644
--- a/src/hotspot/cpu/x86/stubRoutines_x86.cpp
+++ b/src/hotspot/cpu/x86/stubRoutines_x86.cpp
@@ -184,6 +184,38 @@ juint StubRoutines::x86::_crc_table[] =
0x2d02ef8dUL
};
+#ifdef _LP64
+juint StubRoutines::x86::_crc_table_avx512[] =
+{
+ 0xe95c1271UL, 0x00000000UL, 0xce3371cbUL, 0x00000000UL,
+ 0xccaa009eUL, 0x00000000UL, 0x751997d0UL, 0x00000001UL,
+ 0x4a7fe880UL, 0x00000001UL, 0xe88ef372UL, 0x00000001UL,
+ 0xccaa009eUL, 0x00000000UL, 0x63cd6124UL, 0x00000001UL,
+ 0xf7011640UL, 0x00000001UL, 0xdb710640UL, 0x00000001UL,
+ 0xd7cfc6acUL, 0x00000001UL, 0xea89367eUL, 0x00000001UL,
+ 0x8cb44e58UL, 0x00000001UL, 0xdf068dc2UL, 0x00000000UL,
+ 0xae0b5394UL, 0x00000000UL, 0xc7569e54UL, 0x00000001UL,
+ 0xc6e41596UL, 0x00000001UL, 0x54442bd4UL, 0x00000001UL,
+ 0x74359406UL, 0x00000001UL, 0x3db1ecdcUL, 0x00000000UL,
+ 0x5a546366UL, 0x00000001UL, 0xf1da05aaUL, 0x00000000UL,
+ 0xccaa009eUL, 0x00000000UL, 0x751997d0UL, 0x00000001UL,
+ 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL
+};
+
+juint StubRoutines::x86::_crc_by128_masks_avx512[] =
+{
+ 0xffffffffUL, 0xffffffffUL, 0x00000000UL, 0x00000000UL,
+ 0x00000000UL, 0xffffffffUL, 0xffffffffUL, 0xffffffffUL,
+ 0x80808080UL, 0x80808080UL, 0x80808080UL, 0x80808080UL
+};
+
+juint StubRoutines::x86::_shuf_table_crc32_avx512[] =
+{
+ 0x83828100UL, 0x87868584UL, 0x8b8a8988UL, 0x8f8e8d8cUL,
+ 0x03020100UL, 0x07060504UL, 0x0b0a0908UL, 0x000e0d0cUL
+};
+#endif // _LP64
+
#define D 32
#define P 0x82F63B78 // Reflection of Castagnoli (0x11EDC6F41)
diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.hpp b/src/hotspot/cpu/x86/stubRoutines_x86.hpp
index f68656d8aa1..a23ee3666a6 100644
--- a/src/hotspot/cpu/x86/stubRoutines_x86.hpp
+++ b/src/hotspot/cpu/x86/stubRoutines_x86.hpp
@@ -120,6 +120,11 @@ class x86 {
// masks and table for CRC32
static uint64_t _crc_by128_masks[];
static juint _crc_table[];
+#ifdef _LP64
+ static juint _crc_by128_masks_avx512[];
+ static juint _crc_table_avx512[];
+ static juint _shuf_table_crc32_avx512[];
+#endif // _LP64
// table for CRC32C
static juint* _crc32c_table;
// swap mask for ghash
@@ -210,6 +215,11 @@ class x86 {
static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; }
static address counter_shuffle_mask_addr() { return _counter_shuffle_mask_addr; }
static address crc_by128_masks_addr() { return (address)_crc_by128_masks; }
+#ifdef _LP64
+ static address crc_by128_masks_avx512_addr() { return (address)_crc_by128_masks_avx512; }
+ static address shuf_table_crc32_avx512_addr() { return (address)_shuf_table_crc32_avx512; }
+ static address crc_table_avx512_addr() { return (address)_crc_table_avx512; }
+#endif // _LP64
static address ghash_long_swap_mask_addr() { return _ghash_long_swap_mask_addr; }
static address ghash_byte_swap_mask_addr() { return _ghash_byte_swap_mask_addr; }
static address ghash_shufflemask_addr() { return _ghash_shuffmask_addr; }
diff --git a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp
index 25942667b1b..ea9a54bcb39 100644
--- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp
+++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp
@@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zErrno.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLargePages.inline.hpp"
@@ -72,16 +73,15 @@ static ZErrno mremap(uintptr_t from_addr, uintptr_t to_addr, size_t size) {
return (res == KERN_SUCCESS) ? ZErrno(0) : ZErrno(EINVAL);
}
-ZPhysicalMemoryBacking::ZPhysicalMemoryBacking() :
+ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
_base(0),
- _size(0),
_initialized(false) {
// Reserve address space for backing memory
- _base = (uintptr_t)os::reserve_memory(MaxHeapSize);
+ _base = (uintptr_t)os::reserve_memory(max_capacity);
if (_base == 0) {
// Failed
- log_error(gc)("Failed to reserve address space for backing memory");
+ log_error_pd(gc)("Failed to reserve address space for backing memory");
return;
}
@@ -93,15 +93,11 @@ bool ZPhysicalMemoryBacking::is_initialized() const {
return _initialized;
}
-void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
// Does nothing
}
-size_t ZPhysicalMemoryBacking::size() const {
- return _size;
-}
-
-bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) {
+bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
assert(is_aligned(length, os::vm_page_size()), "Invalid length");
@@ -116,17 +112,11 @@ bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) {
return false;
}
- const size_t end = offset + length;
- if (end > _size) {
- // Record new size
- _size = end;
- }
-
// Success
return true;
}
-size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
// Try to commit the whole region
if (commit_inner(offset, length)) {
// Success
@@ -154,7 +144,7 @@ size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
}
}
-size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
assert(is_aligned(length, os::vm_page_size()), "Invalid length");
diff --git a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp
index b3aab9baad9..ca61ec65eea 100644
--- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp
+++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp
@@ -27,22 +27,19 @@
class ZPhysicalMemoryBacking {
private:
uintptr_t _base;
- size_t _size;
bool _initialized;
- bool commit_inner(size_t offset, size_t length);
+ bool commit_inner(size_t offset, size_t length) const;
public:
- ZPhysicalMemoryBacking();
+ ZPhysicalMemoryBacking(size_t max_capacity);
bool is_initialized() const;
- void warn_commit_limits(size_t max) const;
+ void warn_commit_limits(size_t max_capacity) const;
- size_t size() const;
-
- size_t commit(size_t offset, size_t length);
- size_t uncommit(size_t offset, size_t length);
+ size_t commit(size_t offset, size_t length) const;
+ size_t uncommit(size_t offset, size_t length) const;
void map(uintptr_t addr, size_t size, uintptr_t offset) const;
void unmap(uintptr_t addr, size_t size) const;
diff --git a/src/hotspot/os/linux/gc/z/zMountPoint_linux.cpp b/src/hotspot/os/linux/gc/z/zMountPoint_linux.cpp
index 6cacb9a5533..375148149f8 100644
--- a/src/hotspot/os/linux/gc/z/zMountPoint_linux.cpp
+++ b/src/hotspot/os/linux/gc/z/zMountPoint_linux.cpp
@@ -22,11 +22,11 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zErrno.hpp"
#include "gc/z/zMountPoint_linux.hpp"
#include "runtime/globals.hpp"
-#include "logging/log.hpp"
#include <stdio.h>
#include <unistd.h>
@@ -73,7 +73,7 @@ void ZMountPoint::get_mountpoints(const char* filesystem, ZArray<char*>* mountpo
FILE* fd = fopen(PROC_SELF_MOUNTINFO, "r");
if (fd == NULL) {
ZErrno err;
- log_error(gc)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
+ log_error_p(gc)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
return;
}
@@ -114,10 +114,10 @@ char* ZMountPoint::find_preferred_mountpoint(const char* filesystem,
}
// Preferred mount point not found
- log_error(gc)("More than one %s filesystem found:", filesystem);
+ log_error_p(gc)("More than one %s filesystem found:", filesystem);
ZArrayIterator<char*> iter2(mountpoints);
for (char* mountpoint; iter2.next(&mountpoint);) {
- log_error(gc)(" %s", mountpoint);
+ log_error_p(gc)(" %s", mountpoint);
}
return NULL;
@@ -131,7 +131,7 @@ char* ZMountPoint::find_mountpoint(const char* filesystem, const char** preferre
if (mountpoints.size() == 0) {
// No mount point found
- log_error(gc)("Failed to find an accessible %s filesystem", filesystem);
+ log_error_p(gc)("Failed to find an accessible %s filesystem", filesystem);
} else if (mountpoints.size() == 1) {
// One mount point found
path = strdup(mountpoints.at(0));
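
The retagged logging above sits on top of unchanged discovery logic: get_mountpoints() scans /proc/self/mountinfo for mount points of the requested filesystem type. A standalone sketch of that scan, assuming the standard mountinfo field layout (field 5 is the mount point, and the filesystem type follows the " - " separator; octal path escaping is ignored here, and the function name is illustrative):

#include <stdio.h>
#include <string.h>

// Print every mount point whose filesystem type matches 'fstype'.
static void print_mountpoints(const char* fstype) {
  FILE* f = fopen("/proc/self/mountinfo", "r");
  if (f == NULL) {
    return;
  }
  char line[4096];
  while (fgets(line, sizeof(line), f) != NULL) {
    char* sep = strstr(line, " - ");
    if (sep == NULL) {
      continue;
    }
    char type[64];
    char mountpoint[1024];
    // Skip mount id, parent id, major:minor and root; read field 5 (the
    // mount point), then compare the type token that follows " - ".
    if (sscanf(line, "%*d %*d %*d:%*d %*s %1023s", mountpoint) == 1 &&
        sscanf(sep + 3, "%63s", type) == 1 &&
        strcmp(type, fstype) == 0) {
      printf("%s\n", mountpoint);
    }
  }
  fclose(f);
}
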
diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
index 12d9ce15314..2cd3b95a72b 100644
--- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
+++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
@@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zErrno.hpp"
#include "gc/z/zGlobals.hpp"
@@ -112,9 +113,8 @@ static const char* z_preferred_hugetlbfs_mountpoints[] = {
static int z_fallocate_hugetlbfs_attempts = 3;
static bool z_fallocate_supported = true;
-ZPhysicalMemoryBacking::ZPhysicalMemoryBacking() :
+ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
_fd(-1),
- _size(0),
_filesystem(0),
_block_size(0),
_available(0),
@@ -126,11 +126,20 @@ ZPhysicalMemoryBacking::ZPhysicalMemoryBacking() :
return;
}
+ // Truncate backing file
+ while (ftruncate(_fd, max_capacity) == -1) {
+ if (errno != EINTR) {
+ ZErrno err;
+ log_error_p(gc)("Failed to truncate backing file (%s)", err.to_string());
+ return;
+ }
+ }
+
// Get filesystem statistics
struct statfs buf;
if (fstatfs(_fd, &buf) == -1) {
ZErrno err;
- log_error(gc)("Failed to determine filesystem type for backing file (%s)", err.to_string());
+ log_error_p(gc)("Failed to determine filesystem type for backing file (%s)", err.to_string());
return;
}
@@ -138,50 +147,50 @@ ZPhysicalMemoryBacking::ZPhysicalMemoryBacking() :
_block_size = buf.f_bsize;
_available = buf.f_bavail * _block_size;
- log_info(gc, init)("Heap Backing Filesystem: %s (0x" UINT64_FORMAT_X ")",
- is_tmpfs() ? ZFILESYSTEM_TMPFS : is_hugetlbfs() ? ZFILESYSTEM_HUGETLBFS : "other", _filesystem);
+ log_info_p(gc, init)("Heap Backing Filesystem: %s (0x" UINT64_FORMAT_X ")",
+ is_tmpfs() ? ZFILESYSTEM_TMPFS : is_hugetlbfs() ? ZFILESYSTEM_HUGETLBFS : "other", _filesystem);
// Make sure the filesystem type matches requested large page type
if (ZLargePages::is_transparent() && !is_tmpfs()) {
- log_error(gc)("-XX:+UseTransparentHugePages can only be enabled when using a %s filesystem",
- ZFILESYSTEM_TMPFS);
+ log_error_p(gc)("-XX:+UseTransparentHugePages can only be enabled when using a %s filesystem",
+ ZFILESYSTEM_TMPFS);
return;
}
if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
- log_error(gc)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
- ZFILESYSTEM_TMPFS);
+ log_error_p(gc)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
+ ZFILESYSTEM_TMPFS);
return;
}
if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
- log_error(gc)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled "
- "when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
+ log_error_p(gc)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled "
+ "when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
return;
}
if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
- log_error(gc)("-XX:+UseLargePages must be enabled when using a %s filesystem",
- ZFILESYSTEM_HUGETLBFS);
+ log_error_p(gc)("-XX:+UseLargePages must be enabled when using a %s filesystem",
+ ZFILESYSTEM_HUGETLBFS);
return;
}
if (ZLargePages::is_explicit() && os::large_page_size() != ZGranuleSize) {
- log_error(gc)("Incompatible large page size configured " SIZE_FORMAT " (expected " SIZE_FORMAT ")",
- os::large_page_size(), ZGranuleSize);
+ log_error_p(gc)("Incompatible large page size configured " SIZE_FORMAT " (expected " SIZE_FORMAT ")",
+ os::large_page_size(), ZGranuleSize);
return;
}
// Make sure the filesystem block size is compatible
if (ZGranuleSize % _block_size != 0) {
- log_error(gc)("Filesystem backing the heap has incompatible block size (" SIZE_FORMAT ")",
- _block_size);
+ log_error_p(gc)("Filesystem backing the heap has incompatible block size (" SIZE_FORMAT ")",
+ _block_size);
return;
}
if (is_hugetlbfs() && _block_size != ZGranuleSize) {
- log_error(gc)("%s filesystem has unexpected block size " SIZE_FORMAT " (expected " SIZE_FORMAT ")",
- ZFILESYSTEM_HUGETLBFS, _block_size, ZGranuleSize);
+ log_error_p(gc)("%s filesystem has unexpected block size " SIZE_FORMAT " (expected " SIZE_FORMAT ")",
+ ZFILESYSTEM_HUGETLBFS, _block_size, ZGranuleSize);
return;
}
@@ -199,12 +208,12 @@ int ZPhysicalMemoryBacking::create_mem_fd(const char* name) const {
const int fd = ZSyscall::memfd_create(filename, MFD_CLOEXEC | extra_flags);
if (fd == -1) {
ZErrno err;
- log_debug(gc, init)("Failed to create memfd file (%s)",
- ((ZLargePages::is_explicit() && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
+ log_debug_p(gc, init)("Failed to create memfd file (%s)",
+ ((ZLargePages::is_explicit() && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
return -1;
}
- log_info(gc, init)("Heap Backing File: /memfd:%s", filename);
+ log_info_p(gc, init)("Heap Backing File: /memfd:%s", filename);
return fd;
}
@@ -220,7 +229,7 @@ int ZPhysicalMemoryBacking::create_file_fd(const char* name) const {
// Find mountpoint
ZMountPoint mountpoint(filesystem, preferred_mountpoints);
if (mountpoint.get() == NULL) {
- log_error(gc)("Use -XX:AllocateHeapAt to specify the path to a %s filesystem", filesystem);
+ log_error_p(gc)("Use -XX:AllocateHeapAt to specify the path to a %s filesystem", filesystem);
return -1;
}
@@ -229,23 +238,23 @@ int ZPhysicalMemoryBacking::create_file_fd(const char* name) const {
const int fd_anon = os::open(mountpoint.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
if (fd_anon == -1) {
ZErrno err;
- log_debug(gc, init)("Failed to create anonymous file in %s (%s)", mountpoint.get(),
- (err == EINVAL ? "Not supported" : err.to_string()));
+ log_debug_p(gc, init)("Failed to create anonymous file in %s (%s)", mountpoint.get(),
+ (err == EINVAL ? "Not supported" : err.to_string()));
} else {
// Get inode number for anonymous file
struct stat stat_buf;
if (fstat(fd_anon, &stat_buf) == -1) {
ZErrno err;
- log_error(gc)("Failed to determine inode number for anonymous file (%s)", err.to_string());
+ log_error_pd(gc)("Failed to determine inode number for anonymous file (%s)", err.to_string());
return -1;
}
- log_info(gc, init)("Heap Backing File: %s/#" UINT64_FORMAT, mountpoint.get(), (uint64_t)stat_buf.st_ino);
+ log_info_p(gc, init)("Heap Backing File: %s/#" UINT64_FORMAT, mountpoint.get(), (uint64_t)stat_buf.st_ino);
return fd_anon;
}
- log_debug(gc, init)("Falling back to open/unlink");
+ log_debug_p(gc, init)("Falling back to open/unlink");
// Create file name
char filename[PATH_MAX];
@@ -255,18 +264,18 @@ int ZPhysicalMemoryBacking::create_file_fd(const char* name) const {
const int fd = os::open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
if (fd == -1) {
ZErrno err;
- log_error(gc)("Failed to create file %s (%s)", filename, err.to_string());
+ log_error_p(gc)("Failed to create file %s (%s)", filename, err.to_string());
return -1;
}
// Unlink file
if (unlink(filename) == -1) {
ZErrno err;
- log_error(gc)("Failed to unlink file %s (%s)", filename, err.to_string());
+ log_error_p(gc)("Failed to unlink file %s (%s)", filename, err.to_string());
return -1;
}
- log_info(gc, init)("Heap Backing File: %s", filename);
+ log_info_p(gc, init)("Heap Backing File: %s", filename);
return fd;
}
@@ -283,7 +292,7 @@ int ZPhysicalMemoryBacking::create_fd(const char* name) const {
return fd;
}
- log_debug(gc, init)("Falling back to searching for an accessible mount point");
+ log_debug_p(gc)("Falling back to searching for an accessible mount point");
}
return create_file_fd(name);
@@ -293,37 +302,37 @@ bool ZPhysicalMemoryBacking::is_initialized() const {
return _initialized;
}
-void ZPhysicalMemoryBacking::warn_available_space(size_t max) const {
+void ZPhysicalMemoryBacking::warn_available_space(size_t max_capacity) const {
// Note that the available space on a tmpfs or a hugetlbfs filesystem
// will be zero if no size limit was specified when it was mounted.
if (_available == 0) {
// No size limit set, skip check
- log_info(gc, init)("Available space on backing filesystem: N/A");
+ log_info_p(gc, init)("Available space on backing filesystem: N/A");
return;
}
- log_info(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", _available / M);
+ log_info_p(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", _available / M);
// Warn if the filesystem doesn't currently have enough space available to hold
// the max heap size. The max heap size will be capped if we later hit this limit
// when trying to expand the heap.
- if (_available < max) {
- log_warning(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
- log_warning(gc)("Not enough space available on the backing filesystem to hold the current max Java heap");
- log_warning(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly "
- "(available", max / M);
- log_warning(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem "
- "size could", _available / M);
- log_warning(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
+ if (_available < max_capacity) {
+ log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
+ log_warning_p(gc)("Not enough space available on the backing filesystem to hold the current max Java heap");
+ log_warning_p(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly "
+ "(available", max_capacity / M);
+ log_warning_p(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem "
+ "size could", _available / M);
+ log_warning_p(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to commit memory.");
}
}
-void ZPhysicalMemoryBacking::warn_max_map_count(size_t max) const {
+void ZPhysicalMemoryBacking::warn_max_map_count(size_t max_capacity) const {
const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
FILE* const file = fopen(filename, "r");
if (file == NULL) {
// Failed to open file, skip check
- log_debug(gc, init)("Failed to open %s", filename);
+ log_debug_p(gc, init)("Failed to open %s", filename);
return;
}
@@ -332,7 +341,7 @@ void ZPhysicalMemoryBacking::warn_max_map_count(size_t max) const {
fclose(file);
if (result != 1) {
// Failed to read file, skip check
- log_debug(gc, init)("Failed to read %s", filename);
+ log_debug_p(gc, init)("Failed to read %s", filename);
return;
}
@@ -341,28 +350,24 @@ void ZPhysicalMemoryBacking::warn_max_map_count(size_t max) const {
// However, ZGC tends to create the most mappings and dominate the total count.
// In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
// We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
- const size_t required_max_map_count = (max / ZGranuleSize) * 3 * 1.2;
+ const size_t required_max_map_count = (max_capacity / ZGranuleSize) * 3 * 1.2;
if (actual_max_map_count < required_max_map_count) {
- log_warning(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
- log_warning(gc)("The system limit on number of memory mappings per process might be too low for the given");
- log_warning(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
- max / M, filename);
- log_warning(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution "
- "with the current", required_max_map_count, actual_max_map_count);
- log_warning(gc)("limit could lead to a fatal error, due to failure to map memory.");
+ log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
+ log_warning_p(gc)("The system limit on number of memory mappings per process might be too low for the given");
+ log_warning_p(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
+ max_capacity / M, filename);
+ log_warning_p(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution "
+ "with the current", required_max_map_count, actual_max_map_count);
+ log_warning_p(gc)("limit could lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
}
}
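
Worked example for the bound above, assuming the usual 2M ZGranuleSize: a 16G max heap is 8192 granules; three heap views make 24576 mappings; the 20% headroom raises required_max_map_count to 29491.
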
-void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
// Warn if available space is too low
- warn_available_space(max);
+ warn_available_space(max_capacity);
// Warn if max map count is too low
- warn_max_map_count(max);
-}
-
-size_t ZPhysicalMemoryBacking::size() const {
- return _size;
+ warn_max_map_count(max_capacity);
}
bool ZPhysicalMemoryBacking::is_tmpfs() const {
@@ -379,18 +384,6 @@ bool ZPhysicalMemoryBacking::tmpfs_supports_transparent_huge_pages() const {
return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
}
-ZErrno ZPhysicalMemoryBacking::fallocate_compat_ftruncate(size_t size) const {
- while (ftruncate(_fd, size) == -1) {
- if (errno != EINTR) {
- // Failed
- return errno;
- }
- }
-
- // Success
- return 0;
-}
-
ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const {
// On hugetlbfs, mapping a file segment will fail immediately, without
// the need to touch the mapped pages first, if there aren't enough huge
@@ -484,49 +477,21 @@ ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t len
return 0;
}
-ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) const {
// fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
// since Linux 4.3. When fallocate(2) is not supported we emulate it using
// mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite
// (for tmpfs without transparent huge pages and other filesystem types).
-
- const size_t end = offset + length;
- if (end > _size) {
- // Increase file size
- const ZErrno err = fallocate_compat_ftruncate(end);
- if (err) {
- // Failed
- return err;
- }
+ if (ZLargePages::is_explicit()) {
+ return fallocate_compat_mmap_hugetlbfs(offset, length, false /* touch */);
+ } else if (ZLargePages::is_transparent()) {
+ return fallocate_compat_mmap_tmpfs(offset, length);
+ } else {
+ return fallocate_compat_pwrite(offset, length);
}
-
- // Allocate backing memory
- const ZErrno err = ZLargePages::is_explicit()
- ? fallocate_compat_mmap_hugetlbfs(offset, length, false /* touch */)
- : (ZLargePages::is_transparent()
- ? fallocate_compat_mmap_tmpfs(offset, length)
- : fallocate_compat_pwrite(offset, length));
-
- if (err) {
- if (end > _size) {
- // Restore file size
- fallocate_compat_ftruncate(_size);
- }
-
- // Failed
- return err;
- }
-
- if (end > _size) {
- // Record new file size
- _size = end;
- }
-
- // Success
- return 0;
}
-ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) const {
const int mode = 0; // Allocate
const int res = ZSyscall::fallocate(_fd, mode, offset, length);
if (res == -1) {
@@ -534,17 +499,11 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t
return errno;
}
- const size_t end = offset + length;
- if (end > _size) {
- // Record new file size
- _size = end;
- }
-
// Success
return 0;
}
-ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) const {
// Using compat mode is more efficient when allocating space on hugetlbfs.
// Note that allocating huge pages this way will only reserve them, and not
// associate them with segments of the file. We must guarantee that we at
@@ -564,14 +523,14 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length)
}
// Not supported
- log_debug(gc)("Falling back to fallocate() compatibility mode");
+ log_debug_p(gc)("Falling back to fallocate() compatibility mode");
z_fallocate_supported = false;
}
return fallocate_fill_hole_compat(offset, length);
}
-ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) const {
if (ZLargePages::is_explicit()) {
// We can only punch hole in pages that have been touched. Non-touched
// pages are only reserved, and not associated with any specific file
@@ -594,7 +553,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length
return 0;
}
-ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) const {
// Try first half
const size_t offset0 = offset;
const size_t length0 = align_up(length / 2, _block_size);
@@ -615,7 +574,7 @@ ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offse
return 0;
}
-ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) {
+ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) const {
assert(is_aligned(offset, _block_size), "Invalid offset");
assert(is_aligned(length, _block_size), "Invalid length");
@@ -631,7 +590,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t
return err;
}
-bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) {
+bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);
@@ -645,7 +604,7 @@ retry:
// will fail, since there is a delay between process termination and the
// huge pages owned by that process being returned to the huge page pool
// and made available for new allocations.
- log_debug(gc, init)("Failed to commit memory (%s), retrying", err.to_string());
+ log_debug_p(gc, init)("Failed to commit memory (%s), retrying", err.to_string());
// Wait and retry in one second, in the hope that huge pages will be
// available by then.
@@ -654,7 +613,7 @@ retry:
}
// Failed
- log_error(gc)("Failed to commit memory (%s)", err.to_string());
+ log_error_p(gc)("Failed to commit memory (%s)", err.to_string());
return false;
}
@@ -668,7 +627,7 @@ static int offset_to_node(size_t offset) {
return mapping->at((int)nindex);
}
-size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) const {
size_t committed = 0;
// Commit one granule at a time, so that each granule
@@ -693,7 +652,7 @@ size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t len
return committed;
}
-size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) const {
// Try to commit the whole region
if (commit_inner(offset, length)) {
// Success
@@ -721,7 +680,7 @@ size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) {
}
}
-size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
if (ZNUMA::is_enabled() && !ZLargePages::is_explicit()) {
// To get granule-level NUMA interleaving when using non-large pages,
// we must explicitly interleave the memory at commit/fallocate time.
@@ -731,7 +690,7 @@ size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
return commit_default(offset, length);
}
-size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
+size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);
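
For the pwrite branch of the compatibility path above, the emulation amounts to touching one byte per filesystem block so that the hole gets backing storage allocated. A minimal sketch under that reading (fill_hole_with_pwrite and block_size are illustrative names standing in for the HotSpot code and its _block_size field):

#include <errno.h>
#include <stddef.h>
#include <unistd.h>

// Write a single zero byte into each block of [offset, offset + length),
// forcing the filesystem to allocate backing blocks for the hole.
// Returns 0 on success, or the failing errno.
static int fill_hole_with_pwrite(int fd, size_t offset, size_t length,
                                 size_t block_size) {
  const char zero = 0;
  for (size_t pos = offset; pos < offset + length; pos += block_size) {
    ssize_t written;
    do {
      written = pwrite(fd, &zero, sizeof(zero), (off_t)pos);
    } while (written == -1 && errno == EINTR);
    if (written == -1) {
      return errno;  // Failed to allocate this block
    }
  }
  return 0;  // Success
}
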
diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp
index 06a13897b5d..655c5a4209b 100644
--- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp
+++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp
@@ -35,8 +35,8 @@ private:
size_t _available;
bool _initialized;
- void warn_available_space(size_t max) const;
- void warn_max_map_count(size_t max) const;
+ void warn_available_space(size_t max_capacity) const;
+ void warn_max_map_count(size_t max_capacity) const;
int create_mem_fd(const char* name) const;
int create_file_fd(const char* name) const;
@@ -46,32 +46,29 @@ private:
bool is_hugetlbfs() const;
bool tmpfs_supports_transparent_huge_pages() const;
- ZErrno fallocate_compat_ftruncate(size_t size) const;
ZErrno fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const;
ZErrno fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const;
ZErrno fallocate_compat_pwrite(size_t offset, size_t length) const;
- ZErrno fallocate_fill_hole_compat(size_t offset, size_t length);
- ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length);
- ZErrno fallocate_fill_hole(size_t offset, size_t length);
- ZErrno fallocate_punch_hole(size_t offset, size_t length);
- ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length);
- ZErrno fallocate(bool punch_hole, size_t offset, size_t length);
+ ZErrno fallocate_fill_hole_compat(size_t offset, size_t length) const;
+ ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length) const;
+ ZErrno fallocate_fill_hole(size_t offset, size_t length) const;
+ ZErrno fallocate_punch_hole(size_t offset, size_t length) const;
+ ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length) const;
+ ZErrno fallocate(bool punch_hole, size_t offset, size_t length) const;
- bool commit_inner(size_t offset, size_t length);
- size_t commit_numa_interleaved(size_t offset, size_t length);
- size_t commit_default(size_t offset, size_t length);
+ bool commit_inner(size_t offset, size_t length) const;
+ size_t commit_numa_interleaved(size_t offset, size_t length) const;
+ size_t commit_default(size_t offset, size_t length) const;
public:
- ZPhysicalMemoryBacking();
+ ZPhysicalMemoryBacking(size_t max_capacity);
bool is_initialized() const;
- void warn_commit_limits(size_t max) const;
+ void warn_commit_limits(size_t max_capacity) const;
- size_t size() const;
-
- size_t commit(size_t offset, size_t length);
- size_t uncommit(size_t offset, size_t length);
+ size_t commit(size_t offset, size_t length) const;
+ size_t uncommit(size_t offset, size_t length) const;
void map(uintptr_t addr, size_t size, uintptr_t offset) const;
void unmap(uintptr_t addr, size_t size) const;
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index 1ddfa5ece51..403ece15798 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -2089,40 +2089,42 @@ void os::print_dll_info(outputStream *st) {
}
}
-int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
- FILE *procmapsFile = NULL;
+struct loaded_modules_info_param {
+ os::LoadedModulesCallbackFunc callback;
+ void *param;
+};
- // Open the procfs maps file for the current process
- if ((procmapsFile = fopen("/proc/self/maps", "r")) != NULL) {
- // Allocate PATH_MAX for file name plus a reasonable size for other fields.
- char line[PATH_MAX + 100];
+static int dl_iterate_callback(struct dl_phdr_info *info, size_t size, void *data) {
+ if ((info->dlpi_name == NULL) || (*info->dlpi_name == '\0')) {
+ return 0;
+ }
- // Read line by line from 'file'
- while (fgets(line, sizeof(line), procmapsFile) != NULL) {
- u8 base, top, inode;
- char name[sizeof(line)];
+ struct loaded_modules_info_param *callback_param = reinterpret_cast<struct loaded_modules_info_param*>(data);
+ address base = NULL;
+ address top = NULL;
+ for (int idx = 0; idx < info->dlpi_phnum; idx++) {
+ const ElfW(Phdr) *phdr = info->dlpi_phdr + idx;
+ if (phdr->p_type == PT_LOAD) {
+ address raw_phdr_base = reinterpret_cast<address>(info->dlpi_addr + phdr->p_vaddr);
- // Parse fields from line, discard perms, offset and device
- int matches = sscanf(line, UINT64_FORMAT_X "-" UINT64_FORMAT_X " %*s %*s %*s " INT64_FORMAT " %s",
- &base, &top, &inode, name);
- // the last entry 'name' is empty for some entries, so we might have 3 matches instead of 4 for some lines
- if (matches < 3) continue;
- if (matches == 3) name[0] = '\0';
+ address phdr_base = align_down(raw_phdr_base, phdr->p_align);
+ if ((base == NULL) || (base > phdr_base)) {
+ base = phdr_base;
+ }
- // Filter by inode 0 so that we only get file system mapped files.
- if (inode != 0) {
-
- // Call callback with the fields of interest
- if(callback(name, (address)base, (address)top, param)) {
- // Oops abort, callback aborted
- fclose(procmapsFile);
- return 1;
- }
+ address phdr_top = align_up(raw_phdr_base + phdr->p_memsz, phdr->p_align);
+ if ((top == NULL) || (top < phdr_top)) {
+ top = phdr_top;
}
}
- fclose(procmapsFile);
}
- return 0;
+
+ return callback_param->callback(info->dlpi_name, base, top, callback_param->param);
+}
+
+int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
+ struct loaded_modules_info_param callback_param = {callback, param};
+ return dl_iterate_phdr(&dl_iterate_callback, &callback_param);
}
void os::print_os_info_brief(outputStream* st) {
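
The rewrite above replaces hand-parsing of /proc/self/maps with dl_iterate_phdr(3), which walks the loader's own module list and hands the callback each module's program headers directly. A self-contained illustration of the same pattern (print_module is a hypothetical callback; note that a non-zero return aborts the walk, which is how the new code propagates the callback's abort request):

#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>

// Compute the [base, top) span of each named module from its PT_LOAD
// segments, as the HotSpot callback above does (alignment rounding is
// simplified here).
static int print_module(struct dl_phdr_info* info, size_t size, void* data) {
  if (info->dlpi_name == NULL || info->dlpi_name[0] == '\0') {
    return 0;  // Skip the main executable / unnamed entries
  }
  ElfW(Addr) base = 0, top = 0;
  for (int i = 0; i < info->dlpi_phnum; i++) {
    const ElfW(Phdr)* phdr = &info->dlpi_phdr[i];
    if (phdr->p_type != PT_LOAD) continue;
    ElfW(Addr) seg_base = info->dlpi_addr + phdr->p_vaddr;
    ElfW(Addr) seg_top = seg_base + phdr->p_memsz;
    if (base == 0 || seg_base < base) base = seg_base;
    if (seg_top > top) top = seg_top;
  }
  printf("%s: [%p, %p)\n", info->dlpi_name, (void*)base, (void*)top);
  return 0;  // Non-zero would abort the iteration
}

int main() {
  dl_iterate_phdr(&print_module, NULL);
  return 0;
}
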
diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp
index 2fcbe5623f0..4a22938d77e 100644
--- a/src/hotspot/os/posix/os_posix.cpp
+++ b/src/hotspot/os/posix/os_posix.cpp
@@ -370,6 +370,10 @@ void os::split_reserved_memory(char *base, size_t size, size_t split) {
assert(split > 0, "Sanity");
assert(is_aligned(base, os::vm_allocation_granularity()), "Sanity");
assert(is_aligned(split_address, os::vm_allocation_granularity()), "Sanity");
+
+ // NMT: tell NMT to track both parts individually from now on.
+ MemTracker::record_virtual_memory_split_reserved(base, size, split);
+
}
int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
diff --git a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp
index f710563bbd6..bc1821905b7 100644
--- a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp
+++ b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp
@@ -35,22 +35,17 @@
// committing and uncommitting, each ZGranuleSize'd chunk is mapped to
// a separate paging file mapping.
-ZPhysicalMemoryBacking::ZPhysicalMemoryBacking() :
- _handles(MaxHeapSize),
- _size(0) {}
+ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
+ _handles(max_capacity) {}
bool ZPhysicalMemoryBacking::is_initialized() const {
return true;
}
-void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
// Does nothing
}
-size_t ZPhysicalMemoryBacking::size() const {
- return _size;
-}
-
HANDLE ZPhysicalMemoryBacking::get_handle(uintptr_t offset) const {
HANDLE const handle = _handles.get(offset);
assert(handle != 0, "Should be set");
@@ -95,15 +90,7 @@ size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);
- const size_t committed = commit_from_paging_file(offset, length);
-
- const size_t end = offset + committed;
- if (end > _size) {
- // Update size
- _size = end;
- }
-
- return committed;
+ return commit_from_paging_file(offset, length);
}
size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
diff --git a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp
index febc80742b8..c3179800b65 100644
--- a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp
+++ b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp
@@ -31,7 +31,6 @@
class ZPhysicalMemoryBacking {
private:
ZGranuleMap<HANDLE> _handles;
- size_t _size;
HANDLE get_handle(uintptr_t offset) const;
void put_handle(uintptr_t offset, HANDLE handle);
@@ -41,13 +40,11 @@ private:
size_t uncommit_from_paging_file(size_t offset, size_t size);
public:
- ZPhysicalMemoryBacking();
+ ZPhysicalMemoryBacking(size_t max_capacity);
bool is_initialized() const;
- void warn_commit_limits(size_t max) const;
-
- size_t size() const;
+ void warn_commit_limits(size_t max_capacity) const;
size_t commit(size_t offset, size_t length);
size_t uncommit(size_t offset, size_t length);
diff --git a/src/hotspot/os/windows/gc/z/zSyscall_windows.cpp b/src/hotspot/os/windows/gc/z/zSyscall_windows.cpp
index c3f2da3ecd7..a5e824e7064 100644
--- a/src/hotspot/os/windows/gc/z/zSyscall_windows.cpp
+++ b/src/hotspot/os/windows/gc/z/zSyscall_windows.cpp
@@ -22,8 +22,8 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zSyscall_windows.hpp"
-#include "logging/log.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
@@ -38,13 +38,13 @@ static void lookup_symbol(Fn*& fn, const char* library, const char* symbol) {
char ebuf[1024];
void* const handle = os::dll_load(library, ebuf, sizeof(ebuf));
if (handle == NULL) {
- log_error(gc)("Failed to load library: %s", library);
+ log_error_p(gc)("Failed to load library: %s", library);
vm_exit_during_initialization("ZGC requires Windows version 1803 or later");
}
fn = reinterpret_cast<Fn*>(os::dll_lookup(handle, symbol));
if (fn == NULL) {
- log_error(gc)("Failed to lookup symbol: %s", symbol);
+ log_error_p(gc)("Failed to lookup symbol: %s", symbol);
vm_exit_during_initialization("ZGC requires Windows version 1803 or later");
}
}
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index 6a31ada9c76..32485c0b397 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -3237,6 +3237,10 @@ void os::split_reserved_memory(char *base, size_t size, size_t split) {
reserve_memory(split, base);
reserve_memory(size - split, split_address);
+ // NMT: nothing to do here. Since Windows implements the split by
+ // releasing and re-reserving memory, the parts are already registered
+ // as individual mappings with NMT.
+
}
// Multiple threads can race in this code but it's not possible to unmap small sections of
diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
index 05ee42ef061..9e3438e8f74 100644
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
@@ -438,25 +438,28 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
// stop on request
else if (sig == SIGTRAP && (stop_type = nativeInstruction_at(pc)->get_stop_type()) != -1) {
- const char *msg = NULL,
- *detail_msg = (const char*)(uc->uc_mcontext.jmp_context.gpr[0]);
+ bool msg_present = (stop_type & MacroAssembler::stop_msg_present);
+ stop_type = (stop_type &~ MacroAssembler::stop_msg_present);
+
+ const char *msg = NULL;
switch (stop_type) {
- case MacroAssembler::stop_stop : msg = "stop"; break;
- case MacroAssembler::stop_untested : msg = "untested"; break;
- case MacroAssembler::stop_unimplemented : msg = "unimplemented"; break;
- case MacroAssembler::stop_shouldnotreachhere: msg = "shouldnotreachhere"; detail_msg = NULL; break;
+ case MacroAssembler::stop_stop : msg = "stop"; break;
+ case MacroAssembler::stop_untested : msg = "untested"; break;
+ case MacroAssembler::stop_unimplemented : msg = "unimplemented"; break;
+ case MacroAssembler::stop_shouldnotreachhere: msg = "shouldnotreachhere"; break;
default: msg = "unknown"; break;
}
- if (detail_msg == NULL) {
- detail_msg = "no details provided";
- }
+
+ const char **detail_msg_ptr = (const char**)(pc + 4);
+ const char *detail_msg = msg_present ? *detail_msg_ptr : "no details provided";
if (TraceTraps) {
tty->print_cr("trap: %s: %s (SIGTRAP, stop type %d)", msg, detail_msg, stop_type);
}
va_list detail_args;
- VMError::report_and_die(t, ucVoid, NULL, 0, msg, detail_msg, detail_args);
+ VMError::report_and_die(INTERNAL_ERROR, msg, detail_msg, detail_args, thread,
+ pc, info, ucVoid, NULL, 0, 0);
va_end(detail_args);
}
diff --git a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
index b84411ae942..50c77a96f65 100644
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
@@ -468,25 +468,28 @@ JVM_handle_linux_signal(int sig,
// stop on request
else if (sig == SIGTRAP && (stop_type = nativeInstruction_at(pc)->get_stop_type()) != -1) {
- const char *msg = NULL,
- *detail_msg = (const char*)(uc->uc_mcontext.regs->gpr[0]);
+ bool msg_present = (stop_type & MacroAssembler::stop_msg_present);
+ stop_type = (stop_type &~ MacroAssembler::stop_msg_present);
+
+ const char *msg = NULL;
switch (stop_type) {
- case MacroAssembler::stop_stop : msg = "stop"; break;
- case MacroAssembler::stop_untested : msg = "untested"; break;
- case MacroAssembler::stop_unimplemented : msg = "unimplemented"; break;
- case MacroAssembler::stop_shouldnotreachhere: msg = "shouldnotreachhere"; detail_msg = NULL; break;
+ case MacroAssembler::stop_stop : msg = "stop"; break;
+ case MacroAssembler::stop_untested : msg = "untested"; break;
+ case MacroAssembler::stop_unimplemented : msg = "unimplemented"; break;
+ case MacroAssembler::stop_shouldnotreachhere: msg = "shouldnotreachhere"; break;
default: msg = "unknown"; break;
}
- if (detail_msg == NULL) {
- detail_msg = "no details provided";
- }
+
+ const char **detail_msg_ptr = (const char**)(pc + 4);
+ const char *detail_msg = msg_present ? *detail_msg_ptr : "no details provided";
if (TraceTraps) {
tty->print_cr("trap: %s: %s (SIGTRAP, stop type %d)", msg, detail_msg, stop_type);
}
va_list detail_args;
- VMError::report_and_die(t, ucVoid, NULL, 0, msg, detail_msg, detail_args);
+ VMError::report_and_die(INTERNAL_ERROR, msg, detail_msg, detail_args, thread,
+ pc, info, ucVoid, NULL, 0, 0);
va_end(detail_args);
}
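
Both PPC ports now recover the stop detail message from the code stream rather than from a fixed register: when the emitter sets MacroAssembler::stop_msg_present in the stop type, the handler assumes an 8-byte pointer to the message string is planted immediately after the 4-byte trap instruction. Schematically (comments only, mirroring the handler's read above):

// Assumed layout at a stop site that carries a message:
//
//   pc + 0:  4-byte trap instruction (encodes stop_type | stop_msg_present)
//   pc + 4:  8-byte pointer to the detail message C string
//
// hence the handler's load:
const char **detail_msg_ptr = (const char**)(pc + 4);
const char *detail_msg = msg_present ? *detail_msg_ptr : "no details provided";
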
diff --git a/src/hotspot/share/c1/c1_GraphBuilder.cpp b/src/hotspot/share/c1/c1_GraphBuilder.cpp
index ffd5051bbad..0b141091780 100644
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp
@@ -4050,7 +4050,7 @@ bool GraphBuilder::try_method_handle_inline(ciMethod* callee, bool ignore_return
if (ciMethod::is_consistent_info(callee, target)) {
Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void());
- if (try_inline(target, /*holder_known*/ true, ignore_return, bc)) {
+ if (try_inline(target, /*holder_known*/ !callee->is_static(), ignore_return, bc)) {
return true;
}
} else {
@@ -4116,7 +4116,7 @@ bool GraphBuilder::try_method_handle_inline(ciMethod* callee, bool ignore_return
// We don't do CHA here so only inline static and statically bindable methods.
if (target->is_static() || target->can_be_statically_bound()) {
Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual;
- if (try_inline(target, /*holder_known*/ true, ignore_return, bc)) {
+ if (try_inline(target, /*holder_known*/ !callee->is_static(), ignore_return, bc)) {
return true;
}
} else {
diff --git a/src/hotspot/share/c1/c1_LIR.cpp b/src/hotspot/share/c1/c1_LIR.cpp
index 464ed2b62bc..e0e2b2fb6e4 100644
--- a/src/hotspot/share/c1/c1_LIR.cpp
+++ b/src/hotspot/share/c1/c1_LIR.cpp
@@ -236,30 +236,27 @@ void LIR_Op2::verify() const {
}
-LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block)
+LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, BlockBegin* block)
: LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL)
, _cond(cond)
- , _type(type)
, _label(block->label())
, _block(block)
, _ublock(NULL)
, _stub(NULL) {
}
-LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, BasicType type, CodeStub* stub) :
+LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, CodeStub* stub) :
LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL)
, _cond(cond)
- , _type(type)
, _label(stub->entry())
, _block(NULL)
, _ublock(NULL)
, _stub(stub) {
}
-LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* ublock)
+LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, BlockBegin* block, BlockBegin* ublock)
: LIR_Op(lir_cond_float_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL)
, _cond(cond)
- , _type(type)
, _label(block->label())
, _block(block)
, _ublock(ublock)
@@ -1403,7 +1400,7 @@ void LIR_List::null_check(LIR_Opr opr, CodeEmitInfo* info, bool deoptimize_on_nu
// Emit an explicit null check and deoptimize if opr is null
CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_null_check, Deoptimization::Action_none);
cmp(lir_cond_equal, opr, LIR_OprFact::oopConst(NULL));
- branch(lir_cond_equal, T_OBJECT, deopt);
+ branch(lir_cond_equal, deopt);
} else {
// Emit an implicit null check
append(new LIR_Op1(lir_null_check, opr, info));
diff --git a/src/hotspot/share/c1/c1_LIR.hpp b/src/hotspot/share/c1/c1_LIR.hpp
index 8b7ab7fc9ed..787a22fff11 100644
--- a/src/hotspot/share/c1/c1_LIR.hpp
+++ b/src/hotspot/share/c1/c1_LIR.hpp
@@ -1404,30 +1404,27 @@ class LIR_OpBranch: public LIR_Op {
private:
LIR_Condition _cond;
- BasicType _type;
Label* _label;
BlockBegin* _block; // if this is a branch to a block, this is the block
BlockBegin* _ublock; // if this is a float-branch, this is the unordered block
CodeStub* _stub; // if this is a branch to a stub, this is the stub
public:
- LIR_OpBranch(LIR_Condition cond, BasicType type, Label* lbl)
+ LIR_OpBranch(LIR_Condition cond, Label* lbl)
: LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
, _cond(cond)
- , _type(type)
, _label(lbl)
, _block(NULL)
, _ublock(NULL)
, _stub(NULL) { }
- LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block);
- LIR_OpBranch(LIR_Condition cond, BasicType type, CodeStub* stub);
+ LIR_OpBranch(LIR_Condition cond, BlockBegin* block);
+ LIR_OpBranch(LIR_Condition cond, CodeStub* stub);
// for unordered comparisons
- LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* ublock);
+ LIR_OpBranch(LIR_Condition cond, BlockBegin* block, BlockBegin* ublock);
LIR_Condition cond() const { return _cond; }
- BasicType type() const { return _type; }
Label* label() const { return _label; }
BlockBegin* block() const { return _block; }
BlockBegin* ublock() const { return _ublock; }
@@ -2176,23 +2173,25 @@ class LIR_List: public CompilationResourceObj {
// jump is an unconditional branch
void jump(BlockBegin* block) {
- append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, block));
+ append(new LIR_OpBranch(lir_cond_always, block));
}
void jump(CodeStub* stub) {
- append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
+ append(new LIR_OpBranch(lir_cond_always, stub));
}
- void branch(LIR_Condition cond, BasicType type, Label* lbl) { append(new LIR_OpBranch(cond, type, lbl)); }
- void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
- assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
- append(new LIR_OpBranch(cond, type, block));
+ void branch(LIR_Condition cond, Label* lbl) {
+ append(new LIR_OpBranch(cond, lbl));
}
- void branch(LIR_Condition cond, BasicType type, CodeStub* stub) {
- assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
- append(new LIR_OpBranch(cond, type, stub));
+ // Should not be used for fp comparisons
+ void branch(LIR_Condition cond, BlockBegin* block) {
+ append(new LIR_OpBranch(cond, block));
}
- void branch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* unordered) {
- assert(type == T_FLOAT || type == T_DOUBLE, "fp comparisons only");
- append(new LIR_OpBranch(cond, type, block, unordered));
+ // Should not be used for fp comparisons
+ void branch(LIR_Condition cond, CodeStub* stub) {
+ append(new LIR_OpBranch(cond, stub));
+ }
+ // Should only be used for fp comparisons
+ void branch(LIR_Condition cond, BlockBegin* block, BlockBegin* unordered) {
+ append(new LIR_OpBranch(cond, block, unordered));
}
void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
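
With BasicType dropped from LIR_OpBranch, the cmp/branch idiom reads as below. A schematic in the c1 shorthand used throughout these files (not standalone code; LabelObj and the __ macro come from the surrounding LIRGenerator context):

// Old: __ branch(lir_cond_equal, T_INT, L_done->label());
// New: the preceding cmp already fixed the operand types, so the branch
// takes only the condition and the target.
LabelObj* L_done = new LabelObj();
__ cmp(lir_cond_equal, value, LIR_OprFact::intConst(0));
__ branch(lir_cond_equal, L_done->label());
// ... non-zero path ...
__ branch_destination(L_done->label());
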
diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp
index 153ebbc3f13..781839cd279 100644
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp
@@ -478,11 +478,11 @@ void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
if (index->is_constant()) {
cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
index->as_jint(), null_check_info);
- __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
+ __ branch(lir_cond_belowEqual, stub); // forward branch
} else {
cmp_reg_mem(lir_cond_aboveEqual, index, array,
arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
- __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
+ __ branch(lir_cond_aboveEqual, stub); // forward branch
}
}
@@ -491,11 +491,11 @@ void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result
CodeStub* stub = new RangeCheckStub(info, index);
if (index->is_constant()) {
cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
- __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
+ __ branch(lir_cond_belowEqual, stub); // forward branch
} else {
cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
java_nio_Buffer::limit_offset(), T_INT, info);
- __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
+ __ branch(lir_cond_aboveEqual, stub); // forward branch
}
__ move(index, result);
}
@@ -686,7 +686,7 @@ void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unr
oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
} else {
CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
- __ branch(lir_cond_always, T_ILLEGAL, slow_path);
+ __ branch(lir_cond_always, slow_path);
__ branch_destination(slow_path->continuation());
}
}
@@ -1591,7 +1591,7 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
if (GenerateRangeChecks && needs_range_check) {
if (use_length) {
__ cmp(lir_cond_belowEqual, length.result(), index.result());
- __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
+ __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
} else {
array_range_check(array.result(), index.result(), null_check_info, range_check_info);
// range_check also does the null check
@@ -1780,11 +1780,11 @@ void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
LIR_Opr buf_obj = access_resolve(IS_NOT_NULL | ACCESS_READ, buf.result());
if (index.result()->is_constant()) {
cmp_mem_int(lir_cond_belowEqual, buf_obj, java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
- __ branch(lir_cond_belowEqual, T_INT, stub);
+ __ branch(lir_cond_belowEqual, stub);
} else {
cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf_obj,
java_nio_Buffer::limit_offset(), T_INT, info);
- __ branch(lir_cond_aboveEqual, T_INT, stub);
+ __ branch(lir_cond_aboveEqual, stub);
}
__ move(index.result(), result);
} else {
@@ -1858,12 +1858,12 @@ void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
if (GenerateRangeChecks && needs_range_check) {
if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
- __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result(), array.result()));
+ __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
} else if (use_length) {
// TODO: use a (modified) version of array_range_check that does not require a
// constant length to be loaded to a register
__ cmp(lir_cond_belowEqual, length.result(), index.result());
- __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
+ __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
} else {
array_range_check(array.result(), index.result(), null_check_info, range_check_info);
// The range check performs the null check, so clear it out for the load
@@ -2239,18 +2239,18 @@ void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegi
BlockBegin* dest = one_range->sux();
if (low_key == high_key) {
__ cmp(lir_cond_equal, value, low_key);
- __ branch(lir_cond_equal, T_INT, dest);
+ __ branch(lir_cond_equal, dest);
} else if (high_key - low_key == 1) {
__ cmp(lir_cond_equal, value, low_key);
- __ branch(lir_cond_equal, T_INT, dest);
+ __ branch(lir_cond_equal, dest);
__ cmp(lir_cond_equal, value, high_key);
- __ branch(lir_cond_equal, T_INT, dest);
+ __ branch(lir_cond_equal, dest);
} else {
LabelObj* L = new LabelObj();
__ cmp(lir_cond_less, value, low_key);
- __ branch(lir_cond_less, T_INT, L->label());
+ __ branch(lir_cond_less, L->label());
__ cmp(lir_cond_lessEqual, value, high_key);
- __ branch(lir_cond_lessEqual, T_INT, dest);
+ __ branch(lir_cond_lessEqual, dest);
__ branch_destination(L->label());
}
}
@@ -2370,7 +2370,7 @@ void LIRGenerator::do_TableSwitch(TableSwitch* x) {
} else {
for (int i = 0; i < len; i++) {
__ cmp(lir_cond_equal, value, i + lo_key);
- __ branch(lir_cond_equal, T_INT, x->sux_at(i));
+ __ branch(lir_cond_equal, x->sux_at(i));
}
__ jump(x->default_sux());
}
@@ -2429,7 +2429,7 @@ void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
int len = x->length();
for (int i = 0; i < len; i++) {
__ cmp(lir_cond_equal, value, x->key_at(i));
- __ branch(lir_cond_equal, T_INT, x->sux_at(i));
+ __ branch(lir_cond_equal, x->sux_at(i));
}
__ jump(x->default_sux());
}
@@ -2975,16 +2975,18 @@ void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
void LIRGenerator::do_getEventWriter(Intrinsic* x) {
LabelObj* L_end = new LabelObj();
+ // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
+ // meaning of these two is mixed up (see JDK-8026837).
LIR_Address* jobj_addr = new LIR_Address(getThreadPointer(),
in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR),
- T_OBJECT);
+ T_ADDRESS);
LIR_Opr result = rlock_result(x);
- __ move_wide(jobj_addr, result);
- __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
- __ branch(lir_cond_equal, T_OBJECT, L_end->label());
+ __ move(LIR_OprFact::oopConst(NULL), result);
+ LIR_Opr jobj = new_register(T_METADATA);
+ __ move_wide(jobj_addr, jobj);
+ __ cmp(lir_cond_equal, jobj, LIR_OprFact::metadataConst(0));
+ __ branch(lir_cond_equal, L_end->label());
- LIR_Opr jobj = new_register(T_OBJECT);
- __ move(result, jobj);
access_load(IN_NATIVE, T_OBJECT, LIR_OprFact::address(new LIR_Address(jobj, T_OBJECT)), result);
__ branch_destination(L_end->label());
@@ -3342,7 +3344,7 @@ void LIRGenerator::decrement_age(CodeEmitInfo* info) {
CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_tenured,
Deoptimization::Action_make_not_entrant);
__ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0));
- __ branch(lir_cond_lessEqual, T_INT, deopt);
+ __ branch(lir_cond_lessEqual, deopt);
}
}
@@ -3389,9 +3391,9 @@ void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
if (freq == 0) {
if (!step->is_constant()) {
__ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
- __ branch(lir_cond_notEqual, T_ILLEGAL, overflow);
+ __ branch(lir_cond_notEqual, overflow);
} else {
- __ branch(lir_cond_always, T_ILLEGAL, overflow);
+ __ branch(lir_cond_always, overflow);
}
} else {
LIR_Opr mask = load_immediate(freq, T_INT);
@@ -3402,7 +3404,7 @@ void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
}
__ logical_and(result, mask, result);
__ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
- __ branch(lir_cond_equal, T_INT, overflow);
+ __ branch(lir_cond_equal, overflow);
}
__ branch_destination(overflow->continuation());
}
@@ -3516,7 +3518,7 @@ void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
CodeStub* stub = new PredicateFailedStub(info);
__ cmp(lir_cond(cond), left, right);
- __ branch(lir_cond(cond), right->type(), stub);
+ __ branch(lir_cond(cond), stub);
}
}
diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp
index bf1a3ae1f74..5e81e786dc5 100644
--- a/src/hotspot/share/classfile/classFileParser.cpp
+++ b/src/hotspot/share/classfile/classFileParser.cpp
@@ -3212,6 +3212,41 @@ u2 ClassFileParser::parse_classfile_nest_members_attribute(const ClassFileStream
return length;
}
+u2 ClassFileParser::parse_classfile_permitted_subclasses_attribute(const ClassFileStream* const cfs,
+ const u1* const permitted_subclasses_attribute_start,
+ TRAPS) {
+ const u1* const current_mark = cfs->current();
+ u2 length = 0;
+ if (permitted_subclasses_attribute_start != NULL) {
+ cfs->set_current(permitted_subclasses_attribute_start);
+ cfs->guarantee_more(2, CHECK_0); // length
+ length = cfs->get_u2_fast();
+ }
+ if (length < 1) {
+ classfile_parse_error("PermittedSubclasses attribute is empty in class file %s", CHECK_0);
+ }
+ const int size = length;
+ Array<u2>* const permitted_subclasses = MetadataFactory::new_array<u2>(_loader_data, size, CHECK_0);
+ _permitted_subclasses = permitted_subclasses;
+
+ int index = 0;
+ cfs->guarantee_more(2 * length, CHECK_0);
+ for (int n = 0; n < length; n++) {
+ const u2 class_info_index = cfs->get_u2_fast();
+ check_property(
+ valid_klass_reference_at(class_info_index),
+ "Permitted subclass class_info_index %u has bad constant type in class file %s",
+ class_info_index, CHECK_0);
+ permitted_subclasses->at_put(index++, class_info_index);
+ }
+ assert(index == size, "wrong size");
+
+ // Restore buffer's current position.
+ cfs->set_current(current_mark);
+
+ return length;
+}
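
For reference, the attribute this parser consumes has the classfile layout below (per the sealed-classes preview spec; shown in the same comment style as the Record attribute that follows):

// PermittedSubclasses {
//   u2 attribute_name_index;
//   u4 attribute_length;
//   u2 number_of_classes;
//   u2 classes[number_of_classes];
// }
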
+
// Record {
// u2 attribute_name_index;
// u4 attribute_length;
@@ -3476,10 +3511,16 @@ void ClassFileParser::parse_classfile_bootstrap_methods_attribute(const ClassFil
CHECK);
}
+bool ClassFileParser::supports_sealed_types() {
+ return _major_version == JVM_CLASSFILE_MAJOR_VERSION &&
+ _minor_version == JAVA_PREVIEW_MINOR_VERSION &&
+ Arguments::enable_preview();
+}
+
bool ClassFileParser::supports_records() {
return _major_version == JVM_CLASSFILE_MAJOR_VERSION &&
- _minor_version == JAVA_PREVIEW_MINOR_VERSION &&
- Arguments::enable_preview();
+ _minor_version == JAVA_PREVIEW_MINOR_VERSION &&
+ Arguments::enable_preview();
}
void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cfs,
@@ -3494,11 +3535,14 @@ void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cf
_inner_classes = Universe::the_empty_short_array();
// Set nest members attribute to default sentinel
_nest_members = Universe::the_empty_short_array();
+ // Set _permitted_subclasses attribute to default sentinel
+ _permitted_subclasses = Universe::the_empty_short_array();
cfs->guarantee_more(2, CHECK); // attributes_count
u2 attributes_count = cfs->get_u2_fast();
bool parsed_sourcefile_attribute = false;
bool parsed_innerclasses_attribute = false;
bool parsed_nest_members_attribute = false;
+ bool parsed_permitted_subclasses_attribute = false;
bool parsed_nest_host_attribute = false;
bool parsed_record_attribute = false;
bool parsed_enclosingmethod_attribute = false;
@@ -3522,6 +3566,8 @@ void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cf
u4 nest_members_attribute_length = 0;
const u1* record_attribute_start = NULL;
u4 record_attribute_length = 0;
+ const u1* permitted_subclasses_attribute_start = NULL;
+ u4 permitted_subclasses_attribute_length = 0;
// Iterate over attributes
while (attributes_count--) {
@@ -3738,6 +3784,26 @@ void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cf
}
}
cfs->skip_u1(attribute_length, CHECK);
+ } else if (_major_version >= JAVA_15_VERSION) {
+ // Check for PermittedSubclasses tag
+ if (tag == vmSymbols::tag_permitted_subclasses()) {
+ if (supports_sealed_types()) {
+ if (parsed_permitted_subclasses_attribute) {
+ classfile_parse_error("Multiple PermittedSubclasses attributes in class file %s", CHECK);
+ }
+ // Classes marked ACC_FINAL cannot have a PermittedSubclasses attribute.
+ if (_access_flags.is_final()) {
+ classfile_parse_error("PermittedSubclasses attribute in final class file %s", CHECK);
+ }
+ parsed_permitted_subclasses_attribute = true;
+ permitted_subclasses_attribute_start = cfs->current();
+ permitted_subclasses_attribute_length = attribute_length;
+ }
+ cfs->skip_u1(attribute_length, CHECK);
+ } else {
+ // Unknown attribute
+ cfs->skip_u1(attribute_length, CHECK);
+ }
} else {
// Unknown attribute
cfs->skip_u1(attribute_length, CHECK);
@@ -3806,6 +3872,18 @@ void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cf
}
}
+ if (parsed_permitted_subclasses_attribute) {
+ const u2 num_subclasses = parse_classfile_permitted_subclasses_attribute(
+ cfs,
+ permitted_subclasses_attribute_start,
+ CHECK);
+ if (_need_verify) {
+ guarantee_property(
+ permitted_subclasses_attribute_length == sizeof(num_subclasses) + sizeof(u2) * num_subclasses,
+ "Wrong PermittedSubclasses attribute length in class file %s", CHECK);
+ }
+ }
+
if (_max_bootstrap_specifier_index >= 0) {
guarantee_property(parsed_bootstrap_methods_attribute,
"Missing BootstrapMethods attribute in class file %s", CHECK);
@@ -3871,11 +3949,12 @@ void ClassFileParser::apply_parsed_class_metadata(
this_klass->set_inner_classes(_inner_classes);
this_klass->set_nest_members(_nest_members);
this_klass->set_nest_host_index(_nest_host);
- this_klass->set_local_interfaces(_local_interfaces);
this_klass->set_annotations(_combined_annotations);
+ this_klass->set_permitted_subclasses(_permitted_subclasses);
this_klass->set_record_components(_record_components);
- // Delay the setting of _transitive_interfaces until after initialize_supers() in
- // fill_instance_klass(). It is because the _transitive_interfaces may be shared with
+ // Delay the setting of _local_interfaces and _transitive_interfaces until after
+ // initialize_supers() in fill_instance_klass(). This is because the _local_interfaces could
+ // be shared with _transitive_interfaces and _transitive_interfaces may be shared with
// its _super. If an OOM occurs while loading the current klass, its _super field
// may not have been set. When GC tries to free the klass, the _transitive_interfaces
// may be deallocated mistakenly in InstanceKlass::deallocate_interfaces(). Subsequent
@@ -4681,12 +4760,34 @@ static void check_super_class_access(const InstanceKlass* this_klass, TRAPS) {
const Klass* const super = this_klass->super();
if (super != NULL) {
+ const InstanceKlass* super_ik = InstanceKlass::cast(super);
+
+ if (super->is_final()) {
+ ResourceMark rm(THREAD);
+ Exceptions::fthrow(
+ THREAD_AND_LOCATION,
+ vmSymbols::java_lang_VerifyError(),
+ "class %s cannot inherit from final class %s",
+ this_klass->external_name(),
+ super_ik->external_name());
+ return;
+ }
+
+ if (super_ik->is_sealed() && !super_ik->has_as_permitted_subclass(this_klass)) {
+ ResourceMark rm(THREAD);
+ Exceptions::fthrow(
+ THREAD_AND_LOCATION,
+ vmSymbols::java_lang_IncompatibleClassChangeError(),
+ "class %s cannot inherit from sealed class %s",
+ this_klass->external_name(),
+ super_ik->external_name());
+ return;
+ }
// If the loader is not the boot loader then throw an exception if its
// superclass is in package jdk.internal.reflect and its loader is not a
// special reflection class loader
if (!this_klass->class_loader_data()->is_the_null_class_loader_data()) {
- assert(super->is_instance_klass(), "super is not instance klass");
PackageEntry* super_package = super->package();
if (super_package != NULL &&
super_package->name()->fast_compare(vmSymbols::jdk_internal_reflect()) == 0 &&
@@ -4742,6 +4843,19 @@ static void check_super_interface_access(const InstanceKlass* this_klass, TRAPS)
for (int i = lng - 1; i >= 0; i--) {
InstanceKlass* const k = local_interfaces->at(i);
assert (k != NULL && k->is_interface(), "invalid interface");
+
+ if (k->is_sealed() && !k->has_as_permitted_subclass(this_klass)) {
+ ResourceMark rm(THREAD);
+ Exceptions::fthrow(
+ THREAD_AND_LOCATION,
+ vmSymbols::java_lang_IncompatibleClassChangeError(),
+ "class %s cannot %s sealed interface %s",
+ this_klass->external_name(),
+ this_klass->is_interface() ? "extend" : "implement",
+ k->external_name());
+ return;
+ }
+
Reflection::VerifyClassAccessResults vca_result =
Reflection::verify_class_access(this_klass, k, false);
if (vca_result != Reflection::ACCESS_OK) {
@@ -5674,9 +5788,9 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik,
assert(NULL == _methods, "invariant");
assert(NULL == _inner_classes, "invariant");
assert(NULL == _nest_members, "invariant");
- assert(NULL == _local_interfaces, "invariant");
assert(NULL == _combined_annotations, "invariant");
assert(NULL == _record_components, "invariant");
+ assert(NULL == _permitted_subclasses, "invariant");
if (_has_final_method) {
ik->set_has_final_method();
@@ -5747,7 +5861,9 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik,
// Fill in information needed to compute superclasses.
ik->initialize_supers(const_cast<InstanceKlass*>(_super_klass), _transitive_interfaces, CHECK);
ik->set_transitive_interfaces(_transitive_interfaces);
+ ik->set_local_interfaces(_local_interfaces);
_transitive_interfaces = NULL;
+ _local_interfaces = NULL;
// Initialize itable offset tables
klassItable::setup_itable_offset_table(ik);
@@ -5965,6 +6081,7 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
_inner_classes(NULL),
_nest_members(NULL),
_nest_host(0),
+ _permitted_subclasses(NULL),
_record_components(NULL),
_local_interfaces(NULL),
_transitive_interfaces(NULL),
@@ -6073,7 +6190,7 @@ void ClassFileParser::clear_class_metadata() {
_methods = NULL;
_inner_classes = NULL;
_nest_members = NULL;
- _local_interfaces = NULL;
+ _permitted_subclasses = NULL;
_combined_annotations = NULL;
_class_annotations = _class_type_annotations = NULL;
_fields_annotations = _fields_type_annotations = NULL;
@@ -6109,6 +6226,10 @@ ClassFileParser::~ClassFileParser() {
InstanceKlass::deallocate_record_components(_loader_data, _record_components);
}
+ if (_permitted_subclasses != NULL && _permitted_subclasses != Universe::the_empty_short_array()) {
+ MetadataFactory::free_array(_loader_data, _permitted_subclasses);
+ }
+
// Free interfaces
InstanceKlass::deallocate_interfaces(_loader_data, _super_klass,
_local_interfaces, _transitive_interfaces);
@@ -6137,6 +6258,7 @@ ClassFileParser::~ClassFileParser() {
clear_class_metadata();
_transitive_interfaces = NULL;
+ _local_interfaces = NULL;
// deallocate the klass if already created. Don't directly deallocate, but add
// to the deallocate list so that the klass is removed from the CLD::_klasses list
@@ -6507,10 +6629,6 @@ void ClassFileParser::post_process_parsed_stream(const ClassFileStream* const st
);
return;
}
- // Make sure super class is not final
- if (_super_klass->is_final()) {
- THROW_MSG(vmSymbols::java_lang_VerifyError(), "Cannot inherit from final class");
- }
}
// Compute the transitive list of all unique interfaces implemented by this class
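
For readers following the sealed-types changes above: the PermittedSubclasses attribute body is a u2 count followed by that many u2 constant-pool indices, and it is only honored for preview class files. The standalone sketch below (illustrative constants and names, not HotSpot code) models the gate in supports_sealed_types() and the length check applied after parsing:

// Standalone sketch, illustrative only.
#include <cstdint>
#include <cstdio>

typedef uint16_t u2;

// Assumed per the JVMS: preview class files carry minor_version 0xFFFF and
// must match the JVM's own class-file major version (59 for JDK 15).
const u2 JVM_CLASSFILE_MAJOR_VERSION = 59;
const u2 JAVA_PREVIEW_MINOR_VERSION  = 0xFFFF;

static bool enable_preview = true;  // stand-in for Arguments::enable_preview()

// Mirrors the gate above: sealed types are a preview feature, so all three
// conditions must hold before the attribute is parsed at all.
bool supports_sealed_types(u2 major, u2 minor) {
  return major == JVM_CLASSFILE_MAJOR_VERSION &&
         minor == JAVA_PREVIEW_MINOR_VERSION &&
         enable_preview;
}

// A well-formed PermittedSubclasses attribute has length 2 + 2*n:
// a u2 count plus n u2 constant-pool indices.
bool permitted_subclasses_length_ok(uint32_t attribute_length, u2 num_subclasses) {
  return attribute_length == sizeof(u2) + sizeof(u2) * num_subclasses;
}

int main() {
  printf("gate: %d\n", supports_sealed_types(59, 0xFFFF));       // 1
  printf("len ok: %d\n", permitted_subclasses_length_ok(6, 2));  // 1: 2 + 2*2
  return 0;
}
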
diff --git a/src/hotspot/share/classfile/classFileParser.hpp b/src/hotspot/share/classfile/classFileParser.hpp
index a6e48b1904d..bf5fccf340c 100644
--- a/src/hotspot/share/classfile/classFileParser.hpp
+++ b/src/hotspot/share/classfile/classFileParser.hpp
@@ -132,6 +132,7 @@ class ClassFileParser {
Array<u2>* _inner_classes;
Array<u2>* _nest_members;
u2 _nest_host;
+ Array<u2>* _permitted_subclasses;
Array<RecordComponent*>* _record_components;
Array<InstanceKlass*>* _local_interfaces;
Array<InstanceKlass*>* _transitive_interfaces;
@@ -327,11 +328,16 @@ class ClassFileParser {
const u1* const nest_members_attribute_start,
TRAPS);
+ u2 parse_classfile_permitted_subclasses_attribute(const ClassFileStream* const cfs,
+ const u1* const permitted_subclasses_attribute_start,
+ TRAPS);
+
u2 parse_classfile_record_attribute(const ClassFileStream* const cfs,
const ConstantPool* cp,
const u1* const record_attribute_start,
TRAPS);
+ bool supports_sealed_types();
bool supports_records();
void parse_classfile_attributes(const ClassFileStream* const cfs,
diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.cpp b/src/hotspot/share/classfile/classLoaderDataGraph.cpp
index 971c495e70c..9b104d9ba4d 100644
--- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp
@@ -666,6 +666,13 @@ ClassLoaderMetaspace* ClassLoaderDataGraphMetaspaceIterator::get_next() {
return result;
}
+void ClassLoaderDataGraph::verify() {
+ ClassLoaderDataGraphIterator iter;
+ while (ClassLoaderData* cld = iter.get_next()) {
+ cld->verify();
+ }
+}
+
#ifndef PRODUCT
// callable from debugger
extern "C" int print_loader_data_graph() {
@@ -674,13 +681,6 @@ extern "C" int print_loader_data_graph() {
return 0;
}
-void ClassLoaderDataGraph::verify() {
- ClassLoaderDataGraphIterator iter;
- while (ClassLoaderData* cld = iter.get_next()) {
- cld->verify();
- }
-}
-
void ClassLoaderDataGraph::print_on(outputStream * const out) {
ClassLoaderDataGraphIterator iter;
while (ClassLoaderData* cld = iter.get_next()) {
diff --git a/src/hotspot/share/classfile/compactHashtable.cpp b/src/hotspot/share/classfile/compactHashtable.cpp
index 59ed93364c3..13ad65f33e8 100644
--- a/src/hotspot/share/classfile/compactHashtable.cpp
+++ b/src/hotspot/share/classfile/compactHashtable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
#include "memory/heapShared.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
+#include "runtime/globals.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/numberSeq.hpp"
#include <sys/stat.h>
@@ -212,11 +213,13 @@ size_t SimpleCompactHashtable::calculate_header_size() {
void SimpleCompactHashtable::serialize_header(SerializeClosure* soc) {
// NOTE: if you change this function, you MUST change the number 5 in
// calculate_header_size() accordingly.
- soc->do_ptr((void**)&_base_address);
soc->do_u4(&_entry_count);
soc->do_u4(&_bucket_count);
soc->do_ptr((void**)&_buckets);
soc->do_ptr((void**)&_entries);
+ if (soc->reading()) {
+ _base_address = (address)SharedBaseAddress;
+ }
}
#endif // INCLUDE_CDS
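
The serialize_header() change above stops writing _base_address into the archive; the reader instead derives it from wherever the shared region actually got mapped. A rough standalone model of the idea (hypothetical Serializer/CompactTable types, not the CDS API):

// Illustrative sketch of the relocation-friendly header.
#include <cstdint>
#include <cstdio>

struct Serializer {
  bool is_reading;
  void do_u4(uint32_t* p) { /* read or write 4 bytes; elided */ }
  bool reading() const    { return is_reading; }
};

static uintptr_t SharedBaseAddress = 0x100000;  // actual mapping base

struct CompactTable {
  uintptr_t _base_address;
  uint32_t  _entry_count;
  uint32_t  _bucket_count;

  void serialize_header(Serializer* soc) {
    // Counts round-trip through the archive unchanged...
    soc->do_u4(&_entry_count);
    soc->do_u4(&_bucket_count);
    // ...but the base is recomputed on the reading side, so the table still
    // works when the archive is mapped at a different address than at dump time.
    if (soc->reading()) {
      _base_address = SharedBaseAddress;
    }
  }
};

int main() {
  CompactTable t = { 0, 10, 4 };
  Serializer reader = { true };
  t.serialize_header(&reader);
  printf("base = 0x%lx\n", (unsigned long)t._base_address);
  return 0;
}
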
diff --git a/src/hotspot/share/classfile/dictionary.cpp b/src/hotspot/share/classfile/dictionary.cpp
index 686dab0973e..8b10f5abdb1 100644
--- a/src/hotspot/share/classfile/dictionary.cpp
+++ b/src/hotspot/share/classfile/dictionary.cpp
@@ -33,6 +33,7 @@
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
+#include "oops/oopHandle.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/hashtable.inline.hpp"
@@ -400,6 +401,20 @@ void Dictionary::clean_cached_protection_domains() {
}
}
+oop SymbolPropertyEntry::method_type() const {
+ return _method_type.resolve();
+}
+
+void SymbolPropertyEntry::set_method_type(oop p) {
+ _method_type = OopHandle::create(p);
+}
+
+void SymbolPropertyEntry::free_entry() {
+ // decrement Symbol refcount here because hashtable doesn't.
+ literal()->decrement_refcount();
+ // Free OopHandle
+ _method_type.release();
+}
SymbolPropertyTable::SymbolPropertyTable(int table_size)
: Hashtable<Symbol*, mtSymbol>(table_size, sizeof(SymbolPropertyEntry))
@@ -436,16 +451,6 @@ SymbolPropertyEntry* SymbolPropertyTable::add_entry(int index, unsigned int hash
return p;
}
-void SymbolPropertyTable::oops_do(OopClosure* f) {
- for (int index = 0; index < table_size(); index++) {
- for (SymbolPropertyEntry* p = bucket(index); p != NULL; p = p->next()) {
- if (p->method_type() != NULL) {
- f->do_oop(p->method_type_addr());
- }
- }
- }
-}
-
void SymbolPropertyTable::methods_do(void f(Method*)) {
for (int index = 0; index < table_size(); index++) {
for (SymbolPropertyEntry* p = bucket(index); p != NULL; p = p->next()) {
@@ -457,6 +462,11 @@ void SymbolPropertyTable::methods_do(void f(Method*)) {
}
}
+void SymbolPropertyTable::free_entry(SymbolPropertyEntry* entry) {
+ entry->free_entry();
Hashtable<Symbol*, mtSymbol>::free_entry(entry);
+}
+
void DictionaryEntry::verify_protection_domain_set() {
MutexLocker ml(ProtectionDomainSet_lock, Mutex::_no_safepoint_check_flag);
for (ProtectionDomainEntry* current = pd_set(); // accessed at a safepoint
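
The dictionary changes above replace the raw oop _method_type with an OopHandle, so the GC finds the reference through shared root storage rather than a per-table oops_do() walk, and free_entry() must now release the slot. A toy model of that pattern, with made-up RootTable/Handle types standing in for OopStorage/OopHandle:

// Toy model; not the real OopHandle/OopStorage API.
#include <cstdio>
#include <vector>

typedef void* oop;  // stand-in for a heap reference

struct RootTable {                 // stand-in for an OopStorage
  std::vector<oop*> slots;
  oop* allocate(oop value) { oop* s = new oop(value); slots.push_back(s); return s; }
  void release(oop* s) { *s = nullptr; }  // real code would recycle the slot
  template <typename F> void oops_do(F f) { for (oop* s : slots) f(s); }
};

static RootTable vm_global;

struct Handle {
  oop* _slot = nullptr;
  static Handle create(oop value) { Handle h; h._slot = vm_global.allocate(value); return h; }
  oop resolve() const { return _slot ? *_slot : nullptr; }
  void release() { if (_slot) { vm_global.release(_slot); _slot = nullptr; } }
};

int main() {
  int obj = 42;
  Handle h = Handle::create(&obj);
  int roots = 0;
  vm_global.oops_do([&](oop* s) { if (*s) roots++; });  // GC sees the slot, not the entry
  printf("live roots: %d, resolved: %p\n", roots, h.resolve());
  h.release();  // as in SymbolPropertyEntry::free_entry()
  return 0;
}
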
diff --git a/src/hotspot/share/classfile/dictionary.hpp b/src/hotspot/share/classfile/dictionary.hpp
index 10037b3e935..99b9799753f 100644
--- a/src/hotspot/share/classfile/dictionary.hpp
+++ b/src/hotspot/share/classfile/dictionary.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "classfile/systemDictionary.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.hpp"
+#include "oops/oopHandle.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/ostream.hpp"
@@ -180,7 +181,7 @@ class SymbolPropertyEntry : public HashtableEntry<Symbol*, mtSymbol> {
private:
intptr_t _symbol_mode; // secondary key
Method* _method;
- oop _method_type;
+ OopHandle _method_type;
public:
Symbol* symbol() const { return literal(); }
@@ -191,9 +192,10 @@ class SymbolPropertyEntry : public HashtableEntry {
Method* method() const { return _method; }
void set_method(Method* p) { _method = p; }
- oop method_type() const { return _method_type; }
- oop* method_type_addr() { return &_method_type; }
- void set_method_type(oop p) { _method_type = p; }
+ oop method_type() const;
+ void set_method_type(oop p);
+
+ void free_entry();
SymbolPropertyEntry* next() const {
return (SymbolPropertyEntry*)HashtableEntry<Symbol*, mtSymbol>::next();
@@ -253,11 +255,7 @@ public:
SymbolPropertyTable(int table_size);
SymbolPropertyTable(int table_size, HashtableBucket<mtSymbol>* t, int number_of_entries);
- void free_entry(SymbolPropertyEntry* entry) {
- // decrement Symbol refcount here because hashtable doesn't.
- entry->literal()->decrement_refcount();
- Hashtable<Symbol*, mtSymbol>::free_entry(entry);
- }
+ void free_entry(SymbolPropertyEntry* entry);
unsigned int compute_hash(Symbol* sym, intptr_t symbol_mode) {
// Use the regular identity_hash.
@@ -274,9 +272,6 @@ public:
// must be done under SystemDictionary_lock
SymbolPropertyEntry* add_entry(int index, unsigned int hash, Symbol* name, intptr_t name_mode);
- // GC support
- void oops_do(OopClosure* f);
-
void methods_do(void f(Method*));
void verify();
diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp
index 6f17652e3f5..38ec88f5e3b 100644
--- a/src/hotspot/share/classfile/systemDictionary.cpp
+++ b/src/hotspot/share/classfile/systemDictionary.cpp
@@ -46,8 +46,6 @@
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/oopStorage.inline.hpp"
-#include "gc/shared/oopStorageSet.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/jfrEvents.hpp"
@@ -68,6 +66,7 @@
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
+#include "oops/oopHandle.inline.hpp"
#include "oops/symbol.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiExport.hpp"
@@ -98,15 +97,15 @@ ResolutionErrorTable* SystemDictionary::_resolution_errors = NULL;
SymbolPropertyTable* SystemDictionary::_invoke_method_table = NULL;
ProtectionDomainCacheTable* SystemDictionary::_pd_cache_table = NULL;
-oop SystemDictionary::_system_loader_lock_obj = NULL;
-
InstanceKlass* SystemDictionary::_well_known_klasses[SystemDictionary::WKID_LIMIT]
= { NULL /*, NULL...*/ };
InstanceKlass* SystemDictionary::_box_klasses[T_VOID+1] = { NULL /*, NULL...*/ };
-oop SystemDictionary::_java_system_loader = NULL;
-oop SystemDictionary::_java_platform_loader = NULL;
+
+OopHandle SystemDictionary::_system_loader_lock_obj;
+OopHandle SystemDictionary::_java_system_loader;
+OopHandle SystemDictionary::_java_platform_loader;
// Default ProtectionDomainCacheSize value
@@ -155,12 +154,16 @@ ClassLoadInfo::ClassLoadInfo(Handle protection_domain,
// ----------------------------------------------------------------------------
// Java-level SystemLoader and PlatformLoader
+oop SystemDictionary::system_loader_lock() {
+ return _system_loader_lock_obj.resolve();
+}
+
oop SystemDictionary::java_system_loader() {
- return _java_system_loader;
+ return _java_system_loader.resolve();
}
oop SystemDictionary::java_platform_loader() {
- return _java_platform_loader;
+ return _java_platform_loader.resolve();
}
void SystemDictionary::compute_java_loaders(TRAPS) {
@@ -172,7 +175,7 @@ void SystemDictionary::compute_java_loaders(TRAPS) {
vmSymbols::void_classloader_signature(),
CHECK);
- _java_system_loader = (oop)result.get_jobject();
+ _java_system_loader = OopHandle::create((oop)result.get_jobject());
JavaCalls::call_static(&result,
class_loader_klass,
@@ -180,7 +183,7 @@ void SystemDictionary::compute_java_loaders(TRAPS) {
vmSymbols::void_classloader_signature(),
CHECK);
- _java_platform_loader = (oop)result.get_jobject();
+ _java_platform_loader = OopHandle::create((oop)result.get_jobject());
}
ClassLoaderData* SystemDictionary::register_loader(Handle class_loader, bool create_mirror_cld) {
@@ -219,7 +222,7 @@ bool SystemDictionary::is_system_class_loader(oop class_loader) {
return false;
}
return (class_loader->klass() == SystemDictionary::jdk_internal_loader_ClassLoaders_AppClassLoader_klass() ||
- class_loader == _java_system_loader);
+ class_loader == _java_system_loader.peek());
}
// Returns true if the passed class loader is the platform class loader.
@@ -603,7 +606,8 @@ void SystemDictionary::double_lock_wait(Handle lockObject, TRAPS) {
bool calledholdinglock
= ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD, lockObject);
assert(calledholdinglock,"must hold lock for notify");
- assert((lockObject() != _system_loader_lock_obj && !is_parallelCapable(lockObject)), "unexpected double_lock_wait");
+ assert((lockObject() != _system_loader_lock_obj.resolve() &&
+ !is_parallelCapable(lockObject)), "unexpected double_lock_wait");
ObjectSynchronizer::notifyall(lockObject, THREAD);
intx recursions = ObjectSynchronizer::complete_exit(lockObject, THREAD);
SystemDictionary_lock->wait();
@@ -1820,7 +1824,7 @@ InstanceKlass* SystemDictionary::find_or_define_instance_class(Symbol* class_nam
Handle SystemDictionary::compute_loader_lock_object(Handle class_loader, TRAPS) {
// If class_loader is NULL we synchronize on _system_loader_lock_obj
if (class_loader.is_null()) {
- return Handle(THREAD, _system_loader_lock_obj);
+ return Handle(THREAD, _system_loader_lock_obj.resolve());
} else {
return class_loader;
}
@@ -1841,7 +1845,7 @@ void SystemDictionary::check_loader_lock_contention(Handle loader_lock, TRAPS) {
== ObjectSynchronizer::owner_other) {
// contention will likely happen, so increment the corresponding
// contention counter.
- if (loader_lock() == _system_loader_lock_obj) {
+ if (loader_lock() == _system_loader_lock_obj.resolve()) {
ClassLoader::sync_systemLoaderLockContentionRate()->inc();
} else {
ClassLoader::sync_nonSystemLoaderLockContentionRate()->inc();
@@ -1958,20 +1962,6 @@ bool SystemDictionary::do_unloading(GCTimer* gc_timer) {
return unloading_occurred;
}
-void SystemDictionary::oops_do(OopClosure* f, bool include_handles) {
- f->do_oop(&_java_system_loader);
- f->do_oop(&_java_platform_loader);
- f->do_oop(&_system_loader_lock_obj);
- CDS_ONLY(SystemDictionaryShared::oops_do(f);)
-
- // Visit extra methods
- invoke_method_table()->oops_do(f);
-
- if (include_handles) {
- OopStorageSet::vm_global()->oops_do(f);
- }
-}
-
// CDS: scan and relocate all classes referenced by _well_known_klasses[].
void SystemDictionary::well_known_klasses_do(MetaspaceClosure* it) {
for (int id = FIRST_WKID; id < WKID_LIMIT; id++) {
@@ -1999,7 +1989,9 @@ void SystemDictionary::initialize(TRAPS) {
_pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize);
// Allocate private object used as system class loader lock
- _system_loader_lock_obj = oopFactory::new_intArray(0, CHECK);
+ oop lock_obj = oopFactory::new_intArray(0, CHECK);
+ _system_loader_lock_obj = OopHandle::create(lock_obj);
+
// Initialize basic classes
resolve_well_known_classes(CHECK);
}
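
Note the asymmetry above: is_system_class_loader() compares against _java_system_loader.peek(), while every caller that hands the oop out goes through resolve(). Assuming the usual OopHandle semantics (resolve() is a barriered load, peek() a plain read that is only safe when the result is merely compared), a minimal illustration:

// Assumed semantics, not the real API.
#include <cstdio>

typedef void* oop;

struct OopHandle {
  oop _slot = nullptr;
  int barrier_hits = 0;                             // instrumentation for the demo
  oop resolve() { barrier_hits++; return _slot; }   // barriered read; value may escape
  oop peek() const { return _slot; }                // raw read; identity tests only
};

int main() {
  int loader;
  OopHandle h; h._slot = &loader;
  bool same = (h.peek() == (oop)&loader);   // comparison: no barrier needed
  oop escaping = h.resolve();               // value escapes: take the barrier
  printf("same=%d escaped=%p barriers=%d\n", same, escaping, h.barrier_hits);
  return 0;
}
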
diff --git a/src/hotspot/share/classfile/systemDictionary.hpp b/src/hotspot/share/classfile/systemDictionary.hpp
index 6abfc52e63d..1777af7ecb3 100644
--- a/src/hotspot/share/classfile/systemDictionary.hpp
+++ b/src/hotspot/share/classfile/systemDictionary.hpp
@@ -27,6 +27,7 @@
#include "classfile/classLoaderData.hpp"
#include "oops/objArrayOop.hpp"
+#include "oops/oopHandle.hpp"
#include "oops/symbol.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
@@ -381,13 +382,8 @@ public:
// loaders. Returns "true" iff something was unloaded.
static bool do_unloading(GCTimer* gc_timer);
- // Applies "f->do_oop" to all root oops in the system dictionary.
- // If include_handles is true (the default), then the handles in the
- // vm_global OopStorage object are included.
- static void oops_do(OopClosure* f, bool include_handles = true);
-
// System loader lock
- static oop system_loader_lock() { return _system_loader_lock_obj; }
+ static oop system_loader_lock();
// Protection Domain Table
static ProtectionDomainCacheTable* pd_cache_table() { return _pd_cache_table; }
@@ -585,7 +581,7 @@ public:
static PlaceholderTable* _placeholders;
// Lock object for system class loader
- static oop _system_loader_lock_obj;
+ static OopHandle _system_loader_lock_obj;
// Constraints on class loaders
static LoaderConstraintTable* _loader_constraints;
@@ -703,8 +699,8 @@ protected:
static InstanceKlass* _box_klasses[T_VOID+1];
private:
- static oop _java_system_loader;
- static oop _java_platform_loader;
+ static OopHandle _java_system_loader;
+ static OopHandle _java_platform_loader;
public:
static TableStatistics placeholders_statistics();
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp
index 63ee66d5e06..2da53ac5238 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp
@@ -61,9 +61,9 @@
#include "utilities/stringUtils.hpp"
-objArrayOop SystemDictionaryShared::_shared_protection_domains = NULL;
-objArrayOop SystemDictionaryShared::_shared_jar_urls = NULL;
-objArrayOop SystemDictionaryShared::_shared_jar_manifests = NULL;
+OopHandle SystemDictionaryShared::_shared_protection_domains = NULL;
+OopHandle SystemDictionaryShared::_shared_jar_urls = NULL;
+OopHandle SystemDictionaryShared::_shared_jar_manifests = NULL;
DEBUG_ONLY(bool SystemDictionaryShared::_no_class_loading_should_happen = false;)
class DumpTimeSharedClassInfo: public CHeapObj<mtClass> {
@@ -460,15 +460,15 @@ static RunTimeSharedDictionary _dynamic_builtin_dictionary;
static RunTimeSharedDictionary _dynamic_unregistered_dictionary;
oop SystemDictionaryShared::shared_protection_domain(int index) {
- return _shared_protection_domains->obj_at(index);
+ return ((objArrayOop)_shared_protection_domains.resolve())->obj_at(index);
}
oop SystemDictionaryShared::shared_jar_url(int index) {
- return _shared_jar_urls->obj_at(index);
+ return ((objArrayOop)_shared_jar_urls.resolve())->obj_at(index);
}
oop SystemDictionaryShared::shared_jar_manifest(int index) {
- return _shared_jar_manifests->obj_at(index);
+ return ((objArrayOop)_shared_jar_manifests.resolve())->obj_at(index);
}
Handle SystemDictionaryShared::get_shared_jar_manifest(int shared_path_index, TRAPS) {
@@ -926,30 +926,27 @@ InstanceKlass* SystemDictionaryShared::load_shared_class_for_builtin_loader(
return NULL;
}
-void SystemDictionaryShared::oops_do(OopClosure* f) {
- f->do_oop((oop*)&_shared_protection_domains);
- f->do_oop((oop*)&_shared_jar_urls);
- f->do_oop((oop*)&_shared_jar_manifests);
-}
-
void SystemDictionaryShared::allocate_shared_protection_domain_array(int size, TRAPS) {
- if (_shared_protection_domains == NULL) {
- _shared_protection_domains = oopFactory::new_objArray(
+ if (_shared_protection_domains.resolve() == NULL) {
+ oop spd = oopFactory::new_objArray(
SystemDictionary::ProtectionDomain_klass(), size, CHECK);
+ _shared_protection_domains = OopHandle::create(spd);
}
}
void SystemDictionaryShared::allocate_shared_jar_url_array(int size, TRAPS) {
- if (_shared_jar_urls == NULL) {
- _shared_jar_urls = oopFactory::new_objArray(
+ if (_shared_jar_urls.resolve() == NULL) {
+ oop sju = oopFactory::new_objArray(
SystemDictionary::URL_klass(), size, CHECK);
+ _shared_jar_urls = OopHandle::create(sju);
}
}
void SystemDictionaryShared::allocate_shared_jar_manifest_array(int size, TRAPS) {
- if (_shared_jar_manifests == NULL) {
- _shared_jar_manifests = oopFactory::new_objArray(
+ if (_shared_jar_manifests.resolve() == NULL) {
+ oop sjm = oopFactory::new_objArray(
SystemDictionary::Jar_Manifest_klass(), size, CHECK);
+ _shared_jar_manifests = OopHandle::create(sjm);
}
}
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.hpp b/src/hotspot/share/classfile/systemDictionaryShared.hpp
index f714104a9aa..ec1efb5af41 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.hpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp
@@ -122,9 +122,9 @@ private:
// java.security.ProtectionDomain objects associated with each shared class.
//
// See SystemDictionaryShared::init_security_info for more info.
- static objArrayOop _shared_protection_domains;
- static objArrayOop _shared_jar_urls;
- static objArrayOop _shared_jar_manifests;
+ static OopHandle _shared_protection_domains;
+ static OopHandle _shared_jar_urls;
+ static OopHandle _shared_jar_manifests;
static InstanceKlass* load_shared_class_for_builtin_loader(
Symbol* class_name,
@@ -180,12 +180,12 @@ private:
ModuleEntry* mod, TRAPS);
static Handle init_security_info(Handle class_loader, InstanceKlass* ik, PackageEntry* pkg_entry, TRAPS);
- static void atomic_set_array_index(objArrayOop array, int index, oop o) {
+ static void atomic_set_array_index(OopHandle array, int index, oop o) {
// Benign race condition: array.obj_at(index) may already be filled in.
// The important thing here is that all threads pick up the same result.
// It doesn't matter which racing thread wins, as long as only one
// result is used by all threads, and all future queries.
- array->atomic_compare_exchange_oop(index, o, NULL);
+ ((objArrayOop)array.resolve())->atomic_compare_exchange_oop(index, o, NULL);
}
static oop shared_protection_domain(int index);
@@ -235,7 +235,6 @@ public:
static void allocate_shared_data_arrays(int size, TRAPS);
- static void oops_do(OopClosure* f);
// Check if sharing is supported for the class loader.
static bool is_sharing_possible(ClassLoaderData* loader_data);
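
atomic_set_array_index() keeps its benign-race contract after the OopHandle conversion: a compare-exchange from NULL makes slot initialization idempotent, so racing threads all observe the single winning value. A self-contained model, with std::atomic in place of the objArrayOop CAS:

// Standalone model of the benign-race slot fill.
#include <atomic>
#include <cstdio>

typedef void* oop;

static std::atomic<oop> slot{nullptr};

oop set_once(oop candidate) {
  oop expected = nullptr;
  // Benign race: if another thread already filled the slot, keep its value.
  slot.compare_exchange_strong(expected, candidate);
  return slot.load();
}

int main() {
  int a, b;
  oop first  = set_once(&a);   // wins the exchange
  oop second = set_once(&b);   // loses; sees &a
  printf("stable: %d\n", first == second);  // 1
  return 0;
}
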
diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp
index 762f2b00720..13e5dd640ad 100644
--- a/src/hotspot/share/classfile/vmSymbols.hpp
+++ b/src/hotspot/share/classfile/vmSymbols.hpp
@@ -173,6 +173,7 @@
template(tag_runtime_invisible_type_annotations, "RuntimeInvisibleTypeAnnotations") \
template(tag_enclosing_method, "EnclosingMethod") \
template(tag_bootstrap_methods, "BootstrapMethods") \
+ template(tag_permitted_subclasses, "PermittedSubclasses") \
\
/* exception klasses: at least all exceptions thrown by the VM have entries here */ \
template(java_lang_ArithmeticException, "java/lang/ArithmeticException") \
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
index 859c583cc35..403d9870a7a 100644
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -484,7 +484,7 @@ CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
*/
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
// Possibly wakes up the sweeper thread.
- NMethodSweeper::notify(code_blob_type);
+ NMethodSweeper::report_allocation(code_blob_type);
assert_locked_or_safepoint(CodeCache_lock);
assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
if (size <= 0) {
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index 69349430b9d..38a94dd410f 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -1116,7 +1116,9 @@ bool nmethod::can_convert_to_zombie() {
// not_entrant. However, with concurrent code cache unloading, the state
// might have moved on to unloaded if it is_unloading(), due to racing
// concurrent GC threads.
- assert(is_not_entrant() || is_unloading(), "must be a non-entrant method");
+ assert(is_not_entrant() || is_unloading() ||
+ !Thread::current()->is_Code_cache_sweeper_thread(),
+ "must be a non-entrant method if called from sweeper");
// Since the nmethod sweeper only does partial sweep the sweeper's traversal
// count can be greater than the stack traversal count before it hits the
@@ -2004,23 +2006,22 @@ void nmethod::oops_do_marking_epilogue() {
nmethod* next = _oops_do_mark_nmethods;
_oops_do_mark_nmethods = NULL;
- if (next == NULL) {
- return;
- }
- nmethod* cur;
- do {
- cur = next;
- next = extract_nmethod(cur->_oops_do_mark_link);
- cur->_oops_do_mark_link = NULL;
- DEBUG_ONLY(cur->verify_oop_relocations());
+ if (next != NULL) {
+ nmethod* cur;
+ do {
+ cur = next;
+ next = extract_nmethod(cur->_oops_do_mark_link);
+ cur->_oops_do_mark_link = NULL;
+ DEBUG_ONLY(cur->verify_oop_relocations());
- LogTarget(Trace, gc, nmethod) lt;
- if (lt.is_enabled()) {
- LogStream ls(lt);
- CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
- }
- // End if self-loop has been detected.
- } while (cur != next);
+ LogTarget(Trace, gc, nmethod) lt;
+ if (lt.is_enabled()) {
+ LogStream ls(lt);
+ CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
+ }
+ // End if self-loop has been detected.
+ } while (cur != next);
+ }
log_trace(gc, nmethod)("oops_do_marking_epilogue");
}
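
The restructured epilogue above still relies on the mark list's unusual terminator: a claimed nmethod links to itself at the tail, and NULL means "not on the list". A small standalone sketch of that traversal and why `cur != next` is the stopping condition:

// Sketch of the self-loop-terminated list discipline.
#include <cstdio>

struct Node {
  int id;
  Node* link;   // nullptr: not claimed; points to self: tail of the list
};

int main() {
  Node a{1, nullptr}, b{2, nullptr}, c{3, nullptr};
  // Build the mark list c -> b -> a, with a as the self-looped tail.
  a.link = &a;
  b.link = &b ? &a : nullptr;  // plain assignment; kept explicit below
  b.link = &a;
  c.link = &b;

  Node* next = &c;
  Node* cur;
  do {
    cur = next;
    next = cur->link;
    cur->link = nullptr;            // unmark, as in oops_do_marking_epilogue()
    printf("unmarked %d\n", cur->id);
  } while (cur != next);            // self-loop detected: done
  return 0;
}
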
diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp
index bb6ee664942..2861addffdd 100644
--- a/src/hotspot/share/compiler/compileBroker.cpp
+++ b/src/hotspot/share/compiler/compileBroker.cpp
@@ -885,6 +885,9 @@ JavaThread* CompileBroker::make_thread(jobject thread_handle, CompileQueue* queu
void CompileBroker::init_compiler_sweeper_threads() {
+ NMethodSweeper::set_sweep_threshold_bytes(static_cast<size_t>(SweeperThreshold * ReservedCodeCacheSize / 100.0));
+ log_info(codecache, sweep)("Sweeper threshold: " SIZE_FORMAT " bytes", NMethodSweeper::sweep_threshold_bytes());
+
// Ensure any exceptions lead to vm_exit_during_initialization.
EXCEPTION_MARK;
#if !defined(ZERO)
@@ -1652,7 +1655,8 @@ void CompileBroker::wait_for_completion(CompileTask* task) {
bool free_task;
#if INCLUDE_JVMCI
AbstractCompiler* comp = compiler(task->comp_level());
- if (comp->is_jvmci()) {
+ if (comp->is_jvmci() && !task->should_wait_for_compilation()) {
+ // It may return before compilation is completed.
free_task = wait_for_jvmci_completion((JVMCICompiler*) comp, task, thread);
} else
#endif
diff --git a/src/hotspot/share/compiler/compileTask.hpp b/src/hotspot/share/compiler/compileTask.hpp
index 2b76ddbac65..78f76163f7f 100644
--- a/src/hotspot/share/compiler/compileTask.hpp
+++ b/src/hotspot/share/compiler/compileTask.hpp
@@ -134,6 +134,20 @@ class CompileTask : public CHeapObj<mtCompiler> {
}
}
#if INCLUDE_JVMCI
+ bool should_wait_for_compilation() const {
+ // Wait for blocking compilation to finish.
+ switch (_compile_reason) {
+ case Reason_CTW:
+ case Reason_Replay:
+ case Reason_Whitebox:
+ case Reason_MustBeCompiled:
+ case Reason_Bootstrap:
+ return _is_blocking;
+ default:
+ return false;
+ }
+ }
+
bool has_waiter() const { return _has_waiter; }
void clear_waiter() { _has_waiter = false; }
CompilerThread* jvmci_compiler_thread() const { return _jvmci_compiler_thread; }
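
should_wait_for_compilation() narrows which JVMCI compiles the broker actually blocks on: only requests some caller genuinely depends on, and only if they were submitted as blocking. A reduced model of the decision (reason names mirror the enum above; everything else here is illustrative):

// Illustrative reduction of the wait policy, not JVMCI itself.
#include <cstdio>

enum CompileReason { Reason_CTW, Reason_Replay, Reason_Whitebox,
                     Reason_MustBeCompiled, Reason_Bootstrap, Reason_Tiered };

bool should_wait(CompileReason reason, bool is_blocking) {
  switch (reason) {
    case Reason_CTW:             // CompileTheWorld-style sweeps
    case Reason_Replay:          // replay compilation from a dump
    case Reason_Whitebox:        // test-harness requests
    case Reason_MustBeCompiled:  // -Xcomp / must-compile methods
    case Reason_Bootstrap:       // the JVMCI bootstrap itself
      return is_blocking;
    default:
      return false;              // ordinary tiered requests: may return early
  }
}

int main() {
  printf("%d %d\n", should_wait(Reason_Bootstrap, true),   // 1: wait
                    should_wait(Reason_Tiered, true));     // 0: don't
  return 0;
}
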
diff --git a/src/hotspot/share/compiler/compilerDefinitions.cpp b/src/hotspot/share/compiler/compilerDefinitions.cpp
index 011494af95b..d09323a3da9 100644
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp
@@ -484,6 +484,13 @@ void CompilerConfig::ergo_initialize() {
}
}
+ if (FLAG_IS_DEFAULT(SweeperThreshold)) {
+ if ((SweeperThreshold * ReservedCodeCacheSize / 100) > (1.2 * M)) {
+ // Cap default SweeperThreshold value to an equivalent of 1.2 Mb
+ FLAG_SET_ERGO(SweeperThreshold, (1.2 * M * 100) / ReservedCodeCacheSize);
+ }
+ }
+
if (UseOnStackReplacement && !UseLoopCounter) {
warning("On-stack-replacement requires loop counters; enabling loop counters");
FLAG_SET_DEFAULT(UseLoopCounter, true);
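
The two hunks above interact: init_compiler_sweeper_threads() converts SweeperThreshold (a percentage) into bytes, and ergo_initialize() caps the default percentage so the byte value stays near 1.2 MB even for large code caches. A worked example with assumed flag values:

// Worked example; the flag values below are illustrative, not defaults
// taken from this patch.
#include <cstdio>

const double M = 1024.0 * 1024.0;

int main() {
  double reserved = 512 * M;     // hypothetical ReservedCodeCacheSize
  double threshold_pct = 0.5;    // hypothetical SweeperThreshold percentage

  // As in init_compiler_sweeper_threads(): percentage -> bytes.
  double bytes = threshold_pct * reserved / 100.0;
  printf("threshold = %.0f bytes\n", bytes);   // 2684355 ~= 2.56 MB

  // As in ergo_initialize(): if the default would exceed 1.2 MB,
  // scale the percentage back to the 1.2 MB equivalent.
  if (bytes > 1.2 * M) {
    threshold_pct = (1.2 * M * 100.0) / reserved;
    printf("capped pct = %.4f\n", threshold_pct);  // 0.2344
  }
  return 0;
}
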
diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
index b262de0dfe4..3b81eb41996 100644
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
@@ -286,8 +286,10 @@ void EpsilonHeap::print_on(outputStream *st) const {
// Cast away constness:
((VirtualSpace)_virtual_space).print_on(st);
- st->print_cr("Allocation space:");
- _space->print_on(st);
+ if (_space != NULL) {
+ st->print_cr("Allocation space:");
+ _space->print_on(st);
+ }
MetaspaceUtils::print_on(st);
}
diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp
index 6147c80e956..8cf9c97187a 100644
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp
@@ -52,7 +52,8 @@ public:
static EpsilonHeap* heap();
EpsilonHeap() :
- _memory_manager("Epsilon Heap", "") {};
+ _memory_manager("Epsilon Heap", ""),
+ _space(NULL) {};
virtual Name kind() const {
return CollectedHeap::Epsilon;
@@ -116,7 +117,6 @@ public:
bool block_is_obj(const HeapWord* addr) const { return false; }
// No GC threads
- virtual void print_gc_threads_on(outputStream* st) const {}
virtual void gc_threads_do(ThreadClosure* tc) const {}
// No nmethod handling
diff --git a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp
index 4289e5e5c4b..60a213aec77 100644
--- a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp
+++ b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp
@@ -103,7 +103,7 @@ void G1BarrierSetC1::pre_barrier(LIRAccess& access, LIR_Opr addr_opr,
slow = new G1PreBarrierStub(pre_val);
}
- __ branch(lir_cond_notEqual, T_INT, slow);
+ __ branch(lir_cond_notEqual, slow);
__ branch_destination(slow->continuation());
}
@@ -171,7 +171,7 @@ void G1BarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, LIR_OprD
__ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
CodeStub* slow = new G1PostBarrierStub(addr, new_val);
- __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
+ __ branch(lir_cond_notEqual, slow);
__ branch_destination(slow->continuation());
}
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 7030b0c41af..0ec15986e63 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -871,7 +871,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
result = humongous_obj_allocate(word_size);
if (result != NULL) {
size_t size_in_regions = humongous_obj_size_in_regions(word_size);
- policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
+ policy()->old_gen_alloc_tracker()->
+ add_allocated_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
return result;
}
@@ -2507,11 +2508,13 @@ void G1CollectedHeap::print_heap_regions() const {
void G1CollectedHeap::print_on(outputStream* st) const {
st->print(" %-20s", "garbage-first heap");
- st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
- capacity()/K, used_unlocked()/K);
- st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",
- p2i(_hrm->reserved().start()),
- p2i(_hrm->reserved().end()));
+ if (_hrm != NULL) {
+ st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
+ capacity()/K, used_unlocked()/K);
+ st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",
+ p2i(_hrm->reserved().start()),
+ p2i(_hrm->reserved().end()));
+ }
st->cr();
st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
uint young_regions = young_regions_count();
@@ -2526,7 +2529,8 @@ void G1CollectedHeap::print_on(outputStream* st) const {
st->print(" remaining free region(s) on each NUMA node: ");
const int* node_ids = _numa->node_ids();
for (uint node_index = 0; node_index < num_nodes; node_index++) {
- st->print("%d=%u ", node_ids[node_index], _hrm->num_free_regions(node_index));
+ uint num_free_regions = (_hrm != NULL ? _hrm->num_free_regions(node_index) : 0);
+ st->print("%d=%u ", node_ids[node_index], num_free_regions);
}
st->cr();
}
@@ -2534,6 +2538,10 @@ void G1CollectedHeap::print_on(outputStream* st) const {
}
void G1CollectedHeap::print_regions_on(outputStream* st) const {
+ if (_hrm == NULL) {
+ return;
+ }
+
st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
"HS=humongous(starts), HC=humongous(continues), "
"CS=collection set, F=free, "
@@ -2559,18 +2567,6 @@ void G1CollectedHeap::print_on_error(outputStream* st) const {
}
}
-void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
- workers()->print_worker_threads_on(st);
- _cm_thread->print_on(st);
- st->cr();
- _cm->print_worker_threads_on(st);
- _cr->print_threads_on(st);
- _young_gen_sampling_thread->print_on(st);
- if (G1StringDedup::is_enabled()) {
- G1StringDedup::print_worker_threads_on(st);
- }
-}
-
void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
workers()->threads_do(tc);
tc->do_thread(_cm_thread);
@@ -4085,7 +4081,8 @@ void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_
}
void G1CollectedHeap::record_obj_copy_mem_stats() {
- policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
+ policy()->old_gen_alloc_tracker()->
+ add_allocated_bytes_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
_gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
create_g1_evac_summary(&_old_evac_stats));
@@ -4186,7 +4183,7 @@ class G1FreeCollectionSetTask : public AbstractGangTask {
g1h->alloc_buffer_stats(G1HeapRegionAttr::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
G1Policy *policy = g1h->policy();
- policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
+ policy->old_gen_alloc_tracker()->add_allocated_bytes_since_last_gc(_bytes_allocated_in_old_since_last_gc);
policy->record_rs_length(_rs_length);
policy->cset_regions_freed();
}
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index 4dba3e066d1..71f89d09184 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -1454,7 +1454,6 @@ public:
virtual void print_extended_on(outputStream* st) const;
virtual void print_on_error(outputStream* st) const;
- virtual void print_gc_threads_on(outputStream* st) const;
virtual void gc_threads_do(ThreadClosure* tc) const;
// Override
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index 0a371228b6a..46234bcaf43 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -2026,10 +2026,6 @@ void G1ConcurrentMark::print_summary_info() {
cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
}
-void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
- _concurrent_workers->print_worker_threads_on(st);
-}
-
void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
_concurrent_workers->threads_do(tc);
}
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
index 39421ad5267..e37179e2710 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
@@ -583,7 +583,6 @@ public:
void print_summary_info();
- void print_worker_threads_on(outputStream* st) const;
void threads_do(ThreadClosure* tc) const;
void print_on_error(outputStream* st) const;
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp
index 2607f48a127..b8bd67eb07a 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp
@@ -114,15 +114,6 @@ void G1ConcurrentRefineThreadControl::maybe_activate_next(uint cur_worker_id) {
}
}
-void G1ConcurrentRefineThreadControl::print_on(outputStream* st) const {
- for (uint i = 0; i < _num_max_threads; ++i) {
- if (_threads[i] != NULL) {
- _threads[i]->print_on(st);
- st->cr();
- }
- }
-}
-
void G1ConcurrentRefineThreadControl::worker_threads_do(ThreadClosure* tc) {
for (uint i = 0; i < _num_max_threads; i++) {
if (_threads[i] != NULL) {
@@ -318,10 +309,6 @@ uint G1ConcurrentRefine::max_num_threads() {
return G1ConcRefinementThreads;
}
-void G1ConcurrentRefine::print_threads_on(outputStream* st) const {
- _thread_control.print_on(st);
-}
-
static size_t calc_new_green_zone(size_t green,
double logged_cards_scan_time,
size_t processed_logged_cards,
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp b/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp
index 3f32a5c3199..b904e47a76a 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp
@@ -57,7 +57,6 @@ public:
// activate it.
void maybe_activate_next(uint cur_worker_id);
- void print_on(outputStream* st) const;
void worker_threads_do(ThreadClosure* tc);
void stop();
};
@@ -139,8 +138,6 @@ public:
// Maximum number of refinement threads.
static uint max_num_threads();
- void print_threads_on(outputStream* st) const;
-
// Cards in the dirty card queue set.
size_t green_zone() const { return _green_zone; }
size_t yellow_zone() const { return _yellow_zone; }
diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
index e8bda9c26fb..45c6932efa8 100644
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
@@ -62,7 +62,7 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
_gc_par_phases[JNIRoots] = new WorkerDataArray<double>("JNI Handles Roots (ms):", max_gc_threads);
_gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>("ObjectSynchronizer Roots (ms):", max_gc_threads);
_gc_par_phases[ManagementRoots] = new WorkerDataArray<double>("Management Roots (ms):", max_gc_threads);
- _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>("SystemDictionary Roots (ms):", max_gc_threads);
+ _gc_par_phases[VMGlobalRoots] = new WorkerDataArray<double>("VM Global Roots (ms):", max_gc_threads);
_gc_par_phases[CLDGRoots] = new WorkerDataArray<double>("CLDG Roots (ms):", max_gc_threads);
_gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>("JVMTI Roots (ms):", max_gc_threads);
AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>("AOT Root Scan (ms):", max_gc_threads);)
@@ -561,7 +561,7 @@ const char* G1GCPhaseTimes::phase_name(GCParPhases phase) {
"JNIRoots",
"ObjectSynchronizerRoots",
"ManagementRoots",
- "SystemDictionaryRoots",
+ "VMGlobalRoots",
"CLDGRoots",
"JVMTIRoots",
AOT_ONLY("AOTCodeRoots" COMMA)
diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
index a83538a1071..a3b83c68582 100644
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
@@ -51,7 +51,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
JNIRoots,
ObjectSynchronizerRoots,
ManagementRoots,
- SystemDictionaryRoots,
+ VMGlobalRoots,
CLDGRoots,
JVMTIRoots,
AOT_ONLY(AOTCodeRoots COMMA)
diff --git a/src/hotspot/share/gc/g1/g1OldGenAllocationTracker.cpp b/src/hotspot/share/gc/g1/g1OldGenAllocationTracker.cpp
new file mode 100644
index 00000000000..c7796718c68
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1OldGenAllocationTracker.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2020, Amazon.com, Inc. or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1OldGenAllocationTracker.hpp"
+
+G1OldGenAllocationTracker::G1OldGenAllocationTracker() :
+ _last_cycle_old_bytes(0),
+ _last_cycle_duration(0.0),
+ _allocated_bytes_since_last_gc(0) {
+}
+
+void G1OldGenAllocationTracker::reset_after_full_gc() {
+ _last_cycle_duration = 0;
+ reset_cycle_after_gc();
+}
+
+void G1OldGenAllocationTracker::reset_after_young_gc(double allocation_duration_s) {
+ _last_cycle_duration = allocation_duration_s;
+ reset_cycle_after_gc();
+}
\ No newline at end of file
diff --git a/src/hotspot/share/gc/g1/g1OldGenAllocationTracker.hpp b/src/hotspot/share/gc/g1/g1OldGenAllocationTracker.hpp
new file mode 100644
index 00000000000..4d17d96fd10
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1OldGenAllocationTracker.hpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2020, Amazon.com, Inc. or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1OLDGENALLOCATIONTRACKER_HPP
+#define SHARE_VM_GC_G1_G1OLDGENALLOCATIONTRACKER_HPP
+
+#include "gc/g1/heapRegion.hpp"
+#include "memory/allocation.hpp"
+
+// Track allocation details in the old generation.
+class G1OldGenAllocationTracker : public CHeapObj<mtGC> {
+ // New bytes allocated in old gen between the end of the last GC and
+ // the end of the GC before that.
+ size_t _last_cycle_old_bytes;
+ // The number of seconds between the end of the last GC and
+ // the end of the GC before that.
+ double _last_cycle_duration;
+
+ size_t _allocated_bytes_since_last_gc;
+
+ void reset_cycle_after_gc() {
+ _last_cycle_old_bytes = _allocated_bytes_since_last_gc;
+ _allocated_bytes_since_last_gc = 0;
+ }
+
+public:
+ G1OldGenAllocationTracker();
+ // Add the given number of bytes to the total number of allocated bytes in the old gen.
+ void add_allocated_bytes_since_last_gc(size_t bytes) { _allocated_bytes_since_last_gc += bytes; }
+
+ size_t last_cycle_old_bytes() { return _last_cycle_old_bytes; }
+
+ double last_cycle_duration() { return _last_cycle_duration; }
+
+ // Reset stats after a collection.
+ void reset_after_full_gc();
+ void reset_after_young_gc(double allocation_duration_s);
+};
+
+#endif // SHARE_VM_GC_G1_G1OLDGENALLOCATIONTRACKER_HPP
\ No newline at end of file
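
The new tracker carries exactly the two numbers the adaptive-IHOP update consumes. A hypothetical consumer (not the real G1Policy code) showing how they combine into an old-gen allocation rate:

// Hypothetical consumer of the tracker; all names are sketches.
#include <cstdio>
#include <cstddef>

class OldGenAllocationTracker {
  size_t _last_cycle_old_bytes = 0;
  double _last_cycle_duration = 0.0;
  size_t _allocated_bytes_since_last_gc = 0;
public:
  void add_allocated_bytes_since_last_gc(size_t bytes) { _allocated_bytes_since_last_gc += bytes; }
  void reset_after_young_gc(double duration_s) {
    _last_cycle_duration = duration_s;
    _last_cycle_old_bytes = _allocated_bytes_since_last_gc;
    _allocated_bytes_since_last_gc = 0;
  }
  size_t last_cycle_old_bytes() const { return _last_cycle_old_bytes; }
  double last_cycle_duration() const { return _last_cycle_duration; }
};

int main() {
  OldGenAllocationTracker t;
  t.add_allocated_bytes_since_last_gc(8 * 1024 * 1024);  // humongous + promoted bytes
  t.reset_after_young_gc(2.0);                           // 2 s allocation cycle
  double rate = t.last_cycle_old_bytes() / t.last_cycle_duration();
  printf("old-gen alloc rate: %.0f bytes/s\n", rate);    // 4 MB/s
  return 0;
}
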
diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp
index c046bbc0318..7808c0c90eb 100644
--- a/src/hotspot/share/gc/g1/g1Policy.cpp
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp
@@ -73,7 +73,7 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
_rs_length(0),
_rs_length_prediction(0),
_pending_cards_at_gc_start(0),
- _bytes_allocated_in_old_since_last_gc(0),
+ _old_gen_alloc_tracker(),
_initial_mark_to_mixed(),
_collection_set(NULL),
_g1h(NULL),
@@ -460,7 +460,7 @@ void G1Policy::record_full_collection_end() {
update_young_list_max_and_target_length();
update_rs_length_prediction();
- _bytes_allocated_in_old_since_last_gc = 0;
+ _old_gen_alloc_tracker.reset_after_full_gc();
record_pause(FullGC, _full_collection_start_sec, end_sec);
}
@@ -795,11 +795,11 @@ void G1Policy::record_collection_pause_end(double pause_time_ms) {
// predicted target occupancy.
size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
- update_ihop_prediction(app_time_ms / 1000.0,
- _bytes_allocated_in_old_since_last_gc,
+ _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
+ update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
+ _old_gen_alloc_tracker.last_cycle_old_bytes(),
last_unrestrained_young_length * HeapRegion::GrainBytes,
this_pause_was_young_only);
- _bytes_allocated_in_old_since_last_gc = 0;
_ihop_control->send_trace_event(_g1h->gc_tracer_stw());
} else {
diff --git a/src/hotspot/share/gc/g1/g1Policy.hpp b/src/hotspot/share/gc/g1/g1Policy.hpp
index b52a9f7013d..473ebbac392 100644
--- a/src/hotspot/share/gc/g1/g1Policy.hpp
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp
@@ -30,6 +30,7 @@
#include "gc/g1/g1HeapRegionAttr.hpp"
#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
#include "gc/g1/g1MMUTracker.hpp"
+#include "gc/g1/g1OldGenAllocationTracker.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
@@ -102,9 +103,9 @@ class G1Policy: public CHeapObj<mtGC> {
size_t _pending_cards_at_gc_start;
- // The amount of allocated bytes in old gen during the last mutator and the following
- // young GC phase.
- size_t _bytes_allocated_in_old_since_last_gc;
+ // Tracking the allocation in the old generation between
+ // two GCs.
+ G1OldGenAllocationTracker _old_gen_alloc_tracker;
G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
@@ -119,8 +120,7 @@ public:
G1RemSetTrackingPolicy* remset_tracker() { return &_remset_tracker; }
- // Add the given number of bytes to the total number of allocated bytes in the old gen.
- void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
+ G1OldGenAllocationTracker* old_gen_alloc_tracker() { return &_old_gen_alloc_tracker; }
void set_region_eden(HeapRegion* hr) {
hr->set_eden();
diff --git a/src/hotspot/share/gc/g1/g1RootProcessor.cpp b/src/hotspot/share/gc/g1/g1RootProcessor.cpp
index 05e79be3978..139def1662a 100644
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp
@@ -26,7 +26,6 @@
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
-#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
@@ -39,6 +38,8 @@
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/heapRegion.inline.hpp"
+#include "gc/shared/oopStorage.inline.hpp"
+#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.hpp"
@@ -225,9 +226,9 @@ void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
#endif
{
- G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_id);
- if (_process_strong_tasks.try_claim_task(G1RP_PS_SystemDictionary_oops_do)) {
- SystemDictionary::oops_do(strong_roots);
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::VMGlobalRoots, worker_id);
+ if (_process_strong_tasks.try_claim_task(G1RP_PS_VMGlobal_oops_do)) {
+ OopStorageSet::vm_global()->oops_do(strong_roots);
}
}
}
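
With SystemDictionary::oops_do() gone, strong VM roots now come from the vm_global OopStorage, and the subtask is claimed once per pause. A standalone sketch of that claim-and-iterate shape (OopStorageModel and the claim flag are stand-ins for the real task claimer):

// Sketch of the claim-once root-scanning shape above.
#include <atomic>
#include <cstdio>
#include <vector>

typedef void* oop;

struct OopStorageModel {
  std::vector<oop> slots{(oop)0x1, (oop)0x2};
  template <typename F> void oops_do(F f) { for (oop& s : slots) f(&s); }
};

static OopStorageModel vm_global_storage;
static std::atomic<bool> claimed{false};

void process_vm_roots(int worker_id) {
  // try_claim_task(): exactly one worker wins the exchange.
  if (!claimed.exchange(true)) {
    int n = 0;
    vm_global_storage.oops_do([&](oop* p) { n++; });  // strong-roots closure
    printf("worker %d scanned %d vm_global roots\n", worker_id, n);
  }
}

int main() {
  process_vm_roots(0);
  process_vm_roots(1);  // loses the claim; does nothing
  return 0;
}
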
diff --git a/src/hotspot/share/gc/g1/g1RootProcessor.hpp b/src/hotspot/share/gc/g1/g1RootProcessor.hpp
index 9da113c5c6b..a75d53f7009 100644
--- a/src/hotspot/share/gc/g1/g1RootProcessor.hpp
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,7 @@ class G1RootProcessor : public StackObj {
G1RP_PS_JNIHandles_oops_do,
G1RP_PS_ObjectSynchronizer_oops_do,
G1RP_PS_Management_oops_do,
- G1RP_PS_SystemDictionary_oops_do,
+ G1RP_PS_VMGlobal_oops_do,
G1RP_PS_ClassLoaderDataGraph_oops_do,
G1RP_PS_jvmti_oops_do,
G1RP_PS_CodeCache_oops_do,
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index ac6901dfa7b..2ed42c16f0e 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -595,8 +595,12 @@ bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
}
void ParallelScavengeHeap::print_on(outputStream* st) const {
- young_gen()->print_on(st);
- old_gen()->print_on(st);
+ if (young_gen() != NULL) {
+ young_gen()->print_on(st);
+ }
+ if (old_gen() != NULL) {
+ old_gen()->print_on(st);
+ }
MetaspaceUtils::print_on(st);
}
@@ -611,10 +615,6 @@ void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
ParallelScavengeHeap::heap()->workers().threads_do(tc);
}
-void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
- ParallelScavengeHeap::heap()->workers().print_worker_threads_on(st);
-}
-
void ParallelScavengeHeap::print_tracing_info() const {
AdaptiveSizePolicyOutput::print();
log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
index 0f818e55f7e..54f4ca72b73 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -217,7 +217,6 @@ class ParallelScavengeHeap : public CollectedHeap {
PSHeapSummary create_ps_heap_summary();
virtual void print_on(outputStream* st) const;
virtual void print_on_error(outputStream* st) const;
- virtual void print_gc_threads_on(outputStream* st) const;
virtual void gc_threads_do(ThreadClosure* tc) const;
virtual void print_tracing_info() const;
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
index ca3d663eab1..18bdacd743a 100644
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -49,6 +49,8 @@
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
+#include "gc/shared/oopStorage.inline.hpp"
+#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
@@ -2029,8 +2031,8 @@ static void mark_from_roots_work(ParallelRootType::Value root_type, uint worker_
JvmtiExport::oops_do(&mark_and_push_closure);
break;
- case ParallelRootType::system_dictionary:
- SystemDictionary::oops_do(&mark_and_push_closure);
+ case ParallelRootType::vm_global:
+ OopStorageSet::vm_global()->oops_do(&mark_and_push_closure);
break;
case ParallelRootType::class_loader_data:
@@ -2238,7 +2240,7 @@ void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
ObjectSynchronizer::oops_do(&oop_closure);
Management::oops_do(&oop_closure);
JvmtiExport::oops_do(&oop_closure);
- SystemDictionary::oops_do(&oop_closure);
+ OopStorageSet::vm_global()->oops_do(&oop_closure);
CLDToOopClosure cld_closure(&oop_closure, ClassLoaderData::_claim_strong);
ClassLoaderDataGraph::cld_do(&cld_closure);
diff --git a/src/hotspot/share/gc/parallel/psRootType.hpp b/src/hotspot/share/gc/parallel/psRootType.hpp
index 73c88df5993..f08854debf1 100644
--- a/src/hotspot/share/gc/parallel/psRootType.hpp
+++ b/src/hotspot/share/gc/parallel/psRootType.hpp
@@ -38,7 +38,7 @@ public:
jni_handles,
object_synchronizer,
management,
- system_dictionary,
+ vm_global,
class_loader_data,
jvmti,
code_cache,
diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp
index 7e9ef5ea3f6..4e9a01bcaae 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp
@@ -43,6 +43,8 @@
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
+#include "gc/shared/oopStorage.inline.hpp"
+#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
@@ -102,8 +104,8 @@ static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_i
ObjectSynchronizer::oops_do(&roots_closure);
break;
- case ParallelRootType::system_dictionary:
- SystemDictionary::oops_do(&roots_closure);
+ case ParallelRootType::vm_global:
+ OopStorageSet::vm_global()->oops_do(&roots_closure);
break;
case ParallelRootType::class_loader_data:
diff --git a/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp b/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp
index d8028945410..5588a027549 100644
--- a/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp
+++ b/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp
@@ -193,7 +193,7 @@ void BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
if (mask_boolean) {
LabelObj* equalZeroLabel = new LabelObj();
__ cmp(lir_cond_equal, result, 0);
- __ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
+ __ branch(lir_cond_equal, equalZeroLabel->label());
__ move(LIR_OprFact::intConst(1), result);
__ branch_destination(equalZeroLabel->label());
}
@@ -321,13 +321,13 @@ void BarrierSetC1::generate_referent_check(LIRAccess& access, LabelObj* cont) {
__ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset()), referent_off);
}
__ cmp(lir_cond_notEqual, offset, referent_off);
- __ branch(lir_cond_notEqual, offset->type(), cont->label());
+ __ branch(lir_cond_notEqual, cont->label());
}
if (gen_source_check) {
// offset is a const and equals referent offset
// if (source == null) -> continue
__ cmp(lir_cond_equal, base_reg, LIR_OprFact::oopConst(NULL));
- __ branch(lir_cond_equal, T_OBJECT, cont->label());
+ __ branch(lir_cond_equal, cont->label());
}
LIR_Opr src_klass = gen->new_register(T_METADATA);
if (gen_type_check) {
@@ -338,7 +338,7 @@ void BarrierSetC1::generate_referent_check(LIRAccess& access, LabelObj* cont) {
LIR_Opr reference_type = gen->new_register(T_INT);
__ move(reference_type_addr, reference_type);
__ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
- __ branch(lir_cond_equal, T_INT, cont->label());
+ __ branch(lir_cond_equal, cont->label());
}
}
}
diff --git a/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp b/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp
index 8311eb24c8c..5909d442b95 100644
--- a/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp
@@ -89,7 +89,7 @@ void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, L
LabelObj* L_already_dirty = new LabelObj();
__ cmp(lir_cond_equal, cur_value, dirty);
- __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
+ __ branch(lir_cond_equal, L_already_dirty->label());
__ move(dirty, card_addr);
__ branch_destination(L_already_dirty->label());
} else {
diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp
index bdac8053b98..25368631b58 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp
@@ -143,7 +143,10 @@ void CollectedHeap::print_on_error(outputStream* st) const {
print_extended_on(st);
st->cr();
- BarrierSet::barrier_set()->print_on(st);
+ BarrierSet* bs = BarrierSet::barrier_set();
+ if (bs != NULL) {
+ bs->print_on(st);
+ }
}
void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp
index e2c625dd1fc..8a6936d2e37 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp
@@ -429,13 +429,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// Used to print information about locations in the hs_err file.
virtual bool print_location(outputStream* st, void* addr) const = 0;
- // Print all GC threads (other than the VM thread)
- // used by this heap.
- virtual void print_gc_threads_on(outputStream* st) const = 0;
- // The default behavior is to call print_gc_threads_on() on tty.
- void print_gc_threads() {
- print_gc_threads_on(tty);
- }
// Iterator for all GC threads (other than VM thread)
virtual void gc_threads_do(ThreadClosure* tc) const = 0;
diff --git a/src/hotspot/share/gc/shared/gcLogPrecious.cpp b/src/hotspot/share/gc/shared/gcLogPrecious.cpp
new file mode 100644
index 00000000000..3e996c228c3
--- /dev/null
+++ b/src/hotspot/share/gc/shared/gcLogPrecious.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/mutexLocker.hpp"
+
+stringStream* GCLogPrecious::_lines = NULL;
+stringStream* GCLogPrecious::_temp = NULL;
+Mutex* GCLogPrecious::_lock = NULL;
+
+void GCLogPrecious::initialize() {
+ _lines = new (ResourceObj::C_HEAP, mtGC) stringStream();
+ _temp = new (ResourceObj::C_HEAP, mtGC) stringStream();
+ _lock = new Mutex(Mutex::tty,
+ "GCLogPrecious Lock",
+ true,
+ Mutex::_safepoint_check_never);
+}
+
+void GCLogPrecious::vwrite_inner(LogTargetHandle log, const char* format, va_list args) {
+ // Generate the string in the temp buffer
+ _temp->reset();
+ _temp->vprint(format, args);
+
+ // Save it in the precious lines buffer
+ _lines->print_cr(" %s", _temp->base());
+
+ // Log it to UL
+ log.print("%s", _temp->base());
+
+ // Leave _temp buffer to be used by vwrite_and_debug
+}
+
+void GCLogPrecious::vwrite(LogTargetHandle log, const char* format, va_list args) {
+ MutexLocker locker(_lock, Mutex::_no_safepoint_check_flag);
+ vwrite_inner(log, format, args);
+}
+
+void GCLogPrecious::vwrite_and_debug(LogTargetHandle log,
+ const char* format,
+ va_list args
+ DEBUG_ONLY(COMMA const char* file)
+ DEBUG_ONLY(COMMA int line)) {
+ DEBUG_ONLY(const char* debug_message;)
+
+ {
+ MutexLocker locker(_lock, Mutex::_no_safepoint_check_flag);
+ vwrite_inner(log, format, args);
+ DEBUG_ONLY(debug_message = strdup(_temp->base()));
+ }
+
+ // Report the error outside the lock scope, since report_vm_error will call print_on_error
+ DEBUG_ONLY(report_vm_error(file, line, debug_message);)
+ DEBUG_ONLY(BREAKPOINT;)
+}
+
+void GCLogPrecious::print_on_error(outputStream* st) {
+ if (_lines != NULL) {
+ MutexLocker locker(_lock, Mutex::_no_safepoint_check_flag);
+ if (_lines->size() > 0) {
+ st->print_cr("GC Precious Log:");
+ st->print_cr("%s", _lines->base());
+ }
+ }
+}
diff --git a/src/hotspot/share/gc/shared/gcLogPrecious.hpp b/src/hotspot/share/gc/shared/gcLogPrecious.hpp
new file mode 100644
index 00000000000..5f1158caca7
--- /dev/null
+++ b/src/hotspot/share/gc/shared/gcLogPrecious.hpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_SHARED_GCLOGPRECIOUS_HPP
+#define SHARE_GC_SHARED_GCLOGPRECIOUS_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "logging/logHandle.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+class Mutex;
+class stringStream;
+
+// Log lines to unified logging and also save them in a buffer.
+// The saved lines will be printed when hs_err files are created.
+
+#define log_level_p(level, ...) \
+ GCLogPreciousHandle( \
+ LogTargetHandle::create<LogLevel::level, LOG_TAGS(__VA_ARGS__)>() \
+ DEBUG_ONLY(COMMA __FILE__ COMMA __LINE__))
+
+#define log_info_p(...) log_level_p(Info, __VA_ARGS__).write
+#define log_debug_p(...) log_level_p(Debug, __VA_ARGS__).write
+#define log_trace_p(...) log_level_p(Trace, __VA_ARGS__).write
+#define log_warning_p(...) log_level_p(Warning, __VA_ARGS__).write
+#define log_error_p(...) log_level_p(Error, __VA_ARGS__).write
+
+// ... and report error in debug builds
+#define log_error_pd(...) \
+ DEBUG_ONLY(TOUCH_ASSERT_POISON;) \
+ log_level_p(Error, __VA_ARGS__).write_and_debug
+
+class GCLogPrecious : public AllStatic {
+private:
+ // Saved precious lines
+ static stringStream* _lines;
+ // Temporary line buffer
+ static stringStream* _temp;
+ // Protects the buffers
+ static Mutex* _lock;
+
+ static void vwrite_inner(LogTargetHandle log,
+ const char* format,
+ va_list args) ATTRIBUTE_PRINTF(2, 0);
+
+public:
+ static void initialize();
+
+ static void vwrite(LogTargetHandle log,
+ const char* format,
+ va_list args) ATTRIBUTE_PRINTF(2, 0);
+
+ static void vwrite_and_debug(LogTargetHandle log,
+ const char* format,
+ va_list args
+ DEBUG_ONLY(COMMA const char* file)
+ DEBUG_ONLY(COMMA int line)) ATTRIBUTE_PRINTF(2, 0);
+
+ static void print_on_error(outputStream* st);
+};
+
+class GCLogPreciousHandle {
+ LogTargetHandle _log;
+ DEBUG_ONLY(const char* _file);
+ DEBUG_ONLY(int _line);
+
+ public:
+ GCLogPreciousHandle(LogTargetHandle log
+ DEBUG_ONLY(COMMA const char* file)
+ DEBUG_ONLY(COMMA int line)) :
+ _log(log)
+ DEBUG_ONLY(COMMA _file(file))
+ DEBUG_ONLY(COMMA _line(line))
+ {}
+
+ void write(const char* format, ...) ATTRIBUTE_PRINTF(2, 3) {
+ va_list args;
+ va_start(args, format);
+ GCLogPrecious::vwrite(_log, format, args);
+ va_end(args);
+ }
+
+ void write_and_debug(const char* format, ...) ATTRIBUTE_PRINTF(2, 3) {
+ va_list args;
+ va_start(args, format);
+ GCLogPrecious::vwrite_and_debug(_log, format, args DEBUG_ONLY(COMMA _file COMMA _line));
+ va_end(args);
+ }
+};
+
+#endif // SHARE_GC_SHARED_GCLOGPRECIOUS_HPP
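
The two new files above introduce the GC "precious log": lines written through the log_*_p macros go to unified logging as usual, but are also saved so that GCLogPrecious::print_on_error() can replay them into hs_err files. For orientation, a hypothetical call site could look like the sketch below; the tag set (gc, init), the variables (min_capacity, st), and the messages are illustrative assumptions, not part of this patch:

    // Once, during VM bootstrap, before the first precious line is written:
    GCLogPrecious::initialize();

    // Record an important one-off fact: logged now, and saved for hs_err files.
    log_info_p(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);

    // The error variant also reports a VM error in debug builds, via
    // write_and_debug() and the file/line captured by the macro:
    log_error_pd(gc)("Failed to commit heap memory");

    // Later, from the error reporter, replay everything saved so far:
    GCLogPrecious::print_on_error(st);
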
diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
index d5def394e42..c5cf68eda14 100644
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
@@ -27,7 +27,6 @@
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
-#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
@@ -49,6 +48,8 @@
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
+#include "gc/shared/oopStorage.inline.hpp"
+#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/space.hpp"
@@ -82,6 +83,8 @@ GenCollectedHeap::GenCollectedHeap(Generation::Name young,
Generation::Name old,
const char* policy_counters_name) :
CollectedHeap(),
+ _young_gen(NULL),
+ _old_gen(NULL),
_young_gen_spec(new GenerationSpec(young,
NewSize,
MaxNewSize,
@@ -92,9 +95,13 @@ GenCollectedHeap::GenCollectedHeap(Generation::Name young,
GenAlignment)),
_rem_set(NULL),
_soft_ref_gen_policy(),
+ _size_policy(NULL),
_gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
+ _incremental_collection_failed(false),
_full_collections_completed(0),
- _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)) {
+ _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
+ _young_manager(NULL),
+ _old_manager(NULL) {
}
jint GenCollectedHeap::initialize() {
@@ -129,7 +136,6 @@ jint GenCollectedHeap::initialize() {
old_rs = old_rs.first_part(_old_gen_spec->max_size());
_old_gen = _old_gen_spec->init(old_rs, rem_set());
- clear_incremental_collection_failed();
GCInitLogger::print();
@@ -838,8 +844,8 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
AOTLoader::oops_do(strong_roots);
}
#endif
- if (_process_strong_tasks->try_claim_task(GCH_PS_SystemDictionary_oops_do)) {
- SystemDictionary::oops_do(strong_roots);
+ if (_process_strong_tasks->try_claim_task(GCH_PS_VMGlobal_oops_do)) {
+ OopStorageSet::vm_global()->oops_do(strong_roots);
}
if (_process_strong_tasks->try_claim_task(GCH_PS_CodeCache_oops_do)) {
@@ -1220,17 +1226,18 @@ void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
}
void GenCollectedHeap::print_on(outputStream* st) const {
- _young_gen->print_on(st);
- _old_gen->print_on(st);
+ if (_young_gen != NULL) {
+ _young_gen->print_on(st);
+ }
+ if (_old_gen != NULL) {
+ _old_gen->print_on(st);
+ }
MetaspaceUtils::print_on(st);
}
void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
}
-void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
-}
-
bool GenCollectedHeap::print_location(outputStream* st, void* addr) const {
return BlockLocationPrinter::print_location(st, addr);
}
diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.hpp b/src/hotspot/share/gc/shared/genCollectedHeap.hpp
index 835413f63b7..4bd5e32fc53 100644
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -110,7 +110,7 @@ protected:
GCH_PS_ObjectSynchronizer_oops_do,
GCH_PS_FlatProfiler_oops_do,
GCH_PS_Management_oops_do,
- GCH_PS_SystemDictionary_oops_do,
+ GCH_PS_VMGlobal_oops_do,
GCH_PS_ClassLoaderDataGraph_oops_do,
GCH_PS_jvmti_oops_do,
GCH_PS_CodeCache_oops_do,
@@ -331,7 +331,6 @@ public:
// Override.
virtual void print_on(outputStream* st) const;
- virtual void print_gc_threads_on(outputStream* st) const;
virtual void gc_threads_do(ThreadClosure* tc) const;
virtual void print_tracing_info() const;
diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedup.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedup.cpp
index c10902cb7dc..6b7f31091e3 100644
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedup.cpp
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedup.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -65,12 +65,6 @@ void StringDedup::threads_do(ThreadClosure* tc) {
tc->do_thread(StringDedupThread::thread());
}
-void StringDedup::print_worker_threads_on(outputStream* st) {
- assert(is_enabled(), "String deduplication not enabled");
- StringDedupThread::thread()->print_on(st);
- st->cr();
-}
-
void StringDedup::verify() {
assert(is_enabled(), "String deduplication not enabled");
StringDedupQueue::verify();
diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedup.hpp b/src/hotspot/share/gc/shared/stringdedup/stringDedup.hpp
index eb79ec79c15..7a3ccd92c67 100644
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedup.hpp
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedup.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -95,7 +95,7 @@ public:
static void parallel_unlink(StringDedupUnlinkOrOopsDoClosure* unlink, uint worker_id);
static void threads_do(ThreadClosure* tc);
- static void print_worker_threads_on(outputStream* st);
+
static void verify();
// GC support
diff --git a/src/hotspot/share/gc/shared/workgroup.cpp b/src/hotspot/share/gc/shared/workgroup.cpp
index e457301f6bf..bbed81b70dc 100644
--- a/src/hotspot/share/gc/shared/workgroup.cpp
+++ b/src/hotspot/share/gc/shared/workgroup.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -86,14 +86,6 @@ AbstractGangWorker* AbstractWorkGang::worker(uint i) const {
return result;
}
-void AbstractWorkGang::print_worker_threads_on(outputStream* st) const {
- uint workers = created_workers();
- for (uint i = 0; i < workers; i++) {
- worker(i)->print_on(st);
- st->cr();
- }
-}
-
void AbstractWorkGang::threads_do(ThreadClosure* tc) const {
assert(tc != NULL, "Null ThreadClosure");
uint workers = created_workers();
diff --git a/src/hotspot/share/gc/shared/workgroup.hpp b/src/hotspot/share/gc/shared/workgroup.hpp
index 260a556ac02..6cc63bd4589 100644
--- a/src/hotspot/share/gc/shared/workgroup.hpp
+++ b/src/hotspot/share/gc/shared/workgroup.hpp
@@ -188,12 +188,6 @@ class AbstractWorkGang : public CHeapObj<mtInternal> {
// Debugging.
const char* name() const { return _name; }
- // Printing
- void print_worker_threads_on(outputStream *st) const;
- void print_worker_threads() const {
- print_worker_threads_on(tty);
- }
-
protected:
virtual AbstractGangWorker* allocate_worker(uint which) = 0;
};
diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
index 24605fcb99b..9754f495034 100644
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
@@ -103,7 +103,7 @@ void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info,
slow = new ShenandoahPreBarrierStub(pre_val);
}
- __ branch(lir_cond_notEqual, T_INT, slow);
+ __ branch(lir_cond_notEqual, slow);
__ branch_destination(slow->continuation());
}
@@ -149,7 +149,7 @@ LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, L
__ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2, is_native);
- __ branch(lir_cond_notEqual, T_INT, slow);
+ __ branch(lir_cond_notEqual, slow);
__ branch_destination(slow->continuation());
return result;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
index 1d30837bec6..81181e731ee 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
@@ -174,7 +174,6 @@ public:
rp = NULL;
}
- _cm->concurrent_scan_code_roots(worker_id, rp);
_cm->mark_loop(worker_id, _terminator, rp,
true, // cancellable
ShenandoahStringDedup::is_enabled()); // perform string dedup
@@ -215,6 +214,44 @@ public:
}
};
+// Process concurrent roots at safepoints
+template <typename T>
+class ShenandoahProcessConcurrentRootsTask : public AbstractGangTask {
+private:
+ ShenandoahConcurrentRootScanner<false /* concurrent */> _rs;
+ ShenandoahConcurrentMark* const _cm;
+ ReferenceProcessor* _rp;
+public:
+
+ ShenandoahProcessConcurrentRootsTask(ShenandoahConcurrentMark* cm,
+ ShenandoahPhaseTimings::Phase phase,
+ uint nworkers);
+ void work(uint worker_id);
+};
+
+template <typename T>
+ShenandoahProcessConcurrentRootsTask<T>::ShenandoahProcessConcurrentRootsTask(ShenandoahConcurrentMark* cm,
+ ShenandoahPhaseTimings::Phase phase,
+ uint nworkers) :
+ AbstractGangTask("Shenandoah STW Concurrent Mark Task"),
+ _rs(nworkers, phase),
+ _cm(cm),
+ _rp(NULL) {
+ ShenandoahHeap* heap = ShenandoahHeap::heap();
+ if (heap->process_references()) {
+ _rp = heap->ref_processor();
+ shenandoah_assert_rp_isalive_installed();
+ }
+}
+
+template <typename T>
+void ShenandoahProcessConcurrentRootsTask<T>::work(uint worker_id) {
+ ShenandoahParallelWorkerSession worker_session(worker_id);
+ ShenandoahObjToScanQueue* q = _cm->task_queues()->queue(worker_id);
+ T cl(q, _rp);
+ _rs.oops_do(&cl, worker_id);
+}
+
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
ShenandoahConcurrentMark* _cm;
@@ -267,13 +304,6 @@ public:
}
}
- if (heap->is_degenerated_gc_in_progress() || heap->is_full_gc_in_progress()) {
- // Full GC does not execute concurrent cycle.
- // Degenerated cycle may bypass concurrent cycle.
- // So code roots might not be scanned, let's scan here.
- _cm->concurrent_scan_code_roots(worker_id, rp);
- }
-
_cm->mark_loop(worker_id, _terminator, rp,
false, // not cancellable
_dedup_string);
@@ -308,8 +338,6 @@ void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_pha
ShenandoahInitMarkRootsTask mark_roots(&root_proc);
workers->run_task(&mark_roots);
}
-
- clear_claim_codecache();
}
void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
@@ -390,34 +418,47 @@ void ShenandoahConcurrentMark::initialize(uint workers) {
}
}
-void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
- if (_heap->unload_classes()) {
- return;
- }
+// Mark concurrent roots during concurrent phases
+class ShenandoahMarkConcurrentRootsTask : public AbstractGangTask {
+private:
+ SuspendibleThreadSetJoiner _sts_joiner;
+ ShenandoahConcurrentRootScanner<true /* concurrent */> _rs;
+ ShenandoahObjToScanQueueSet* const _queue_set;
+ ReferenceProcessor* const _rp;
- if (claim_codecache()) {
- ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
- MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- // TODO: We can not honor StringDeduplication here, due to lock ranking
- // inversion. So, we may miss some deduplication candidates.
- if (_heap->has_forwarded_objects()) {
- ShenandoahMarkResolveRefsClosure cl(q, rp);
- CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
- CodeCache::blobs_do(&blobs);
- } else {
- ShenandoahMarkRefsClosure cl(q, rp);
- CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
- CodeCache::blobs_do(&blobs);
- }
- }
+public:
+ ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs,
+ ReferenceProcessor* rp,
+ ShenandoahPhaseTimings::Phase phase,
+ uint nworkers);
+ void work(uint worker_id);
+};
+
+ShenandoahMarkConcurrentRootsTask::ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs,
+ ReferenceProcessor* rp,
+ ShenandoahPhaseTimings::Phase phase,
+ uint nworkers) :
+ AbstractGangTask("Shenandoah Concurrent Mark Task"),
+ _rs(nworkers, phase),
+ _queue_set(qs),
+ _rp(rp) {
+ assert(!ShenandoahHeap::heap()->has_forwarded_objects(), "Not expected");
+}
+
+void ShenandoahMarkConcurrentRootsTask::work(uint worker_id) {
+ ShenandoahConcurrentWorkerSession worker_session(worker_id);
+ ShenandoahObjToScanQueue* q = _queue_set->queue(worker_id);
+ ShenandoahMarkResolveRefsClosure cl(q, _rp);
+ _rs.oops_do(&cl, worker_id);
}
void ShenandoahConcurrentMark::mark_from_roots() {
WorkGang* workers = _heap->workers();
uint nworkers = workers->active_workers();
+ ReferenceProcessor* rp = NULL;
if (_heap->process_references()) {
- ReferenceProcessor* rp = _heap->ref_processor();
+ rp = _heap->ref_processor();
rp->set_active_mt_degree(nworkers);
// enable ("weak") refs discovery
@@ -431,6 +472,13 @@ void ShenandoahConcurrentMark::mark_from_roots() {
task_queues()->reserve(nworkers);
+ {
+ ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_mark_roots);
+ // Use a separate task to mark concurrent roots, since it may hold ClassLoaderData_lock and CodeCache_lock
+ ShenandoahMarkConcurrentRootsTask task(task_queues(), rp, ShenandoahPhaseTimings::conc_mark_roots, nworkers);
+ workers->run_task(&task);
+ }
+
{
TaskTerminator terminator(nworkers, task_queues());
ShenandoahConcurrentMarkingTask task(this, &terminator);
@@ -445,30 +493,50 @@ void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
uint nworkers = _heap->workers()->active_workers();
- // Finally mark everything else we've got in our queues during the previous steps.
- // It does two different things for concurrent vs. mark-compact GC:
- // - For concurrent GC, it starts with empty task queues, drains the remaining
- // SATB buffers, and then completes the marking closure.
- // - For mark-compact GC, it starts out with the task queues seeded by initial
- // root scan, and completes the closure, thus marking through all live objects
- // The implementation is the same, so it's shared here.
{
- ShenandoahGCPhase phase(full_gc ?
- ShenandoahPhaseTimings::full_gc_mark_finish_queues :
- ShenandoahPhaseTimings::finish_queues);
- task_queues()->reserve(nworkers);
-
shenandoah_assert_rp_isalive_not_installed();
ShenandoahIsAliveSelector is_alive;
ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
- StrongRootsScope scope(nworkers);
- TaskTerminator terminator(nworkers, task_queues());
- ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
- _heap->workers()->run_task(&task);
- }
+ // Full GC does not execute a concurrent cycle, and a degenerated cycle may bypass it.
+ // In those cases, concurrent roots might not have been scanned, so scan them here.
+ // Ideally, this would piggyback on ShenandoahFinalMarkingTask, but that makes time
+ // tracking very hard. Given that full and degenerated GCs are rare, use a separate task.
+ if (_heap->is_degenerated_gc_in_progress() || _heap->is_full_gc_in_progress()) {
+ ShenandoahPhaseTimings::Phase phase = _heap->is_full_gc_in_progress() ?
+ ShenandoahPhaseTimings::full_gc_scan_conc_roots :
+ ShenandoahPhaseTimings::degen_gc_scan_conc_roots;
+ ShenandoahGCPhase gc_phase(phase);
+ if (_heap->has_forwarded_objects()) {
+ ShenandoahProcessConcurrentRootsTask task(this, phase, nworkers);
+ _heap->workers()->run_task(&task);
+ } else {
+ ShenandoahProcessConcurrentRootsTask task(this, phase, nworkers);
+ _heap->workers()->run_task(&task);
+ }
+ _heap->workers()->run_task(&task);
+ } else {
+ ShenandoahProcessConcurrentRootsTask<ShenandoahMarkRefsClosure> task(this, phase, nworkers);
+ _heap->workers()->run_task(&task);
+ }
+ }
- assert(task_queues()->is_empty(), "Should be empty");
+ // Finally mark everything else we've got in our queues during the previous steps.
+ // It does two different things for concurrent vs. mark-compact GC:
+ // - For concurrent GC, it starts with empty task queues, drains the remaining
+ // SATB buffers, and then completes the marking closure.
+ // - For mark-compact GC, it starts out with the task queues seeded by initial
+ // root scan, and completes the closure, thus marking through all live objects
+ // The implementation is the same, so it's shared here.
+ {
+ ShenandoahGCPhase phase(full_gc ?
+ ShenandoahPhaseTimings::full_gc_mark_finish_queues :
+ ShenandoahPhaseTimings::finish_queues);
+ task_queues()->reserve(nworkers);
+
+ StrongRootsScope scope(nworkers);
+ TaskTerminator terminator(nworkers, task_queues());
+ ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
+ _heap->workers()->run_task(&task);
+ }
+
+ assert(task_queues()->is_empty(), "Should be empty");
+ }
// When we're done marking everything, we process weak references.
if (_heap->process_references()) {
@@ -942,11 +1010,3 @@ void ShenandoahConcurrentMark::mark_loop_work(T* cl, ShenandoahLiveData* live_da
}
}
}
-
-bool ShenandoahConcurrentMark::claim_codecache() {
- return _claimed_codecache.try_set();
-}
-
-void ShenandoahConcurrentMark::clear_claim_codecache() {
- _claimed_codecache.unset();
-}
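
A note on the shape of ShenandoahProcessConcurrentRootsTask above: it is templated on the marking closure, which is why the degenerated/full-GC path instantiates it once with ShenandoahMarkResolveRefsClosure and once with ShenandoahMarkRefsClosure rather than branching inside the scan loop. A minimal standalone model of that pattern in plain C++ (not JDK code):

    #include <cstdio>

    // Stand-ins for ShenandoahMarkResolveRefsClosure / ShenandoahMarkRefsClosure:
    // one resolves a forwarded pointer before marking, the other marks directly.
    struct ResolveThenMark { void do_oop(int* p) { std::printf("resolve+mark %d\n", *p); } };
    struct MarkOnly        { void do_oop(int* p) { std::printf("mark %d\n", *p); } };

    // Templating the task on the closure statically binds do_oop in the scan
    // loop, so the per-oop work is inlined instead of virtually dispatched.
    template <typename Closure>
    struct RootScanTask {
      void work(int* begin, int* end) {
        Closure cl;
        for (int* p = begin; p != end; ++p) {
          cl.do_oop(p);
        }
      }
    };

    int main() {
      int roots[3] = {1, 2, 3};
      bool has_forwarded_objects = true; // would come from the heap state
      if (has_forwarded_objects) {
        RootScanTask<ResolveThenMark>().work(roots, roots + 3);
      } else {
        RootScanTask<MarkOnly>().work(roots, roots + 3);
      }
    }
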
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp
index ef5707ee53a..554e45c818e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp
@@ -91,16 +91,6 @@ private:
public:
void preclean_weak_refs();
-// ---------- Concurrent code cache
-//
-private:
- ShenandoahSharedFlag _claimed_codecache;
-
-public:
- void concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp);
- bool claim_codecache();
- void clear_claim_codecache();
-
// ---------- Helpers
// Used from closures, need to be public
//
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index 4bca7007698..56a898895ab 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -1178,13 +1178,6 @@ void ShenandoahHeap::prepare_for_verify() {
}
}
-void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
- workers()->print_worker_threads_on(st);
- if (ShenandoahStringDedup::is_enabled()) {
- ShenandoahStringDedup::print_worker_threads_on(st);
- }
-}
-
void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
workers()->threads_do(tcl);
if (_safepoint_workers != NULL) {
@@ -1589,17 +1582,20 @@ void ShenandoahHeap::op_final_mark() {
}
if (ShenandoahVerify) {
- ShenandoahRootVerifier::RootTypes types = ShenandoahRootVerifier::None;
- if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
- types = ShenandoahRootVerifier::combine(ShenandoahRootVerifier::JNIHandleRoots, ShenandoahRootVerifier::WeakRoots);
- types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CLDGRoots);
- types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::StringDedupRoots);
- }
+ // If we hit OOM while evacuating/updating roots, there is no guarantee of their consistency
+ if (!cancelled_gc()) {
+ ShenandoahRootVerifier::RootTypes types = ShenandoahRootVerifier::None;
+ if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
+ types = ShenandoahRootVerifier::combine(ShenandoahRootVerifier::JNIHandleRoots, ShenandoahRootVerifier::WeakRoots);
+ types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CLDGRoots);
+ types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::StringDedupRoots);
+ }
- if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
- types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CodeRoots);
+ if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
+ types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CodeRoots);
+ }
+ verifier()->verify_roots_no_forwarded_except(types);
}
- verifier()->verify_roots_no_forwarded_except(types);
verifier()->verify_during_evacuation();
}
} else {
@@ -1654,13 +1650,12 @@ class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
private:
ShenandoahVMRoots<true /*concurrent*/> _vm_roots;
ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots;
- ShenandoahConcurrentStringDedupRoots _dedup_roots;
public:
ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
AbstractGangTask("Shenandoah Evacuate/Update Concurrent Strong Roots Task"),
_vm_roots(phase),
- _cld_roots(phase) {}
+ _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()) {}
void work(uint worker_id) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
@@ -1677,12 +1672,6 @@ public:
CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
_cld_roots.cld_do(&clds, worker_id);
}
-
- {
- ShenandoahForwardedIsAliveClosure is_alive;
- ShenandoahEvacuateUpdateRootsClosure keep_alive;
- _dedup_roots.oops_do(&is_alive, &keep_alive, worker_id);
- }
}
};
@@ -1772,6 +1761,7 @@ private:
ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/>
_cld_roots;
ShenandoahConcurrentNMethodIterator _nmethod_itr;
+ ShenandoahConcurrentStringDedupRoots _dedup_roots;
bool _concurrent_class_unloading;
public:
@@ -1781,8 +1771,9 @@ public:
_string_table_roots(OopStorageSet::string_table_weak(), phase, ShenandoahPhaseTimings::StringTableRoots),
_resolved_method_table_roots(OopStorageSet::resolved_method_table_weak(), phase, ShenandoahPhaseTimings::ResolvedMethodTableRoots),
_vm_roots(OopStorageSet::vm_weak(), phase, ShenandoahPhaseTimings::VMWeakRoots),
- _cld_roots(phase),
+ _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()),
_nmethod_itr(ShenandoahCodeRoots::table()),
+ _dedup_roots(phase),
_concurrent_class_unloading(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
StringTable::reset_dead_counter();
ResolvedMethodTable::reset_dead_counter();
@@ -1818,6 +1809,11 @@ public:
cl.reset_dead_counter();
_resolved_method_table_roots.oops_do(&cl, worker_id);
ResolvedMethodTable::inc_dead_counter(cl.dead_counter());
+
+ // String dedup weak roots
+ ShenandoahForwardedIsAliveClosure is_alive;
+ ShenandoahEvacuateUpdateRootsClosure keep_alive;
+ _dedup_roots.oops_do(&is_alive, &keep_alive, worker_id);
}
// If we are going to perform concurrent class unloading later on, we need to
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index c1139c14b41..0657e73be9a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -160,7 +160,6 @@ public:
void print_on(outputStream* st) const;
void print_extended_on(outputStream *st) const;
void print_tracing_info() const;
- void print_gc_threads_on(outputStream* st) const;
void print_heap_regions_on(outputStream* st) const;
void stop();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp
index d78d152bce7..621c8bf542d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp
@@ -523,13 +523,13 @@ void ShenandoahNMethodList::transfer(ShenandoahNMethodList* const list, int limi
}
ShenandoahNMethodList* ShenandoahNMethodList::acquire() {
- assert(CodeCache_lock->owned_by_self(), "Lock must be held");
+ assert_locked_or_safepoint(CodeCache_lock);
_ref_count++;
return this;
}
void ShenandoahNMethodList::release() {
- assert(CodeCache_lock->owned_by_self(), "Lock must be held");
+ assert_locked_or_safepoint(CodeCache_lock);
_ref_count--;
if (_ref_count == 0) {
delete this;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp
index 80bd132c47f..e0f8cf58634 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp
@@ -103,12 +103,15 @@ bool ShenandoahPhaseTimings::is_worker_phase(Phase phase) {
case full_gc_scan_roots:
case full_gc_update_roots:
case full_gc_adjust_roots:
+ case degen_gc_scan_conc_roots:
case degen_gc_update_roots:
+ case full_gc_scan_conc_roots:
case full_gc_purge_class_unload:
case full_gc_purge_weak_par:
case purge_class_unload:
case purge_weak_par:
case heap_iteration_roots:
+ case conc_mark_roots:
case conc_weak_roots_work:
case conc_strong_roots:
return true;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp
index c9e3c21491b..4f30d0206e2 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp
@@ -48,7 +48,6 @@ class outputStream;
f(CNT_PREFIX ## VMWeakRoots, DESC_PREFIX "VM Weak Roots") \
f(CNT_PREFIX ## ObjectSynchronizerRoots, DESC_PREFIX "Synchronizer Roots") \
f(CNT_PREFIX ## ManagementRoots, DESC_PREFIX "Management Roots") \
- f(CNT_PREFIX ## SystemDictionaryRoots, DESC_PREFIX "System Dict Roots") \
f(CNT_PREFIX ## CLDGRoots, DESC_PREFIX "CLDG Roots") \
f(CNT_PREFIX ## JVMTIRoots, DESC_PREFIX "JVMTI Roots") \
f(CNT_PREFIX ## StringDedupTableRoots, DESC_PREFIX "Dedup Table Roots") \
@@ -68,6 +67,9 @@ class outputStream;
f(resize_tlabs, " Resize TLABs") \
\
f(conc_mark, "Concurrent Marking") \
+ f(conc_mark_roots, " Roots ") \
+ SHENANDOAH_PAR_PHASE_DO(conc_mark_roots, " CM: ", f) \
+ \
f(conc_preclean, "Concurrent Precleaning") \
\
f(final_mark_gross, "Pause Final Mark (G)") \
@@ -128,6 +130,8 @@ class outputStream;
\
f(degen_gc_gross, "Pause Degenerated GC (G)") \
f(degen_gc, "Pause Degenerated GC (N)") \
+ f(degen_gc_scan_conc_roots, " Degen Mark Roots") \
+ SHENANDOAH_PAR_PHASE_DO(degen_gc_conc_mark_, " DM: ", f) \
f(degen_gc_update_roots, " Degen Update Roots") \
SHENANDOAH_PAR_PHASE_DO(degen_gc_update_, " DU: ", f) \
\
@@ -137,6 +141,8 @@ class outputStream;
f(full_gc_prepare, " Prepare") \
f(full_gc_scan_roots, " Scan Roots") \
SHENANDOAH_PAR_PHASE_DO(full_gc_scan_roots_, " FS: ", f) \
+ f(full_gc_scan_conc_roots, " Scan Concurrent Roots") \
+ SHENANDOAH_PAR_PHASE_DO(full_gc_scan_conc_roots, " FCS: ", f) \
f(full_gc_update_roots, " Update Roots") \
SHENANDOAH_PAR_PHASE_DO(full_gc_update_roots_, " FU: ", f) \
f(full_gc_mark, " Mark") \
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
index 02fa6ffc2b8..095d80b69c8 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
@@ -26,8 +26,8 @@
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
-#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
+#include "code/nmethod.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
@@ -54,16 +54,10 @@ void ShenandoahSerialRoot::oops_do(OopClosure* cl, uint worker_id) {
}
}
-// Overwrite the second argument for SD::oops_do, don't include vm global oop storage.
-static void system_dictionary_oops_do(OopClosure* cl) {
- SystemDictionary::oops_do(cl, false);
-}
-
ShenandoahSerialRoots::ShenandoahSerialRoots(ShenandoahPhaseTimings::Phase phase) :
_universe_root(&Universe::oops_do, phase, ShenandoahPhaseTimings::UniverseRoots),
_object_synchronizer_root(&ObjectSynchronizer::oops_do, phase, ShenandoahPhaseTimings::ObjectSynchronizerRoots),
_management_root(&Management::oops_do, phase, ShenandoahPhaseTimings::ManagementRoots),
- _system_dictionary_root(&system_dictionary_oops_do, phase, ShenandoahPhaseTimings::SystemDictionaryRoots),
_jvmti_root(&JvmtiExport::oops_do, phase, ShenandoahPhaseTimings::JVMTIRoots) {
}
@@ -71,7 +65,6 @@ void ShenandoahSerialRoots::oops_do(OopClosure* cl, uint worker_id) {
_universe_root.oops_do(cl, worker_id);
_object_synchronizer_root.oops_do(cl, worker_id);
_management_root.oops_do(cl, worker_id);
- _system_dictionary_root.oops_do(cl, worker_id);
_jvmti_root.oops_do(cl, worker_id);
}
@@ -148,7 +141,8 @@ void ShenandoahStringDedupRoots::oops_do(BoolObjectClosure* is_alive, OopClosure
}
}
-ShenandoahConcurrentStringDedupRoots::ShenandoahConcurrentStringDedupRoots() {
+ShenandoahConcurrentStringDedupRoots::ShenandoahConcurrentStringDedupRoots(ShenandoahPhaseTimings::Phase phase) :
+ _phase(phase) {
if (ShenandoahStringDedup::is_enabled()) {
StringDedupTable_lock->lock_without_safepoint_check();
StringDedupQueue_lock->lock_without_safepoint_check();
@@ -170,8 +164,15 @@ void ShenandoahConcurrentStringDedupRoots::oops_do(BoolObjectClosure* is_alive,
assert_locked_or_safepoint_weak(StringDedupTable_lock);
StringDedupUnlinkOrOopsDoClosure sd_cl(is_alive, keep_alive);
- StringDedupQueue::unlink_or_oops_do(&sd_cl);
- StringDedupTable::unlink_or_oops_do(&sd_cl, worker_id);
+ {
+ ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::StringDedupQueueRoots, worker_id);
+ StringDedupQueue::unlink_or_oops_do(&sd_cl);
+ }
+
+ {
+ ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::StringDedupTableRoots, worker_id);
+ StringDedupTable::unlink_or_oops_do(&sd_cl, worker_id);
+ }
}
}
@@ -199,10 +200,12 @@ ShenandoahRootScanner::ShenandoahRootScanner(uint n_workers, ShenandoahPhaseTimi
ShenandoahRootProcessor(phase),
_serial_roots(phase),
_thread_roots(phase, n_workers > 1),
- _code_roots(phase),
- _vm_roots(phase),
- _dedup_roots(phase),
- _cld_roots(phase) {
+ _dedup_roots(phase) {
+ nmethod::oops_do_marking_prologue();
+}
+
+ShenandoahRootScanner::~ShenandoahRootScanner() {
+ nmethod::oops_do_marking_epilogue();
}
void ShenandoahRootScanner::roots_do(uint worker_id, OopClosure* oops) {
@@ -221,29 +224,33 @@ void ShenandoahRootScanner::roots_do(uint worker_id, OopClosure* oops, CLDClosur
assert(!ShenandoahSafepoint::is_at_shenandoah_safepoint() ||
!ShenandoahHeap::heap()->unload_classes(),
"Expect class unloading when Shenandoah cycle is running");
- ResourceMark rm;
-
- _serial_roots.oops_do(oops, worker_id);
- _vm_roots.oops_do(oops, worker_id);
-
assert(clds != NULL, "Only possible with CLD closure");
- _cld_roots.cld_do(clds, worker_id);
-
- ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
- _thread_roots.threads_do(&tc_cl, worker_id);
AlwaysTrueClosure always_true;
+ ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
+
+ ResourceMark rm;
+
+ // Process serial-claiming roots first
+ _serial_roots.oops_do(oops, worker_id);
+
+ // Then process light-weight/limited parallel roots
_dedup_roots.oops_do(&always_true, oops, worker_id);
+
+ // Process heavy-weight/fully parallel roots last
+ _thread_roots.threads_do(&tc_cl, worker_id);
}
void ShenandoahRootScanner::strong_roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code, ThreadClosure* tc) {
assert(ShenandoahHeap::heap()->unload_classes(), "Should be used during class unloading");
ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
+
ResourceMark rm;
+ // Process serial-claiming roots first
_serial_roots.oops_do(oops, worker_id);
- _vm_roots.oops_do(oops, worker_id);
- _cld_roots.always_strong_cld_do(clds, worker_id);
+
+ // Process heavy-weight/fully parallel roots last
_thread_roots.threads_do(&tc_cl, worker_id);
}
@@ -254,7 +261,7 @@ ShenandoahRootEvacuator::ShenandoahRootEvacuator(uint n_workers,
ShenandoahRootProcessor(phase),
_serial_roots(phase),
_vm_roots(phase),
- _cld_roots(phase),
+ _cld_roots(phase, n_workers),
_thread_roots(phase, n_workers > 1),
_serial_weak_roots(phase),
_weak_roots(phase),
@@ -272,17 +279,23 @@ void ShenandoahRootEvacuator::roots_do(uint worker_id, OopClosure* oops) {
static_cast<CodeBlobToOopClosure*>(&blobsCl);
AlwaysTrueClosure always_true;
+ // Process serial-claiming roots first
_serial_roots.oops_do(oops, worker_id);
_serial_weak_roots.weak_oops_do(oops, worker_id);
+
+ // Then process light-weight/limited parallel roots
if (_stw_roots_processing) {
_vm_roots.oops_do(oops, worker_id);
_weak_roots.oops_do(oops, worker_id);
_dedup_roots.oops_do(&always_true, oops, worker_id);
}
-
if (_stw_class_unloading) {
CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
_cld_roots.cld_do(&clds, worker_id);
+ }
+
+ // Process heavy-weight/fully parallel roots last
+ if (_stw_class_unloading) {
_code_roots.code_blobs_do(codes_cl, worker_id);
_thread_roots.oops_do(oops, NULL, worker_id);
} else {
@@ -294,7 +307,7 @@ ShenandoahRootUpdater::ShenandoahRootUpdater(uint n_workers, ShenandoahPhaseTimi
ShenandoahRootProcessor(phase),
_serial_roots(phase),
_vm_roots(phase),
- _cld_roots(phase),
+ _cld_roots(phase, n_workers),
_thread_roots(phase, n_workers > 1),
_serial_weak_roots(phase),
_weak_roots(phase),
@@ -306,7 +319,7 @@ ShenandoahRootAdjuster::ShenandoahRootAdjuster(uint n_workers, ShenandoahPhaseTi
ShenandoahRootProcessor(phase),
_serial_roots(phase),
_vm_roots(phase),
- _cld_roots(phase),
+ _cld_roots(phase, n_workers),
_thread_roots(phase, n_workers > 1),
_serial_weak_roots(phase),
_weak_roots(phase),
@@ -324,16 +337,19 @@ void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {
CLDToOopClosure adjust_cld_closure(oops, ClassLoaderData::_claim_strong);
AlwaysTrueClosure always_true;
+ // Process serial-claiming roots first
_serial_roots.oops_do(oops, worker_id);
- _vm_roots.oops_do(oops, worker_id);
-
- _thread_roots.oops_do(oops, NULL, worker_id);
- _cld_roots.cld_do(&adjust_cld_closure, worker_id);
- _code_roots.code_blobs_do(adjust_code_closure, worker_id);
-
_serial_weak_roots.weak_oops_do(oops, worker_id);
+
+ // Then process light-weight/limited parallel roots
+ _vm_roots.oops_do(oops, worker_id);
_weak_roots.oops_do(oops, worker_id);
_dedup_roots.oops_do(&always_true, oops, worker_id);
+ _cld_roots.cld_do(&adjust_cld_closure, worker_id);
+
+ // Process heavy-weight/fully parallel roots last
+ _code_roots.code_blobs_do(adjust_code_closure, worker_id);
+ _thread_roots.oops_do(oops, NULL, worker_id);
}
ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner() :
@@ -341,9 +357,10 @@ ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner() :
_serial_roots(ShenandoahPhaseTimings::heap_iteration_roots),
_thread_roots(ShenandoahPhaseTimings::heap_iteration_roots, false /*is par*/),
_vm_roots(ShenandoahPhaseTimings::heap_iteration_roots),
- _cld_roots(ShenandoahPhaseTimings::heap_iteration_roots),
+ _cld_roots(ShenandoahPhaseTimings::heap_iteration_roots, 1),
_serial_weak_roots(ShenandoahPhaseTimings::heap_iteration_roots),
_weak_roots(ShenandoahPhaseTimings::heap_iteration_roots),
+ _dedup_roots(ShenandoahPhaseTimings::heap_iteration_roots),
_code_roots(ShenandoahPhaseTimings::heap_iteration_roots) {
}
@@ -354,15 +371,20 @@ ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner() :
MarkingCodeBlobClosure code(oops, !CodeBlobToOopClosure::FixRelocations);
ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, NULL);
AlwaysTrueClosure always_true;
+
ResourceMark rm;
+ // Process serial-claiming roots first
_serial_roots.oops_do(oops, 0);
- _vm_roots.oops_do(oops, 0);
- _cld_roots.cld_do(&clds, 0);
- _thread_roots.threads_do(&tc_cl, 0);
- _code_roots.code_blobs_do(&code, 0);
-
_serial_weak_roots.weak_oops_do(oops, 0);
+
+ // Then process light-weight/limited parallel roots
+ _vm_roots.oops_do(oops, 0);
_weak_roots.oops_do(oops, 0);
_dedup_roots.oops_do(&always_true, oops, 0);
+ _cld_roots.cld_do(&clds, 0);
+
+ // Process heavy-weight/fully parallel roots last
+ _code_roots.code_blobs_do(&code, 0);
+ _thread_roots.threads_do(&tc_cl, 0);
}
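
All of the reordered roots_do() variants above follow the same rule: serial-claiming root groups go first (exactly one worker wins each claim, so the losers fall through almost for free), lighter or concurrency-limited parallel groups come next, and the heavy fully-parallel groups (code cache, threads) run last, when every worker is free to help. A standalone model of the serial-claiming step, using std::atomic in place of HotSpot's claim primitives (not JDK code):

    #include <atomic>
    #include <cstdio>

    // A "serial-claiming" root group: the first worker to flip the flag scans
    // the (small) group; all other workers skip it immediately.
    struct SerialRoot {
      std::atomic<bool> claimed{false};

      template <typename ScanFn>
      void oops_do(ScanFn scan) {
        bool expected = false;
        if (claimed.compare_exchange_strong(expected, true)) {
          scan();
        }
      }
    };

    int main() {
      SerialRoot universe_root;
      // Every worker calls oops_do; only the first one performs the scan.
      for (int worker = 0; worker < 4; ++worker) {
        universe_root.oops_do([] { std::printf("scanned Universe roots\n"); });
      }
    }
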
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
index 1e339828b71..7cdb510f783 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
@@ -54,7 +54,6 @@ private:
ShenandoahSerialRoot _universe_root;
ShenandoahSerialRoot _object_synchronizer_root;
ShenandoahSerialRoot _management_root;
- ShenandoahSerialRoot _system_dictionary_root;
ShenandoahSerialRoot _jvmti_root;
public:
ShenandoahSerialRoots(ShenandoahPhaseTimings::Phase phase);
@@ -205,8 +204,11 @@ public:
};
class ShenandoahConcurrentStringDedupRoots {
+private:
+ ShenandoahPhaseTimings::Phase _phase;
+
public:
- ShenandoahConcurrentStringDedupRoots();
+ ShenandoahConcurrentStringDedupRoots(ShenandoahPhaseTimings::Phase phase);
~ShenandoahConcurrentStringDedupRoots();
void oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id);
@@ -226,9 +228,18 @@ public:
template <bool CONCURRENT, bool SINGLE_THREADED>
class ShenandoahClassLoaderDataRoots {
private:
+ ShenandoahSharedSemaphore _semaphore;
ShenandoahPhaseTimings::Phase _phase;
+
+ static uint worker_count(uint n_workers) {
+ // Limit concurrency a bit, otherwise it wastes resources when workers are tripping
+ // over each other. This also leaves free workers to process other parts of the root
+ // set, while admitted workers are busy with doing the CLDG walk.
+ return MAX2(1u, MIN2(ShenandoahSharedSemaphore::max_tokens(), n_workers / 2));
+ }
+
public:
- ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase);
+ ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase, uint n_workers);
~ShenandoahClassLoaderDataRoots();
void always_strong_cld_do(CLDClosure* clds, uint worker_id);
@@ -250,13 +261,11 @@ class ShenandoahRootScanner : public ShenandoahRootProcessor {
private:
ShenandoahSerialRoots _serial_roots;
ShenandoahThreadRoots _thread_roots;
- ShenandoahCodeCacheRoots _code_roots;
- ShenandoahVMRoots<false /*concurrent*/> _vm_roots;
ShenandoahStringDedupRoots _dedup_roots;
- ShenandoahClassLoaderDataRoots<false /*concurrent*/, false /*single threaded*/>
- _cld_roots;
+
public:
ShenandoahRootScanner(uint n_workers, ShenandoahPhaseTimings::Phase phase);
+ ~ShenandoahRootScanner();
// Apply oops, clds and blobs to all strongly reachable roots in the system,
// during class unloading cycle
@@ -269,6 +278,22 @@ public:
void roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code, ThreadClosure* tc = NULL);
};
+template <bool CONCURRENT>
+class ShenandoahConcurrentRootScanner {
+private:
+ ShenandoahVMRoots<CONCURRENT> _vm_roots;
+ ShenandoahClassLoaderDataRoots<CONCURRENT, false /* single thread*/>
+ _cld_roots;
+ ShenandoahNMethodTableSnapshot* _codecache_snapshot;
+ ShenandoahPhaseTimings::Phase _phase;
+
+public:
+ ShenandoahConcurrentRootScanner(uint n_workers, ShenandoahPhaseTimings::Phase phase);
+ ~ShenandoahConcurrentRootScanner();
+
+ void oops_do(OopClosure* oops, uint worker_id);
+};
+
// This scanner is only for SH::object_iteration() and only supports single-threaded
// root scanning
class ShenandoahHeapIterationRootScanner : public ShenandoahRootProcessor {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp
index 1c884ba13b3..10703db29d9 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp
@@ -122,7 +122,8 @@ void ShenandoahVMRoots<CONCURRENT>::oops_do(T* cl, uint worker_id) {
}
template <bool CONCURRENT, bool SINGLE_THREADED>
-ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase) :
+ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase, uint n_workers) :
+ _semaphore(worker_count(n_workers)),
_phase(phase) {
if (!SINGLE_THREADED) {
ClassLoaderDataGraph::clear_claimed_marks();
@@ -146,9 +147,10 @@ void ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::always_strong_
assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
assert(Thread::current()->is_VM_thread(), "Single threaded CLDG iteration can only be done by VM thread");
ClassLoaderDataGraph::always_strong_cld_do(clds);
- } else {
+ } else if (_semaphore.try_acquire()) {
ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CLDGRoots, worker_id);
ClassLoaderDataGraph::always_strong_cld_do(clds);
+ _semaphore.claim_all();
}
}
@@ -158,9 +160,10 @@ void ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::cld_do(CLDClos
assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
assert(Thread::current()->is_VM_thread(), "Single threaded CLDG iteration can only be done by VM thread");
ClassLoaderDataGraph::cld_do(clds);
- } else {
+ } else if (_semaphore.try_acquire()) {
ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CLDGRoots, worker_id);
ClassLoaderDataGraph::cld_do(clds);
+ _semaphore.claim_all();
}
}
@@ -181,6 +184,51 @@ public:
}
};
+template <bool CONCURRENT>
+ShenandoahConcurrentRootScanner<CONCURRENT>::ShenandoahConcurrentRootScanner(uint n_workers,
+ ShenandoahPhaseTimings::Phase phase) :
+ _vm_roots(phase),
+ _cld_roots(phase, n_workers),
+ _codecache_snapshot(NULL),
+ _phase(phase) {
+ if (!ShenandoahHeap::heap()->unload_classes()) {
+ if (CONCURRENT) {
+ CodeCache_lock->lock_without_safepoint_check();
+ } else {
+ assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
+ }
+ _codecache_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
+ }
+ assert(!CONCURRENT || !ShenandoahHeap::heap()->has_forwarded_objects(), "Not expecting forwarded pointers during concurrent marking");
+}
+
+template <bool CONCURRENT>
+ShenandoahConcurrentRootScanner<CONCURRENT>::~ShenandoahConcurrentRootScanner() {
+ if (!ShenandoahHeap::heap()->unload_classes()) {
+ ShenandoahCodeRoots::table()->finish_iteration(_codecache_snapshot);
+ if (CONCURRENT) {
+ CodeCache_lock->unlock();
+ }
+ }
+}
+
+template <bool CONCURRENT>
+void ShenandoahConcurrentRootScanner<CONCURRENT>::oops_do(OopClosure* oops, uint worker_id) {
+ ShenandoahHeap* const heap = ShenandoahHeap::heap();
+ CLDToOopClosure clds_cl(oops, CONCURRENT ? ClassLoaderData::_claim_strong : ClassLoaderData::_claim_none);
+ _vm_roots.oops_do(oops, worker_id);
+
+ if (!heap->unload_classes()) {
+ _cld_roots.cld_do(&clds_cl, worker_id);
+
+ ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
+ CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
+ _codecache_snapshot->parallel_blobs_do(&blobs);
+ } else {
+ _cld_roots.always_strong_cld_do(&clds_cl, worker_id);
+ }
+}
+
template <typename IsAlive, typename KeepAlive>
void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAlive* keep_alive) {
CodeBlobToOopClosure update_blobs(keep_alive, CodeBlobToOopClosure::FixRelocations);
@@ -191,16 +239,19 @@ void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAliv
CLDToOopClosure clds(keep_alive, ClassLoaderData::_claim_strong);
+ // Process serial-claiming roots first
_serial_roots.oops_do(keep_alive, worker_id);
- _vm_roots.oops_do(keep_alive, worker_id);
-
- _cld_roots.cld_do(&clds, worker_id);
- _code_roots.code_blobs_do(codes_cl, worker_id);
- _thread_roots.oops_do(keep_alive, NULL, worker_id);
-
_serial_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);
+
+ // Then process light-weight/limited parallel roots
+ _vm_roots.oops_do(keep_alive, worker_id);
_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);
_dedup_roots.oops_do(is_alive, keep_alive, worker_id);
+ _cld_roots.cld_do(&clds, worker_id);
+
+ // Process heavy-weight/fully parallel roots last
+ _code_roots.code_blobs_do(codes_cl, worker_id);
+ _thread_roots.oops_do(keep_alive, NULL, worker_id);
}
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_INLINE_HPP
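
ShenandoahConcurrentRootScanner above is an RAII construct: the constructor takes CodeCache_lock (when running concurrently) and snapshots the nmethod table, the workers then iterate the stable snapshot, and the destructor finishes the iteration and releases the lock. A rough standalone model of that lifecycle (not JDK code; std::mutex stands in for CodeCache_lock, and the real snapshot also distributes blobs to workers via parallel_blobs_do, which this sketch does not model):

    #include <cstdio>
    #include <mutex>
    #include <vector>

    std::mutex code_cache_lock;                  // stands in for CodeCache_lock
    std::vector<int> nmethod_table = {10, 20};   // stands in for the nmethod table

    class ConcurrentRootScanner {
      std::unique_lock<std::mutex> _lock;        // held for the scanner's lifetime
      std::vector<int> _snapshot;                // stable view shared by workers
    public:
      ConcurrentRootScanner()
        : _lock(code_cache_lock),                // ctor: take the lock...
          _snapshot(nmethod_table) {}            // ...and snapshot the table

      void oops_do(int worker_id) {
        for (int blob : _snapshot) {
          std::printf("worker %d scans blob %d\n", worker_id, blob);
        }
      }
    };                                           // dtor: drop snapshot, unlock

    int main() {
      ConcurrentRootScanner rs;
      rs.oops_do(0);
    }
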
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp
index 46cf349dc46..cdd4ceec9ce 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp
@@ -27,7 +27,6 @@
#include "classfile/classLoaderDataGraph.hpp"
-#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
@@ -35,6 +34,8 @@
#include "gc/shenandoah/shenandoahRootVerifier.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "gc/shared/oopStorage.inline.hpp"
+#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/thread.hpp"
@@ -78,12 +79,12 @@ void ShenandoahRootVerifier::oops_do(OopClosure* oops) {
Management::oops_do(oops);
JvmtiExport::oops_do(oops);
ObjectSynchronizer::oops_do(oops);
- SystemDictionary::oops_do(oops);
}
if (verify(JNIHandleRoots)) {
shenandoah_assert_safepoint();
JNIHandles::oops_do(oops);
+ OopStorageSet::vm_global()->oops_do(oops);
}
if (verify(WeakRoots)) {
@@ -125,7 +126,7 @@ void ShenandoahRootVerifier::roots_do(OopClosure* oops) {
JvmtiExport::oops_do(oops);
JNIHandles::oops_do(oops);
ObjectSynchronizer::oops_do(oops);
- SystemDictionary::oops_do(oops);
+ OopStorageSet::vm_global()->oops_do(oops);
AlwaysTrueClosure always_true;
WeakProcessor::weak_oops_do(&always_true, oops);
@@ -153,7 +154,7 @@ void ShenandoahRootVerifier::strong_roots_do(OopClosure* oops) {
JvmtiExport::oops_do(oops);
JNIHandles::oops_do(oops);
ObjectSynchronizer::oops_do(oops);
- SystemDictionary::oops_do(oops);
+ OopStorageSet::vm_global()->oops_do(oops);
// Do thread roots the last. This allows verification code to find
// any broken objects from those special roots first, not the accidental
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp b/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp
index 99b25eb8ee1..af8db83890f 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp
@@ -245,4 +245,38 @@ private:
};
+typedef struct ShenandoahSharedSemaphore {
+ shenandoah_padding(0);
+ volatile ShenandoahSharedValue value;
+ shenandoah_padding(1);
+
+ static uint max_tokens() {
+ return sizeof(ShenandoahSharedValue) * CHAR_MAX;
+ }
+
+ ShenandoahSharedSemaphore(uint tokens) {
+ assert(tokens <= max_tokens(), "sanity");
+ Atomic::release_store_fence(&value, (ShenandoahSharedValue)tokens);
+ }
+
+ bool try_acquire() {
+ while (true) {
+ ShenandoahSharedValue ov = Atomic::load_acquire(&value);
+ if (ov == 0) {
+ return false;
+ }
+ ShenandoahSharedValue nv = ov - 1;
+ if (Atomic::cmpxchg(&value, ov, nv) == ov) {
+ // successfully set
+ return true;
+ }
+ }
+ }
+
+ void claim_all() {
+ Atomic::release_store_fence(&value, (ShenandoahSharedValue)0);
+ }
+
+} ShenandoahSharedSemaphore;
+
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHSHAREDVARIABLES_HPP
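A minimal usage sketch of the new ShenandoahSharedSemaphore (illustrative, not part of the patch; do_rate_limited_work is a hypothetical helper). The pool is consume-only: tokens are handed out with try_acquire() and the remainder can be revoked in bulk with claim_all():

  ShenandoahSharedSemaphore sema(4 /* tokens */);

  void worker_step() {
    if (sema.try_acquire()) {
      do_rate_limited_work();  // hypothetical; runs at most 4 times in total
    }
  }

  void cancel() {
    sema.claim_all();          // drains remaining tokens; try_acquire() now fails
  }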
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp b/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp
index 62dd15625a8..1ab8bd3d0de 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp
@@ -98,18 +98,6 @@ void ShenandoahStringDedup::oops_do_slow(OopClosure* cl) {
StringDedupTable::unlink_or_oops_do(&sd_cl, 0);
}
-class ShenandoahIsMarkedNextClosure : public BoolObjectClosure {
-private:
- ShenandoahMarkingContext* const _mark_context;
-
-public:
- ShenandoahIsMarkedNextClosure() : _mark_context(ShenandoahHeap::heap()->marking_context()) { }
-
- bool do_object_b(oop obj) {
- return _mark_context->is_marked(obj);
- }
-};
-
//
// Task for parallel unlink_or_oops_do() operation on the deduplication queue
// and table.
diff --git a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
index 89b45afb47d..4f2e36a8304 100644
--- a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
@@ -149,7 +149,7 @@ void ZBarrierSetC1::load_barrier(LIRAccess& access, LIR_Opr result) const {
// Slow path
const address runtime_stub = load_barrier_on_oop_field_preloaded_runtime_stub(access.decorators());
CodeStub* const stub = new ZLoadBarrierStubC1(access, result, runtime_stub);
- __ branch(lir_cond_notEqual, T_ADDRESS, stub);
+ __ branch(lir_cond_notEqual, stub);
__ branch_destination(stub->continuation());
}
diff --git a/src/hotspot/share/gc/z/vmStructs_z.hpp b/src/hotspot/share/gc/z/vmStructs_z.hpp
index 8d489bc0d2c..b8415a06364 100644
--- a/src/hotspot/share/gc/z/vmStructs_z.hpp
+++ b/src/hotspot/share/gc/z/vmStructs_z.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -77,8 +77,8 @@ typedef ZAttachedArray ZAttachedArrayForForwardin
volatile_nonstatic_field(ZPage, _top, uintptr_t) \
\
nonstatic_field(ZPageAllocator, _max_capacity, const size_t) \
- nonstatic_field(ZPageAllocator, _capacity, size_t) \
- nonstatic_field(ZPageAllocator, _used, size_t) \
+ volatile_nonstatic_field(ZPageAllocator, _capacity, size_t) \
+ volatile_nonstatic_field(ZPageAllocator, _used, size_t) \
\
nonstatic_field(ZPageTable, _map, ZGranuleMapForPageTable) \
\
diff --git a/src/hotspot/share/gc/z/zAllocationFlags.hpp b/src/hotspot/share/gc/z/zAllocationFlags.hpp
index f15295d5746..a102d761a31 100644
--- a/src/hotspot/share/gc/z/zAllocationFlags.hpp
+++ b/src/hotspot/share/gc/z/zAllocationFlags.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,20 +31,22 @@
// Allocation flags layout
// -----------------------
//
-// 7 3 2 1 0
-// +----+-+-+-+-+
-// |0000|1|1|1|1|
-// +----+-+-+-+-+
-// | | | | |
-// | | | | * 0-0 Worker Thread Flag (1-bit)
-// | | | |
-// | | | * 1-1 Non-Blocking Flag (1-bit)
-// | | |
-// | | * 2-2 Relocation Flag (1-bit)
-// | |
-// | * 3-3 No Reserve Flag (1-bit)
+// 7 4 3 2 1 0
+// +---+-+-+-+-+-+
+// |000|1|1|1|1|1|
+// +---+-+-+-+-+-+
+// | | | | | |
+// | | | | | * 0-0 Worker Thread Flag (1-bit)
+// | | | | |
+// | | | | * 1-1 Non-Blocking Flag (1-bit)
+// | | | |
+// | | | * 2-2 Relocation Flag (1-bit)
+// | | |
+// | | * 3-3 No Reserve Flag (1-bit)
+// | |
+// | * 4-4 Low Address Flag (1-bit)
// |
-// * 7-4 Unused (4-bits)
+// * 7-5 Unused (3-bits)
//
class ZAllocationFlags {
@@ -53,6 +55,7 @@ private:
typedef ZBitField<uint8_t, bool, 1, 1> field_non_blocking;
typedef ZBitField<uint8_t, bool, 2, 1> field_relocation;
typedef ZBitField<uint8_t, bool, 3, 1> field_no_reserve;
+ typedef ZBitField<uint8_t, bool, 4, 1> field_low_address;
uint8_t _flags;
@@ -76,6 +79,10 @@ public:
_flags |= field_no_reserve::encode(true);
}
+ void set_low_address() {
+ _flags |= field_low_address::encode(true);
+ }
+
bool worker_thread() const {
return field_worker_thread::decode(_flags);
}
@@ -91,6 +98,10 @@ public:
bool no_reserve() const {
return field_no_reserve::decode(_flags);
}
+
+ bool low_address() const {
+ return field_low_address::decode(_flags);
+ }
};
#endif // SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
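For clarity, the same five-flag layout expressed with plain masks (an illustrative equivalent of what the ZBitField typedefs encode; not part of the patch):

  #include <cstdint>

  const uint8_t WorkerThreadBit = 1u << 0;  // 0-0 Worker Thread Flag
  const uint8_t NonBlockingBit  = 1u << 1;  // 1-1 Non-Blocking Flag
  const uint8_t RelocationBit   = 1u << 2;  // 2-2 Relocation Flag
  const uint8_t NoReserveBit    = 1u << 3;  // 3-3 No Reserve Flag
  const uint8_t LowAddressBit   = 1u << 4;  // 4-4 Low Address Flag (new)

  bool low_address(uint8_t flags) {
    return (flags & LowAddressBit) != 0;    // equivalent to field_low_address::decode()
  }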
diff --git a/src/hotspot/share/gc/z/zArguments.cpp b/src/hotspot/share/gc/z/zArguments.cpp
index c891ee9cd3a..ce786cc30fd 100644
--- a/src/hotspot/share/gc/z/zArguments.cpp
+++ b/src/hotspot/share/gc/z/zArguments.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,6 +70,18 @@ void ZArguments::initialize() {
vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0");
}
+ // Select medium page size so that we can calculate the max reserve
+ ZHeuristics::set_medium_page_size();
+
+ // MinHeapSize/InitialHeapSize must be at least as large as the max reserve
+ const size_t max_reserve = ZHeuristics::max_reserve();
+ if (MinHeapSize < max_reserve) {
+ FLAG_SET_ERGO(MinHeapSize, max_reserve);
+ }
+ if (InitialHeapSize < max_reserve) {
+ FLAG_SET_ERGO(InitialHeapSize, max_reserve);
+ }
+
#ifdef COMPILER2
// Enable loop strip mining by default
if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
diff --git a/src/hotspot/share/gc/z/zBarrierSet.cpp b/src/hotspot/share/gc/z/zBarrierSet.cpp
index 28535d99391..94b9554b54b 100644
--- a/src/hotspot/share/gc/z/zBarrierSet.cpp
+++ b/src/hotspot/share/gc/z/zBarrierSet.cpp
@@ -95,3 +95,7 @@ void ZBarrierSet::on_thread_detach(Thread* thread) {
// Flush and free any remaining mark stacks
ZHeap::heap()->mark_flush_and_free(thread);
}
+
+void ZBarrierSet::print_on(outputStream* st) const {
+ st->print_cr("ZBarrierSet");
+}
diff --git a/src/hotspot/share/gc/z/zBarrierSet.hpp b/src/hotspot/share/gc/z/zBarrierSet.hpp
index d70b82e06bf..ebb80e106af 100644
--- a/src/hotspot/share/gc/z/zBarrierSet.hpp
+++ b/src/hotspot/share/gc/z/zBarrierSet.hpp
@@ -40,7 +40,7 @@ public:
virtual void on_thread_attach(Thread* thread);
virtual void on_thread_detach(Thread* thread);
- virtual void print_on(outputStream* st) const {}
+ virtual void print_on(outputStream* st) const;
template <DecoratorSet decorators, typename BarrierSetT>
class AccessBarrier : public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
diff --git a/src/hotspot/share/gc/z/zCPU.cpp b/src/hotspot/share/gc/z/zCPU.cpp
index 37b1d43e2dc..c3159d401c5 100644
--- a/src/hotspot/share/gc/z/zCPU.cpp
+++ b/src/hotspot/share/gc/z/zCPU.cpp
@@ -22,8 +22,8 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zCPU.inline.hpp"
-#include "logging/log.hpp"
#include "memory/padded.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.inline.hpp"
@@ -46,9 +46,9 @@ void ZCPU::initialize() {
_affinity[i]._thread = ZCPU_UNKNOWN_AFFINITY;
}
- log_info(gc, init)("CPUs: %u total, %u available",
- os::processor_count(),
- os::initial_active_processor_count());
+ log_info_p(gc, init)("CPUs: %u total, %u available",
+ os::processor_count(),
+ os::initial_active_processor_count());
}
uint32_t ZCPU::id_slow() {
diff --git a/src/hotspot/share/gc/z/zCollectedHeap.cpp b/src/hotspot/share/gc/z/zCollectedHeap.cpp
index d1263f26785..80056a6cd84 100644
--- a/src/hotspot/share/gc/z/zCollectedHeap.cpp
+++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp
@@ -25,6 +25,8 @@
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zDirector.hpp"
+#include "gc/z/zDriver.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethod.hpp"
@@ -52,7 +54,6 @@ ZCollectedHeap::ZCollectedHeap() :
_heap(),
_director(new ZDirector()),
_driver(new ZDriver()),
- _uncommitter(new ZUncommitter()),
_stat(new ZStat()),
_runtime_workers() {}
@@ -78,11 +79,19 @@ void ZCollectedHeap::initialize_serviceability() {
_heap.serviceability_initialize();
}
+class ZStopConcurrentGCThreadClosure : public ThreadClosure {
+public:
+ virtual void do_thread(Thread* thread) {
+ if (thread->is_ConcurrentGC_thread() &&
+ !thread->is_GC_task_thread()) {
+ static_cast<ConcurrentGCThread*>(thread)->stop();
+ }
+ }
+};
+
void ZCollectedHeap::stop() {
- _director->stop();
- _driver->stop();
- _uncommitter->stop();
- _stat->stop();
+ ZStopConcurrentGCThreadClosure cl;
+ gc_threads_do(&cl);
}
SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
@@ -278,9 +287,8 @@ jlong ZCollectedHeap::millis_since_last_gc() {
void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
tc->do_thread(_director);
tc->do_thread(_driver);
- tc->do_thread(_uncommitter);
tc->do_thread(_stat);
- _heap.worker_threads_do(tc);
+ _heap.threads_do(tc);
_runtime_workers.threads_do(tc);
}
@@ -305,39 +313,30 @@ void ZCollectedHeap::print_on(outputStream* st) const {
}
void ZCollectedHeap::print_on_error(outputStream* st) const {
+ st->print_cr("ZGC Globals:");
+ st->print_cr(" GlobalPhase: %u (%s)", ZGlobalPhase, ZGlobalPhaseToString());
+ st->print_cr(" GlobalSeqNum: %u", ZGlobalSeqNum);
+ st->print_cr(" Offset Max: " SIZE_FORMAT "%s (" PTR_FORMAT ")",
+ byte_size_in_exact_unit(ZAddressOffsetMax),
+ exact_unit_for_byte_size(ZAddressOffsetMax),
+ ZAddressOffsetMax);
+ st->print_cr(" Page Size Small: " SIZE_FORMAT "M", ZPageSizeSmall / M);
+ st->print_cr(" Page Size Medium: " SIZE_FORMAT "M", ZPageSizeMedium / M);
+ st->cr();
+ st->print_cr("ZGC Metadata Bits:");
+ st->print_cr(" Good: " PTR_FORMAT, ZAddressGoodMask);
+ st->print_cr(" Bad: " PTR_FORMAT, ZAddressBadMask);
+ st->print_cr(" WeakBad: " PTR_FORMAT, ZAddressWeakBadMask);
+ st->print_cr(" Marked: " PTR_FORMAT, ZAddressMetadataMarked);
+ st->print_cr(" Remapped: " PTR_FORMAT, ZAddressMetadataRemapped);
+ st->cr();
CollectedHeap::print_on_error(st);
-
- st->print_cr( "Heap");
- st->print_cr( " GlobalPhase: %u", ZGlobalPhase);
- st->print_cr( " GlobalSeqNum: %u", ZGlobalSeqNum);
- st->print_cr( " Offset Max: " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
- st->print_cr( " Page Size Small: " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
- st->print_cr( " Page Size Medium: " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
- st->print_cr( "Metadata Bits");
- st->print_cr( " Good: " PTR_FORMAT, ZAddressGoodMask);
- st->print_cr( " Bad: " PTR_FORMAT, ZAddressBadMask);
- st->print_cr( " WeakBad: " PTR_FORMAT, ZAddressWeakBadMask);
- st->print_cr( " Marked: " PTR_FORMAT, ZAddressMetadataMarked);
- st->print_cr( " Remapped: " PTR_FORMAT, ZAddressMetadataRemapped);
}
void ZCollectedHeap::print_extended_on(outputStream* st) const {
_heap.print_extended_on(st);
}
-void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
- _director->print_on(st);
- st->cr();
- _driver->print_on(st);
- st->cr();
- _uncommitter->print_on(st);
- st->cr();
- _stat->print_on(st);
- st->cr();
- _heap.print_worker_threads_on(st);
- _runtime_workers.print_threads_on(st);
-}
-
void ZCollectedHeap::print_tracing_info() const {
// Does nothing
}
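The rewritten stop() above no longer enumerates individual thread fields; any ThreadClosure passed to gc_threads_do() now also reaches the threads owned by the page allocator. A minimal sketch of another closure in the same style (illustrative only, not part of the patch):

  class ZCountGCThreadsClosure : public ThreadClosure {
  private:
    uint _count;
  public:
    ZCountGCThreadsClosure() : _count(0) {}
    virtual void do_thread(Thread* thread) {
      _count++;  // visits director, driver, stat, workers, runtime workers, ...
    }
    uint count() const { return _count; }
  };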
diff --git a/src/hotspot/share/gc/z/zCollectedHeap.hpp b/src/hotspot/share/gc/z/zCollectedHeap.hpp
index a0a887fb90a..a2851e67c3a 100644
--- a/src/hotspot/share/gc/z/zCollectedHeap.hpp
+++ b/src/hotspot/share/gc/z/zCollectedHeap.hpp
@@ -27,13 +27,13 @@
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/z/zBarrierSet.hpp"
-#include "gc/z/zDirector.hpp"
-#include "gc/z/zDriver.hpp"
#include "gc/z/zHeap.hpp"
#include "gc/z/zInitialize.hpp"
#include "gc/z/zRuntimeWorkers.hpp"
-#include "gc/z/zStat.hpp"
-#include "gc/z/zUncommitter.hpp"
+
+class ZDirector;
+class ZDriver;
+class ZStat;
class ZCollectedHeap : public CollectedHeap {
friend class VMStructs;
@@ -45,7 +45,6 @@ private:
ZHeap _heap;
ZDirector* _director;
ZDriver* _driver;
- ZUncommitter* _uncommitter;
ZStat* _stat;
ZRuntimeWorkers _runtime_workers;
@@ -120,7 +119,6 @@ public:
virtual void print_on(outputStream* st) const;
virtual void print_on_error(outputStream* st) const;
virtual void print_extended_on(outputStream* st) const;
- virtual void print_gc_threads_on(outputStream* st) const;
virtual void print_tracing_info() const;
virtual bool print_location(outputStream* st, void* addr) const;
diff --git a/src/hotspot/share/gc/z/zDirector.cpp b/src/hotspot/share/gc/z/zDirector.cpp
index b4be47a270a..345d202e063 100644
--- a/src/hotspot/share/gc/z/zDirector.cpp
+++ b/src/hotspot/share/gc/z/zDirector.cpp
@@ -73,10 +73,10 @@ bool ZDirector::rule_warmup() const {
// Perform GC if heap usage passes 10/20/30% and no other GC has been
// performed yet. This allows us to get some early samples of the GC
// duration, which is needed by the other rules.
- const size_t max_capacity = ZHeap::heap()->soft_max_capacity();
+ const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity();
const size_t used = ZHeap::heap()->used();
const double used_threshold_percent = (ZStatCycle::nwarmup_cycles() + 1) * 0.1;
- const size_t used_threshold = max_capacity * used_threshold_percent;
+ const size_t used_threshold = soft_max_capacity * used_threshold_percent;
log_debug(gc, director)("Rule: Warmup %.0f%%, Used: " SIZE_FORMAT "MB, UsedThreshold: " SIZE_FORMAT "MB",
used_threshold_percent * 100, used / M, used_threshold / M);
@@ -99,10 +99,10 @@ bool ZDirector::rule_allocation_rate() const {
// Calculate amount of free memory available to Java threads. Note that
// the heap reserve is not available to Java threads and is therefore not
// considered part of the free memory.
- const size_t max_capacity = ZHeap::heap()->soft_max_capacity();
+ const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity();
const size_t max_reserve = ZHeap::heap()->max_reserve();
const size_t used = ZHeap::heap()->used();
- const size_t free_with_reserve = max_capacity - MIN2(max_capacity, used);
+ const size_t free_with_reserve = soft_max_capacity - MIN2(soft_max_capacity, used);
const size_t free = free_with_reserve - MIN2(free_with_reserve, max_reserve);
// Calculate time until OOM given the max allocation rate and the amount
@@ -183,12 +183,12 @@ bool ZDirector::rule_high_usage() const {
// Calculate amount of free memory available to Java threads. Note that
// the heap reserve is not available to Java threads and is therefore not
// considered part of the free memory.
- const size_t max_capacity = ZHeap::heap()->soft_max_capacity();
+ const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity();
const size_t max_reserve = ZHeap::heap()->max_reserve();
const size_t used = ZHeap::heap()->used();
- const size_t free_with_reserve = max_capacity - used;
+ const size_t free_with_reserve = soft_max_capacity - MIN2(soft_max_capacity, used);
const size_t free = free_with_reserve - MIN2(free_with_reserve, max_reserve);
- const double free_percent = percent_of(free, max_capacity);
+ const double free_percent = percent_of(free, soft_max_capacity);
log_debug(gc, director)("Rule: High Usage, Free: " SIZE_FORMAT "MB(%.1f%%)",
free / M, free_percent);
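A worked example of the corrected free-memory calculation (illustrative numbers): with soft_max_capacity = 1024M, max_reserve = 34M, and used = 1100M (usage may legitimately exceed the soft limit):

  free_with_reserve = 1024M - MIN2(1024M, 1100M) = 0M   // clamped, no underflow
  free              = 0M - MIN2(0M, 34M)          = 0M
  free_percent      = percent_of(0M, 1024M)       = 0.0%

Without the MIN2 clamp added above, 1024M - 1100M would wrap around as an unsigned size_t.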
diff --git a/src/hotspot/share/gc/z/zFuture.hpp b/src/hotspot/share/gc/z/zFuture.hpp
index 5d4efeafe68..e64e0ef9c1a 100644
--- a/src/hotspot/share/gc/z/zFuture.hpp
+++ b/src/hotspot/share/gc/z/zFuture.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,6 @@ public:
ZFuture();
void set(T value);
- T peek();
T get();
};
diff --git a/src/hotspot/share/gc/z/zFuture.inline.hpp b/src/hotspot/share/gc/z/zFuture.inline.hpp
index 8d7ee5b23d7..26d58b69d49 100644
--- a/src/hotspot/share/gc/z/zFuture.inline.hpp
+++ b/src/hotspot/share/gc/z/zFuture.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,11 +41,6 @@ inline void ZFuture::set(T value) {
_sema.signal();
}
-template <typename T>
-inline T ZFuture<T>::peek() {
- return _value;
-}
-
template <typename T>
inline T ZFuture<T>::get() {
// Wait for notification
diff --git a/src/hotspot/share/gc/z/zGlobals.cpp b/src/hotspot/share/gc/z/zGlobals.cpp
index 366a79f59e2..0cb9c598423 100644
--- a/src/hotspot/share/gc/z/zGlobals.cpp
+++ b/src/hotspot/share/gc/z/zGlobals.cpp
@@ -54,3 +54,19 @@ uintptr_t ZAddressMetadataMarked0;
uintptr_t ZAddressMetadataMarked1;
uintptr_t ZAddressMetadataRemapped;
uintptr_t ZAddressMetadataFinalizable;
+
+const char* ZGlobalPhaseToString() {
+ switch (ZGlobalPhase) {
+ case ZPhaseMark:
+ return "Mark";
+
+ case ZPhaseMarkCompleted:
+ return "MarkCompleted";
+
+ case ZPhaseRelocate:
+ return "Relocate";
+
+ default:
+ return "Unknown";
+ }
+}
diff --git a/src/hotspot/share/gc/z/zGlobals.hpp b/src/hotspot/share/gc/z/zGlobals.hpp
index 1d2315ccf8f..138b9486f03 100644
--- a/src/hotspot/share/gc/z/zGlobals.hpp
+++ b/src/hotspot/share/gc/z/zGlobals.hpp
@@ -36,6 +36,7 @@ extern uint32_t ZGlobalPhase;
const uint32_t ZPhaseMark = 0;
const uint32_t ZPhaseMarkCompleted = 1;
const uint32_t ZPhaseRelocate = 2;
+const char* ZGlobalPhaseToString();
// Global sequence number
extern uint32_t ZGlobalSeqNum;
diff --git a/src/hotspot/share/gc/z/zHeap.cpp b/src/hotspot/share/gc/z/zHeap.cpp
index 68167d35827..73bdd1dea84 100644
--- a/src/hotspot/share/gc/z/zHeap.cpp
+++ b/src/hotspot/share/gc/z/zHeap.cpp
@@ -27,6 +27,7 @@
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
+#include "gc/z/zHeuristics.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
@@ -57,7 +58,7 @@ ZHeap* ZHeap::_heap = NULL;
ZHeap::ZHeap() :
_workers(),
_object_allocator(),
- _page_allocator(&_workers, heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
+ _page_allocator(&_workers, MinHeapSize, InitialHeapSize, MaxHeapSize, ZHeuristics::max_reserve()),
_page_table(),
_forwarding_table(),
_mark(&_workers, &_page_table),
@@ -66,32 +67,13 @@ ZHeap::ZHeap() :
_relocate(&_workers),
_relocation_set(),
_unload(&_workers),
- _serviceability(heap_min_size(), heap_max_size()) {
+ _serviceability(min_capacity(), max_capacity()) {
// Install global heap instance
assert(_heap == NULL, "Already initialized");
_heap = this;
// Update statistics
- ZStatHeap::set_at_initialize(heap_min_size(), heap_max_size(), heap_max_reserve_size());
-}
-
-size_t ZHeap::heap_min_size() const {
- return MinHeapSize;
-}
-
-size_t ZHeap::heap_initial_size() const {
- return InitialHeapSize;
-}
-
-size_t ZHeap::heap_max_size() const {
- return MaxHeapSize;
-}
-
-size_t ZHeap::heap_max_reserve_size() const {
- // Reserve one small page per worker plus one shared medium page. This is still just
- // an estimate and doesn't guarantee that we can't run out of memory during relocation.
- const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
- return MIN2(max_reserve_size, heap_max_size());
+ ZStatHeap::set_at_initialize(min_capacity(), max_capacity(), max_reserve());
}
bool ZHeap::is_initialized() const {
@@ -198,14 +180,11 @@ void ZHeap::set_boost_worker_threads(bool boost) {
_workers.set_boost(boost);
}
-void ZHeap::worker_threads_do(ThreadClosure* tc) const {
+void ZHeap::threads_do(ThreadClosure* tc) const {
+ _page_allocator.threads_do(tc);
_workers.threads_do(tc);
}
-void ZHeap::print_worker_threads_on(outputStream* st) const {
- _workers.print_threads_on(st);
-}
-
void ZHeap::out_of_memory() {
ResourceMark rm;
@@ -241,10 +220,6 @@ void ZHeap::free_page(ZPage* page, bool reclaimed) {
_page_allocator.free_page(page, reclaimed);
}
-uint64_t ZHeap::uncommit(uint64_t delay) {
- return _page_allocator.uncommit(delay);
-}
-
void ZHeap::flip_to_marked() {
ZVerifyViewsFlip flip(&_page_allocator);
ZAddress::flip_to_marked();
@@ -505,6 +480,7 @@ void ZHeap::print_extended_on(outputStream* st) const {
_page_allocator.enable_deferred_delete();
// Print all pages
+ st->print_cr("ZGC Page Table:");
ZPageTableIterator iter(&_page_table);
for (ZPage* page; iter.next(&page);) {
page->print_on(st);
@@ -512,8 +488,6 @@ void ZHeap::print_extended_on(outputStream* st) const {
// Allow pages to be deleted
_page_allocator.enable_deferred_delete();
-
- st->cr();
}
bool ZHeap::print_location(outputStream* st, uintptr_t addr) const {
diff --git a/src/hotspot/share/gc/z/zHeap.hpp b/src/hotspot/share/gc/z/zHeap.hpp
index 0b91ef7d9fa..2f488bfca68 100644
--- a/src/hotspot/share/gc/z/zHeap.hpp
+++ b/src/hotspot/share/gc/z/zHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -60,11 +60,6 @@ private:
ZUnload _unload;
ZServiceability _serviceability;
- size_t heap_min_size() const;
- size_t heap_initial_size() const;
- size_t heap_max_size() const;
- size_t heap_max_reserve_size() const;
-
void flip_to_marked();
void flip_to_remapped();
@@ -99,12 +94,11 @@ public:
bool is_in(uintptr_t addr) const;
uint32_t hash_oop(uintptr_t addr) const;
- // Workers
+ // Threads
uint nconcurrent_worker_threads() const;
uint nconcurrent_no_boost_worker_threads() const;
void set_boost_worker_threads(bool boost);
- void worker_threads_do(ThreadClosure* tc) const;
- void print_worker_threads_on(outputStream* st) const;
+ void threads_do(ThreadClosure* tc) const;
// Reference processing
ReferenceDiscoverer* reference_discoverer();
@@ -118,9 +112,6 @@ public:
void undo_alloc_page(ZPage* page);
void free_page(ZPage* page, bool reclaimed);
- // Uncommit memory
- uint64_t uncommit(uint64_t delay);
-
// Object allocation
uintptr_t alloc_tlab(size_t size);
uintptr_t alloc_object(size_t size);
diff --git a/src/hotspot/share/gc/z/zHeuristics.cpp b/src/hotspot/share/gc/z/zHeuristics.cpp
index a705ae9fd25..3092223271e 100644
--- a/src/hotspot/share/gc/z/zHeuristics.cpp
+++ b/src/hotspot/share/gc/z/zHeuristics.cpp
@@ -22,10 +22,10 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zCPU.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeuristics.hpp"
-#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -49,13 +49,18 @@ void ZHeuristics::set_medium_page_size() {
ZObjectSizeLimitMedium = ZPageSizeMedium / 8;
ZObjectAlignmentMediumShift = (int)ZPageSizeMediumShift - 13;
ZObjectAlignmentMedium = 1 << ZObjectAlignmentMediumShift;
-
- log_info(gc, init)("Medium Page Size: " SIZE_FORMAT "M", ZPageSizeMedium / M);
- } else {
- log_info(gc, init)("Medium Page Size: N/A");
}
}
+size_t ZHeuristics::max_reserve() {
+ // Reserve one small page per worker plus one shared medium page. This is
+ // still just an estimate and doesn't guarantee that we can't run out of
+ // memory during relocation.
+ const uint nworkers = MAX2(ParallelGCThreads, ConcGCThreads);
+ const size_t reserve = (nworkers * ZPageSizeSmall) + ZPageSizeMedium;
+ return MIN2(MaxHeapSize, reserve);
+}
+
bool ZHeuristics::use_per_cpu_shared_small_pages() {
// Use per-CPU shared small pages only if these pages occupy at most 3.125%
// of the max heap size. Otherwise fall back to using a single shared small
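A worked example of the new max_reserve() (illustrative numbers): with ParallelGCThreads = 20, ConcGCThreads = 5, ZPageSizeSmall = 2M, and ZPageSizeMedium = 32M:

  nworkers = MAX2(20, 5)      = 20
  reserve  = (20 * 2M) + 32M  = 72M
  result   = MIN2(MaxHeapSize, 72M)

Because the reserve now scales with the configured worker counts, ZArguments::initialize() (above) bumps MinHeapSize and InitialHeapSize up to it when needed.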
diff --git a/src/hotspot/share/gc/z/zHeuristics.hpp b/src/hotspot/share/gc/z/zHeuristics.hpp
index 17fb9c1192e..b8bec8e99c2 100644
--- a/src/hotspot/share/gc/z/zHeuristics.hpp
+++ b/src/hotspot/share/gc/z/zHeuristics.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,8 @@ class ZHeuristics : public AllStatic {
public:
static void set_medium_page_size();
+ static size_t max_reserve();
+
static bool use_per_cpu_shared_small_pages();
static uint nparallel_workers();
diff --git a/src/hotspot/share/gc/z/zInitialize.cpp b/src/hotspot/share/gc/z/zInitialize.cpp
index 800038d89fa..adeb6e9772a 100644
--- a/src/hotspot/share/gc/z/zInitialize.cpp
+++ b/src/hotspot/share/gc/z/zInitialize.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,6 @@ ZInitialize::ZInitialize(ZBarrierSet* barrier_set) {
ZThreadLocalAllocBuffer::initialize();
ZTracer::initialize();
ZLargePages::initialize();
- ZHeuristics::set_medium_page_size();
ZBarrierSet::set_barrier_set(barrier_set);
initialize_os();
diff --git a/src/hotspot/share/gc/z/zLargePages.cpp b/src/hotspot/share/gc/z/zLargePages.cpp
index 214bf82deae..0a93b9ee6be 100644
--- a/src/hotspot/share/gc/z/zLargePages.cpp
+++ b/src/hotspot/share/gc/z/zLargePages.cpp
@@ -22,8 +22,8 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zLargePages.hpp"
-#include "logging/log.hpp"
#include "runtime/os.hpp"
ZLargePages::State ZLargePages::_state;
@@ -31,8 +31,8 @@ ZLargePages::State ZLargePages::_state;
void ZLargePages::initialize() {
initialize_platform();
- log_info(gc, init)("Memory: " JULONG_FORMAT "M", os::physical_memory() / M);
- log_info(gc, init)("Large Page Support: %s", to_string());
+ log_info_p(gc, init)("Memory: " JULONG_FORMAT "M", os::physical_memory() / M);
+ log_info_p(gc, init)("Large Page Support: %s", to_string());
}
const char* ZLargePages::to_string() {
diff --git a/src/hotspot/share/gc/z/zList.hpp b/src/hotspot/share/gc/z/zList.hpp
index 143540b4aaf..dcb20f97cbd 100644
--- a/src/hotspot/share/gc/z/zList.hpp
+++ b/src/hotspot/share/gc/z/zList.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -88,7 +88,7 @@ public:
void transfer(ZList<T>* list);
};
-template <typename T, bool forward>
+template <typename T, bool Forward>
class ZListIteratorImpl : public StackObj {
private:
const ZList<T>* const _list;
@@ -100,6 +100,17 @@ public:
bool next(T** elem);
};
+template <typename T, bool Forward>
+class ZListRemoveIteratorImpl : public StackObj {
+private:
+ ZList<T>* const _list;
+
+public:
+ ZListRemoveIteratorImpl(ZList<T>* list);
+
+ bool next(T** elem);
+};
+
// Iterator types
#define ZLIST_FORWARD true
#define ZLIST_REVERSE false
@@ -116,4 +127,10 @@ public:
ZListReverseIterator(const ZList<T>* list);
};
+template <typename T>
+class ZListRemoveIterator : public ZListRemoveIteratorImpl<T, ZLIST_FORWARD> {
+public:
+ ZListRemoveIterator(ZList<T>* list);
+};
+
#endif // SHARE_GC_Z_ZLIST_HPP
diff --git a/src/hotspot/share/gc/z/zList.inline.hpp b/src/hotspot/share/gc/z/zList.inline.hpp
index c37c9f98bb2..660d868f5a6 100644
--- a/src/hotspot/share/gc/z/zList.inline.hpp
+++ b/src/hotspot/share/gc/z/zList.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -205,16 +205,16 @@ inline void ZList::transfer(ZList* list) {
}
}
-template <typename T, bool forward>
-inline ZListIteratorImpl<T, forward>::ZListIteratorImpl(const ZList<T>* list) :
+template <typename T, bool Forward>
+inline ZListIteratorImpl<T, Forward>::ZListIteratorImpl(const ZList<T>* list) :
_list(list),
- _next(forward ? list->first() : list->last()) {}
+ _next(Forward ? list->first() : list->last()) {}
-template <typename T, bool forward>
-inline bool ZListIteratorImpl<T, forward>::next(T** elem) {
+template <typename T, bool Forward>
+inline bool ZListIteratorImpl<T, Forward>::next(T** elem) {
if (_next != NULL) {
*elem = _next;
- _next = forward ? _list->next(_next) : _list->prev(_next);
+ _next = Forward ? _list->next(_next) : _list->prev(_next);
return true;
}
@@ -222,6 +222,16 @@ inline bool ZListIteratorImpl::next(T** elem) {
return false;
}
+template <typename T, bool Forward>
+inline ZListRemoveIteratorImpl<T, Forward>::ZListRemoveIteratorImpl(ZList<T>* list) :
+ _list(list) {}
+
+template <typename T, bool Forward>
+inline bool ZListRemoveIteratorImpl<T, Forward>::next(T** elem) {
+ *elem = Forward ? _list->remove_first() : _list->remove_last();
+ return *elem != NULL;
+}
+
template <typename T>
inline ZListIterator<T>::ZListIterator(const ZList<T>* list) :
ZListIteratorImpl<T, ZLIST_FORWARD>(list) {}
@@ -230,4 +240,8 @@ template
inline ZListReverseIterator<T>::ZListReverseIterator(const ZList<T>* list) :
ZListIteratorImpl<T, ZLIST_REVERSE>(list) {}
+template <typename T>
+inline ZListRemoveIterator<T>::ZListRemoveIterator(ZList<T>* list) :
+ ZListRemoveIteratorImpl<T, ZLIST_FORWARD>(list) {}
+
#endif // SHARE_GC_Z_ZLIST_INLINE_HPP
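A usage sketch of the new remove-iterator (illustrative, not part of the patch; process is a hypothetical helper). Unlike ZListIterator, it unlinks each element as it goes, leaving the list empty when done:

  ZList<ZPage> pages;
  // ... pages populated elsewhere ...
  ZListRemoveIterator<ZPage> iter(&pages);
  for (ZPage* page; iter.next(&page);) {
    // page has already been removed from the list at this point
    process(page);  // hypothetical
  }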
diff --git a/src/hotspot/share/gc/z/zLock.hpp b/src/hotspot/share/gc/z/zLock.hpp
index 5b764a3d2b8..b3f2d8568cf 100644
--- a/src/hotspot/share/gc/z/zLock.hpp
+++ b/src/hotspot/share/gc/z/zLock.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,6 +52,20 @@ public:
bool is_owned() const;
};
+class ZConditionLock {
+private:
+ os::PlatformMonitor _lock;
+
+public:
+ void lock();
+ bool try_lock();
+ void unlock();
+
+ bool wait(uint64_t millis = 0);
+ void notify();
+ void notify_all();
+};
+
template <typename T>
class ZLocker : public StackObj {
private:
diff --git a/src/hotspot/share/gc/z/zLock.inline.hpp b/src/hotspot/share/gc/z/zLock.inline.hpp
index ce70397b48f..b0600da8338 100644
--- a/src/hotspot/share/gc/z/zLock.inline.hpp
+++ b/src/hotspot/share/gc/z/zLock.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -77,6 +77,30 @@ inline bool ZReentrantLock::is_owned() const {
return owner == thread;
}
+inline void ZConditionLock::lock() {
+ _lock.lock();
+}
+
+inline bool ZConditionLock::try_lock() {
+ return _lock.try_lock();
+}
+
+inline void ZConditionLock::unlock() {
+ _lock.unlock();
+}
+
+inline bool ZConditionLock::wait(uint64_t millis) {
+ return _lock.wait(millis) == OS_OK;
+}
+
+inline void ZConditionLock::notify() {
+ _lock.notify();
+}
+
+inline void ZConditionLock::notify_all() {
+ _lock.notify_all();
+}
+
template <typename T>
inline ZLocker<T>::ZLocker(T* lock) :
_lock(lock) {
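A minimal sketch of the monitor pattern the new ZConditionLock enables (illustrative; the fields and functions are hypothetical):

  ZConditionLock _lock;
  bool           _ready = false;

  void produce() {
    ZLocker<ZConditionLock> locker(&_lock);
    _ready = true;
    _lock.notify_all();          // wake all waiters
  }

  void consume() {
    ZLocker<ZConditionLock> locker(&_lock);
    while (!_ready) {
      _lock.wait();              // releases the lock while blocked
    }
  }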
diff --git a/src/hotspot/share/gc/z/zMarkStackAllocator.cpp b/src/hotspot/share/gc/z/zMarkStackAllocator.cpp
index b965d68010a..8b7d2642188 100644
--- a/src/hotspot/share/gc/z/zMarkStackAllocator.cpp
+++ b/src/hotspot/share/gc/z/zMarkStackAllocator.cpp
@@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkStackAllocator.hpp"
@@ -44,7 +45,7 @@ ZMarkStackSpace::ZMarkStackSpace() :
const size_t alignment = (size_t)os::vm_allocation_granularity();
const uintptr_t addr = (uintptr_t)os::reserve_memory(size, NULL, alignment, mtGC);
if (addr == 0) {
- log_error(gc, marking)("Failed to reserve address space for mark stacks");
+ log_error_pd(gc, marking)("Failed to reserve address space for mark stacks");
return;
}
diff --git a/src/hotspot/share/gc/z/zMemory.cpp b/src/hotspot/share/gc/z/zMemory.cpp
index 2b13c9b3650..616d31f7fa4 100644
--- a/src/hotspot/share/gc/z/zMemory.cpp
+++ b/src/hotspot/share/gc/z/zMemory.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
#include "precompiled.hpp"
#include "gc/z/zList.inline.hpp"
+#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMemory.inline.hpp"
#include "memory/allocation.inline.hpp"
@@ -86,6 +87,8 @@ void ZMemoryManager::register_callbacks(const Callbacks& callbacks) {
}
uintptr_t ZMemoryManager::alloc_from_front(size_t size) {
+ ZLocker<ZLock> locker(&_lock);
+
ZListIterator<ZMemory> iter(&_freelist);
for (ZMemory* area; iter.next(&area);) {
if (area->size() >= size) {
@@ -109,6 +112,8 @@ uintptr_t ZMemoryManager::alloc_from_front(size_t size) {
}
uintptr_t ZMemoryManager::alloc_from_front_at_most(size_t size, size_t* allocated) {
+ ZLocker<ZLock> locker(&_lock);
+
ZMemory* area = _freelist.first();
if (area != NULL) {
if (area->size() <= size) {
@@ -133,6 +138,8 @@ uintptr_t ZMemoryManager::alloc_from_front_at_most(size_t size, size_t* allocate
}
uintptr_t ZMemoryManager::alloc_from_back(size_t size) {
+ ZLocker<ZLock> locker(&_lock);
+
ZListReverseIterator<ZMemory> iter(&_freelist);
for (ZMemory* area; iter.next(&area);) {
if (area->size() >= size) {
@@ -155,6 +162,8 @@ uintptr_t ZMemoryManager::alloc_from_back(size_t size) {
}
uintptr_t ZMemoryManager::alloc_from_back_at_most(size_t size, size_t* allocated) {
+ ZLocker<ZLock> locker(&_lock);
+
ZMemory* area = _freelist.last();
if (area != NULL) {
if (area->size() <= size) {
@@ -181,6 +190,8 @@ void ZMemoryManager::free(uintptr_t start, size_t size) {
assert(start != UINTPTR_MAX, "Invalid address");
const uintptr_t end = start + size;
+ ZLocker<ZLock> locker(&_lock);
+
ZListIterator<ZMemory> iter(&_freelist);
for (ZMemory* area; iter.next(&area);) {
if (start < area->start()) {
diff --git a/src/hotspot/share/gc/z/zMemory.hpp b/src/hotspot/share/gc/z/zMemory.hpp
index b0255b378c3..c20815035ed 100644
--- a/src/hotspot/share/gc/z/zMemory.hpp
+++ b/src/hotspot/share/gc/z/zMemory.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#define SHARE_GC_Z_ZMEMORY_HPP
#include "gc/z/zList.hpp"
+#include "gc/z/zLock.hpp"
#include "memory/allocation.hpp"
class ZMemory : public CHeapObj<mtGC> {
@@ -65,6 +66,7 @@ public:
};
private:
+ ZLock _lock;
ZList<ZMemory> _freelist;
Callbacks _callbacks;
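With the embedded ZLock, each allocation and free operation is now self-synchronizing, so for example an uncommitting thread can return memory while another thread allocates. A hedged sketch (illustrative; assumes a default-constructible manager and hypothetical size variables):

  ZMemoryManager manager;
  const uintptr_t start = manager.alloc_from_front(size);  // takes _lock internally
  // ... concurrently on another thread ...
  manager.free(other_start, other_size);                   // takes _lock, coalesces with neighbors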
diff --git a/src/hotspot/share/gc/z/zNUMA.cpp b/src/hotspot/share/gc/z/zNUMA.cpp
index 51a0012ba83..6871e4434de 100644
--- a/src/hotspot/share/gc/z/zNUMA.cpp
+++ b/src/hotspot/share/gc/z/zNUMA.cpp
@@ -22,17 +22,17 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zNUMA.hpp"
-#include "logging/log.hpp"
bool ZNUMA::_enabled;
void ZNUMA::initialize() {
initialize_platform();
- log_info(gc, init)("NUMA Support: %s", to_string());
+ log_info_p(gc, init)("NUMA Support: %s", to_string());
if (_enabled) {
- log_info(gc, init)("NUMA Nodes: %u", count());
+ log_info_p(gc, init)("NUMA Nodes: %u", count());
}
}
diff --git a/src/hotspot/share/gc/z/zPage.cpp b/src/hotspot/share/gc/z/zPage.cpp
index c919e315d0a..8de20fca7fe 100644
--- a/src/hotspot/share/gc/z/zPage.cpp
+++ b/src/hotspot/share/gc/z/zPage.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -58,6 +58,7 @@ ZPage::~ZPage() {}
void ZPage::assert_initialized() const {
assert(!_virtual.is_null(), "Should not be null");
assert(!_physical.is_null(), "Should not be null");
+ assert(_virtual.size() == _physical.size(), "Virtual/Physical size mismatch");
assert((_type == ZPageTypeSmall && size() == ZPageSizeSmall) ||
(_type == ZPageTypeMedium && size() == ZPageSizeMedium) ||
(_type == ZPageTypeLarge && is_aligned(size(), ZGranuleSize)),
@@ -99,6 +100,27 @@ ZPage* ZPage::split(uint8_t type, size_t size) {
return page;
}
+ZPage* ZPage::split_committed() {
+ // Split any committed part of this page into a separate page,
+ // leaving this page with only uncommitted physical memory.
+ const ZPhysicalMemory pmem = _physical.split_committed();
+ if (pmem.is_null()) {
+ // Nothing committed
+ return NULL;
+ }
+
+ assert(!_physical.is_null(), "Should not be null");
+
+ // Resize this page
+ const ZVirtualMemory vmem = _virtual.split(pmem.size());
+ _type = type_from_size(_virtual.size());
+ _top = start();
+ _livemap.resize(object_max_count());
+
+ // Create new page
+ return new ZPage(vmem, pmem);
+}
+
void ZPage::print_on(outputStream* out) const {
out->print_cr(" %-6s " PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %s%s",
type_to_string(), start(), top(), end(),
diff --git a/src/hotspot/share/gc/z/zPage.hpp b/src/hotspot/share/gc/z/zPage.hpp
index f172596073c..7115ef4536e 100644
--- a/src/hotspot/share/gc/z/zPage.hpp
+++ b/src/hotspot/share/gc/z/zPage.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -69,17 +69,15 @@ public:
uintptr_t top() const;
size_t remaining() const;
- const ZPhysicalMemory& physical_memory() const;
const ZVirtualMemory& virtual_memory() const;
+ const ZPhysicalMemory& physical_memory() const;
+ ZPhysicalMemory& physical_memory();
uint8_t numa_id();
bool is_allocating() const;
bool is_relocatable() const;
- bool is_mapped() const;
- void set_pre_mapped();
-
uint64_t last_used() const;
void set_last_used();
@@ -88,6 +86,7 @@ public:
ZPage* retype(uint8_t type);
ZPage* split(size_t size);
ZPage* split(uint8_t type, size_t size);
+ ZPage* split_committed();
bool is_in(uintptr_t addr) const;
diff --git a/src/hotspot/share/gc/z/zPage.inline.hpp b/src/hotspot/share/gc/z/zPage.inline.hpp
index 7fdb452560c..b57727a1111 100644
--- a/src/hotspot/share/gc/z/zPage.inline.hpp
+++ b/src/hotspot/share/gc/z/zPage.inline.hpp
@@ -126,12 +126,16 @@ inline size_t ZPage::remaining() const {
return end() - top();
}
+inline const ZVirtualMemory& ZPage::virtual_memory() const {
+ return _virtual;
+}
+
inline const ZPhysicalMemory& ZPage::physical_memory() const {
return _physical;
}
-inline const ZVirtualMemory& ZPage::virtual_memory() const {
- return _virtual;
+inline ZPhysicalMemory& ZPage::physical_memory() {
+ return _physical;
}
inline uint8_t ZPage::numa_id() {
@@ -150,17 +154,6 @@ inline bool ZPage::is_relocatable() const {
return _seqnum < ZGlobalSeqNum;
}
-inline bool ZPage::is_mapped() const {
- return _seqnum > 0;
-}
-
-inline void ZPage::set_pre_mapped() {
- // The _seqnum variable is also used to signal that the virtual and physical
- // memory has been mapped. So, we need to set it to non-zero when the memory
- // has been pre-mapped.
- _seqnum = 1;
-}
-
inline uint64_t ZPage::last_used() const {
return _last_used;
}
diff --git a/src/hotspot/share/gc/z/zPageAllocator.cpp b/src/hotspot/share/gc/z/zPageAllocator.cpp
index 02808f1264b..8b82367dd2c 100644
--- a/src/hotspot/share/gc/z/zPageAllocator.cpp
+++ b/src/hotspot/share/gc/z/zPageAllocator.cpp
@@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
@@ -30,42 +31,57 @@
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
-#include "gc/z/zPageCache.inline.hpp"
+#include "gc/z/zPageCache.hpp"
#include "gc/z/zSafeDelete.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zTracer.inline.hpp"
+#include "gc/z/zUncommitter.hpp"
+#include "gc/z/zUnmapper.hpp"
#include "gc/z/zWorkers.hpp"
#include "jfr/jfrEvents.hpp"
+#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
static const ZStatCounter ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCounter ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
-static const ZStatCounter ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");
-class ZPageAllocRequest : public StackObj {
- friend class ZList<ZPageAllocRequest>;
+enum ZPageAllocationStall {
+ ZPageAllocationStallSuccess,
+ ZPageAllocationStallFailed,
+ ZPageAllocationStallStartGC
+};
+
+class ZPageAllocation : public StackObj {
+ friend class ZList<ZPageAllocation>;
private:
- const uint8_t _type;
- const size_t _size;
- const ZAllocationFlags _flags;
- const unsigned int _total_collections;
- ZListNode<ZPageAllocRequest> _node;
- ZFuture<ZPage*> _result;
+ const uint8_t _type;
+ const size_t _size;
+ const ZAllocationFlags _flags;
+ const uint32_t _seqnum;
+ size_t _flushed;
+ size_t _committed;
+ ZList<ZPage> _pages;
+ ZListNode<ZPageAllocation> _node;
+ ZFuture<ZPageAllocationStall> _stall_result;
public:
- ZPageAllocRequest(uint8_t type, size_t size, ZAllocationFlags flags, unsigned int total_collections) :
+ ZPageAllocation(uint8_t type, size_t size, ZAllocationFlags flags) :
_type(type),
_size(size),
_flags(flags),
- _total_collections(total_collections),
+ _seqnum(ZGlobalSeqNum),
+ _flushed(0),
+ _committed(0),
+ _pages(),
_node(),
- _result() {}
+ _stall_result() {}
uint8_t type() const {
return _type;
@@ -79,82 +95,92 @@ public:
return _flags;
}
- unsigned int total_collections() const {
- return _total_collections;
+ uint32_t seqnum() const {
+ return _seqnum;
}
- ZPage* peek() {
- return _result.peek();
+ size_t flushed() const {
+ return _flushed;
}
- ZPage* wait() {
- return _result.get();
+ void set_flushed(size_t flushed) {
+ _flushed = flushed;
}
- void satisfy(ZPage* page) {
- _result.set(page);
+ size_t committed() const {
+ return _committed;
+ }
+
+ void set_committed(size_t committed) {
+ _committed = committed;
+ }
+
+ ZPageAllocationStall wait() {
+ return _stall_result.get();
+ }
+
+ ZList<ZPage>* pages() {
+ return &_pages;
+ }
+
+ void satisfy(ZPageAllocationStall result) {
+ _stall_result.set(result);
}
};
-ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1;
-
ZPageAllocator::ZPageAllocator(ZWorkers* workers,
size_t min_capacity,
size_t initial_capacity,
size_t max_capacity,
size_t max_reserve) :
_lock(),
- _virtual(max_capacity),
- _physical(),
_cache(),
+ _virtual(max_capacity),
+ _physical(max_capacity),
_min_capacity(min_capacity),
_max_capacity(max_capacity),
_max_reserve(max_reserve),
_current_max_capacity(max_capacity),
_capacity(0),
+ _claimed(0),
+ _used(0),
_used_high(0),
_used_low(0),
- _used(0),
_allocated(0),
_reclaimed(0),
- _queue(),
+ _stalled(),
_satisfied(),
+ _unmapper(new ZUnmapper(this)),
+ _uncommitter(new ZUncommitter(this)),
_safe_delete(),
- _uncommit(false),
_initialized(false) {
if (!_virtual.is_initialized() || !_physical.is_initialized()) {
return;
}
- log_info(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
- log_info(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
- log_info(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
- log_info(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
- log_info(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");
+ log_info_p(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
+ log_info_p(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
+ log_info_p(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
+ log_info_p(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
+ if (ZPageSizeMedium > 0) {
+ log_info_p(gc, init)("Medium Page Size: " SIZE_FORMAT "M", ZPageSizeMedium / M);
+ } else {
+ log_info_p(gc, init)("Medium Page Size: N/A");
+ }
+ log_info_p(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");
// Warn if system limits could stop us from reaching max capacity
_physical.warn_commit_limits(max_capacity);
- // Commit initial capacity
- _capacity = _physical.commit(initial_capacity);
- if (_capacity != initial_capacity) {
- log_error(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
- return;
- }
-
- // If uncommit is not explicitly disabled, max capacity is greater than
- // min capacity, and uncommit is supported by the platform, then we will
- // try to uncommit unused memory.
- _uncommit = ZUncommit && (max_capacity > min_capacity) && _physical.supports_uncommit();
- if (_uncommit) {
- log_info(gc, init)("Uncommit: Enabled, Delay: " UINTX_FORMAT "s", ZUncommitDelay);
- } else {
- log_info(gc, init)("Uncommit: Disabled");
- }
+ // Check if uncommit should and can be enabled
+ _physical.try_enable_uncommit(min_capacity, max_capacity);
// Pre-map initial capacity
- prime_cache(workers, initial_capacity);
+ if (!prime_cache(workers, initial_capacity)) {
+ log_error_p(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
+ return;
+ }
// Successfully initialized
_initialized = true;
@@ -189,21 +215,16 @@ public:
}
};
-void ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
- // Allocate physical memory
- const ZPhysicalMemory pmem = _physical.alloc(size);
- guarantee(!pmem.is_null(), "Invalid size");
+bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
+ ZAllocationFlags flags;
- // Allocate virtual memory
- const ZVirtualMemory vmem = _virtual.alloc(size, true /* alloc_from_front */);
- guarantee(!vmem.is_null(), "Invalid size");
+ flags.set_non_blocking();
+ flags.set_low_address();
- // Allocate page
- ZPage* const page = new ZPage(vmem, pmem);
-
- // Map page
- map_page(page);
- page->set_pre_mapped();
+ ZPage* const page = alloc_page(ZPageTypeLarge, size, flags);
+ if (page == NULL) {
+ return false;
+ }
if (AlwaysPreTouch) {
// Pre-touch page
@@ -211,9 +232,9 @@ void ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
workers->run_parallel(&task);
}
- // Add page to cache
- page->set_last_used();
- _cache.free_page(page);
+ free_page(page, false /* reclaimed */);
+
+ return true;
}
bool ZPageAllocator::is_initialized() const {
@@ -230,11 +251,13 @@ size_t ZPageAllocator::max_capacity() const {
size_t ZPageAllocator::soft_max_capacity() const {
// Note that SoftMaxHeapSize is a manageable flag
- return MIN2(SoftMaxHeapSize, _current_max_capacity);
+ const size_t soft_max_capacity = Atomic::load(&SoftMaxHeapSize);
+ const size_t current_max_capacity = Atomic::load(&_current_max_capacity);
+ return MIN2(soft_max_capacity, current_max_capacity);
}
size_t ZPageAllocator::capacity() const {
- return _capacity;
+ return Atomic::load(&_capacity);
}
size_t ZPageAllocator::max_reserve() const {
@@ -250,11 +273,15 @@ size_t ZPageAllocator::used_low() const {
}
size_t ZPageAllocator::used() const {
- return _used;
+ return Atomic::load(&_used);
}
size_t ZPageAllocator::unused() const {
- const ssize_t unused = (ssize_t)_capacity - (ssize_t)_used - (ssize_t)_max_reserve;
+ const ssize_t capacity = (ssize_t)Atomic::load(&_capacity);
+ const ssize_t used = (ssize_t)Atomic::load(&_used);
+ const ssize_t claimed = (ssize_t)Atomic::load(&_claimed);
+ const ssize_t max_reserve = (ssize_t)_max_reserve;
+ const ssize_t unused = capacity - used - claimed - max_reserve;
return unused > 0 ? (size_t)unused : 0;
}
@@ -273,6 +300,40 @@ void ZPageAllocator::reset_statistics() {
_used_high = _used_low = _used;
}
+size_t ZPageAllocator::increase_capacity(size_t size) {
+ const size_t increased = MIN2(size, _current_max_capacity - _capacity);
+
+ if (increased > 0) {
+ // Update atomically since we have concurrent readers
+ Atomic::add(&_capacity, increased);
+
+ // Record time of last commit. When allocating, we prefer increasing
+ // the capacity over flushing the cache. That means there could be
+ // expired pages in the cache at this time. However, since we are
+ // increasing the capacity we are obviously in need of committed
+ // memory and should therefore not be uncommitting memory.
+ _cache.set_last_commit();
+ }
+
+ return increased;
+}
+
+void ZPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) {
+ // Update atomically since we have concurrent readers
+ Atomic::sub(&_capacity, size);
+
+ if (set_max_capacity) {
+ // Adjust current max capacity to avoid further attempts to increase capacity
+ log_error_p(gc)("Forced to lower max Java heap size from "
+ SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)",
+ _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
+ _capacity / M, percent_of(_capacity, _max_capacity));
+
+ // Update atomically since we have concurrent readers
+ Atomic::store(&_current_max_capacity, _capacity);
+ }
+}
+
void ZPageAllocator::increase_used(size_t size, bool relocation) {
if (relocation) {
// Allocating a page for the purpose of relocation has a
@@ -280,9 +341,11 @@ void ZPageAllocator::increase_used(size_t size, bool relocation) {
_reclaimed -= size;
}
_allocated += size;
- _used += size;
- if (_used > _used_high) {
- _used_high = _used;
+
+ // Update atomically since we have concurrent readers
+ const size_t used = Atomic::add(&_used, size);
+ if (used > _used_high) {
+ _used_high = used;
}
}
@@ -296,236 +359,335 @@ void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
} else {
_allocated -= size;
}
- _used -= size;
- if (_used < _used_low) {
- _used_low = _used;
+
+ // Update atomically since we have concurrent readers
+ const size_t used = Atomic::sub(&_used, size);
+ if (used < _used_low) {
+ _used_low = used;
}
}
-ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) {
- // Allocate virtual memory
- const ZVirtualMemory vmem = _virtual.alloc(size);
- if (vmem.is_null()) {
- // Out of address space
- return NULL;
+bool ZPageAllocator::commit_page(ZPage* page) {
+ // Commit physical memory
+ return _physical.commit(page->physical_memory());
+}
+
+void ZPageAllocator::uncommit_page(ZPage* page) {
+ if (!ZUncommit) {
+ return;
}
- // Allocate physical memory
- const ZPhysicalMemory pmem = _physical.alloc(size);
- assert(!pmem.is_null(), "Invalid size");
+ // Uncommit physical memory
+ _physical.uncommit(page->physical_memory());
+}
- // Allocate page
- return new ZPage(type, vmem, pmem);
+void ZPageAllocator::map_page(const ZPage* page) const {
+ // Map physical memory
+ _physical.map(page->start(), page->physical_memory());
+}
+
+void ZPageAllocator::unmap_page(const ZPage* page) const {
+ // Unmap physical memory
+ _physical.unmap(page->start(), page->size());
}
void ZPageAllocator::destroy_page(ZPage* page) {
- const ZVirtualMemory& vmem = page->virtual_memory();
- const ZPhysicalMemory& pmem = page->physical_memory();
-
- // Unmap memory
- _physical.unmap(pmem, vmem.start());
+ // Free virtual memory
+ _virtual.free(page->virtual_memory());
// Free physical memory
- _physical.free(pmem);
-
- // Free virtual memory
- _virtual.free(vmem);
+ _physical.free(page->physical_memory());
// Delete page safely
_safe_delete(page);
}
-void ZPageAllocator::map_page(const ZPage* page) const {
- // Map physical memory
- _physical.map(page->physical_memory(), page->start());
-}
-
-size_t ZPageAllocator::max_available(bool no_reserve) const {
- size_t available = _current_max_capacity - _used;
+bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const {
+ size_t available = _current_max_capacity - _used - _claimed;
if (no_reserve) {
// The reserve should not be considered available
available -= MIN2(available, _max_reserve);
}
- return available;
+ return available >= size;
}
-bool ZPageAllocator::ensure_available(size_t size, bool no_reserve) {
- if (max_available(no_reserve) < size) {
- // Not enough free memory
+bool ZPageAllocator::is_alloc_allowed_from_cache(size_t size, bool no_reserve) const {
+ size_t available = _capacity - _used - _claimed;
+
+ if (no_reserve) {
+ // The reserve should not be considered available
+ available -= MIN2(available, _max_reserve);
+ } else if (_capacity != _current_max_capacity) {
+ // Always increase capacity before using the reserve
return false;
}
- // We add the max_reserve to the requested size to avoid losing
- // the reserve because of failure to increase capacity before
- // reaching max capacity.
- size += _max_reserve;
+ return available >= size;
+}
- // Don't try to increase capacity if enough unused capacity
- // is available or if current max capacity has been reached.
- const size_t available = _capacity - _used;
- if (available < size && _capacity < _current_max_capacity) {
- // Try to increase capacity
- const size_t commit = MIN2(size - available, _current_max_capacity - _capacity);
- const size_t committed = _physical.commit(commit);
- _capacity += committed;
+bool ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve, ZList<ZPage>* pages) {
+ if (!is_alloc_allowed(size, no_reserve)) {
+ // Out of memory
+ return false;
+ }
- log_trace(gc, heap)("Make Available: Size: " SIZE_FORMAT "M, NoReserve: %s, "
- "Available: " SIZE_FORMAT "M, Commit: " SIZE_FORMAT "M, "
- "Committed: " SIZE_FORMAT "M, Capacity: " SIZE_FORMAT "M",
- size / M, no_reserve ? "True" : "False", available / M,
- commit / M, committed / M, _capacity / M);
-
- if (committed != commit) {
- // Failed, or partly failed, to increase capacity. Adjust current
- // max capacity to avoid further attempts to increase capacity.
- log_error(gc)("Forced to lower max Java heap size from "
- SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)",
- _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
- _capacity / M, percent_of(_capacity, _max_capacity));
-
- _current_max_capacity = _capacity;
+ // Try allocate from the page cache
+ if (is_alloc_allowed_from_cache(size, no_reserve)) {
+ ZPage* const page = _cache.alloc_page(type, size);
+ if (page != NULL) {
+ // Success
+ pages->insert_last(page);
+ return true;
}
}
- if (!no_reserve) {
- size -= _max_reserve;
+ // Try increase capacity
+ const size_t increased = increase_capacity(size);
+ if (increased < size) {
+ // Could not increase capacity enough to satisfy the allocation
+ // completely. Flush the page cache to satisfy the remainder.
+ const size_t remaining = size - increased;
+ _cache.flush_for_allocation(remaining, pages);
}
- const size_t new_available = _capacity - _used;
- return new_available >= size;
+ // Success
+ return true;
}
-void ZPageAllocator::ensure_uncached_available(size_t size) {
- assert(_capacity - _used >= size, "Invalid size");
- const size_t uncached_available = _capacity - _used - _cache.available();
- if (size > uncached_available) {
- flush_cache_for_allocation(size - uncached_available);
- }
-}
+bool ZPageAllocator::alloc_page_common(ZPageAllocation* allocation) {
+ const uint8_t type = allocation->type();
+ const size_t size = allocation->size();
+ const ZAllocationFlags flags = allocation->flags();
+  ZList<ZPage>* const pages = allocation->pages();
-ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve) {
- if (!ensure_available(size, no_reserve)) {
- // Not enough free memory
- return NULL;
+ // Try allocate without using the reserve
+ if (!alloc_page_common_inner(type, size, true /* no_reserve */, pages)) {
+ // If allowed to, try allocate using the reserve
+ if (flags.no_reserve() || !alloc_page_common_inner(type, size, false /* no_reserve */, pages)) {
+ // Out of memory
+ return false;
+ }
}
- // Try allocate page from the cache
- ZPage* const page = _cache.alloc_page(type, size);
- if (page != NULL) {
- return page;
- }
-
- // Try flush pages from the cache
- ensure_uncached_available(size);
-
- // Create new page
- return create_page(type, size);
-}
-
-ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags) {
- EventZPageAllocation event;
-
- ZPage* const page = alloc_page_common_inner(type, size, flags.no_reserve());
- if (page == NULL) {
- // Out of memory
- return NULL;
- }
-
- // Update used statistics
+  // Update used statistics
increase_used(size, flags.relocation());
- // Send trace event
- event.commit(type, size, _used, max_available(flags.no_reserve()),
- _cache.available(), flags.non_blocking(), flags.no_reserve());
-
- return page;
+ // Success
+ return true;
}
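
The order of the two alloc_page_common_inner() calls is the reserve policy in miniature: every allocation first pretends the reserve does not exist, and only flags that permit it get a second pass. A compressed sketch (try_alloc is a placeholder for alloc_page_common_inner):

#include <functional>

static bool alloc_two_pass(bool reserve_allowed,
                           const std::function<bool(bool)>& try_alloc) {
  if (try_alloc(true /* no_reserve */)) {
    return true;                             // satisfied without the reserve
  }
  if (!reserve_allowed) {
    return false;                            // flags.no_reserve() was set
  }
  return try_alloc(false /* no_reserve */);  // last resort: dip into the reserve
}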
-void ZPageAllocator::check_out_of_memory_during_initialization() {
+static void check_out_of_memory_during_initialization() {
if (!is_init_completed()) {
vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
}
}
-ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags) {
- // Prepare to block
- ZPageAllocRequest request(type, size, flags, ZCollectedHeap::heap()->total_collections());
+bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) {
+ ZStatTimer timer(ZCriticalPhaseAllocationStall);
+ EventZAllocationStall event;
+ ZPageAllocationStall result;
- _lock.lock();
+ // We can only block if the VM is fully initialized
+ check_out_of_memory_during_initialization();
- // Try non-blocking allocation
- ZPage* page = alloc_page_common(type, size, flags);
- if (page == NULL) {
- // Allocation failed, enqueue request
- _queue.insert_last(&request);
+ do {
+ // Start asynchronous GC
+ ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);
+
+ // Wait for allocation to complete, fail or request a GC
+ result = allocation->wait();
+ } while (result == ZPageAllocationStallStartGC);
+
+ {
+ //
+ // We grab the lock here for two different reasons:
+ //
+ // 1) Guard deletion of underlying semaphore. This is a workaround for
+ // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
+ // the semaphore immediately after returning from sem_wait(). The
+ // reason is that sem_post() can touch the semaphore after a waiting
+  //    thread has returned from sem_wait(). To avoid this race we are
+ // forcing the waiting thread to acquire/release the lock held by the
+ // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
+ //
+ // 2) Guard the list of satisfied pages.
+ //
+ ZLocker locker(&_lock);
+ _satisfied.remove(allocation);
}
- _lock.unlock();
+ // Send event
+ event.commit(allocation->type(), allocation->size());
- if (page == NULL) {
- // Allocation failed
- ZStatTimer timer(ZCriticalPhaseAllocationStall);
- EventZAllocationStall event;
-
- // We can only block if VM is fully initialized
- check_out_of_memory_during_initialization();
-
- do {
- // Start asynchronous GC
- ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);
-
- // Wait for allocation to complete or fail
- page = request.wait();
- } while (page == gc_marker);
-
- {
- //
- // We grab the lock here for two different reasons:
- //
- // 1) Guard deletion of underlying semaphore. This is a workaround for
- // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
- // the semaphore immediately after returning from sem_wait(). The
- // reason is that sem_post() can touch the semaphore after a waiting
- // thread have returned from sem_wait(). To avoid this race we are
- // forcing the waiting thread to acquire/release the lock held by the
- // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
- //
- // 2) Guard the list of satisfied pages.
- //
- ZLocker locker(&_lock);
- _satisfied.remove(&request);
- }
-
- event.commit(type, size);
- }
-
- return page;
+ return (result == ZPageAllocationStallSuccess);
}
-ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
+bool ZPageAllocator::alloc_page_or_stall(ZPageAllocation* allocation) {
+ {
+ ZLocker locker(&_lock);
+
+ if (alloc_page_common(allocation)) {
+ // Success
+ return true;
+ }
+
+ // Failed
+ if (allocation->flags().non_blocking()) {
+ // Don't stall
+ return false;
+ }
+
+ // Enqueue allocation request
+ _stalled.insert_last(allocation);
+ }
+
+ // Stall
+ return alloc_page_stall(allocation);
+}
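
ZPageAllocation's wait()/satisfy() pair is defined outside this hunk. As a rough model of the handshake, here is a condition-variable version showing its shape; the real implementation is semaphore-based, which is exactly why the lock dance described in alloc_page_stall() is needed:

#include <condition_variable>
#include <mutex>

enum StallResult { StallSuccess, StallFailed, StallStartGC };

// Illustrative model only; not the HotSpot implementation.
class StallTicket {
private:
  std::mutex              _mutex;
  std::condition_variable _cv;
  bool                    _posted = false;
  StallResult             _result = StallFailed;

public:
  StallResult wait() {
    std::unique_lock<std::mutex> lock(_mutex);
    _cv.wait(lock, [this]() { return _posted; });
    _posted = false;  // re-arm; the caller loops while the result is StallStartGC
    return _result;
  }

  void satisfy(StallResult result) {
    {
      std::lock_guard<std::mutex> lock(_mutex);
      _result = result;
      _posted = true;
    }
    _cv.notify_one();
  }
};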
+
+ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
+ const size_t size = allocation->size();
+
+  // Allocate virtual memory. To make error handling a lot more
+  // straightforward, we allocate virtual memory before destroying flushed pages.
+ // Flushed pages are also unmapped and destroyed asynchronously, so we
+ // can't immediately reuse that part of the address space anyway.
+ const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
+ if (vmem.is_null()) {
+ log_error(gc)("Out of address space");
+ return NULL;
+ }
+
+ ZPhysicalMemory pmem;
+ size_t flushed = 0;
+
+ // Harvest physical memory from flushed pages
+  ZListRemoveIterator<ZPage> iter(allocation->pages());
+ for (ZPage* page; iter.next(&page);) {
+ flushed += page->size();
+
+ // Harvest flushed physical memory
+ ZPhysicalMemory& fmem = page->physical_memory();
+ pmem.add_segments(fmem);
+ fmem.remove_segments();
+
+ // Unmap and destroy page
+ _unmapper->unmap_and_destroy_page(page);
+ }
+
+ if (flushed > 0) {
+ allocation->set_flushed(flushed);
+
+ // Update statistics
+ ZStatInc(ZCounterPageCacheFlush, flushed);
+ log_debug(gc, heap)("Page Cache Flushed: " SIZE_FORMAT "M", flushed / M);
+ }
+
+  // Allocate any remaining physical memory. Capacity and used have
+ // already been adjusted, we just need to fetch the memory, which
+ // is guaranteed to succeed.
+ if (flushed < size) {
+ const size_t remaining = size - flushed;
+ allocation->set_committed(remaining);
+ _physical.alloc(pmem, remaining);
+ }
+
+ // Create new page
+ return new ZPage(allocation->type(), vmem, pmem);
+}
+
+static bool is_alloc_satisfied(ZPageAllocation* allocation) {
+ // The allocation is immediately satisfied if the list of pages contains
+ // exactly one page, with the type and size that was requested.
+ return allocation->pages()->size() == 1 &&
+ allocation->pages()->first()->type() == allocation->type() &&
+ allocation->pages()->first()->size() == allocation->size();
+}
+
+ZPage* ZPageAllocator::alloc_page_finalize(ZPageAllocation* allocation) {
+ // Fast path
+ if (is_alloc_satisfied(allocation)) {
+ return allocation->pages()->remove_first();
+ }
+
+ // Slow path
+ ZPage* const page = alloc_page_create(allocation);
+ if (page == NULL) {
+ // Out of address space
+ return NULL;
+ }
+
+ // Commit page
+ if (commit_page(page)) {
+ // Success
+ map_page(page);
+ return page;
+ }
+
+  // Failed or partially failed. Split off any successfully committed
+  // part of the page into a new page and insert it into the list of pages,
+ // so that it will be re-inserted into the page cache.
+ ZPage* const committed_page = page->split_committed();
+ destroy_page(page);
+
+ if (committed_page != NULL) {
+ map_page(committed_page);
+ allocation->pages()->insert_last(committed_page);
+ }
+
+ return NULL;
+}
+
+void ZPageAllocator::alloc_page_failed(ZPageAllocation* allocation) {
ZLocker locker(&_lock);
- return alloc_page_common(type, size, flags);
+
+ size_t freed = 0;
+
+ // Free any allocated/flushed pages
+  ZListRemoveIterator<ZPage> iter(allocation->pages());
+ for (ZPage* page; iter.next(&page);) {
+ freed += page->size();
+ free_page_inner(page, false /* reclaimed */);
+ }
+
+ // Adjust capacity and used to reflect the failed capacity increase
+ const size_t remaining = allocation->size() - freed;
+ decrease_used(remaining, false /* reclaimed */);
+ decrease_capacity(remaining, true /* set_max_capacity */);
+
+ // Try satisfy stalled allocations
+ satisfy_stalled();
}
ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
- ZPage* const page = flags.non_blocking()
- ? alloc_page_nonblocking(type, size, flags)
- : alloc_page_blocking(type, size, flags);
- if (page == NULL) {
+ EventZPageAllocation event;
+
+retry:
+ ZPageAllocation allocation(type, size, flags);
+
+ // Allocate one or more pages from the page cache. If the allocation
+ // succeeds but the returned pages don't cover the complete allocation,
+  // then the finalize phase is allowed to allocate the remaining memory
+ // directly from the physical memory manager. Note that this call might
+ // block in a safepoint if the non-blocking flag is not set.
+ if (!alloc_page_or_stall(&allocation)) {
// Out of memory
return NULL;
}
- // Map page if needed
- if (!page->is_mapped()) {
- map_page(page);
+ ZPage* const page = alloc_page_finalize(&allocation);
+ if (page == NULL) {
+ // Failed to commit or map. Clean up and retry, in the hope that
+ // we can still allocate by flushing the page cache (more aggressively).
+ alloc_page_failed(&allocation);
+ goto retry;
}
// Reset page. This updates the page's sequence number and must
- // be done after page allocation, which potentially blocked in
- // a safepoint where the global sequence number was updated.
+ // be done after we potentially blocked in a safepoint (stalled)
+ // where the global sequence number was updated.
page->reset();
// Update allocation statistics. Exclude worker threads to avoid
@@ -538,35 +700,36 @@ ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags fl
ZStatInc(ZStatAllocRate::counter(), bytes);
}
+ // Send event
+ event.commit(type, size, allocation.flushed(), allocation.committed(),
+ page->physical_memory().nsegments(), flags.non_blocking(), flags.no_reserve());
+
return page;
}
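
Pulled out of the diff, the control flow of alloc_page() is a classic undo-and-retry loop: each failed finalize returns its pages and rolls back capacity/used, so the next attempt can flush the cache harder. A compressed sketch (the three parameters are placeholders for alloc_page_or_stall, alloc_page_finalize and alloc_page_failed):

template <typename Alloc, typename Finalize, typename Undo>
void* alloc_with_retry(Alloc allocate_or_stall, Finalize finalize, Undo undo) {
  for (;;) {
    if (!allocate_or_stall()) {
      return nullptr;        // out of memory
    }
    if (void* page = finalize()) {
      return page;           // committed and mapped
    }
    undo();                  // free pages, roll back capacity/used, retry
  }
}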
-void ZPageAllocator::satisfy_alloc_queue() {
+void ZPageAllocator::satisfy_stalled() {
for (;;) {
- ZPageAllocRequest* const request = _queue.first();
- if (request == NULL) {
+ ZPageAllocation* const allocation = _stalled.first();
+ if (allocation == NULL) {
// Allocation queue is empty
return;
}
- ZPage* const page = alloc_page_common(request->type(), request->size(), request->flags());
- if (page == NULL) {
+ if (!alloc_page_common(allocation)) {
// Allocation could not be satisfied, give up
return;
}
- // Allocation succeeded, dequeue and satisfy request. Note that
- // the dequeue operation must happen first, since the request
- // will immediately be deallocated once it has been satisfied.
- _queue.remove(request);
- _satisfied.insert_first(request);
- request->satisfy(page);
+ // Allocation succeeded, dequeue and satisfy allocation request.
+ // Note that we must dequeue the allocation request first, since
+ // it will immediately be deallocated once it has been satisfied.
+ _stalled.remove(allocation);
+ _satisfied.insert_last(allocation);
+ allocation->satisfy(ZPageAllocationStallSuccess);
}
}
-void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
- ZLocker locker(&_lock);
-
+void ZPageAllocator::free_page_inner(ZPage* page, bool reclaimed) {
// Update used statistics
decrease_used(page->size(), reclaimed);
@@ -575,171 +738,70 @@ void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
// Cache page
_cache.free_page(page);
-
- // Try satisfy blocked allocations
- satisfy_alloc_queue();
}
-size_t ZPageAllocator::flush_cache(ZPageCacheFlushClosure* cl, bool for_allocation) {
- EventZPageCacheFlush event;
+void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
+ ZLocker locker(&_lock);
-  ZList<ZPage> list;
+ // Free page
+ free_page_inner(page, reclaimed);
- // Flush pages
- _cache.flush(cl, &list);
+ // Try satisfy stalled allocations
+ satisfy_stalled();
+}
- const size_t overflushed = cl->overflushed();
- if (overflushed > 0) {
- // Overflushed, keep part of last page
- ZPage* const page = list.last()->split(overflushed);
- _cache.free_page(page);
+size_t ZPageAllocator::uncommit(uint64_t* timeout) {
+ // We need to join the suspendible thread set while manipulating capacity and
+ // used, to make sure GC safepoints will have a consistent view. However, when
+ // ZVerifyViews is enabled we need to join at a broader scope to also make sure
+ // we don't change the address good mask after pages have been flushed, and
+ // thereby made invisible to pages_do(), but before they have been unmapped.
+ SuspendibleThreadSetJoiner joiner(ZVerifyViews);
+  ZList<ZPage> pages;
+ size_t flushed;
+
+ {
+ SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
+ ZLocker locker(&_lock);
+
+ // Never uncommit the reserve, and never uncommit below min capacity. We flush
+ // out and uncommit chunks at a time (~0.8% of the max capacity, but at least
+ // one granule and at most 256M), in case demand for memory increases while we
+ // are uncommitting.
+ const size_t retain = clamp(_used + _max_reserve, _min_capacity, _capacity);
+ const size_t release = _capacity - retain;
+ const size_t limit = MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), 256 * M);
+ const size_t flush = MIN2(release, limit);
+
+ // Flush pages to uncommit
+ flushed = _cache.flush_for_uncommit(flush, &pages, timeout);
+ if (flushed == 0) {
+ // Nothing flushed
+ return 0;
+ }
+
+ // Record flushed pages as claimed
+ Atomic::add(&_claimed, flushed);
}
- // Destroy pages
- size_t flushed = 0;
- for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
- flushed += page->size();
+ // Unmap, uncommit, and destroy flushed pages
+  ZListRemoveIterator<ZPage> iter(&pages);
+ for (ZPage* page; iter.next(&page);) {
+ unmap_page(page);
+ uncommit_page(page);
destroy_page(page);
}
- // Send event
- event.commit(flushed, for_allocation);
-
- return flushed;
-}
-
-class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
-public:
- ZPageCacheFlushForAllocationClosure(size_t requested) :
- ZPageCacheFlushClosure(requested) {}
-
- virtual bool do_page(const ZPage* page) {
- if (_flushed < _requested) {
- // Flush page
- _flushed += page->size();
- return true;
- }
-
- // Don't flush page
- return false;
- }
-};
-
-void ZPageAllocator::flush_cache_for_allocation(size_t requested) {
- assert(requested <= _cache.available(), "Invalid request");
-
- // Flush pages
- ZPageCacheFlushForAllocationClosure cl(requested);
- const size_t flushed = flush_cache(&cl, true /* for_allocation */);
-
- assert(requested == flushed, "Failed to flush");
-
- const size_t cached_after = _cache.available();
- const size_t cached_before = cached_after + flushed;
-
- log_info(gc, heap)("Page Cache: " SIZE_FORMAT "M(%.0f%%)->" SIZE_FORMAT "M(%.0f%%), "
- "Flushed: " SIZE_FORMAT "M",
- cached_before / M, percent_of(cached_before, max_capacity()),
- cached_after / M, percent_of(cached_after, max_capacity()),
- flushed / M);
-
- // Update statistics
- ZStatInc(ZCounterPageCacheFlush, flushed);
-}
-
-class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
-private:
- const uint64_t _now;
- const uint64_t _delay;
- uint64_t _timeout;
-
-public:
- ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t delay) :
- ZPageCacheFlushClosure(requested),
- _now(os::elapsedTime()),
- _delay(delay),
- _timeout(_delay) {}
-
- virtual bool do_page(const ZPage* page) {
- const uint64_t expires = page->last_used() + _delay;
- const uint64_t timeout = expires - MIN2(expires, _now);
-
- if (_flushed < _requested && timeout == 0) {
- // Flush page
- _flushed += page->size();
- return true;
- }
-
- // Record shortest non-expired timeout
- _timeout = MIN2(_timeout, timeout);
-
- // Don't flush page
- return false;
- }
-
- uint64_t timeout() const {
- return _timeout;
- }
-};
-
-uint64_t ZPageAllocator::uncommit(uint64_t delay) {
- // Set the default timeout, when no pages are found in the
- // cache or when uncommit is disabled, equal to the delay.
- uint64_t timeout = delay;
-
- if (!_uncommit) {
- // Disabled
- return timeout;
- }
-
- EventZUncommit event;
- size_t capacity_before;
- size_t capacity_after;
- size_t uncommitted;
-
{
- SuspendibleThreadSetJoiner joiner;
+ SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
ZLocker locker(&_lock);
- // Don't flush more than we will uncommit. Never uncommit
- // the reserve, and never uncommit below min capacity.
- const size_t needed = MIN2(_used + _max_reserve, _current_max_capacity);
- const size_t guarded = MAX2(needed, _min_capacity);
- const size_t uncommittable = _capacity - guarded;
- const size_t uncached_available = _capacity - _used - _cache.available();
- size_t uncommit = MIN2(uncommittable, uncached_available);
- const size_t flush = uncommittable - uncommit;
-
- if (flush > 0) {
- // Flush pages to uncommit
- ZPageCacheFlushForUncommitClosure cl(flush, delay);
- uncommit += flush_cache(&cl, false /* for_allocation */);
- timeout = cl.timeout();
- }
-
- // Uncommit
- uncommitted = _physical.uncommit(uncommit);
- _capacity -= uncommitted;
-
- capacity_after = _capacity;
- capacity_before = capacity_after + uncommitted;
+ // Adjust claimed and capacity to reflect the uncommit
+ Atomic::sub(&_claimed, flushed);
+ decrease_capacity(flushed, false /* set_max_capacity */);
}
- if (uncommitted > 0) {
- log_info(gc, heap)("Capacity: " SIZE_FORMAT "M(%.0f%%)->" SIZE_FORMAT "M(%.0f%%), "
- "Uncommitted: " SIZE_FORMAT "M",
- capacity_before / M, percent_of(capacity_before, max_capacity()),
- capacity_after / M, percent_of(capacity_after, max_capacity()),
- uncommitted / M);
-
- // Send event
- event.commit(capacity_before, capacity_after, uncommitted);
-
- // Update statistics
- ZStatInc(ZCounterUncommit, uncommitted);
- }
-
- return timeout;
+ return flushed;
}
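
To make the budget arithmetic concrete, here is a standalone check of one uncommit round. clamp and align_up are replaced with standard-library equivalents, ZGranuleSize is assumed to be 2M, and the counter values are invented for the example:

#include <algorithm>
#include <cassert>
#include <cstddef>

int main() {
  const size_t M = 1024 * 1024, G = 1024 * M;
  const size_t granule = 2 * M;                 // assumed ZGranuleSize
  const size_t min_capacity = 2 * G;
  const size_t capacity = 10 * G;
  const size_t used = 6 * G;
  const size_t max_reserve = 64 * M;
  const size_t current_max_capacity = 16 * G;

  // Never uncommit the reserve, never uncommit below min capacity
  const size_t retain = std::clamp(used + max_reserve, min_capacity, capacity);
  const size_t release = capacity - retain;

  // ~0.8% of max capacity (>> 7 is divide by 128), granule aligned, capped at 256M
  const size_t chunk = ((current_max_capacity >> 7) + granule - 1) / granule * granule;
  const size_t limit = std::min(chunk, 256 * M);
  const size_t flush = std::min(release, limit);

  assert(limit == 128 * M);   // 16G / 128 = 128M, below the 256M cap
  assert(flush == 128 * M);   // release (~3.9G) is larger, so the cap decides
  return 0;
}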
void ZPageAllocator::enable_deferred_delete() const {
@@ -752,19 +814,21 @@ void ZPageAllocator::disable_deferred_delete() const {
void ZPageAllocator::debug_map_page(const ZPage* page) const {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
- _physical.debug_map(page->physical_memory(), page->start());
+ _physical.debug_map(page->start(), page->physical_memory());
}
void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
- _physical.debug_unmap(page->physical_memory(), page->start());
+ _physical.debug_unmap(page->start(), page->size());
}
void ZPageAllocator::pages_do(ZPageClosure* cl) const {
-  ZListIterator<ZPageAllocRequest> iter(&_satisfied);
- for (ZPageAllocRequest* request; iter.next(&request);) {
- const ZPage* const page = request->peek();
- if (page != NULL) {
+ assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+
+  ZListIterator<ZPageAllocation> iter_satisfied(&_satisfied);
+  for (ZPageAllocation* allocation; iter_satisfied.next(&allocation);) {
+    ZListIterator<ZPage> iter_pages(allocation->pages());
+ for (ZPage* page; iter_pages.next(&page);) {
cl->do_page(page);
}
}
@@ -774,7 +838,7 @@ void ZPageAllocator::pages_do(ZPageClosure* cl) const {
bool ZPageAllocator::is_alloc_stalled() const {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
- return !_queue.is_empty();
+ return !_stalled.is_empty();
}
void ZPageAllocator::check_out_of_memory() {
@@ -782,16 +846,21 @@ void ZPageAllocator::check_out_of_memory() {
// Fail allocation requests that were enqueued before the
// last GC cycle started, otherwise start a new GC cycle.
- for (ZPageAllocRequest* request = _queue.first(); request != NULL; request = _queue.first()) {
- if (request->total_collections() == ZCollectedHeap::heap()->total_collections()) {
+ for (ZPageAllocation* allocation = _stalled.first(); allocation != NULL; allocation = _stalled.first()) {
+ if (allocation->seqnum() == ZGlobalSeqNum) {
// Start a new GC cycle, keep allocation requests enqueued
- request->satisfy(gc_marker);
+ allocation->satisfy(ZPageAllocationStallStartGC);
return;
}
// Out of memory, fail allocation request
- _queue.remove(request);
- _satisfied.insert_first(request);
- request->satisfy(NULL);
+ _stalled.remove(allocation);
+ _satisfied.insert_last(allocation);
+ allocation->satisfy(ZPageAllocationStallFailed);
}
}
+
+void ZPageAllocator::threads_do(ThreadClosure* tc) const {
+ tc->do_thread(_unmapper);
+ tc->do_thread(_uncommitter);
+}
diff --git a/src/hotspot/share/gc/z/zPageAllocator.hpp b/src/hotspot/share/gc/z/zPageAllocator.hpp
index 8f605637674..800ef4d9875 100644
--- a/src/hotspot/share/gc/z/zPageAllocator.hpp
+++ b/src/hotspot/share/gc/z/zPageAllocator.hpp
@@ -31,60 +31,73 @@
#include "gc/z/zPhysicalMemory.hpp"
#include "gc/z/zSafeDelete.hpp"
#include "gc/z/zVirtualMemory.hpp"
-#include "memory/allocation.hpp"
-class ZPageAllocRequest;
+class ThreadClosure;
+class ZPageAllocation;
class ZWorkers;
+class ZUncommitter;
+class ZUnmapper;
class ZPageAllocator {
friend class VMStructs;
+ friend class ZUnmapper;
+ friend class ZUncommitter;
private:
ZLock _lock;
+ ZPageCache _cache;
ZVirtualMemoryManager _virtual;
ZPhysicalMemoryManager _physical;
- ZPageCache _cache;
const size_t _min_capacity;
const size_t _max_capacity;
const size_t _max_reserve;
- size_t _current_max_capacity;
- size_t _capacity;
+ volatile size_t _current_max_capacity;
+ volatile size_t _capacity;
+ volatile size_t _claimed;
+ volatile size_t _used;
size_t _used_high;
size_t _used_low;
- size_t _used;
size_t _allocated;
ssize_t _reclaimed;
-  ZList<ZPageAllocRequest> _queue;
-  ZList<ZPageAllocRequest> _satisfied;
+  ZList<ZPageAllocation> _stalled;
+  ZList<ZPageAllocation> _satisfied;
+ ZUnmapper* _unmapper;
+ ZUncommitter* _uncommitter;
mutable ZSafeDelete<ZPage> _safe_delete;
- bool _uncommit;
bool _initialized;
- static ZPage* const gc_marker;
+ bool prime_cache(ZWorkers* workers, size_t size);
- void prime_cache(ZWorkers* workers, size_t size);
+ size_t increase_capacity(size_t size);
+ void decrease_capacity(size_t size, bool set_max_capacity);
void increase_used(size_t size, bool relocation);
void decrease_used(size_t size, bool reclaimed);
- ZPage* create_page(uint8_t type, size_t size);
+ bool commit_page(ZPage* page);
+ void uncommit_page(ZPage* page);
+
+ void map_page(const ZPage* page) const;
+ void unmap_page(const ZPage* page) const;
+
void destroy_page(ZPage* page);
- size_t max_available(bool no_reserve) const;
- bool ensure_available(size_t size, bool no_reserve);
- void ensure_uncached_available(size_t size);
+ bool is_alloc_allowed(size_t size, bool no_reserve) const;
+ bool is_alloc_allowed_from_cache(size_t size, bool no_reserve) const;
- void check_out_of_memory_during_initialization();
+  bool alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve, ZList<ZPage>* pages);
+ bool alloc_page_common(ZPageAllocation* allocation);
+ bool alloc_page_stall(ZPageAllocation* allocation);
+ bool alloc_page_or_stall(ZPageAllocation* allocation);
+ ZPage* alloc_page_create(ZPageAllocation* allocation);
+ ZPage* alloc_page_finalize(ZPageAllocation* allocation);
+ void alloc_page_failed(ZPageAllocation* allocation);
- ZPage* alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve);
- ZPage* alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags);
- ZPage* alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags);
- ZPage* alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags);
+ void satisfy_stalled();
- size_t flush_cache(ZPageCacheFlushClosure* cl, bool for_allocation);
- void flush_cache_for_allocation(size_t requested);
+ void free_page_inner(ZPage* page, bool reclaimed);
- void satisfy_alloc_queue();
+ size_t uncommit(uint64_t* timeout);
public:
ZPageAllocator(ZWorkers* workers,
@@ -112,13 +125,9 @@ public:
ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
void free_page(ZPage* page, bool reclaimed);
- uint64_t uncommit(uint64_t delay);
-
void enable_deferred_delete() const;
void disable_deferred_delete() const;
- void map_page(const ZPage* page) const;
-
void debug_map_page(const ZPage* page) const;
void debug_unmap_page(const ZPage* page) const;
@@ -126,6 +135,8 @@ public:
void check_out_of_memory();
void pages_do(ZPageClosure* cl) const;
+
+ void threads_do(ThreadClosure* tc) const;
};
#endif // SHARE_GC_Z_ZPAGEALLOCATOR_HPP
diff --git a/src/hotspot/share/gc/z/zPageCache.cpp b/src/hotspot/share/gc/z/zPageCache.cpp
index 8fe8699da45..0b1a2e95a2c 100644
--- a/src/hotspot/share/gc/z/zPageCache.cpp
+++ b/src/hotspot/share/gc/z/zPageCache.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
+#include "gc/z/zGlobals.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zPage.inline.hpp"
@@ -29,25 +30,36 @@
#include "gc/z/zStat.hpp"
#include "gc/z/zValue.inline.hpp"
#include "logging/log.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL2("Memory", "Page Cache Hit L2", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL3("Memory", "Page Cache Hit L3", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheMiss("Memory", "Page Cache Miss", ZStatUnitOpsPerSecond);
+class ZPageCacheFlushClosure : public StackObj {
+ friend class ZPageCache;
+
+protected:
+ const size_t _requested;
+ size_t _flushed;
+
+public:
+ ZPageCacheFlushClosure(size_t requested);
+ virtual bool do_page(const ZPage* page) = 0;
+};
+
ZPageCacheFlushClosure::ZPageCacheFlushClosure(size_t requested) :
_requested(requested),
_flushed(0) {}
-size_t ZPageCacheFlushClosure::overflushed() const {
- return _flushed > _requested ? _flushed - _requested : 0;
-}
-
ZPageCache::ZPageCache() :
- _available(0),
_small(),
_medium(),
- _large() {}
+ _large(),
+ _last_commit(0) {}
ZPage* ZPageCache::alloc_small_page() {
const uint32_t numa_id = ZNUMA::id();
@@ -161,7 +173,7 @@ ZPage* ZPageCache::alloc_page(uint8_t type, size_t size) {
page = oversized->split(type, size);
// Cache remainder
- free_page_inner(oversized);
+ free_page(oversized);
} else {
// Re-type correctly sized page
page = oversized->retype(type);
@@ -169,16 +181,14 @@ ZPage* ZPageCache::alloc_page(uint8_t type, size_t size) {
}
}
- if (page != NULL) {
- _available -= page->size();
- } else {
+ if (page == NULL) {
ZStatInc(ZCounterPageCacheMiss);
}
return page;
}
-void ZPageCache::free_page_inner(ZPage* page) {
+void ZPageCache::free_page(ZPage* page) {
const uint8_t type = page->type();
if (type == ZPageTypeSmall) {
_small.get(page->numa_id()).insert_first(page);
@@ -189,11 +199,6 @@ void ZPageCache::free_page_inner(ZPage* page) {
}
}
-void ZPageCache::free_page(ZPage* page) {
- free_page_inner(page);
- _available += page->size();
-}
-
bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
ZPage* const page = from->last();
if (page == NULL || !cl->do_page(page)) {
@@ -202,7 +207,6 @@ bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from
}
// Flush page
- _available -= page->size();
from->remove(page);
to->insert_last(page);
return true;
@@ -239,6 +243,94 @@ void ZPageCache::flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to) {
flush_list(cl, &_large, to);
flush_list(cl, &_medium, to);
flush_per_numa_lists(cl, &_small, to);
+
+ if (cl->_flushed > cl->_requested) {
+ // Overflushed, re-insert part of last page into the cache
+ const size_t overflushed = cl->_flushed - cl->_requested;
+ ZPage* const reinsert = to->last()->split(overflushed);
+ free_page(reinsert);
+ cl->_flushed -= overflushed;
+ }
+}
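
Because flushing removes whole pages, the request can overshoot; a toy walk-through of the correction (sizes in megabytes; the final subtraction models split() trimming the last flushed page):

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  const size_t requested = 5;
  std::vector<size_t> cache = {8, 2, 4};   // cached page sizes, flushed from the back
  std::vector<size_t> flushed_pages;

  size_t flushed = 0;
  while (flushed < requested && !cache.empty()) {
    flushed_pages.push_back(cache.back()); // a whole page leaves the cache
    flushed += cache.back();
    cache.pop_back();
  }
  assert(flushed == 6);                    // 4 + 2, one MB too much

  if (flushed > requested) {
    const size_t overflushed = flushed - requested;
    flushed_pages.back() -= overflushed;   // split() on the last flushed page
    cache.push_back(overflushed);          // remainder goes back into the cache
    flushed -= overflushed;
  }
  assert(flushed == requested && cache.back() == 1);
  return 0;
}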
+
+class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
+public:
+ ZPageCacheFlushForAllocationClosure(size_t requested) :
+ ZPageCacheFlushClosure(requested) {}
+
+ virtual bool do_page(const ZPage* page) {
+ if (_flushed < _requested) {
+ // Flush page
+ _flushed += page->size();
+ return true;
+ }
+
+ // Don't flush page
+ return false;
+ }
+};
+
+void ZPageCache::flush_for_allocation(size_t requested, ZList<ZPage>* to) {
+ ZPageCacheFlushForAllocationClosure cl(requested);
+ flush(&cl, to);
+}
+
+class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
+private:
+ const uint64_t _now;
+ uint64_t* _timeout;
+
+public:
+ ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t now, uint64_t* timeout) :
+ ZPageCacheFlushClosure(requested),
+ _now(now),
+ _timeout(timeout) {
+ // Set initial timeout
+ *_timeout = ZUncommitDelay;
+ }
+
+ virtual bool do_page(const ZPage* page) {
+ const uint64_t expires = page->last_used() + ZUncommitDelay;
+ if (expires > _now) {
+ // Don't flush page, record shortest non-expired timeout
+ *_timeout = MIN2(*_timeout, expires - _now);
+ return false;
+ }
+
+ if (_flushed >= _requested) {
+ // Don't flush page, requested amount flushed
+ return false;
+ }
+
+ // Flush page
+ _flushed += page->size();
+ return true;
+ }
+};
+
+size_t ZPageCache::flush_for_uncommit(size_t requested, ZList<ZPage>* to, uint64_t* timeout) {
+ const uint64_t now = os::elapsedTime();
+ const uint64_t expires = _last_commit + ZUncommitDelay;
+ if (expires > now) {
+ // Delay uncommit, set next timeout
+ *timeout = expires - now;
+ return 0;
+ }
+
+ if (requested == 0) {
+ // Nothing to flush, set next timeout
+ *timeout = ZUncommitDelay;
+ return 0;
+ }
+
+ ZPageCacheFlushForUncommitClosure cl(requested, now, timeout);
+ flush(&cl, to);
+
+ return cl._flushed;
+}
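
The closure above folds two questions into one traversal: how much is old enough to flush, and when should the uncommitter wake again. A standalone sketch of that computation (timestamps in seconds, as with os::elapsedTime(); types illustrative). With delay 300, now 1000 and a page last used at 900, nothing flushes and the next try is in 200 seconds:

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

struct Plan { uint64_t flushable; uint64_t timeout; };

static Plan plan_uncommit(const std::vector<std::pair<uint64_t, uint64_t>>& pages,
                          uint64_t now, uint64_t delay) {
  Plan plan = {0, delay};                    // default timeout: the full delay
  for (const auto& p : pages) {              // p = {last_used, size}
    const uint64_t expires = p.first + delay;
    if (expires > now) {
      plan.timeout = std::min(plan.timeout, expires - now); // earliest expiry
    } else {
      plan.flushable += p.second;            // old enough to uncommit now
    }
  }
  return plan;
}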
+
+void ZPageCache::set_last_commit() {
+ _last_commit = os::elapsedTime();
}
void ZPageCache::pages_do(ZPageClosure* cl) const {
diff --git a/src/hotspot/share/gc/z/zPageCache.hpp b/src/hotspot/share/gc/z/zPageCache.hpp
index 4e983f2ad52..b641e0e4be1 100644
--- a/src/hotspot/share/gc/z/zPageCache.hpp
+++ b/src/hotspot/share/gc/z/zPageCache.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,25 +27,15 @@
#include "gc/z/zList.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zValue.hpp"
-#include "memory/allocation.hpp"
-class ZPageCacheFlushClosure : public StackObj {
-protected:
- const size_t _requested;
- size_t _flushed;
-
-public:
- ZPageCacheFlushClosure(size_t requested);
- size_t overflushed() const;
- virtual bool do_page(const ZPage* page) = 0;
-};
+class ZPageCacheFlushClosure;
class ZPageCache {
private:
- size_t _available;
ZPerNUMA<ZList<ZPage> > _small;
ZList<ZPage> _medium;
ZList<ZPage> _large;
+ uint64_t _last_commit;
ZPage* alloc_small_page();
ZPage* alloc_medium_page();
@@ -55,21 +45,21 @@ private:
ZPage* alloc_oversized_large_page(size_t size);
ZPage* alloc_oversized_page(size_t size);
- void free_page_inner(ZPage* page);
-
bool flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to);
void flush_list(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to);
void flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA<ZList<ZPage> >* from, ZList<ZPage>* to);
+  void flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to);
public:
ZPageCache();
- size_t available() const;
-
ZPage* alloc_page(uint8_t type, size_t size);
void free_page(ZPage* page);
-  void flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to);
+  void flush_for_allocation(size_t requested, ZList<ZPage>* to);
+  size_t flush_for_uncommit(size_t requested, ZList<ZPage>* to, uint64_t* timeout);
+
+ void set_last_commit();
void pages_do(ZPageClosure* cl) const;
};
diff --git a/src/hotspot/share/gc/z/zPhysicalMemory.cpp b/src/hotspot/share/gc/z/zPhysicalMemory.cpp
index 57f810a0b71..5929b222b5c 100644
--- a/src/hotspot/share/gc/z/zPhysicalMemory.cpp
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.cpp
@@ -27,92 +27,212 @@
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zNUMA.inline.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
+#include "utilities/powerOfTwo.hpp"
ZPhysicalMemory::ZPhysicalMemory() :
+ _nsegments_max(0),
_nsegments(0),
_segments(NULL) {}
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) :
+ _nsegments_max(0),
_nsegments(0),
_segments(NULL) {
add_segment(segment);
}
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem) :
+ _nsegments_max(0),
_nsegments(0),
_segments(NULL) {
-
- // Copy segments
- for (size_t i = 0; i < pmem.nsegments(); i++) {
- add_segment(pmem.segment(i));
- }
+ add_segments(pmem);
}
const ZPhysicalMemory& ZPhysicalMemory::operator=(const ZPhysicalMemory& pmem) {
- // Free segments
- delete [] _segments;
- _segments = NULL;
- _nsegments = 0;
-
- // Copy segments
- for (size_t i = 0; i < pmem.nsegments(); i++) {
- add_segment(pmem.segment(i));
- }
-
+ remove_segments();
+ add_segments(pmem);
return *this;
}
ZPhysicalMemory::~ZPhysicalMemory() {
- delete [] _segments;
- _segments = NULL;
- _nsegments = 0;
+ remove_segments();
}
size_t ZPhysicalMemory::size() const {
size_t size = 0;
- for (size_t i = 0; i < _nsegments; i++) {
+ for (uint32_t i = 0; i < _nsegments; i++) {
size += _segments[i].size();
}
return size;
}
+void ZPhysicalMemory::insert_segment(uint32_t index, uintptr_t start, size_t size, bool committed) {
+ assert(index <= _nsegments, "Invalid index");
+
+ ZPhysicalMemorySegment* const from_segments = _segments;
+
+ if (_nsegments + 1 > _nsegments_max) {
+ // Resize array
+ _nsegments_max = round_up_power_of_2(_nsegments_max + 1);
+ _segments = new ZPhysicalMemorySegment[_nsegments_max];
+
+ // Copy segments before index
+ for (uint32_t i = 0; i < index; i++) {
+ _segments[i] = from_segments[i];
+ }
+ }
+
+ // Copy/Move segments after index
+ for (uint32_t i = _nsegments; i > index; i--) {
+ _segments[i] = from_segments[i - 1];
+ }
+
+ // Insert new segment
+ _segments[index] = ZPhysicalMemorySegment(start, size, committed);
+ _nsegments++;
+
+ // Delete old array
+ if (from_segments != _segments) {
+ delete [] from_segments;
+ }
+}
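
Rounding the new capacity up to a power of two makes the array growth geometric, so n insertions trigger only O(log n) reallocations. A quick self-contained check (std::bit_ceil, C++20, stands in for round_up_power_of_2):

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint32_t nsegments_max = 0;
  uint32_t resizes = 0;
  for (uint32_t nsegments = 0; nsegments < 1000; nsegments++) {
    if (nsegments + 1 > nsegments_max) {
      nsegments_max = std::bit_ceil(nsegments_max + 1); // round_up_power_of_2
      resizes++;
    }
  }
  assert(nsegments_max == 1024);
  assert(resizes == 11); // capacities 1, 2, 4, ..., 1024
  return 0;
}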
+
+void ZPhysicalMemory::replace_segment(uint32_t index, uintptr_t start, size_t size, bool committed) {
+ assert(index < _nsegments, "Invalid index");
+  _segments[index] = ZPhysicalMemorySegment(start, size, committed);
+}
+
+void ZPhysicalMemory::remove_segment(uint32_t index) {
+ assert(index < _nsegments, "Invalid index");
+
+ // Move segments after index
+ for (uint32_t i = index + 1; i < _nsegments; i++) {
+ _segments[i - 1] = _segments[i];
+ }
+
+ _nsegments--;
+}
+
+void ZPhysicalMemory::add_segments(const ZPhysicalMemory& pmem) {
+ for (uint32_t i = 0; i < pmem.nsegments(); i++) {
+ add_segment(pmem.segment(i));
+ }
+}
+
+void ZPhysicalMemory::remove_segments() {
+ delete [] _segments;
+ _segments = NULL;
+ _nsegments_max = 0;
+ _nsegments = 0;
+}
+
+static bool is_mergable(const ZPhysicalMemorySegment& before, const ZPhysicalMemorySegment& after) {
+ return before.end() == after.start() && before.is_committed() == after.is_committed();
+}
+
void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) {
- // Try merge with last segment
- if (_nsegments > 0) {
- ZPhysicalMemorySegment& last = _segments[_nsegments - 1];
- assert(last.end() <= segment.start(), "Segments added out of order");
- if (last.end() == segment.start()) {
- last = ZPhysicalMemorySegment(last.start(), last.size() + segment.size());
+ // Insert segments in address order, merge segments when possible
+ for (uint32_t i = _nsegments; i > 0; i--) {
+ const uint32_t current = i - 1;
+
+ if (_segments[current].end() <= segment.start()) {
+ if (is_mergable(_segments[current], segment)) {
+ if (current + 1 < _nsegments && is_mergable(segment, _segments[current + 1])) {
+ // Merge with end of current segment and start of next segment
+ const size_t start = _segments[current].start();
+ const size_t size = _segments[current].size() + segment.size() + _segments[current + 1].size();
+ replace_segment(current, start, size, segment.is_committed());
+ remove_segment(current + 1);
+ return;
+ }
+
+ // Merge with end of current segment
+ const size_t start = _segments[current].start();
+ const size_t size = _segments[current].size() + segment.size();
+ replace_segment(current, start, size, segment.is_committed());
+ return;
+ } else if (current + 1 < _nsegments && is_mergable(segment, _segments[current + 1])) {
+ // Merge with start of next segment
+ const size_t start = segment.start();
+ const size_t size = segment.size() + _segments[current + 1].size();
+ replace_segment(current + 1, start, size, segment.is_committed());
+ return;
+ }
+
+ // Insert after current segment
+ insert_segment(current + 1, segment.start(), segment.size(), segment.is_committed());
return;
}
}
- // Resize array
- ZPhysicalMemorySegment* const old_segments = _segments;
- _segments = new ZPhysicalMemorySegment[_nsegments + 1];
- for (size_t i = 0; i < _nsegments; i++) {
- _segments[i] = old_segments[i];
+ if (_nsegments > 0 && is_mergable(segment, _segments[0])) {
+ // Merge with start of first segment
+ const size_t start = segment.start();
+ const size_t size = segment.size() + _segments[0].size();
+ replace_segment(0, start, size, segment.is_committed());
+ return;
}
- delete [] old_segments;
- // Add new segment
- _segments[_nsegments] = segment;
- _nsegments++;
+ // Insert before first segment
+ insert_segment(0, segment.start(), segment.size(), segment.is_committed());
+}
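
The cascade above covers three merge shapes: bridge both neighbors, extend the previous segment, or extend the next; anything else is a plain insert. A simplified standalone model over a sorted vector (illustrative, not the HotSpot data structure):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Seg { uintptr_t start, end; bool committed; };

// Keep segments sorted; merge a new segment with touching neighbors
// that have the same committed state.
static void add(std::vector<Seg>& segs, Seg s) {
  size_t i = 0;
  while (i < segs.size() && segs[i].end <= s.start) i++;   // insertion point
  const bool merge_prev = i > 0 && segs[i - 1].end == s.start &&
                          segs[i - 1].committed == s.committed;
  const bool merge_next = i < segs.size() && s.end == segs[i].start &&
                          segs[i].committed == s.committed;
  if (merge_prev && merge_next) {        // bridge two existing segments
    segs[i - 1].end = segs[i].end;
    segs.erase(segs.begin() + i);
  } else if (merge_prev) {
    segs[i - 1].end = s.end;
  } else if (merge_next) {
    segs[i].start = s.start;
  } else {
    segs.insert(segs.begin() + i, s);
  }
}

int main() {
  std::vector<Seg> segs;
  add(segs, {0, 2, true});
  add(segs, {4, 6, true});
  add(segs, {2, 4, false});   // touches both, but committed differs: no merge
  assert(segs.size() == 3);
  add(segs, {6, 8, true});    // touches {4,6,true}: merged into {4,8,true}
  assert(segs.size() == 3 && segs[2].start == 4 && segs[2].end == 8);
  return 0;
}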
+
+bool ZPhysicalMemory::commit_segment(uint32_t index, size_t size) {
+ assert(index < _nsegments, "Invalid index");
+ assert(size <= _segments[index].size(), "Invalid size");
+ assert(!_segments[index].is_committed(), "Invalid state");
+
+ if (size == _segments[index].size()) {
+ // Completely committed
+ _segments[index].set_committed(true);
+ return true;
+ }
+
+ if (size > 0) {
+ // Partially committed, split segment
+ insert_segment(index + 1, _segments[index].start() + size, _segments[index].size() - size, false /* committed */);
+ replace_segment(index, _segments[index].start(), size, true /* committed */);
+ }
+
+ return false;
+}
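
A worked example of the partial-commit path: if the backing store grants only part of a segment, the committed prefix is split off and the call still reports failure, leaving the segment list consistent. A toy model (names are illustrative):

#include <cassert>
#include <cstddef>
#include <vector>

struct Seg { size_t start, size; bool committed; };

// 'granted' is how much the backing store actually committed.
static bool commit_segment(std::vector<Seg>& segs, size_t index, size_t granted) {
  if (granted == segs[index].size) {
    segs[index].committed = true;       // completely committed
    return true;
  }
  if (granted > 0) {                    // partially committed: split in two
    segs.insert(segs.begin() + index + 1,
                Seg{segs[index].start + granted, segs[index].size - granted, false});
    segs[index] = Seg{segs[index].start, granted, true};
  }
  return false;                         // caller treats this as a commit failure
}

int main() {
  const size_t M = 1024 * 1024;
  std::vector<Seg> segs = {{0, 4 * M, false}};
  assert(!commit_segment(segs, 0, 1 * M));
  assert(segs.size() == 2);
  assert(segs[0].committed && segs[0].size == 1 * M);
  assert(!segs[1].committed && segs[1].size == 3 * M);
  return 0;
}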
+
+bool ZPhysicalMemory::uncommit_segment(uint32_t index, size_t size) {
+ assert(index < _nsegments, "Invalid index");
+ assert(size <= _segments[index].size(), "Invalid size");
+ assert(_segments[index].is_committed(), "Invalid state");
+
+ if (size == _segments[index].size()) {
+ // Completely uncommitted
+ _segments[index].set_committed(false);
+ return true;
+ }
+
+ if (size > 0) {
+ // Partially uncommitted, split segment
+ insert_segment(index + 1, _segments[index].start() + size, _segments[index].size() - size, true /* committed */);
+ replace_segment(index, _segments[index].start(), size, false /* committed */);
+ }
+
+ return false;
}
ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
ZPhysicalMemory pmem;
- size_t nsegments = 0;
+ uint32_t nsegments = 0;
- for (size_t i = 0; i < _nsegments; i++) {
+ for (uint32_t i = 0; i < _nsegments; i++) {
const ZPhysicalMemorySegment& segment = _segments[i];
if (pmem.size() < size) {
if (pmem.size() + segment.size() <= size) {
@@ -121,8 +241,8 @@ ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
} else {
// Split segment
const size_t split_size = size - pmem.size();
- pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size));
- _segments[nsegments++] = ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size);
+ pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size, segment.is_committed()));
+ _segments[nsegments++] = ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size, segment.is_committed());
}
} else {
// Keep segment
@@ -135,143 +255,157 @@ ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
return pmem;
}
+ZPhysicalMemory ZPhysicalMemory::split_committed() {
+ ZPhysicalMemory pmem;
+ uint32_t nsegments = 0;
+
+ for (uint32_t i = 0; i < _nsegments; i++) {
+ const ZPhysicalMemorySegment& segment = _segments[i];
+ if (segment.is_committed()) {
+ // Transfer segment
+ pmem.add_segment(segment);
+ } else {
+ // Keep segment
+ _segments[nsegments++] = segment;
+ }
+ }
+
+ _nsegments = nsegments;
+
+ return pmem;
+}
+
+ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) :
+ _backing(max_capacity) {
+ // Make the whole range free
+ _manager.free(0, max_capacity);
+}
+
bool ZPhysicalMemoryManager::is_initialized() const {
return _backing.is_initialized();
}
-void ZPhysicalMemoryManager::warn_commit_limits(size_t max) const {
- _backing.warn_commit_limits(max);
+void ZPhysicalMemoryManager::warn_commit_limits(size_t max_capacity) const {
+ _backing.warn_commit_limits(max_capacity);
}
-bool ZPhysicalMemoryManager::supports_uncommit() {
+void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max_capacity) {
assert(!is_init_completed(), "Invalid state");
- assert(_backing.size() >= ZGranuleSize, "Invalid size");
- // Test if uncommit is supported by uncommitting and then re-committing a granule
- return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
+ // If uncommit is not explicitly disabled, max capacity is greater than
+ // min capacity, and uncommit is supported by the platform, then uncommit
+ // will be enabled.
+ if (!ZUncommit) {
+ log_info(gc, init)("Uncommit: Disabled");
+ return;
+ }
+
+ if (max_capacity == min_capacity) {
+ log_info(gc, init)("Uncommit: Implicitly Disabled (-Xms equals -Xmx)");
+ FLAG_SET_ERGO(ZUncommit, false);
+ return;
+ }
+
+ // Test if uncommit is supported by the operating system by committing
+ // and then uncommitting a granule.
+ ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZGranuleSize, false /* committed */));
+ if (!commit(pmem) || !uncommit(pmem)) {
+ log_info(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)");
+ FLAG_SET_ERGO(ZUncommit, false);
+ return;
+ }
+
+ log_info(gc, init)("Uncommit: Enabled");
+ log_info(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay);
}
-void ZPhysicalMemoryManager::nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+void ZPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const {
// From an NMT point of view we treat the first heap view (marked0) as committed
const uintptr_t addr = ZAddress::marked0(offset);
- const size_t size = pmem.size();
MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
}
-void ZPhysicalMemoryManager::nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+void ZPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const {
if (MemTracker::tracking_level() > NMT_minimal) {
const uintptr_t addr = ZAddress::marked0(offset);
- const size_t size = pmem.size();
Tracker tracker(Tracker::uncommit);
tracker.record((address)addr, size);
}
}
-size_t ZPhysicalMemoryManager::commit(size_t size) {
- size_t committed = 0;
-
- // Fill holes in the backing memory
- while (committed < size) {
- size_t allocated = 0;
- const size_t remaining = size - committed;
- const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
- if (start == UINTPTR_MAX) {
- // No holes to commit
- break;
- }
-
- // Try commit hole
- const size_t filled = _backing.commit(start, allocated);
- if (filled > 0) {
- // Successful or partialy successful
- _committed.free(start, filled);
- committed += filled;
- }
- if (filled < allocated) {
- // Failed or partialy failed
- _uncommitted.free(start + filled, allocated - filled);
- return committed;
- }
- }
-
- // Expand backing memory
- if (committed < size) {
- const size_t remaining = size - committed;
- const uintptr_t start = _backing.size();
- const size_t expanded = _backing.commit(start, remaining);
- if (expanded > 0) {
- // Successful or partialy successful
- _committed.free(start, expanded);
- committed += expanded;
- }
- }
-
- return committed;
-}
-
-size_t ZPhysicalMemoryManager::uncommit(size_t size) {
- size_t uncommitted = 0;
-
- // Punch holes in backing memory
- while (uncommitted < size) {
- size_t allocated = 0;
- const size_t remaining = size - uncommitted;
- const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
- assert(start != UINTPTR_MAX, "Allocation should never fail");
-
- // Try punch hole
- const size_t punched = _backing.uncommit(start, allocated);
- if (punched > 0) {
- // Successful or partialy successful
- _uncommitted.free(start, punched);
- uncommitted += punched;
- }
- if (punched < allocated) {
- // Failed or partialy failed
- _committed.free(start + punched, allocated - punched);
- return uncommitted;
- }
- }
-
- return uncommitted;
-}
-
-ZPhysicalMemory ZPhysicalMemoryManager::alloc(size_t size) {
+void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {
assert(is_aligned(size, ZGranuleSize), "Invalid size");
- ZPhysicalMemory pmem;
-
// Allocate segments
- for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
- const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
+ while (size > 0) {
+ size_t allocated = 0;
+ const uintptr_t start = _manager.alloc_from_front_at_most(size, &allocated);
assert(start != UINTPTR_MAX, "Allocation should never fail");
- pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
+ pmem.add_segment(ZPhysicalMemorySegment(start, allocated, false /* committed */));
+ size -= allocated;
}
-
- return pmem;
}
void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
- const size_t nsegments = pmem.nsegments();
-
// Free segments
- for (size_t i = 0; i < nsegments; i++) {
+ for (uint32_t i = 0; i < pmem.nsegments(); i++) {
const ZPhysicalMemorySegment& segment = pmem.segment(i);
- _committed.free(segment.start(), segment.size());
+ _manager.free(segment.start(), segment.size());
}
}
+bool ZPhysicalMemoryManager::commit(ZPhysicalMemory& pmem) {
+ // Commit segments
+ for (uint32_t i = 0; i < pmem.nsegments(); i++) {
+ const ZPhysicalMemorySegment& segment = pmem.segment(i);
+ if (segment.is_committed()) {
+ // Segment already committed
+ continue;
+ }
+
+ // Commit segment
+ const size_t committed = _backing.commit(segment.start(), segment.size());
+ if (!pmem.commit_segment(i, committed)) {
+ // Failed or partially failed
+ return false;
+ }
+ }
+
+ // Success
+ return true;
+}
+
+bool ZPhysicalMemoryManager::uncommit(ZPhysicalMemory& pmem) {
+  // Uncommit segments
+ for (uint32_t i = 0; i < pmem.nsegments(); i++) {
+ const ZPhysicalMemorySegment& segment = pmem.segment(i);
+ if (!segment.is_committed()) {
+ // Segment already uncommitted
+ continue;
+ }
+
+ // Uncommit segment
+ const size_t uncommitted = _backing.uncommit(segment.start(), segment.size());
+ if (!pmem.uncommit_segment(i, uncommitted)) {
+ // Failed or partially failed
+ return false;
+ }
+ }
+
+ // Success
+ return true;
+}
+
void ZPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const {
const size_t page_size = ZLargePages::is_explicit() ? ZGranuleSize : os::vm_page_size();
os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
}
-void ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
- const size_t nsegments = pmem.nsegments();
+void ZPhysicalMemoryManager::map_view(uintptr_t addr, const ZPhysicalMemory& pmem) const {
size_t size = 0;
// Map segments
- for (size_t i = 0; i < nsegments; i++) {
+ for (uint32_t i = 0; i < pmem.nsegments(); i++) {
const ZPhysicalMemorySegment& segment = pmem.segment(i);
_backing.map(addr + size, segment.size(), segment.start());
size += segment.size();
@@ -286,8 +420,8 @@ void ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t add
}
}
-void ZPhysicalMemoryManager::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
- _backing.unmap(addr, pmem.size());
+void ZPhysicalMemoryManager::unmap_view(uintptr_t addr, size_t size) const {
+ _backing.unmap(addr, size);
}
void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
@@ -302,42 +436,44 @@ void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
}
}
-void ZPhysicalMemoryManager::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+void ZPhysicalMemoryManager::map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
+ const size_t size = pmem.size();
+
if (ZVerifyViews) {
// Map good view
- map_view(pmem, ZAddress::good(offset));
+ map_view(ZAddress::good(offset), pmem);
} else {
// Map all views
- map_view(pmem, ZAddress::marked0(offset));
- map_view(pmem, ZAddress::marked1(offset));
- map_view(pmem, ZAddress::remapped(offset));
+ map_view(ZAddress::marked0(offset), pmem);
+ map_view(ZAddress::marked1(offset), pmem);
+ map_view(ZAddress::remapped(offset), pmem);
}
- nmt_commit(pmem, offset);
+ nmt_commit(offset, size);
}
-void ZPhysicalMemoryManager::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
- nmt_uncommit(pmem, offset);
+void ZPhysicalMemoryManager::unmap(uintptr_t offset, size_t size) const {
+ nmt_uncommit(offset, size);
if (ZVerifyViews) {
// Unmap good view
- unmap_view(pmem, ZAddress::good(offset));
+ unmap_view(ZAddress::good(offset), size);
} else {
// Unmap all views
- unmap_view(pmem, ZAddress::marked0(offset));
- unmap_view(pmem, ZAddress::marked1(offset));
- unmap_view(pmem, ZAddress::remapped(offset));
+ unmap_view(ZAddress::marked0(offset), size);
+ unmap_view(ZAddress::marked1(offset), size);
+ unmap_view(ZAddress::remapped(offset), size);
}
}
-void ZPhysicalMemoryManager::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+void ZPhysicalMemoryManager::debug_map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
// Map good view
assert(ZVerifyViews, "Should be enabled");
- map_view(pmem, ZAddress::good(offset));
+ map_view(ZAddress::good(offset), pmem);
}
-void ZPhysicalMemoryManager::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+void ZPhysicalMemoryManager::debug_unmap(uintptr_t offset, size_t size) const {
// Unmap good view
assert(ZVerifyViews, "Should be enabled");
- unmap_view(pmem, ZAddress::good(offset));
+ unmap_view(ZAddress::good(offset), size);
}
diff --git a/src/hotspot/share/gc/z/zPhysicalMemory.hpp b/src/hotspot/share/gc/z/zPhysicalMemory.hpp
index cd6c13eb54c..8d051a91c48 100644
--- a/src/hotspot/share/gc/z/zPhysicalMemory.hpp
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.hpp
@@ -32,21 +32,30 @@ class ZPhysicalMemorySegment : public CHeapObj<mtGC> {
private:
uintptr_t _start;
uintptr_t _end;
+ bool _committed;
public:
ZPhysicalMemorySegment();
- ZPhysicalMemorySegment(uintptr_t start, size_t size);
+ ZPhysicalMemorySegment(uintptr_t start, size_t size, bool committed);
uintptr_t start() const;
uintptr_t end() const;
size_t size() const;
+
+ bool is_committed() const;
+ void set_committed(bool committed);
};
class ZPhysicalMemory {
private:
- size_t _nsegments;
+ uint32_t _nsegments_max;
+ uint32_t _nsegments;
ZPhysicalMemorySegment* _segments;
+ void insert_segment(uint32_t index, uintptr_t start, size_t size, bool committed);
+ void replace_segment(uint32_t index, uintptr_t start, size_t size, bool committed);
+ void remove_segment(uint32_t index);
+
public:
ZPhysicalMemory();
ZPhysicalMemory(const ZPhysicalMemorySegment& segment);
@@ -57,45 +66,53 @@ public:
bool is_null() const;
size_t size() const;
- size_t nsegments() const;
- const ZPhysicalMemorySegment& segment(size_t index) const;
+ uint32_t nsegments() const;
+ const ZPhysicalMemorySegment& segment(uint32_t index) const;
+
+ void add_segments(const ZPhysicalMemory& pmem);
+ void remove_segments();
+
void add_segment(const ZPhysicalMemorySegment& segment);
+ bool commit_segment(uint32_t index, size_t size);
+ bool uncommit_segment(uint32_t index, size_t size);
ZPhysicalMemory split(size_t size);
+ ZPhysicalMemory split_committed();
};
class ZPhysicalMemoryManager {
private:
ZPhysicalMemoryBacking _backing;
- ZMemoryManager _committed;
- ZMemoryManager _uncommitted;
+ ZMemoryManager _manager;
- void nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const;
- void nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+ void nmt_commit(uintptr_t offset, size_t size) const;
+ void nmt_uncommit(uintptr_t offset, size_t size) const;
void pretouch_view(uintptr_t addr, size_t size) const;
- void map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
- void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
+ void map_view(uintptr_t addr, const ZPhysicalMemory& pmem) const;
+ void unmap_view(uintptr_t addr, size_t size) const;
public:
+ ZPhysicalMemoryManager(size_t max_capacity);
+
bool is_initialized() const;
- void warn_commit_limits(size_t max) const;
- bool supports_uncommit();
+ void warn_commit_limits(size_t max_capacity) const;
+ void try_enable_uncommit(size_t min_capacity, size_t max_capacity);
- size_t commit(size_t size);
- size_t uncommit(size_t size);
-
- ZPhysicalMemory alloc(size_t size);
+ void alloc(ZPhysicalMemory& pmem, size_t size);
void free(const ZPhysicalMemory& pmem);
+ bool commit(ZPhysicalMemory& pmem);
+ bool uncommit(ZPhysicalMemory& pmem);
+
void pretouch(uintptr_t offset, size_t size) const;
- void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
- void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+ void map(uintptr_t offset, const ZPhysicalMemory& pmem) const;
+ void unmap(uintptr_t offset, size_t size) const;
- void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
- void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+ void debug_map(uintptr_t offset, const ZPhysicalMemory& pmem) const;
+ void debug_unmap(uintptr_t offset, size_t size) const;
};
#endif // SHARE_GC_Z_ZPHYSICALMEMORY_HPP
diff --git a/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp b/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp
index 3a68eabb30e..df29d058f25 100644
--- a/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,11 +29,13 @@
inline ZPhysicalMemorySegment::ZPhysicalMemorySegment() :
_start(UINTPTR_MAX),
- _end(UINTPTR_MAX) {}
+ _end(UINTPTR_MAX),
+ _committed(false) {}
-inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(uintptr_t start, size_t size) :
+inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(uintptr_t start, size_t size, bool committed) :
_start(start),
- _end(start + size) {}
+ _end(start + size),
+ _committed(committed) {}
inline uintptr_t ZPhysicalMemorySegment::start() const {
return _start;
@@ -47,15 +49,23 @@ inline size_t ZPhysicalMemorySegment::size() const {
return _end - _start;
}
+inline bool ZPhysicalMemorySegment::is_committed() const {
+ return _committed;
+}
+
+inline void ZPhysicalMemorySegment::set_committed(bool committed) {
+ _committed = committed;
+}
+
inline bool ZPhysicalMemory::is_null() const {
return _nsegments == 0;
}
-inline size_t ZPhysicalMemory::nsegments() const {
+inline uint32_t ZPhysicalMemory::nsegments() const {
return _nsegments;
}
-inline const ZPhysicalMemorySegment& ZPhysicalMemory::segment(size_t index) const {
+inline const ZPhysicalMemorySegment& ZPhysicalMemory::segment(uint32_t index) const {
assert(index < _nsegments, "Invalid segment index");
return _segments[index];
}
diff --git a/src/hotspot/share/gc/z/zRootsIterator.cpp b/src/hotspot/share/gc/z/zRootsIterator.cpp
index b1ad519f452..2eb68e65460 100644
--- a/src/hotspot/share/gc/z/zRootsIterator.cpp
+++ b/src/hotspot/share/gc/z/zRootsIterator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
-#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
@@ -63,7 +62,6 @@ static const ZStatSubPhase ZSubPhasePauseRootsObjectSynchronizer("Pause Roots Ob
static const ZStatSubPhase ZSubPhasePauseRootsManagement("Pause Roots Management");
static const ZStatSubPhase ZSubPhasePauseRootsJVMTIExport("Pause Roots JVMTIExport");
static const ZStatSubPhase ZSubPhasePauseRootsJVMTIWeakExport("Pause Roots JVMTIWeakExport");
-static const ZStatSubPhase ZSubPhasePauseRootsSystemDictionary("Pause Roots SystemDictionary");
static const ZStatSubPhase ZSubPhasePauseRootsVMThread("Pause Roots VM Thread");
static const ZStatSubPhase ZSubPhasePauseRootsJavaThreads("Pause Roots Java Threads");
static const ZStatSubPhase ZSubPhasePauseRootsCodeCache("Pause Roots CodeCache");
@@ -202,7 +200,6 @@ ZRootsIterator::ZRootsIterator(bool visit_jvmti_weak_export) :
_management(this),
_jvmti_export(this),
_jvmti_weak_export(this),
- _system_dictionary(this),
_vm_thread(this),
_java_threads(this),
_code_cache(this) {
@@ -254,12 +251,6 @@ void ZRootsIterator::do_jvmti_weak_export(ZRootsIteratorClosure* cl) {
JvmtiExport::weak_oops_do(&always_alive, cl);
}
-void ZRootsIterator::do_system_dictionary(ZRootsIteratorClosure* cl) {
- ZStatTimer timer(ZSubPhasePauseRootsSystemDictionary);
- // Handles are processed via _vm_handles.
- SystemDictionary::oops_do(cl, false /* include_handles */);
-}
-
void ZRootsIterator::do_vm_thread(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhasePauseRootsVMThread);
ZRootsIteratorThreadClosure thread_cl(cl);
@@ -283,7 +274,6 @@ void ZRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
_object_synchronizer.oops_do(cl);
_management.oops_do(cl);
_jvmti_export.oops_do(cl);
- _system_dictionary.oops_do(cl);
_vm_thread.oops_do(cl);
_java_threads.oops_do(cl);
if (!ClassUnloading) {
diff --git a/src/hotspot/share/gc/z/zRootsIterator.hpp b/src/hotspot/share/gc/z/zRootsIterator.hpp
index b198dc0a67c..622999d42c6 100644
--- a/src/hotspot/share/gc/z/zRootsIterator.hpp
+++ b/src/hotspot/share/gc/z/zRootsIterator.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -112,7 +112,6 @@ private:
void do_management(ZRootsIteratorClosure* cl);
void do_jvmti_export(ZRootsIteratorClosure* cl);
void do_jvmti_weak_export(ZRootsIteratorClosure* cl);
- void do_system_dictionary(ZRootsIteratorClosure* cl);
void do_vm_thread(ZRootsIteratorClosure* cl);
void do_java_threads(ZRootsIteratorClosure* cl);
void do_code_cache(ZRootsIteratorClosure* cl);
@@ -122,7 +121,6 @@ private:
ZSerialOopsDo _management;
ZSerialOopsDo _jvmti_export;
ZSerialOopsDo _jvmti_weak_export;
- ZSerialOopsDo _system_dictionary;
ZSerialOopsDo _vm_thread;
ZParallelOopsDo _java_threads;
ZParallelOopsDo _code_cache;
diff --git a/src/hotspot/share/gc/z/zRuntimeWorkers.cpp b/src/hotspot/share/gc/z/zRuntimeWorkers.cpp
index 247fd14e0e2..e6f375a18f4 100644
--- a/src/hotspot/share/gc/z/zRuntimeWorkers.cpp
+++ b/src/hotspot/share/gc/z/zRuntimeWorkers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/workgroup.hpp"
#include "gc/z/zRuntimeWorkers.hpp"
#include "gc/z/zThread.hpp"
@@ -66,7 +67,7 @@ ZRuntimeWorkers::ZRuntimeWorkers() :
false /* are_GC_task_threads */,
false /* are_ConcurrentGC_threads */) {
- log_info(gc, init)("Runtime Workers: %u parallel", nworkers());
+ log_info_p(gc, init)("Runtime Workers: %u parallel", nworkers());
// Initialize worker threads
_workers.initialize_workers();
@@ -93,7 +94,3 @@ WorkGang* ZRuntimeWorkers::workers() {
void ZRuntimeWorkers::threads_do(ThreadClosure* tc) const {
_workers.threads_do(tc);
}
-
-void ZRuntimeWorkers::print_threads_on(outputStream* st) const {
- _workers.print_worker_threads_on(st);
-}
diff --git a/src/hotspot/share/gc/z/zRuntimeWorkers.hpp b/src/hotspot/share/gc/z/zRuntimeWorkers.hpp
index a201c79fa23..54115c3c85f 100644
--- a/src/hotspot/share/gc/z/zRuntimeWorkers.hpp
+++ b/src/hotspot/share/gc/z/zRuntimeWorkers.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,6 @@ public:
WorkGang* workers();
void threads_do(ThreadClosure* tc) const;
- void print_threads_on(outputStream* st) const;
};
#endif // SHARE_GC_Z_ZRUNTIMEWORKERS_HPP
diff --git a/src/hotspot/share/gc/z/zUncommitter.cpp b/src/hotspot/share/gc/z/zUncommitter.cpp
index c997cccb5b3..42fe602ca8b 100644
--- a/src/hotspot/share/gc/z/zUncommitter.cpp
+++ b/src/hotspot/share/gc/z/zUncommitter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,52 +23,73 @@
#include "precompiled.hpp"
#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zLock.inline.hpp"
+#include "gc/z/zStat.hpp"
#include "gc/z/zUncommitter.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/os.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "logging/log.hpp"
-ZUncommitter::ZUncommitter() :
- _monitor(Monitor::leaf, "ZUncommitter", false, Monitor::_safepoint_check_never),
+static const ZStatCounter ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
+
+ZUncommitter::ZUncommitter(ZPageAllocator* page_allocator) :
+ _page_allocator(page_allocator),
+ _lock(),
_stop(false) {
set_name("ZUncommitter");
create_and_start();
}
-bool ZUncommitter::idle(uint64_t timeout) {
- // Idle for at least one second
- const uint64_t expires = os::elapsedTime() + MAX2<uint64_t>(timeout, 1);
-
- for (;;) {
- // We might wake up spuriously from wait, so always recalculate
- // the timeout after a wakeup to see if we need to wait again.
- const uint64_t now = os::elapsedTime();
- const uint64_t remaining = expires - MIN2(expires, now);
-
- MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag);
- if (remaining > 0 && !_stop) {
- ml.wait(remaining * MILLIUNITS);
- } else {
- return !_stop;
- }
+bool ZUncommitter::wait(uint64_t timeout) const {
+ ZLocker<ZConditionLock> locker(&_lock);
+ while (!ZUncommit && !_stop) {
+ _lock.wait();
}
+
+ if (!_stop && timeout > 0) {
+ log_debug(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout);
+ _lock.wait(timeout * MILLIUNITS);
+ }
+
+ return !_stop;
+}
+
+bool ZUncommitter::should_continue() const {
+ ZLocker<ZConditionLock> locker(&_lock);
+ return !_stop;
}
void ZUncommitter::run_service() {
- for (;;) {
- // Try uncommit unused memory
- const uint64_t timeout = ZHeap::heap()->uncommit(ZUncommitDelay);
+ uint64_t timeout = 0;
- log_trace(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout);
+ while (wait(timeout)) {
+ EventZUncommit event;
+ size_t uncommitted = 0;
- // Idle until next attempt
- if (!idle(timeout)) {
- return;
+ while (should_continue()) {
+ // Uncommit chunk
+ const size_t flushed = _page_allocator->uncommit(&timeout);
+ if (flushed == 0) {
+ // Done
+ break;
+ }
+
+ uncommitted += flushed;
+ }
+
+ if (uncommitted > 0) {
+ // Update statistics
+ ZStatInc(ZCounterUncommit, uncommitted);
+ log_info(gc, heap)("Uncommitted: " SIZE_FORMAT "M(%.0f%%)",
+ uncommitted / M, percent_of(uncommitted, ZHeap::heap()->max_capacity()));
+
+ // Send event
+ event.commit(uncommitted);
}
}
}
void ZUncommitter::stop_service() {
- MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag);
+ ZLocker<ZConditionLock> locker(&_lock);
_stop = true;
- ml.notify();
+ _lock.notify_all();
}
diff --git a/src/hotspot/share/gc/z/zUncommitter.hpp b/src/hotspot/share/gc/z/zUncommitter.hpp
index 24b84dc8d0c..0e8384012e2 100644
--- a/src/hotspot/share/gc/z/zUncommitter.hpp
+++ b/src/hotspot/share/gc/z/zUncommitter.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,22 +24,26 @@
#ifndef SHARE_GC_Z_ZUNCOMMITTER_HPP
#define SHARE_GC_Z_ZUNCOMMITTER_HPP
+#include "gc/z/zLock.hpp"
#include "gc/shared/concurrentGCThread.hpp"
-#include "runtime/mutex.hpp"
+
+class ZPageAllocator;
class ZUncommitter : public ConcurrentGCThread {
private:
- Monitor _monitor;
- bool _stop;
+ ZPageAllocator* const _page_allocator;
+ mutable ZConditionLock _lock;
+ bool _stop;
- bool idle(uint64_t timeout);
+ bool wait(uint64_t timeout) const;
+ bool should_continue() const;
protected:
virtual void run_service();
virtual void stop_service();
public:
- ZUncommitter();
+ ZUncommitter(ZPageAllocator* page_allocator);
};
#endif // SHARE_GC_Z_ZUNCOMMITTER_HPP
diff --git a/src/hotspot/share/gc/z/zUnmapper.cpp b/src/hotspot/share/gc/z/zUnmapper.cpp
new file mode 100644
index 00000000000..eae5b5dbeab
--- /dev/null
+++ b/src/hotspot/share/gc/z/zUnmapper.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zList.inline.hpp"
+#include "gc/z/zLock.inline.hpp"
+#include "gc/z/zPage.inline.hpp"
+#include "gc/z/zPageAllocator.hpp"
+#include "gc/z/zUnmapper.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "runtime/globals.hpp"
+
+ZUnmapper::ZUnmapper(ZPageAllocator* page_allocator) :
+ _page_allocator(page_allocator),
+ _lock(),
+ _queue(),
+ _stop(false) {
+ set_name("ZUnmapper");
+ create_and_start();
+}
+
+ZPage* ZUnmapper::dequeue() {
+ ZLocker<ZConditionLock> locker(&_lock);
+
+ for (;;) {
+ if (_stop) {
+ return NULL;
+ }
+
+ ZPage* const page = _queue.remove_first();
+ if (page != NULL) {
+ return page;
+ }
+
+ _lock.wait();
+ }
+}
+
+void ZUnmapper::do_unmap_and_destroy_page(ZPage* page) const {
+ EventZUnmap event;
+ const size_t unmapped = page->size();
+
+ // Unmap and destroy
+ _page_allocator->unmap_page(page);
+ _page_allocator->destroy_page(page);
+
+ // Send event
+ event.commit(unmapped);
+}
+
+void ZUnmapper::unmap_and_destroy_page(ZPage* page) {
+ // Asynchronous unmap and destroy is not supported with ZVerifyViews
+ if (ZVerifyViews) {
+ // Immediately unmap and destroy
+ do_unmap_and_destroy_page(page);
+ } else {
+ // Enqueue for asynchronous unmap and destroy
+ ZLocker<ZConditionLock> locker(&_lock);
+ _queue.insert_last(page);
+ _lock.notify_all();
+ }
+}
+
+void ZUnmapper::run_service() {
+ for (;;) {
+ ZPage* const page = dequeue();
+ if (page == NULL) {
+ // Stop
+ return;
+ }
+
+ do_unmap_and_destroy_page(page);
+ }
+}
+
+void ZUnmapper::stop_service() {
+ ZLocker<ZConditionLock> locker(&_lock);
+ _stop = true;
+ _lock.notify_all();
+}
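
The new file above is a classic shutdown-aware worker queue: producers enqueue pages and notify under the lock, the service thread blocks in dequeue() until work or a stop request arrives, and stop_service() wakes everyone. A self-contained sketch of the same shape using the standard library (the generic Page type and class name are illustrative; the real code uses ZConditionLock and ZList):

    #include <condition_variable>
    #include <cstddef>
    #include <deque>
    #include <mutex>

    struct Page { size_t size; };

    class AsyncWorkerQueue {
      std::mutex              _mutex;
      std::condition_variable _cv;
      std::deque<Page*>       _queue;
      bool                    _stop = false;

     public:
      void enqueue(Page* page) {
        std::lock_guard<std::mutex> guard(_mutex);
        _queue.push_back(page);
        _cv.notify_all();
      }

      // Blocks until a page is available; returns nullptr once stopped,
      // mirroring ZUnmapper::dequeue() returning NULL when _stop is set.
      Page* dequeue() {
        std::unique_lock<std::mutex> lock(_mutex);
        _cv.wait(lock, [this] { return _stop || !_queue.empty(); });
        if (_stop) {
          return nullptr;
        }
        Page* const page = _queue.front();
        _queue.pop_front();
        return page;
      }

      void stop() {
        std::lock_guard<std::mutex> guard(_mutex);
        _stop = true;
        _cv.notify_all();
      }
    };
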
diff --git a/src/hotspot/share/gc/z/zUnmapper.hpp b/src/hotspot/share/gc/z/zUnmapper.hpp
new file mode 100644
index 00000000000..894926cb25e
--- /dev/null
+++ b/src/hotspot/share/gc/z/zUnmapper.hpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZUNMAPPER_HPP
+#define SHARE_GC_Z_ZUNMAPPER_HPP
+
+#include "gc/z/zList.hpp"
+#include "gc/z/zLock.hpp"
+#include "gc/shared/concurrentGCThread.hpp"
+
+class ZPage;
+class ZPageAllocator;
+
+class ZUnmapper : public ConcurrentGCThread {
+private:
+ ZPageAllocator* const _page_allocator;
+ ZConditionLock _lock;
+ ZList<ZPage> _queue;
+ bool _stop;
+
+ ZPage* dequeue();
+ void do_unmap_and_destroy_page(ZPage* page) const;
+
+protected:
+ virtual void run_service();
+ virtual void stop_service();
+
+public:
+ ZUnmapper(ZPageAllocator* page_allocator);
+
+ void unmap_and_destroy_page(ZPage* page);
+};
+
+#endif // SHARE_GC_Z_ZUNMAPPER_HPP
diff --git a/src/hotspot/share/gc/z/zVirtualMemory.cpp b/src/hotspot/share/gc/z/zVirtualMemory.cpp
index 5d5a958f120..d4598a478d1 100644
--- a/src/hotspot/share/gc/z/zVirtualMemory.cpp
+++ b/src/hotspot/share/gc/z/zVirtualMemory.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,10 +22,10 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zAddressSpaceLimit.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
-#include "logging/log.hpp"
#include "services/memTracker.hpp"
#include "utilities/debug.hpp"
#include "utilities/align.hpp"
@@ -36,14 +36,14 @@ ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity) :
// Check max supported heap size
if (max_capacity > ZAddressOffsetMax) {
- log_error(gc)("Java heap too large (max supported heap size is " SIZE_FORMAT "G)",
- ZAddressOffsetMax / G);
+ log_error_p(gc)("Java heap too large (max supported heap size is " SIZE_FORMAT "G)",
+ ZAddressOffsetMax / G);
return;
}
// Reserve address space
if (!reserve(max_capacity)) {
- log_error(gc)("Failed to reserve enough address space for Java heap");
+ log_error_pd(gc)("Failed to reserve enough address space for Java heap");
return;
}
@@ -132,12 +132,12 @@ bool ZVirtualMemoryManager::reserve(size_t max_capacity) {
contiguous = false;
}
- log_info(gc, init)("Address Space Type: %s/%s/%s",
- (contiguous ? "Contiguous" : "Discontiguous"),
- (limit == ZAddressOffsetMax ? "Unrestricted" : "Restricted"),
- (reserved == size ? "Complete" : "Degraded"));
- log_info(gc, init)("Address Space Size: " SIZE_FORMAT "M x " SIZE_FORMAT " = " SIZE_FORMAT "M",
- reserved / M, ZHeapViews, (reserved * ZHeapViews) / M);
+ log_info_p(gc, init)("Address Space Type: %s/%s/%s",
+ (contiguous ? "Contiguous" : "Discontiguous"),
+ (limit == ZAddressOffsetMax ? "Unrestricted" : "Restricted"),
+ (reserved == size ? "Complete" : "Degraded"));
+ log_info_p(gc, init)("Address Space Size: " SIZE_FORMAT "M x " SIZE_FORMAT " = " SIZE_FORMAT "M",
+ reserved / M, ZHeapViews, (reserved * ZHeapViews) / M);
return reserved >= max_capacity;
}
@@ -151,14 +151,14 @@ bool ZVirtualMemoryManager::is_initialized() const {
return _initialized;
}
-ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool alloc_from_front) {
+ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool force_low_address) {
uintptr_t start;
- if (alloc_from_front || size <= ZPageSizeSmall) {
- // Small page
+ // Small pages are allocated at low addresses, while medium/large pages
+ // are allocated at high addresses (unless forced to be at a low address).
+ if (force_low_address || size <= ZPageSizeSmall) {
start = _manager.alloc_from_front(size);
} else {
- // Medium/Large page
start = _manager.alloc_from_back(size);
}
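
Renaming alloc_from_front to force_low_address makes the placement policy explicit: small pages always come from the low end of the reserved range and medium/large pages from the high end, so the two size classes cannot fragment each other's address space. A toy sketch of that split over one contiguous range (a hypothetical bump allocator, not the ZMemoryManager implementation):

    #include <cstddef>
    #include <cstdint>

    class RangeAllocator {
      uintptr_t _front;  // next free byte at the low end
      uintptr_t _back;   // one past the last free byte at the high end

     public:
      static const size_t SmallLimit = 2 * 1024 * 1024;  // stand-in for ZPageSizeSmall

      RangeAllocator(uintptr_t start, size_t size)
        : _front(start), _back(start + size) {}

      uintptr_t alloc(size_t size, bool force_low_address) {
        if (size > _back - _front) {
          return UINTPTR_MAX;  // address space exhausted
        }
        if (force_low_address || size <= SmallLimit) {
          const uintptr_t addr = _front;  // small: grow upward from the front
          _front += size;
          return addr;
        }
        _back -= size;  // medium/large: grow downward from the back
        return _back;
      }
    };
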
diff --git a/src/hotspot/share/gc/z/zVirtualMemory.hpp b/src/hotspot/share/gc/z/zVirtualMemory.hpp
index 4f865b7bd9b..f743e7aaee4 100644
--- a/src/hotspot/share/gc/z/zVirtualMemory.hpp
+++ b/src/hotspot/share/gc/z/zVirtualMemory.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,7 @@ public:
bool is_initialized() const;
- ZVirtualMemory alloc(size_t size, bool alloc_from_front = false);
+ ZVirtualMemory alloc(size_t size, bool force_low_address);
void free(const ZVirtualMemory& vmem);
};
diff --git a/src/hotspot/share/gc/z/zWorkers.cpp b/src/hotspot/share/gc/z/zWorkers.cpp
index e7d1d0db7be..128b43a0a24 100644
--- a/src/hotspot/share/gc/z/zWorkers.cpp
+++ b/src/hotspot/share/gc/z/zWorkers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
@@ -69,7 +70,7 @@ ZWorkers::ZWorkers() :
true /* are_GC_task_threads */,
true /* are_ConcurrentGC_threads */) {
- log_info(gc, init)("Workers: %u parallel, %u concurrent", nparallel(), nconcurrent());
+ log_info_p(gc, init)("Workers: %u parallel, %u concurrent", nparallel(), nconcurrent());
// Initialize worker threads
_workers.initialize_workers();
@@ -110,7 +111,3 @@ void ZWorkers::run_concurrent(ZTask* task) {
void ZWorkers::threads_do(ThreadClosure* tc) const {
_workers.threads_do(tc);
}
-
-void ZWorkers::print_threads_on(outputStream* st) const {
- _workers.print_worker_threads_on(st);
-}
diff --git a/src/hotspot/share/gc/z/zWorkers.hpp b/src/hotspot/share/gc/z/zWorkers.hpp
index 329a1a21c42..c677a438c6c 100644
--- a/src/hotspot/share/gc/z/zWorkers.hpp
+++ b/src/hotspot/share/gc/z/zWorkers.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,6 @@ public:
void run_concurrent(ZTask* task);
void threads_do(ThreadClosure* tc) const;
- void print_threads_on(outputStream* st) const;
};
#endif // SHARE_GC_Z_ZWORKERS_HPP
diff --git a/src/hotspot/share/include/jvm.h b/src/hotspot/share/include/jvm.h
index 331924f5939..78d1952729f 100644
--- a/src/hotspot/share/include/jvm.h
+++ b/src/hotspot/share/include/jvm.h
@@ -567,6 +567,11 @@ JVM_IsRecord(JNIEnv *env, jclass cls);
JNIEXPORT jobjectArray JNICALL
JVM_GetRecordComponents(JNIEnv *env, jclass ofClass);
+/* Sealed types - since JDK 15 */
+
+JNIEXPORT jobjectArray JNICALL
+JVM_GetPermittedSubclasses(JNIEnv *env, jclass current);
+
/* The following two reflection routines are still needed due to startup time issues */
/*
* java.lang.reflect.Method
diff --git a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp
index 4c359f07c8e..793ee757879 100644
--- a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp
+++ b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp
@@ -37,7 +37,8 @@
#include "jfr/jfr.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/jni/jfrUpcalls.hpp"
-#include "jfr/support/jfrEventClass.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/support/jfrJdkJfrEvent.hpp"
#include "jfr/utilities/jfrBigEndian.hpp"
#include "jfr/writers/jfrBigEndianWriter.hpp"
#include "logging/log.hpp"
@@ -683,6 +684,10 @@ static u2 position_stream_after_cp(const ClassFileStream* stream) {
}
}
continue;
+ case JVM_CONSTANT_Dynamic:
+ stream->skip_u2_fast(1);
+ stream->skip_u2_fast(1);
+ continue;
default:
assert(false, "error in skip logic!");
break;
@@ -1404,7 +1409,7 @@ static ClassFileStream* create_new_bytes_for_subklass(const InstanceKlass* ik, c
jint size_instrumented_data = 0;
unsigned char* instrumented_data = NULL;
const jclass super = (jclass)JNIHandles::make_local(ik->super()->java_mirror());
- JfrUpcalls::new_bytes_eager_instrumentation(TRACE_ID(ik),
+ JfrUpcalls::new_bytes_eager_instrumentation(JfrTraceId::load_raw(ik),
force_instrumentation,
super,
size_of_new_bytes,
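
The added JVM_CONSTANT_Dynamic case fixes a hole in the constant-pool skip logic: a CONSTANT_Dynamic_info entry (tag 17, introduced for condy) carries exactly two u2 operands, bootstrap_method_attr_index and name_and_type_index, so positioning past it means skipping two u2 reads, just like CONSTANT_InvokeDynamic. A minimal sketch of the entry layout (the cursor type is hypothetical):

    #include <cstdint>

    // CONSTANT_Dynamic_info layout in the class file:
    //   u1 tag;                           // 17
    //   u2 bootstrap_method_attr_index;
    //   u2 name_and_type_index;
    struct Cursor {
      const uint8_t* pos;
      void skip_u2(int n) { pos += 2 * n; }
    };

    // After the tag byte has been read, skip both operands, matching the
    // two skip_u2_fast(1) calls the transformer now performs.
    static void skip_constant_dynamic(Cursor& c) {
      c.skip_u2(1);  // bootstrap_method_attr_index
      c.skip_u2(1);  // name_and_type_index
    }
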
diff --git a/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp b/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp
index ef481d50775..100d9deb371 100644
--- a/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp
+++ b/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
#include "jfr/jni/jfrUpcalls.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
-#include "jfr/support/jfrEventClass.hpp"
+#include "jfr/support/jfrJdkJfrEvent.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvmtiEnvBase.hpp"
@@ -91,7 +91,7 @@ extern "C" void JNICALL jfr_on_class_file_load_hook(jvmtiEnv *jvmti_env,
JavaThread* jt = JavaThread::thread_from_jni_environment(jni_env);
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
ThreadInVMfromNative tvmfn(jt);
- JfrUpcalls::on_retransform(JfrTraceId::get(class_being_redefined),
+ JfrUpcalls::on_retransform(JfrTraceId::load_raw(class_being_redefined),
class_being_redefined,
class_data_len,
class_data,
diff --git a/src/hotspot/share/jfr/jfr.cpp b/src/hotspot/share/jfr/jfr.cpp
index ecd535e5ddb..406f811491f 100644
--- a/src/hotspot/share/jfr/jfr.cpp
+++ b/src/hotspot/share/jfr/jfr.cpp
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "jfr/jfr.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
-#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/repository/jfrEmergencyDump.hpp"
@@ -33,6 +32,7 @@
#include "jfr/recorder/repository/jfrRepository.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "runtime/java.hpp"
+#include "runtime/thread.hpp"
bool Jfr::is_enabled() {
return JfrRecorder::is_enabled();
@@ -66,7 +66,7 @@ void Jfr::on_create_vm_3() {
void Jfr::on_unloading_classes() {
if (JfrRecorder::is_created()) {
- JfrCheckpointManager::write_type_set_for_unloaded_classes();
+ JfrCheckpointManager::on_unloading_classes();
}
}
diff --git a/src/hotspot/share/jfr/jni/jfrJniMethod.cpp b/src/hotspot/share/jfr/jni/jfrJniMethod.cpp
index 39f3b5eb8b7..5e481740399 100644
--- a/src/hotspot/share/jfr/jni/jfrJniMethod.cpp
+++ b/src/hotspot/share/jfr/jni/jfrJniMethod.cpp
@@ -38,12 +38,13 @@
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/recorder/stringpool/jfrStringPool.hpp"
-#include "jfr/jni/jfrGetAllEventClasses.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/jni/jfrJniMethodRegistration.hpp"
#include "jfr/instrumentation/jfrEventClassTransformer.hpp"
#include "jfr/instrumentation/jfrJvmtiAgent.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/support/jfrJdkJfrEvent.hpp"
+#include "jfr/support/jfrKlassUnloading.hpp"
#include "jfr/utilities/jfrJavaLog.hpp"
#include "jfr/utilities/jfrTimeConverter.hpp"
#include "jfr/utilities/jfrTime.hpp"
@@ -163,7 +164,7 @@ NO_TRANSITION(jlong, jfr_get_epoch_address(JNIEnv* env, jobject jvm))
NO_TRANSITION_END
NO_TRANSITION(jlong, jfr_get_unloaded_event_classes_count(JNIEnv* env, jobject jvm))
- return JfrEventClasses::unloaded_event_classes_count();
+ return JfrKlassUnloading::event_class_count();
NO_TRANSITION_END
NO_TRANSITION(jdouble, jfr_time_conv_factor(JNIEnv* env, jobject jvm))
@@ -234,11 +235,11 @@ JVM_ENTRY_NO_ENV(jboolean, jfr_emit_event(JNIEnv* env, jobject jvm, jlong eventT
JVM_END
JVM_ENTRY_NO_ENV(jobject, jfr_get_all_event_classes(JNIEnv* env, jobject jvm))
- return JfrEventClasses::get_all_event_classes(thread);
+ return JdkJfrEvent::get_all_klasses(thread);
JVM_END
JVM_ENTRY_NO_ENV(jlong, jfr_class_id(JNIEnv* env, jclass jvm, jclass jc))
- return JfrTraceId::use(jc);
+ return JfrTraceId::load(jc);
JVM_END
JVM_ENTRY_NO_ENV(jlong, jfr_stacktrace_id(JNIEnv* env, jobject jvm, jint skip))
@@ -311,7 +312,7 @@ JVM_ENTRY_NO_ENV(void, jfr_abort(JNIEnv* env, jobject jvm, jstring errorMsg))
JVM_END
JVM_ENTRY_NO_ENV(jlong, jfr_type_id(JNIEnv* env, jobject jvm, jclass jc))
- return JfrTraceId::get(jc);
+ return JfrTraceId::load_raw(jc);
JVM_END
JVM_ENTRY_NO_ENV(jboolean, jfr_add_string_constant(JNIEnv* env, jclass jvm, jboolean epoch, jlong id, jstring string))
diff --git a/src/hotspot/share/jfr/jni/jfrUpcalls.cpp b/src/hotspot/share/jfr/jni/jfrUpcalls.cpp
index 7b2177c903b..43c52c6ef81 100644
--- a/src/hotspot/share/jfr/jni/jfrUpcalls.cpp
+++ b/src/hotspot/share/jfr/jni/jfrUpcalls.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "classfile/systemDictionary.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/jni/jfrUpcalls.hpp"
-#include "jfr/support/jfrEventClass.hpp"
+#include "jfr/support/jfrJdkJfrEvent.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "oops/oop.inline.hpp"
diff --git a/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp b/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp
index 5f12a146d52..84dd64aa30a 100644
--- a/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,8 @@
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
-#include "classfile/systemDictionary.hpp"
+#include "gc/shared/oopStorage.inline.hpp"
+#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
@@ -77,7 +78,7 @@ void RootSetClosure<Delegate>::process() {
Universe::oops_do(this);
JNIHandles::oops_do(this);
JvmtiExport::oops_do(this);
- SystemDictionary::oops_do(this);
+ OopStorageSet::vm_global()->oops_do(this);
Management::oops_do(this);
AOTLoader::oops_do(this);
}
diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
index bc1e6108e8d..1ac1ebf54e4 100644
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
@@ -35,36 +35,15 @@
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/support/jfrKlassUnloading.hpp"
#include "jfr/support/jfrMethodLookup.hpp"
#include "jfr/utilities/jfrHashtable.hpp"
-#include "jfr/utilities/jfrTypes.hpp"
+#include "jfr/utilities/jfrPredicate.hpp"
+#include "jfr/utilities/jfrRelation.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
-#include "utilities/growableArray.hpp"
-
-static bool predicate(GrowableArray<traceid>* set, traceid id) {
- assert(set != NULL, "invariant");
- bool found = false;
- set->find_sorted(id, found);
- return found;
-}
-
-static bool mutable_predicate(GrowableArray<traceid>* set, traceid id) {
- assert(set != NULL, "invariant");
- bool found = false;
- const int location = set->find_sorted(id, found);
- if (!found) {
- set->insert_before(location, id);
- }
- return found;
-}
-
-static bool add(GrowableArray<traceid>* set, traceid id) {
- assert(set != NULL, "invariant");
- return mutable_predicate(set, id);
-}
const int initial_array_size = 64;
@@ -87,7 +66,12 @@ Semaphore ThreadIdExclusiveAccess::_mutex_semaphore(1);
static bool has_thread_exited(traceid tid) {
assert(tid != 0, "invariant");
- return unloaded_thread_id_set != NULL && predicate(unloaded_thread_id_set, tid);
+ return unloaded_thread_id_set != NULL && JfrPredicate<traceid, compare_traceid>::test(unloaded_thread_id_set, tid);
+}
+
+static bool add(GrowableArray<traceid>* set, traceid id) {
+ assert(set != NULL, "invariant");
+ return JfrMutablePredicate<traceid, compare_traceid>::test(set, id);
}
static void add_to_unloaded_thread_set(traceid tid) {
@@ -105,31 +89,6 @@ void ObjectSampleCheckpoint::on_thread_exit(JavaThread* jt) {
}
}
-// Track the set of unloaded klasses during a chunk / epoch.
-// Methods in stacktraces belonging to unloaded klasses must not be accessed.
-static GrowableArray<traceid>* unloaded_klass_set = NULL;
-
-static void add_to_unloaded_klass_set(traceid klass_id) {
- assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
- if (unloaded_klass_set == NULL) {
- unloaded_klass_set = c_heap_allocate_array();
- }
- unloaded_klass_set->append(klass_id);
-}
-
-static void sort_unloaded_klass_set() {
- assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
- if (unloaded_klass_set != NULL && unloaded_klass_set->length() > 1) {
- unloaded_klass_set->sort(sort_traceid);
- }
-}
-
-void ObjectSampleCheckpoint::on_klass_unload(const Klass* k) {
- assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
- assert(k != NULL, "invariant");
- add_to_unloaded_klass_set(JfrTraceId::get(k));
-}
-
template <typename Processor>
static void do_samples(ObjectSample* sample, const ObjectSample* end, Processor& processor) {
assert(sample != NULL, "invariant");
@@ -228,7 +187,6 @@ static GrowableArray<traceid>* id_set = NULL;
static void prepare_for_resolution() {
id_set = new GrowableArray<traceid>(JfrOptionSet::old_object_queue_size());
- sort_unloaded_klass_set();
}
static bool stack_trace_precondition(const ObjectSample* sample) {
@@ -290,6 +248,7 @@ static void install_stack_traces(const ObjectSampler* sampler, JfrStackTraceRepo
assert(sampler != NULL, "invariant");
const ObjectSample* const last = sampler->last();
if (last != sampler->last_resolved()) {
+ JfrKlassUnloading::sort();
StackTraceBlobInstaller installer(stack_trace_repo);
iterate_samples(installer);
}
@@ -307,13 +266,13 @@ void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler, JfrStackT
static bool is_klass_unloaded(traceid klass_id) {
assert(ClassLoaderDataGraph_lock->owned_by_self(), "invariant");
- return unloaded_klass_set != NULL && predicate(unloaded_klass_set, klass_id);
+ return JfrKlassUnloading::is_unloaded(klass_id);
}
static bool is_processed(traceid method_id) {
assert(method_id != 0, "invariant");
assert(id_set != NULL, "invariant");
- return mutable_predicate(id_set, method_id);
+ return JfrMutablePredicate<traceid, compare_traceid>::test(id_set, method_id);
}
void ObjectSampleCheckpoint::add_to_leakp_set(const InstanceKlass* ik, traceid method_id) {
@@ -324,7 +283,7 @@ void ObjectSampleCheckpoint::add_to_leakp_set(const InstanceKlass* ik, traceid m
const Method* const method = JfrMethodLookup::lookup(ik, method_id);
assert(method != NULL, "invariant");
assert(method->method_holder() == ik, "invariant");
- JfrTraceId::set_leakp(ik, method);
+ JfrTraceId::load_leakp(ik, method);
}
void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer) {
@@ -419,13 +378,6 @@ void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge
}
}
-static void clear_unloaded_klass_set() {
- assert(ClassLoaderDataGraph_lock->owned_by_self(), "invariant");
- if (unloaded_klass_set != NULL && unloaded_klass_set->is_nonempty()) {
- unloaded_klass_set->clear();
- }
-}
-
// A linked list of saved type set blobs for the epoch.
// The link consists of a reference counted handle.
static JfrBlobHandle saved_type_set_blobs;
@@ -433,7 +385,6 @@ static JfrBlobHandle saved_type_set_blobs;
static void release_state_for_previous_epoch() {
// decrements the reference count and the list is reinitialized
saved_type_set_blobs = JfrBlobHandle();
- clear_unloaded_klass_set();
}
class BlobInstaller {
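
The removed predicate/mutable_predicate helpers performed a membership test on a sorted GrowableArray, with the mutable variant inserting the id at its sorted position when absent; that logic now lives behind JfrPredicate and JfrMutablePredicate. A self-contained sketch of the insert-if-absent idiom (std::vector standing in for GrowableArray):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    typedef uint64_t traceid;

    // Returns true if id was already present; otherwise inserts it at its
    // sorted position, keeping the set ordered for later binary searches.
    static bool test_and_insert(std::vector<traceid>& set, traceid id) {
      std::vector<traceid>::iterator it =
          std::lower_bound(set.begin(), set.end(), id);
      const bool found = (it != set.end() && *it == id);
      if (!found) {
        set.insert(it, id);
      }
      return found;
    }
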
diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp
index c8cd972400e..ff08d9a5336 100644
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp
@@ -50,7 +50,6 @@ class ObjectSampleCheckpoint : AllStatic {
static void write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer);
static void write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
public:
- static void on_klass_unload(const Klass* k);
static void on_type_set(JfrCheckpointWriter& writer);
static void on_type_set_unload(JfrCheckpointWriter& writer);
static void on_thread_exit(JavaThread* jt);
diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp
index f6a71583270..61a24949918 100644
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,8 @@
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
+#include "gc/shared/oopStorage.inline.hpp"
+#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "jfr/leakprofiler/checkpoint/rootResolver.hpp"
@@ -98,7 +100,7 @@ class ReferenceToRootClosure : public StackObj {
bool do_universe_roots();
bool do_jni_handle_roots();
bool do_jvmti_roots();
- bool do_system_dictionary_roots();
+ bool do_vm_global_roots();
bool do_management_roots();
bool do_string_table_roots();
bool do_aot_loader_roots();
@@ -160,10 +162,10 @@ bool ReferenceToRootClosure::do_jvmti_roots() {
return rlc.complete();
}
-bool ReferenceToRootClosure::do_system_dictionary_roots() {
+bool ReferenceToRootClosure::do_vm_global_roots() {
assert(!complete(), "invariant");
- ReferenceLocateClosure rlc(_callback, OldObjectRoot::_system_dictionary, OldObjectRoot::_type_undetermined, NULL);
- SystemDictionary::oops_do(&rlc);
+ ReferenceLocateClosure rlc(_callback, OldObjectRoot::_vm_global, OldObjectRoot::_type_undetermined, NULL);
+ OopStorageSet::vm_global()->oops_do(&rlc);
return rlc.complete();
}
@@ -211,7 +213,7 @@ bool ReferenceToRootClosure::do_roots() {
return true;
}
- if (do_system_dictionary_roots()) {
+ if (do_vm_global_roots()) {
_complete = true;
return true;
}
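
With SystemDictionary::oops_do gone, the strong roots formerly reported as "System Dictionary" are reached through the vm_global OopStorage and walked with an ordinary OopClosure. A HotSpot-internal sketch of such a walk (the counting closure is hypothetical; the storage accessor is the one used in the hunk above):

    #include "gc/shared/oopStorage.inline.hpp"
    #include "gc/shared/oopStorageSet.hpp"
    #include "memory/iterator.hpp"
    #include "utilities/debug.hpp"

    // Hypothetical closure: count non-NULL oops held in vm_global storage.
    class CountOopsClosure : public OopClosure {
      size_t _count;
     public:
      CountOopsClosure() : _count(0) {}
      virtual void do_oop(oop* p)       { if (*p != NULL) _count++; }
      virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
      size_t count() const { return _count; }
    };

    // Usage, mirroring do_vm_global_roots() above:
    //   CountOopsClosure cl;
    //   OopStorageSet::vm_global()->oops_do(&cl);
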
diff --git a/src/hotspot/share/jfr/leakprofiler/utilities/rootType.hpp b/src/hotspot/share/jfr/leakprofiler/utilities/rootType.hpp
index 2d824a9859d..c8129a4ec41 100644
--- a/src/hotspot/share/jfr/leakprofiler/utilities/rootType.hpp
+++ b/src/hotspot/share/jfr/leakprofiler/utilities/rootType.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,7 @@ class OldObjectRoot : public AllStatic {
_global_jni_handles,
_threads,
_object_synchronizer,
- _system_dictionary,
+ _vm_global,
_class_loader_data,
_management,
_jvmti,
@@ -67,8 +67,8 @@ class OldObjectRoot : public AllStatic {
return "Threads";
case _object_synchronizer:
return "Object Monitor";
- case _system_dictionary:
- return "System Dictionary";
+ case _vm_global:
+ return "VM Global";
case _class_loader_data:
return "Class Loader Data";
case _management:
diff --git a/src/hotspot/share/jfr/metadata/metadata.xml b/src/hotspot/share/jfr/metadata/metadata.xml
index d9704d1a2d4..8c4f23ad89f 100644
--- a/src/hotspot/share/jfr/metadata/metadata.xml
+++ b/src/hotspot/share/jfr/metadata/metadata.xml
@@ -890,6 +890,7 @@
+      [one XML field element added (the sweepThreshold field set by jfrPeriodic.cpp below); markup stripped in extraction]
@@ -1001,18 +1002,13 @@
-      [three XML event/field elements removed; markup stripped in extraction]
+      [three XML event/field elements added; markup stripped in extraction]
-      [five XML event/field elements removed; markup stripped in extraction]
@@ -1046,11 +1042,13 @@
-      [two XML event/field elements removed; markup stripped in extraction]
+      [four XML event/field elements added; markup stripped in extraction]
diff --git a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp
index 72f102d0bf4..f29394b88f5 100644
--- a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp
+++ b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp
@@ -617,6 +617,7 @@ TRACE_REQUEST_FUNC(CodeSweeperConfiguration) {
EventCodeSweeperConfiguration event;
event.set_sweeperEnabled(MethodFlushing);
event.set_flushingEnabled(UseCodeCacheFlushing);
+ event.set_sweepThreshold(NMethodSweeper::sweep_threshold_bytes());
event.commit();
}
diff --git a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp
index 4a9788240bf..55e94860258 100644
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp
@@ -36,8 +36,10 @@
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
#include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
+#include "jfr/support/jfrKlassUnloading.hpp"
#include "jfr/utilities/jfrBigEndian.hpp"
#include "jfr/utilities/jfrIterator.hpp"
+#include "jfr/utilities/jfrLinkedList.inline.hpp"
#include "jfr/utilities/jfrThreadIterator.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "jfr/writers/jfrJavaEventWriter.hpp"
@@ -50,9 +52,7 @@
#include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
-typedef JfrCheckpointManager::Buffer* BufferPtr;
-
-static JfrCheckpointManager* _instance = NULL;
+typedef JfrCheckpointManager::BufferPtr BufferPtr;
static volatile bool constant_pending = false;
@@ -70,6 +70,8 @@ static void set_constant_pending() {
}
}
+static JfrCheckpointManager* _instance = NULL;
+
JfrCheckpointManager& JfrCheckpointManager::instance() {
return *_instance;
}
@@ -87,79 +89,54 @@ void JfrCheckpointManager::destroy() {
}
JfrCheckpointManager::JfrCheckpointManager(JfrChunkWriter& cw) :
- _free_list_mspace(NULL),
- _epoch_transition_mspace(NULL),
- _lock(NULL),
- _service_thread(NULL),
- _chunkwriter(cw),
- _checkpoint_epoch_state(JfrTraceIdEpoch::epoch()) {}
+ _mspace(NULL),
+ _chunkwriter(cw) {}
JfrCheckpointManager::~JfrCheckpointManager() {
- if (_free_list_mspace != NULL) {
- delete _free_list_mspace;
- }
- if (_epoch_transition_mspace != NULL) {
- delete _epoch_transition_mspace;
- }
- if (_lock != NULL) {
- delete _lock;
- }
+ JfrTraceIdLoadBarrier::destroy();
JfrTypeManager::destroy();
+ delete _mspace;
}
-static const size_t unlimited_mspace_size = 0;
-static const size_t checkpoint_buffer_cache_count = 2;
-static const size_t checkpoint_buffer_size = 512 * K;
+static const size_t buffer_count = 2;
+static const size_t buffer_size = 512 * K;
-static JfrCheckpointMspace* allocate_mspace(size_t size, size_t limit, size_t cache_count, JfrCheckpointManager* mgr) {
- return create_mspace<JfrCheckpointMspace, JfrCheckpointManager>(size, limit, cache_count, mgr);
+static JfrCheckpointMspace* allocate_mspace(size_t min_elem_size,
+ size_t free_list_cache_count_limit,
+ size_t cache_prealloc_count,
+ bool prealloc_to_free_list,
+ JfrCheckpointManager* mgr) {
+ return create_mspace<JfrCheckpointMspace, JfrCheckpointManager>(min_elem_size,
+ free_list_cache_count_limit,
+ cache_prealloc_count,
+ prealloc_to_free_list,
+ mgr);
}
bool JfrCheckpointManager::initialize() {
- assert(_free_list_mspace == NULL, "invariant");
- _free_list_mspace = allocate_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
- if (_free_list_mspace == NULL) {
+ assert(_mspace == NULL, "invariant");
+ _mspace = allocate_mspace(buffer_size, 0, 0, false, this); // postpone preallocation
+ if (_mspace == NULL) {
return false;
}
- assert(_epoch_transition_mspace == NULL, "invariant");
- _epoch_transition_mspace = allocate_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this);
- if (_epoch_transition_mspace == NULL) {
- return false;
+ // Preallocate buffer_count buffers for each of the two epoch live lists
+ for (size_t i = 0; i < buffer_count * 2; ++i) {
+ Buffer* const buffer = mspace_allocate(buffer_size, _mspace);
+ _mspace->add_to_live_list(buffer, i % 2 == 0);
}
- assert(_lock == NULL, "invariant");
- _lock = new Mutex(Monitor::leaf - 1, "Checkpoint mutex", Mutex::_allow_vm_block_flag, Monitor::_safepoint_check_never);
- if (_lock == NULL) {
- return false;
- }
- return JfrTypeManager::initialize();
+ assert(_mspace->free_list_is_empty(), "invariant");
+ return JfrTypeManager::initialize() && JfrTraceIdLoadBarrier::initialize();
}
-void JfrCheckpointManager::register_service_thread(const Thread* thread) {
- _service_thread = thread;
-}
-
-void JfrCheckpointManager::register_full(BufferPtr t, Thread* thread) {
+void JfrCheckpointManager::register_full(BufferPtr buffer, Thread* thread) {
// nothing here at the moment
- assert(t != NULL, "invariant");
- assert(t->acquired_by(thread), "invariant");
- assert(t->retired(), "invariant");
-}
-
-void JfrCheckpointManager::lock() {
- assert(!_lock->owned_by_self(), "invariant");
- _lock->lock_without_safepoint_check();
-}
-
-void JfrCheckpointManager::unlock() {
- _lock->unlock();
+ assert(buffer != NULL, "invariant");
+ assert(buffer->acquired_by(thread), "invariant");
+ assert(buffer->retired(), "invariant");
}
#ifdef ASSERT
-bool JfrCheckpointManager::is_locked() const {
- return _lock->owned_by_self();
-}
-
-static void assert_free_lease(const BufferPtr buffer) {
+static void assert_lease(const BufferPtr buffer) {
assert(buffer != NULL, "invariant");
assert(buffer->acquired_by_self(), "invariant");
assert(buffer->lease(), "invariant");
@@ -172,45 +149,36 @@ static void assert_release(const BufferPtr buffer) {
}
#endif // ASSERT
-static BufferPtr lease_free(size_t size, JfrCheckpointMspace* mspace, size_t retry_count, Thread* thread) {
- static const size_t max_elem_size = mspace->min_elem_size(); // min is max
+static BufferPtr lease(size_t size, JfrCheckpointMspace* mspace, size_t retry_count, Thread* thread, bool previous_epoch) {
+ assert(mspace != NULL, "invariant");
+ static const size_t max_elem_size = mspace->min_element_size(); // min is max
BufferPtr buffer;
if (size <= max_elem_size) {
- BufferPtr buffer = mspace_get_free_lease_with_retry(size, mspace, retry_count, thread);
+ buffer = mspace_acquire_lease_with_retry(size, mspace, retry_count, thread, previous_epoch);
if (buffer != NULL) {
- DEBUG_ONLY(assert_free_lease(buffer);)
+ DEBUG_ONLY(assert_lease(buffer);)
return buffer;
}
}
- buffer = mspace_allocate_transient_lease_to_free(size, mspace, thread);
- DEBUG_ONLY(assert_free_lease(buffer);)
+ buffer = mspace_allocate_transient_lease_to_live_list(size, mspace, thread, previous_epoch);
+ DEBUG_ONLY(assert_lease(buffer);)
return buffer;
}
-bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
- return _service_thread != thread && Atomic::load_acquire(&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch();
+static const size_t lease_retry = 100;
+
+BufferPtr JfrCheckpointManager::lease(Thread* thread, bool previous_epoch /* false */, size_t size /* 0 */) {
+ return ::lease(size, instance()._mspace, lease_retry, thread, previous_epoch);
}
-static const size_t lease_retry = 10;
-
-BufferPtr JfrCheckpointManager::lease_buffer(Thread* thread, size_t size /* 0 */) {
- JfrCheckpointManager& manager = instance();
- if (manager.use_epoch_transition_mspace(thread)) {
- return lease_free(size, manager._epoch_transition_mspace, lease_retry, thread);
- }
- return lease_free(size, manager._free_list_mspace, lease_retry, thread);
-}
-
-JfrCheckpointMspace* JfrCheckpointManager::lookup(BufferPtr old) const {
+bool JfrCheckpointManager::lookup(BufferPtr old) const {
assert(old != NULL, "invariant");
- return _free_list_mspace->in_free_list(old) ? _free_list_mspace : _epoch_transition_mspace;
+ return !_mspace->in_current_epoch_list(old);
}
-BufferPtr JfrCheckpointManager::lease_buffer(BufferPtr old, Thread* thread, size_t size /* 0 */) {
+BufferPtr JfrCheckpointManager::lease(BufferPtr old, Thread* thread, size_t size /* 0 */) {
assert(old != NULL, "invariant");
- JfrCheckpointMspace* mspace = instance().lookup(old);
- assert(mspace != NULL, "invariant");
- return lease_free(size, mspace, lease_retry, thread);
+ return ::lease(size, instance()._mspace, lease_retry, thread, instance().lookup(old));
}
/*
@@ -219,10 +187,14 @@ BufferPtr JfrCheckpointManager::lease_buffer(BufferPtr old, Thread* thread, size
* The buffer is effectively invalidated for the thread post-return,
* and the caller should take means to ensure that it is not referenced.
*/
-static void release(BufferPtr const buffer, Thread* thread) {
+static void release(BufferPtr buffer, Thread* thread) {
DEBUG_ONLY(assert_release(buffer);)
buffer->clear_lease();
- buffer->release();
+ if (buffer->transient()) {
+ buffer->set_retired();
+ } else {
+ buffer->release();
+ }
}
BufferPtr JfrCheckpointManager::flush(BufferPtr old, size_t used, size_t requested, Thread* thread) {
@@ -235,7 +207,7 @@ BufferPtr JfrCheckpointManager::flush(BufferPtr old, size_t used, size_t request
return NULL;
}
// migration of in-flight information
- BufferPtr const new_buffer = lease_buffer(old, thread, used + requested);
+ BufferPtr const new_buffer = lease(old, thread, used + requested);
if (new_buffer != NULL) {
migrate_outstanding_writes(old, new_buffer, used, requested);
}
@@ -335,20 +307,10 @@ class CheckpointWriteOp {
size_t processed() const { return _processed; }
};
-typedef CheckpointWriteOp<JfrCheckpointMspace::Type> WriteOperation;
-typedef ReleaseOp<JfrCheckpointMspace> CheckpointReleaseOperation;
-
-template <template <typename> class WriterHost, template <typename, typename> class CompositeOperation>
-static size_t write_mspace(JfrCheckpointMspace* mspace, JfrChunkWriter& chunkwriter) {
- assert(mspace != NULL, "invariant");
- WriteOperation wo(chunkwriter);
- WriterHost