diff --git a/.hgtags b/.hgtags
index 93926ee960b..0a1b78991e9 100644
--- a/.hgtags
+++ b/.hgtags
@@ -459,3 +459,4 @@ e6278add9ff28fab70fe1cc4c1d65f7363dc9445 jdk-10+31
a2008587c13fa05fa2dbfcb09fe987576fbedfd1 jdk-10+32
bbd692ad4fa300ecca7939ffbe3b1d5e52a28cc6 jdk-10+33
89deac44e51517841491ba86ff44aa82a5ca96b3 jdk-10+34
+d8c634b016c628622c9abbdc6bf50509e5dedbec jdk-10+35
diff --git a/doc/testing.html b/doc/testing.html
index d71f7e569e8..be4c23302bd 100644
--- a/doc/testing.html
+++ b/doc/testing.html
@@ -57,6 +57,7 @@ $ make exploded-run-test TEST=hotspot_tier1
Since the Hotspot Gtest suite is so quick, the default is to run all tests. This is specified by just gtest, or as a fully qualified test descriptor gtest:all.
If you want, you can single out an individual test or a group of tests, for instance gtest:LogDecorations or gtest:LogDecorations.level_test_vm. This can be particularly useful if you want to run a shaky test repeatedly.
For Gtest, there is a separate test suite for each JVM variant. The JVM variant is defined by adding /<variant> to the test descriptor, e.g. gtest:Log/client. If you specify no variant, gtest will run once for each JVM variant present (e.g. server, client). So if you only have the server JVM present, then gtest:all will be equivalent to gtest:all/server.
At the end of the test run, a summary of all tests run will be presented. This will have a consistent look, regardless of what test suites were used. This is a sample summary:
==============================
diff --git a/doc/testing.md b/doc/testing.md
index ffd685ff516..da9b8ca7624 100644
--- a/doc/testing.md
+++ b/doc/testing.md
@@ -81,6 +81,12 @@ If you want, you can single out an individual test or a group of tests, for
instance `gtest:LogDecorations` or `gtest:LogDecorations.level_test_vm`. This
can be particularly useful if you want to run a shaky test repeatedly.
+For Gtest, there is a separate test suite for each JVM variant. The JVM variant
+is defined by adding `/<variant>` to the test descriptor, e.g.
+`gtest:Log/client`. If you specify no variant, gtest will run once for each JVM
+variant present (e.g. server, client). So if you only have the server JVM
+present, then `gtest:all` will be equivalent to `gtest:all/server`.
+
## Test results and summary
At the end of the test run, a summary of all tests run will be presented. This
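(A hedged illustration, not part of the patch itself: with both a server and a client JVM present in the test image, the variant selection described above could be exercised like this.)

    $ make run-test TEST=gtest                           # all Gtests, once per built JVM variant
    $ make run-test TEST=gtest:LogDecorations/client     # one test group, client variant only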
diff --git a/make/CompileInterimLangtools.gmk b/make/CompileInterimLangtools.gmk
index afdb14a936a..b8ff3af48ad 100644
--- a/make/CompileInterimLangtools.gmk
+++ b/make/CompileInterimLangtools.gmk
@@ -69,8 +69,8 @@ define SetupInterimModule
Standard.java, \
EXTRA_FILES := $(BUILDTOOLS_OUTPUTDIR)/gensrc/$1.interim/module-info.java, \
COPY := .gif .png .xml .css .js javax.tools.JavaCompilerTool, \
- BIN := $(BUILDTOOLS_OUTPUTDIR)/interim_modules/$1.interim, \
- ADD_JAVAC_FLAGS := --module-path $(BUILDTOOLS_OUTPUTDIR)/interim_modules \
+ BIN := $(BUILDTOOLS_OUTPUTDIR)/interim_langtools_modules/$1.interim, \
+ ADD_JAVAC_FLAGS := --module-path $(BUILDTOOLS_OUTPUTDIR)/interim_langtools_modules \
$$(INTERIM_LANGTOOLS_ADD_EXPORTS) \
-Xlint:-module, \
))
diff --git a/make/CompileInterimRmic.gmk b/make/CompileInterimRmic.gmk
index acc02c34bc8..6127e930c09 100644
--- a/make/CompileInterimRmic.gmk
+++ b/make/CompileInterimRmic.gmk
@@ -65,10 +65,10 @@ $(eval $(call SetupJavaCompilation, BUILD_jdk.rmic.interim, \
EXCLUDE_FILES := $(TOPDIR)/src/jdk.rmic/share/classes/module-info.java, \
EXTRA_FILES := $(BUILDTOOLS_OUTPUTDIR)/gensrc/jdk.rmic.interim/module-info.java, \
INCLUDES := $(RMIC_PKGS), \
- BIN := $(BUILDTOOLS_OUTPUTDIR)/interim_modules/jdk.rmic.interim, \
+ BIN := $(BUILDTOOLS_OUTPUTDIR)/interim_rmic_modules/jdk.rmic.interim, \
COPY := .properties, \
ADD_JAVAC_FLAGS := \
- --module-path $(BUILDTOOLS_OUTPUTDIR)/interim_modules \
+ --module-path $(BUILDTOOLS_OUTPUTDIR)/interim_rmic_modules \
--add-modules java.corba \
--add-exports java.corba/com.sun.corba.se.impl.util=jdk.rmic.interim \
$(INTERIM_RMIC_ADD_EXPORTS), \
diff --git a/make/CreateJmods.gmk b/make/CreateJmods.gmk
index d1e05718dcf..0b530bb8b6b 100644
--- a/make/CreateJmods.gmk
+++ b/make/CreateJmods.gmk
@@ -1,5 +1,4 @@
-
-# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -33,6 +32,8 @@ ifeq ($(MODULE), )
$(error MODULE must be set when calling CreateJmods.gmk)
endif
+$(eval $(call IncludeCustomExtension, CreateJmods.gmk))
+
################################################################################
JMODS_DIR := $(IMAGES_OUTPUTDIR)/jmods
diff --git a/make/Help.gmk b/make/Help.gmk
index e7f0bfa29de..b6974c71de4 100644
--- a/make/Help.gmk
+++ b/make/Help.gmk
@@ -115,6 +115,13 @@ print-configurations:
# We need a dummy rule otherwise make will complain
@true
-ALL_GLOBAL_TARGETS := help print-configurations
+# This is not really a "help" target, but it is a global target, and those are
+# all contained in this file.
+run-test-prebuilt:
+ @( cd $(topdir) && \
+ $(MAKE) --no-print-directory -r -R -I make/common/ -f make/RunTestsPrebuilt.gmk \
+ run-test-prebuilt TEST="$(TEST)" )
+
+ALL_GLOBAL_TARGETS := help print-configurations run-test-prebuilt
.PHONY: $(ALL_GLOBAL_TARGETS)
diff --git a/make/InitSupport.gmk b/make/InitSupport.gmk
index f4605766ccf..48af7838450 100644
--- a/make/InitSupport.gmk
+++ b/make/InitSupport.gmk
@@ -279,7 +279,9 @@ ifeq ($(HAS_SPEC),)
# generated files.
ifeq ($$(MAKE_RESTARTS),)
ifeq ($$(words $$(matching_confs)), 1)
- $$(info Building configuration '$$(matching_confs)' (matching CONF=$$(CONF)))
+ ifneq ($$(findstring $$(LOG_LEVEL), info debug trace),)
+ $$(info Building configuration '$$(matching_confs)' (matching CONF=$$(CONF)))
+ endif
else
$$(info Building these configurations (matching CONF=$$(CONF)):)
$$(foreach var, $$(matching_confs), $$(info * $$(var)))
diff --git a/make/RunTests.gmk b/make/RunTests.gmk
index 1153bae8f02..81cb8ff2ba8 100644
--- a/make/RunTests.gmk
+++ b/make/RunTests.gmk
@@ -88,6 +88,9 @@ ifneq ($(wildcard $(JTREG_FAILURE_HANDLER)), )
-timeoutHandlerTimeout:0
endif
+GTEST_LAUNCHER_DIRS := $(patsubst %/gtestLauncher, %, $(wildcard $(TEST_IMAGE_DIR)/hotspot/gtest/*/gtestLauncher))
+GTEST_VARIANTS := $(strip $(patsubst $(TEST_IMAGE_DIR)/hotspot/gtest/%, %, $(GTEST_LAUNCHER_DIRS)))
+
################################################################################
# Parse control variables
################################################################################
@@ -166,16 +169,23 @@ hotspot_JTREG_PROBLEM_LIST += $(TOPDIR)/test/hotspot/jtreg/ProblemList.txt
# Helper function to determine if a test specification is a Gtest test
#
# It is a Gtest test if it is either "gtest", or "gtest:" followed by an optional
-# test filter string.
+# test filter string, and an optional "/<variant>" to select a specific JVM
+# variant. If no variant is specified, all found variants are tested.
define ParseGtestTestSelection
$(if $(filter gtest%, $1), \
$(if $(filter gtest, $1), \
- gtest:all \
+ $(addprefix gtest:all/, $(GTEST_VARIANTS)) \
, \
- $(if $(filter gtest:, $1), \
- gtest:all \
+ $(if $(strip $(or $(filter gtest/%, $1) $(filter gtest:/%, $1))), \
+ $(patsubst gtest:/%, gtest:all/%, $(patsubst gtest/%, gtest:/%, $1)) \
, \
- $1 \
+ $(if $(filter gtest:%, $1), \
+ $(if $(findstring /, $1), \
+ $1 \
+ , \
+ $(addprefix $1/, $(GTEST_VARIANTS)) \
+ ) \
+ ) \
) \
) \
)
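(An illustrative trace, not part of the patch: assuming the test image contains server and client launchers, so that GTEST_VARIANTS is "server client", the selection function above expands roughly as follows.)

    # $(call ParseGtestTestSelection, gtest)             ->  gtest:all/server gtest:all/client
    # $(call ParseGtestTestSelection, gtest:Log)         ->  gtest:Log/server gtest:Log/client
    # $(call ParseGtestTestSelection, gtest:Log/client)  ->  gtest:Log/client
    # $(call ParseGtestTestSelection, gtest/server)      ->  gtest:all/server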
@@ -253,6 +263,15 @@ define ParseJtregTestSelection
)
endef
+# Helper function to determine if a test specification is a special test
+#
+# It is a special test if it is "special:" followed by a test name.
+define ParseSpecialTestSelection
+ $(if $(filter special:%, $1), \
+ $1 \
+ )
+endef
+
ifeq ($(TEST), )
$(info No test selection given in TEST!)
$(info Please use e.g. 'run-test TEST=tier1' or 'run-test-tier1')
@@ -271,6 +290,9 @@ $(foreach test, $(TEST), \
$(if $(strip $(PARSED_TESTS)), , \
$(eval PARSED_TESTS += $(call ParseJtregTestSelection, $(test))) \
) \
+ $(if $(strip $(PARSED_TESTS)), , \
+ $(eval PARSED_TESTS += $(call ParseSpecialTestSelection, $(test))) \
+ ) \
$(if $(strip $(PARSED_TESTS)), , \
$(eval UNKNOWN_TEST := $(test)) \
) \
@@ -320,7 +342,12 @@ define SetupRunGtestTestBody
$1_TEST_SUPPORT_DIR := $$(TEST_SUPPORT_DIR)/$1
$1_EXITCODE := $$($1_TEST_RESULTS_DIR)/exitcode.txt
- $1_TEST_NAME := $$(strip $$(patsubst gtest:%, %, $$($1_TEST)))
+ $1_VARIANT := $$(lastword $$(subst /, , $$($1_TEST)))
+ ifeq ($$(filter $$($1_VARIANT), $$(GTEST_VARIANTS)), )
+ $$(error Invalid gtest variant '$$($1_VARIANT)'. Valid variants: $$(GTEST_VARIANTS))
+ endif
+ $1_TEST_NAME := $$(strip $$(patsubst %/$$($1_VARIANT), %, \
+ $$(patsubst gtest:%, %, $$($1_TEST))))
ifneq ($$($1_TEST_NAME), all)
$1_GTEST_FILTER := --gtest_filter=$$($1_TEST_NAME)*
endif
@@ -334,7 +361,7 @@ define SetupRunGtestTestBody
$$(call LogWarn, Running test '$$($1_TEST)')
$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/gtest, \
- $$(FIXPATH) $$(TEST_IMAGE_DIR)/hotspot/gtest/server/gtestLauncher \
+ $$(FIXPATH) $$(TEST_IMAGE_DIR)/hotspot/gtest/$$($1_VARIANT)/gtestLauncher \
-jdk $(JDK_IMAGE_DIR) $$($1_GTEST_FILTER) \
--gtest_output=xml:$$($1_TEST_RESULTS_DIR)/gtest.xml \
$$($1_GTEST_REPEAT) $$(GTEST_OPTIONS) $$(GTEST_VM_OPTIONS) \
@@ -550,6 +577,69 @@ define SetupRunJtregTestBody
TARGETS += $1
endef
+################################################################################
+
+### Rules for special tests
+
+SetupRunSpecialTest = $(NamedParamsMacroTemplate)
+define SetupRunSpecialTestBody
+ $1_TEST_RESULTS_DIR := $$(TEST_RESULTS_DIR)/$1
+ $1_TEST_SUPPORT_DIR := $$(TEST_SUPPORT_DIR)/$1
+ $1_EXITCODE := $$($1_TEST_RESULTS_DIR)/exitcode.txt
+
+ $1_FULL_TEST_NAME := $$(strip $$(patsubst special:%, %, $$($1_TEST)))
+ ifneq ($$(findstring :, $$($1_FULL_TEST_NAME)), )
+ $1_TEST_NAME := $$(firstword $$(subst :, ,$$($1_FULL_TEST_NAME)))
+ $1_TEST_ARGS := $$(strip $$(patsubst special:$$($1_TEST_NAME):%, %, $$($1_TEST)))
+ else
+ $1_TEST_NAME := $$($1_FULL_TEST_NAME)
+ $1_TEST_ARGS :=
+ endif
+
+ ifeq ($$($1_TEST_NAME), hotspot-internal)
+ $1_TEST_COMMAND_LINE := \
+ $$(JDK_IMAGE_DIR)/bin/java -XX:+ExecuteInternalVMTests \
+ -XX:+ShowMessageBoxOnError -version
+ else ifeq ($$($1_TEST_NAME), failure-handler)
+ $1_TEST_COMMAND_LINE := \
+ ($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) -f \
+ BuildFailureHandler.gmk test)
+ else ifeq ($$($1_TEST_NAME), make)
+ $1_TEST_COMMAND_LINE := \
+ ($(CD) $(TOPDIR)/test/make && $(MAKE) $(MAKE_ARGS) -f \
+ TestMake.gmk $$($1_TEST_ARGS))
+ else
+ $$(error Invalid special test specification: $$($1_TEST_NAME))
+ endif
+
+ run-test-$1:
+ $$(call LogWarn)
+ $$(call LogWarn, Running test '$$($1_TEST)')
+ $$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
+ $$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/test-execution, \
+ $$($1_TEST_COMMAND_LINE) \
+ > >($(TEE) $$($1_TEST_RESULTS_DIR)/test-output.txt) \
+ && $$(ECHO) $$$$? > $$($1_EXITCODE) \
+ || $$(ECHO) $$$$? > $$($1_EXITCODE) \
+ )
+
+ $1_RESULT_FILE := $$($1_TEST_RESULTS_DIR)/gtest.txt
+
+ # We can not parse the various "special" tests.
+ parse-test-$1: run-test-$1
+ $$(call LogWarn, Finished running test '$$($1_TEST)')
+ $$(call LogWarn, Test report is stored in $$(strip \
+ $$(subst $$(TOPDIR)/, , $$($1_TEST_RESULTS_DIR))))
+ $$(call LogWarn, Warning: Special test results are not properly parsed!)
+ $$(eval $1_PASSED := 0)
+ $$(eval $1_FAILED := 0)
+ $$(eval $1_ERROR := 0)
+ $$(eval $1_TOTAL := 0)
+
+ $1: run-test-$1 parse-test-$1
+
+ TARGETS += $1
+endef
################################################################################
# Setup and execute make rules for all selected tests
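(A summary for illustration, not part of the patch: the special test selections accepted by SetupRunSpecialTest above map onto the following commands.)

    # special:hotspot-internal  ->  $(JDK_IMAGE_DIR)/bin/java -XX:+ExecuteInternalVMTests -XX:+ShowMessageBoxOnError -version
    # special:failure-handler   ->  cd make/test  && make -f BuildFailureHandler.gmk test
    # special:make:<target>     ->  cd test/make  && make -f TestMake.gmk <target>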
@@ -562,6 +652,9 @@ UseGtestTestHandler = \
UseJtregTestHandler = \
$(if $(filter jtreg:%, $1), true)
+UseSpecialTestHandler = \
+ $(if $(filter special:%, $1), true)
+
# Now process each test to run and setup a proper make rule
$(foreach test, $(TESTS_TO_RUN), \
$(eval TEST_ID := $(shell $(ECHO) $(strip $(test)) | \
@@ -582,6 +675,11 @@ $(foreach test, $(TESTS_TO_RUN), \
TEST := $(test), \
)) \
) \
+ $(if $(call UseSpecialTestHandler, $(test)), \
+ $(eval $(call SetupRunSpecialTest, $(TEST_ID), \
+ TEST := $(test), \
+ )) \
+ ) \
)
# Sort also removes duplicates, so if there is any we'll get fewer words.
diff --git a/make/RunTestsPrebuilt.gmk b/make/RunTestsPrebuilt.gmk
new file mode 100644
index 00000000000..ca5bc6f49e7
--- /dev/null
+++ b/make/RunTestsPrebuilt.gmk
@@ -0,0 +1,283 @@
+#
+# Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+################################################################################
+# Initial bootstrapping, copied and stripped down from Makefile and Init.gmk
+################################################################################
+
+# In Cygwin, the MAKE variable gets prepended with the current directory if the
+# make executable is called using a Windows mixed path (c:/cygwin/bin/make.exe).
+ifneq ($(findstring :, $(MAKE)), )
+ export MAKE := $(patsubst $(CURDIR)%, %, $(patsubst $(CURDIR)/%, %, $(MAKE)))
+endif
+
+# Locate this Makefile
+ifeq ($(filter /%, $(lastword $(MAKEFILE_LIST))),)
+ makefile_path := $(CURDIR)/$(strip $(lastword $(MAKEFILE_LIST)))
+else
+ makefile_path := $(lastword $(MAKEFILE_LIST))
+endif
+TOPDIR := $(strip $(patsubst %/make/, %, $(dir $(makefile_path))))
+
+################################################################################
+# Functions
+################################################################################
+
+# Setup a required or optional variable, and/or check that it is properly
+# given.
+# Note: No spaces are allowed around the arguments.
+#
+# $1: The name of the argument
+# $2: The default value, if any, or OPTIONAL (do not provide a default but
+# do not exit if it is missing)
+# $3: If NO_CHECK, disable checking for target file/directory existence
+define SetupVariable
+ ifeq ($$($1), )
+ ifeq ($2, )
+    $$(info Error: Prebuilt variable $1 is missing, needed for run-test-prebuilt)
+ $$(error Cannot continue.)
+ else ifeq ($2, OPTIONAL)
+ ifneq ($$(findstring $$(LOG), info debug trace), )
+ $$(info Prebuilt variable $1 is not provided)
+ endif
+ else
+ ifneq ($$(findstring $$(LOG), info debug trace), )
+ $$(info Prebuilt variable $1=$2 (default value))
+ endif
+ $1:=$2
+ endif
+ else
+ ifneq ($$(findstring $$(LOG), info debug trace), )
+ $$(info Prebuilt variable $1=$$($1))
+ endif
+ endif
+ # If $1 has a value (is not optional), and $3 is not set (to NO_CHECK),
+ # and if wildcard is empty, then complain that the file is missing.
+ ifeq ($$(strip $$(if $$($1), , OPTIONAL) $$(wildcard $$($1)) $3), )
+ $$(info Error: Prebuilt variable $1 points to missing file/directory:)
+ $$(info '$$($1)')
+ $$(error Cannot continue.)
+ endif
+endef
+
+# Create an ephemeral spec file
+#
+# $1: The output file name
+# $2..$N: The lines to output to the file
+define CreateNewSpec
+ $(if $(strip $(26)), \
+ $(error Internal makefile error: \
+ Too many arguments to macro, please update CreateNewSpec in RunTestsPrebuilt.gmk) \
+ ) \
+ $(shell $(RM) $1) \
+ $(foreach i, $(call sequence, 2, 25), \
+ $(if $(strip $($i)), \
+ $(call AppendFile, $(strip $($i)), $1) \
+ ) \
+ )
+endef
+
+################################################################################
+# Check input and setup basic buildsystem support
+################################################################################
+
+# Verify that user has given correct additional input.
+
+# These variables are absolutely necessary
+$(eval $(call SetupVariable,OUTPUTDIR))
+$(eval $(call SetupVariable,BOOT_JDK))
+$(eval $(call SetupVariable,JT_HOME))
+
+# These can have default values based on the ones above
+$(eval $(call SetupVariable,JDK_IMAGE_DIR,$(OUTPUTDIR)/images/jdk))
+$(eval $(call SetupVariable,TEST_IMAGE_DIR,$(OUTPUTDIR)/images/test))
+
+# Provide default values for tools that we need
+$(eval $(call SetupVariable,MAKE,make,NO_CHECK))
+$(eval $(call SetupVariable,BASH,bash,NO_CHECK))
+
+# Check optional variables
+$(eval $(call SetupVariable,JIB_JAR,OPTIONAL))
+
+# Now that we have verified that we have the required variables available, we
+# can include the prebuilt spec file ourselves, without an ephemeral spec
+# wrapper. This is required so we can include MakeBase which is needed for
+# CreateNewSpec.
+HAS_SPEC :=
+include $(TOPDIR)/make/InitSupport.gmk
+
+$(eval $(call CheckDeprecatedEnvironment))
+$(eval $(call CheckInvalidMakeFlags))
+$(eval $(call ParseLogLevel))
+
+SPEC := $(TOPDIR)/make/RunTestsPrebuiltSpec.gmk
+include $(SPEC)
+include $(TOPDIR)/make/common/MakeBase.gmk
+
+################################################################################
+# Determine what platform we're running on
+################################################################################
+UNAME := uname
+
+# Get OS name from uname (Cygwin inexplicably adds _NT-x.x)
+UNAME_OS := $(shell $(UNAME) -s | $(CUT) -f1 -d_)
+
+ifeq ($(UNAME_OS), CYGWIN)
+ OPENJDK_TARGET_OS := windows
+ OPENJDK_TARGET_OS_TYPE := windows
+ OPENJDK_TARGET_OS_ENV := windows.cygwin
+else
+ OPENJDK_TARGET_OS_TYPE:=unix
+ ifeq ($(UNAME_OS), Linux)
+ OPENJDK_TARGET_OS := linux
+ else ifeq ($(UNAME_OS), Darwin)
+ OPENJDK_TARGET_OS := macosx
+ else ifeq ($(UNAME_OS), SunOS)
+ OPENJDK_TARGET_OS := solaris
+ else
+ OPENJDK_TARGET_OS := $(UNAME_OS)
+ endif
+ OPENJDK_TARGET_OS_ENV := $(OPENJDK_TARGET_OS)
+endif
+
+# Assume little endian unless otherwise specified
+OPENJDK_TARGET_CPU_ENDIAN := little
+
+ifeq ($(OPENJDK_TARGET_OS), solaris)
+ # On solaris, use uname -p
+ UNAME_CPU := $(shell $(UNAME) -p)
+ # Assume 64-bit platform
+ OPENJDK_TARGET_CPU_BITS := 64
+ ifeq ($(UNAME_CPU), i386)
+ OPENJDK_TARGET_CPU := x86_64
+ else ifeq ($(UNAME_CPU), sparc)
+ OPENJDK_TARGET_CPU := sparcv9
+ OPENJDK_TARGET_CPU_ENDIAN := big
+ else
+ OPENJDK_TARGET_CPU := $(UNAME_CPU)
+ endif
+else
+ # ... all others use uname -m
+ UNAME_CPU := $(shell $(UNAME) -m)
+ ifeq ($(UNAME_CPU), i686)
+ OPENJDK_TARGET_CPU := x86
+ OPENJDK_TARGET_CPU_BITS := 32
+ else
+ # Assume all others are 64-bit. We use the same CPU name as uname for
+ # at least x86_64 and aarch64.
+ OPENJDK_TARGET_CPU := $(UNAME_CPU)
+ OPENJDK_TARGET_CPU_BITS := 64
+ endif
+endif
+
+OPENJDK_TARGET_CPU_ARCH := $(OPENJDK_TARGET_CPU)
+ifeq ($(OPENJDK_TARGET_CPU), x86_64)
+ OPENJDK_TARGET_CPU_ARCH := x86
+else ifeq ($(OPENJDK_TARGET_CPU), sparcv9)
+ OPENJDK_TARGET_CPU_ARCH := sparc
+endif
+
+ifeq ($(OPENJDK_TARGET_OS), windows)
+ ifeq ($(wildcard $(TEST_IMAGE_DIR)/bin/fixpath.exe), )
+ $$(info Error: fixpath is missing from test image '$(TEST_IMAGE_DIR)')
+ $$(error Cannot continue.)
+ endif
+ FIXPATH := $(TEST_IMAGE_DIR)/bin/fixpath.exe -c
+ PATH_SEP:=;
+else
+ FIXPATH :=
+ PATH_SEP:=:
+endif
+
+# Check number of cores
+ifeq ($(OPENJDK_TARGET_OS), linux)
+ NUM_CORES := $(shell $(CAT) /proc/cpuinfo | $(GREP) -c processor)
+else ifeq ($(OPENJDK_TARGET_OS), macosx)
+ NUM_CORES := $(shell /usr/sbin/sysctl -n hw.ncpu)
+else ifeq ($(OPENJDK_TARGET_OS), solaris)
+ NUM_CORES := $(shell LC_MESSAGES=C /usr/sbin/psrinfo -v | $(GREP) -c on-line)
+else ifeq ($(OPENJDK_TARGET_OS), windows)
+ NUM_CORES := $(NUMBER_OF_PROCESSORS)
+else
+ NUM_CORES := 1
+endif
+
+################################################################################
+# Generate the ephemeral spec file
+################################################################################
+
+# Now we can include additional custom support.
+# This might define CUSTOM_NEW_SPEC_LINE
+ifneq ($(CUSTOM_MAKE_DIR), )
+ include $(CUSTOM_MAKE_DIR)/RunTestsPrebuilt.gmk
+endif
+
+NEW_SPEC := $(OUTPUTDIR)/run-test-spec.gmk
+
+$(call CreateNewSpec, $(NEW_SPEC), \
+ # Generated file -- do not edit!, \
+ SPEC := $(NEW_SPEC), \
+ TOPDIR := $(TOPDIR), \
+ OUTPUTDIR := $(OUTPUTDIR), \
+ BOOT_JDK := $(BOOT_JDK), \
+ JT_HOME := $(JT_HOME), \
+ JDK_IMAGE_DIR := $(JDK_IMAGE_DIR), \
+ TEST_IMAGE_DIR := $(TEST_IMAGE_DIR), \
+ MAKE := $(MAKE), \
+ BASH := $(BASH), \
+ JIB_JAR := $(JIB_JAR), \
+ FIXPATH := $(FIXPATH), \
+ PATH_SEP := $(PATH_SEP), \
+ OPENJDK_TARGET_OS := $(OPENJDK_TARGET_OS), \
+ OPENJDK_TARGET_OS_TYPE := $(OPENJDK_TARGET_OS_TYPE), \
+ OPENJDK_TARGET_OS_ENV := $(OPENJDK_TARGET_OS_ENV), \
+ OPENJDK_TARGET_CPU := $(OPENJDK_TARGET_CPU), \
+ OPENJDK_TARGET_CPU_ARCH := $(OPENJDK_TARGET_CPU_ARCH), \
+ OPENJDK_TARGET_CPU_BITS := $(OPENJDK_TARGET_CPU_BITS), \
+ OPENJDK_TARGET_CPU_ENDIAN := $(OPENJDK_TARGET_CPU_ENDIAN), \
+ NUM_CORES := $(NUM_CORES), \
+ include $(TOPDIR)/make/RunTestsPrebuiltSpec.gmk, \
+ $(CUSTOM_NEW_SPEC_LINE), \
+)
+
+################################################################################
+# The run-test-prebuilt target
+################################################################################
+
+SPEC := $(NEW_SPEC)
+
+default: all
+
+run-test-prebuilt:
+ @$(RM) -f $(MAKESUPPORT_OUTPUTDIR)/exit-with-error
+ @cd $(TOPDIR) && $(MAKE) $(MAKE_ARGS) -f make/RunTests.gmk run-test \
+ TEST="$(TEST)"
+ @if test -f $(MAKESUPPORT_OUTPUTDIR)/exit-with-error ; then \
+ exit 1 ; \
+ fi
+
+all: run-test-prebuilt
+
+.PHONY: default all
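(A hypothetical invocation sketch, not part of the patch; the paths are placeholders. Given the variables verified as required above, and since command-line variable definitions propagate to the sub-make, a prebuilt-image test run could be started roughly like this from the top-level directory.)

    $ make run-test-prebuilt TEST=tier1 \
          OUTPUTDIR=/path/to/build-output \
          BOOT_JDK=/path/to/boot-jdk \
          JT_HOME=/path/to/jtreg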
diff --git a/make/RunTestsPrebuiltSpec.gmk b/make/RunTestsPrebuiltSpec.gmk
new file mode 100644
index 00000000000..5194099fd62
--- /dev/null
+++ b/make/RunTestsPrebuiltSpec.gmk
@@ -0,0 +1,175 @@
+#
+# Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+################################################################################
+# Fake minimalistic spec file for RunTestsPrebuilt.gmk.
+################################################################################
+
+define VerifyVariable
+ ifeq ($$($1), )
+    $$(info Error: Variable $1 is missing, needed by RunTestsPrebuiltSpec.gmk)
+ $$(error Cannot continue.)
+ else
+ ifneq ($$(findstring $$(LOG_LEVEL), debug trace), )
+ $$(info Prebuilt variable $1=$$($1))
+ endif
+ endif
+endef
+
+# It is the responsibility of the file including us to have set these up.
+# Verify that this is correct.
+$(eval $(call VerifyVariable,SPEC))
+$(eval $(call VerifyVariable,TOPDIR))
+$(eval $(call VerifyVariable,OUTPUTDIR))
+$(eval $(call VerifyVariable,BOOT_JDK))
+$(eval $(call VerifyVariable,JT_HOME))
+$(eval $(call VerifyVariable,JDK_IMAGE_DIR))
+$(eval $(call VerifyVariable,TEST_IMAGE_DIR))
+$(eval $(call VerifyVariable,MAKE))
+$(eval $(call VerifyVariable,BASH))
+
+################################################################################
+# The "human readable" name of this configuration
+CONF_NAME := run-test-prebuilt
+
+# Number of parallel jobs to use for compilation
+JOBS ?= $(NUM_CORES)
+TEST_JOBS ?= 0
+
+# Use hard-coded values for java flags (one size, fits all!)
+JAVA_FLAGS := -Duser.language=en -Duser.country=US
+JAVA_FLAGS_BIG:= -Xms64M -Xmx1600M -XX:ThreadStackSize=1536
+JAVA_FLAGS_SMALL:= -XX:+UseSerialGC -Xms32M -Xmx512M -XX:TieredStopAtLevel=1
+BUILD_JAVA_FLAGS := $(JAVA_FLAGS_BIG)
+
+################################################################################
+# Hard-coded values copied from spec.gmk.in.
+X:=
+SPACE:=$(X) $(X)
+COMMA:=,
+MAKE_ARGS = $(MAKE_LOG_FLAGS) -r -R -I $(TOPDIR)/make/common SPEC=$(SPEC) \
+ MAKE_LOG_FLAGS="$(MAKE_LOG_FLAGS)" LOG_LEVEL=$(LOG_LEVEL)
+BASH_ARGS := -o pipefail -e
+SHELL := $(BASH) $(BASH_ARGS)
+
+################################################################################
+# Set some reasonable defaults for features
+DEBUG_LEVEL := release
+HOTSPOT_DEBUG_LEVEL := release
+BUILD_GTEST := true
+BUILD_FAILURE_HANDLER := true
+
+################################################################################
+# Alias some paths (that should not really be used) to our JDK image under test.
+SUPPORT_OUTPUTDIR := $(OUTPUTDIR)/support
+BUILDTOOLS_OUTPUTDIR := $(OUTPUTDIR)/buildtools
+HOTSPOT_OUTPUTDIR := $(OUTPUTDIR)/hotspot
+JDK_OUTPUTDIR := $(OUTPUTDIR)/jdk
+IMAGES_OUTPUTDIR := $(OUTPUTDIR)/images
+BUNDLES_OUTPUTDIR := $(OUTPUTDIR)/bundles
+TESTMAKE_OUTPUTDIR := $(OUTPUTDIR)/test-make
+MAKESUPPORT_OUTPUTDIR := $(OUTPUTDIR)/make-support
+BUILDJDK_OUTPUTDIR := $(OUTPUTDIR)/buildjdk
+
+JRE_IMAGE_DIR := $(JDK_IMAGE_DIR)
+
+################################################################################
+# Assume build platform is same as target platform
+OPENJDK_BUILD_OS := $(OPENJDK_TARGET_OS)
+OPENJDK_BUILD_OS_TYPE := $(OPENJDK_TARGET_OS_TYPE)
+OPENJDK_BUILD_OS_ENV := $(OPENJDK_TARGET_OS_ENV)
+
+OPENJDK_BUILD_CPU := $(OPENJDK_TARGET_CPU)
+OPENJDK_BUILD_CPU_ARCH := $(OPENJDK_TARGET_CPU_ARCH)
+OPENJDK_BUILD_CPU_BITS := $(OPENJDK_TARGET_CPU_BITS)
+OPENJDK_BUILD_CPU_ENDIAN := $(OPENJDK_TARGET_CPU_ENDIAN)
+
+################################################################################
+# Java executable definitions
+JAVA_CMD := $(BOOT_JDK)/bin/java
+JAVAC_CMD := $(BOOT_JDK)/bin/javac
+JAVAH_CMD := $(BOOT_JDK)/bin/javah
+JAR_CMD := $(BOOT_JDK)/bin/jar
+JLINK_CMD := $(JDK_OUTPUTDIR)/bin/jlink
+JMOD_CMD := $(JDK_OUTPUTDIR)/bin/jmod
+JARSIGNER_CMD := $(BOOT_JDK)/bin/jarsigner
+
+JAVA := $(FIXPATH) $(JAVA_CMD) $(JAVA_FLAGS_BIG) $(JAVA_FLAGS)
+JAVA_SMALL := $(FIXPATH) $(JAVA_CMD) $(JAVA_FLAGS_SMALL) $(JAVA_FLAGS)
+JAVA_JAVAC := $(FIXPATH) $(JAVA_CMD) $(JAVA_FLAGS_SMALL) $(JAVA_FLAGS)
+JAVAC := $(FIXPATH) $(JAVAC_CMD)
+JAVAH := $(FIXPATH) $(JAVAH_CMD)
+JAR := $(FIXPATH) $(JAR_CMD)
+JLINK := $(FIXPATH) $(JLINK_CMD)
+JMOD := $(FIXPATH) $(JMOD_CMD)
+JARSIGNER := $(FIXPATH) $(JARSIGNER_CMD)
+
+BUILD_JAVA := $(JAVA)
+################################################################################
+# Some common tools. Assume most common name and no path.
+AWK := awk
+BASENAME := basename
+CAT := cat
+CD := cd
+CHMOD := chmod
+CP := cp
+CUT := cut
+DATE := date
+DIFF := diff
+DIRNAME := dirname
+FIND := find
+FIND_DELETE := -delete
+ECHO := echo
+EGREP := grep -E
+FGREP := grep -F
+GREP := grep
+GZIP := gzip
+HEAD := head
+LS := ls
+LN := ln
+MKDIR := mkdir
+MV := mv
+NAWK := nawk
+NICE := nice
+PATCH := patch
+PRINTF := printf
+RM := rm -f
+RMDIR := rmdir
+SED := sed
+SH := sh
+SORT := sort
+TAR := tar
+TAIL := tail
+TEE := tee
+TR := tr
+TOUCH := touch
+UNIQ := uniq
+WC := wc
+XARGS := xargs
+ZIPEXE := zip
+UNZIP := unzip
+EXPR := expr
+FILE := file
+HG := hg
diff --git a/make/autoconf/boot-jdk.m4 b/make/autoconf/boot-jdk.m4
index 0c0abd61fa6..161b0f18f2a 100644
--- a/make/autoconf/boot-jdk.m4
+++ b/make/autoconf/boot-jdk.m4
@@ -353,9 +353,6 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
AC_MSG_CHECKING([flags for boot jdk java command] )
- # Disable special log output when a debug build is used as Boot JDK...
- ADD_JVM_ARG_IF_OK([-XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput],boot_jdk_jvmargs,[$JAVA])
-
# Force en-US environment
ADD_JVM_ARG_IF_OK([-Duser.language=en -Duser.country=US],boot_jdk_jvmargs,[$JAVA])
diff --git a/make/autoconf/generated-configure.sh b/make/autoconf/generated-configure.sh
index d3cb5f606c6..e856469fd0b 100644
--- a/make/autoconf/generated-configure.sh
+++ b/make/autoconf/generated-configure.sh
@@ -5159,7 +5159,7 @@ VS_SDK_PLATFORM_NAME_2013=
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1512410983
+DATE_WHEN_GENERATED=1512479382
###############################################################################
#
@@ -67379,23 +67379,6 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command " >&5
$as_echo_n "checking flags for boot jdk java command ... " >&6; }
- # Disable special log output when a debug build is used as Boot JDK...
-
- $ECHO "Check if jvm arg is ok: -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput" >&5
- $ECHO "Command: $JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version" >&5
- OUTPUT=`$JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version 2>&1`
- FOUND_WARN=`$ECHO "$OUTPUT" | $GREP -i warn`
- FOUND_VERSION=`$ECHO $OUTPUT | $GREP " version \""`
- if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
- boot_jdk_jvmargs="$boot_jdk_jvmargs -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput"
- JVM_ARG_OK=true
- else
- $ECHO "Arg failed:" >&5
- $ECHO "$OUTPUT" >&5
- JVM_ARG_OK=false
- fi
-
-
# Force en-US environment
$ECHO "Check if jvm arg is ok: -Duser.language=en -Duser.country=US" >&5
diff --git a/make/autoconf/spec.gmk.in b/make/autoconf/spec.gmk.in
index b8449f78048..dda3fbebaca 100644
--- a/make/autoconf/spec.gmk.in
+++ b/make/autoconf/spec.gmk.in
@@ -565,6 +565,7 @@ JAVAC_FLAGS?=@JAVAC_FLAGS@
BUILD_JAVA_FLAGS := @BOOTCYCLE_JVM_ARGS_BIG@
BUILD_JAVA=@FIXPATH@ $(BUILD_JDK)/bin/java $(BUILD_JAVA_FLAGS)
+BUILD_JAR=@FIXPATH@ $(BUILD_JDK)/bin/jar
# Interim langtools and rmic modules and arguments
INTERIM_LANGTOOLS_BASE_MODULES := java.compiler jdk.compiler jdk.javadoc
@@ -577,7 +578,7 @@ INTERIM_LANGTOOLS_MODULES_COMMA := $(strip $(subst $(SPACE),$(COMMA),$(strip \
INTERIM_LANGTOOLS_ARGS := \
--limit-modules java.base,jdk.zipfs,$(INTERIM_LANGTOOLS_MODULES_COMMA) \
--add-modules $(INTERIM_LANGTOOLS_MODULES_COMMA) \
- --module-path $(BUILDTOOLS_OUTPUTDIR)/interim_modules \
+ --module-path $(BUILDTOOLS_OUTPUTDIR)/interim_langtools_modules \
$(INTERIM_LANGTOOLS_ADD_EXPORTS) \
#
JAVAC_MAIN_CLASS = -m jdk.compiler.interim/com.sun.tools.javac.Main
@@ -588,8 +589,10 @@ INTERIM_RMIC_MODULES := $(addsuffix .interim, $(INTERIM_RMIC_BASE_MODULES))
INTERIM_RMIC_ADD_EXPORTS := \
--add-exports java.corba/com.sun.corba.se.impl.util=jdk.rmic.interim \
#
-INTERIM_RMIC_ARGS := --limit-modules java.base,jdk.compiler,jdk.javadoc,java.corba \
- --module-path $(BUILDTOOLS_OUTPUTDIR)/interim_modules \
+# Use = to delay expansion of PathList since it's not available in this file.
+INTERIM_RMIC_ARGS = --limit-modules java.base,jdk.compiler,jdk.javadoc,java.corba \
+ --module-path $(call PathList, $(BUILDTOOLS_OUTPUTDIR)/interim_rmic_modules \
+ $(BUILDTOOLS_OUTPUTDIR)/interim_langtools_modules) \
$(INTERIM_RMIC_ADD_EXPORTS) \
#
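(A self-contained illustration, not part of the patch, of the deferred '=' assignment used above: PathList is not yet defined when the spec file itself is parsed, so an immediate ':=' would expand it to nothing. The function name Greet below is made up.)

    # ':=' expands right away, so a call to a not-yet-defined function yields nothing.
    IMMEDIATE := $(call Greet, world)
    # '=' defers expansion until the variable is used, by which time Greet exists.
    DEFERRED = $(call Greet, world)

    Greet = hello $(strip $1)

    $(info IMMEDIATE is '$(IMMEDIATE)')
    $(info DEFERRED is '$(DEFERRED)')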
diff --git a/make/common/JarArchive.gmk b/make/common/JarArchive.gmk
index 1e1c017ab61..b6487913dd4 100644
--- a/make/common/JarArchive.gmk
+++ b/make/common/JarArchive.gmk
@@ -56,6 +56,7 @@ FALSE_FIND_PATTERN:=-name FILE_NAME_THAT_DOESNT_EXIST
# added to the archive.
# EXTRA_MANIFEST_ATTR:=Extra attribute to add to manifest.
# CHECK_COMPRESS_JAR Check the COMPRESS_JAR variable
+# JAR_CMD:=Optionally override the jar command to use when creating the archive.
SetupJarArchive = $(NamedParamsMacroTemplate)
define SetupJarArchiveBody
@@ -65,6 +66,7 @@ define SetupJarArchiveBody
$1_DELETESS_FILE:=$$(dir $$($1_JAR))_the.$$($1_JARNAME)_deletess
$1_DELETES_FILE:=$$(dir $$($1_JAR))_the.$$($1_JARNAME)_deletes
$1_BIN:=$$(dir $$($1_JAR))
+ $$(call SetIfEmpty, $1_JAR_CMD, $$(JAR))
ifeq (,$$($1_SUFFIXES))
# No suffix was set, default to classes.
@@ -109,7 +111,7 @@ define SetupJarArchiveBody
# Check if this jar needs to have its index generated.
ifneq (,$$($1_JARINDEX))
- $1_JARINDEX = (cd $$(dir $$@) && $(JAR) -i $$(notdir $$@))
+ $1_JARINDEX = (cd $$(dir $$@) && $$($1_JAR_CMD) -i $$(notdir $$@))
else
$1_JARINDEX = true
endif
@@ -189,7 +191,7 @@ define SetupJarArchiveBody
$1_UPDATE_CONTENTS=\
if [ "`$(WC) -l $$($1_BIN)/_the.$$($1_JARNAME)_contents | $(AWK) '{ print $$$$1 }'`" -gt "0" ]; then \
$(ECHO) " updating" `$(WC) -l $$($1_BIN)/_the.$$($1_JARNAME)_contents | $(AWK) '{ print $$$$1 }'` files && \
- $(JAR) $$($1_JAR_UPDATE_OPTIONS) $$@ @$$($1_BIN)/_the.$$($1_JARNAME)_contents; \
+ $$($1_JAR_CMD) $$($1_JAR_UPDATE_OPTIONS) $$@ @$$($1_BIN)/_the.$$($1_JARNAME)_contents; \
fi $$(NEWLINE)
# The s-variants of the above macros are used when the jar is created from scratch.
# NOTICE: please leave the parentheses space separated otherwise the AIX build will break!
@@ -208,7 +210,7 @@ define SetupJarArchiveBody
| $(SED) 's|$$(src)/|-C $$(src) |g' >> \
$$($1_BIN)/_the.$$($1_JARNAME)_contents) $$(NEWLINE) )
endif
- $1_SUPDATE_CONTENTS=$(JAR) $$($1_JAR_UPDATE_OPTIONS) $$@ @$$($1_BIN)/_the.$$($1_JARNAME)_contents $$(NEWLINE)
+ $1_SUPDATE_CONTENTS=$$($1_JAR_CMD) $$($1_JAR_UPDATE_OPTIONS) $$@ @$$($1_BIN)/_the.$$($1_JARNAME)_contents $$(NEWLINE)
# Use a slightly shorter name for logging, but with enough path to identify this jar.
$1_NAME:=$$(subst $$(OUTPUTDIR)/,,$$($1_JAR))
@@ -226,7 +228,7 @@ define SetupJarArchiveBody
endif
# Include all variables of significance in the vardeps file
- $1_VARDEPS := $(JAR) $$($1_JAR_CREATE_OPTIONS) $$($1_MANIFEST) \
+ $1_VARDEPS := $$($1_JAR_CMD) $$($1_JAR_CREATE_OPTIONS) $$($1_MANIFEST) \
$$($1_JARMAIN) $$($1_EXTRA_MANIFEST_ATTR) $$($1_ORIG_DEPS) $$($1_SRCS) \
$$($1_INCLUDES) $$($1_EXCLUDES) $$($1_EXCLUDE_FILES) $$($1_EXTRA_FILES)
$1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, $$(dir $$($1_JAR))_the.$$($1_JARNAME).vardeps)
@@ -250,7 +252,7 @@ define SetupJarArchiveBody
$$(if $$($1_EXTRA_MANIFEST_ATTR), \
$(PRINTF) "$$($1_EXTRA_MANIFEST_ATTR)\n" >> $$($1_MANIFEST_FILE) $$(NEWLINE)) \
$(ECHO) Creating $$($1_NAME) $$(NEWLINE) \
- $(JAR) $$($1_JAR_CREATE_OPTIONS) $$@ $$($1_MANIFEST_FILE) $$(NEWLINE) \
+ $$($1_JAR_CMD) $$($1_JAR_CREATE_OPTIONS) $$@ $$($1_MANIFEST_FILE) $$(NEWLINE) \
$$($1_SCAPTURE_CONTENTS) \
$$($1_SCAPTURE_METAINF) \
$$($1_SUPDATE_CONTENTS) \
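(A hypothetical sketch, not part of the patch: a SetupJarArchive call using the new JAR_CMD parameter to override the default $(JAR), here with the BUILD_JAR tool added to spec.gmk.in above. The target name and paths are placeholders.)

    $(eval $(call SetupJarArchive, BUILD_EXAMPLE_JAR, \
        SRCS := $(SUPPORT_OUTPUTDIR)/example_classes, \
        JAR := $(SUPPORT_OUTPUTDIR)/example.jar, \
        JAR_CMD := $(BUILD_JAR), \
    ))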
diff --git a/make/common/MakeBase.gmk b/make/common/MakeBase.gmk
index 69dc9fbffe1..61a27155a84 100644
--- a/make/common/MakeBase.gmk
+++ b/make/common/MakeBase.gmk
@@ -912,6 +912,17 @@ else
$(shell $(PRINTF) "%s" $(call ShellQuote, $1) > $2)
endif
+# Param 1 - Text to write
+# Param 2 - File to write to
+ifeq ($(HAS_FILE_FUNCTION), true)
+ AppendFile = \
+ $(file >>$2,$(strip $1))
+else
+ # Use printf to get consistent behavior on all platforms.
+ AppendFile = \
+ $(shell $(PRINTF) "%s" $(call ShellQuote, $1) >> $2)
+endif
+
################################################################################
# DependOnVariable
#
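(A minimal usage sketch, not part of the patch; the file path is a placeholder.)

    $(call AppendFile, some generated text, $(SUPPORT_OUTPUTDIR)/example.txt)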
diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js
index 9bf032e0e8e..172aabb7fe4 100644
--- a/make/conf/jib-profiles.js
+++ b/make/conf/jib-profiles.js
@@ -662,6 +662,16 @@ var getJibProfilesProfiles = function (input, common, data) {
}
});
+    // For open profiles, the non-debug jdk bundles need an "open" prefix on the
+ // remote bundle names, forming the word "openjdk". See JDK-8188789.
+ common.main_profile_names.forEach(function (name) {
+ var openName = name + common.open_suffix;
+ profiles[openName].artifacts["jdk"].remote = replaceAll(
+ "\/jdk-", "/openjdk-",
+ replaceAll("\/\\1", "/open\\1",
+ profiles[openName].artifacts["jdk"].remote));
+ });
+
// Profiles used to run tests. Used in JPRT and Mach 5.
var testOnlyProfiles = {
"run-test-jprt": {
@@ -779,6 +789,10 @@ var getJibProfilesDependencies = function (input, common) {
macosx_x64: "2.7.1-Xcode6.3-MacOSX10.9+1.0"
}[input.target_platform];
+ var makeBinDir = (input.build_os == "windows"
+ ? input.get("gnumake", "install_path") + "/cygwin/bin"
+ : input.get("gnumake", "install_path") + "/bin");
+
var dependencies = {
boot_jdk: {
@@ -831,13 +845,13 @@ var getJibProfilesDependencies = function (input, common) {
? "gnumake-" + input.build_osenv_platform
: "gnumake-" + input.build_platform),
- configure_args: (input.build_os == "windows"
- ? "MAKE=" + input.get("gnumake", "install_path") + "/cygwin/bin/make"
- : "MAKE=" + input.get("gnumake", "install_path") + "/bin/make"),
+ configure_args: "MAKE=" + makeBinDir + "/make",
- environment_path: (input.build_os == "windows"
- ? input.get("gnumake", "install_path") + "/cygwin/bin"
- : input.get("gnumake", "install_path") + "/bin")
+ environment: {
+ "MAKE": makeBinDir + "/make"
+ },
+
+ environment_path: makeBinDir
},
freetype: {
diff --git a/make/hotspot/lib/CompileJvm.gmk b/make/hotspot/lib/CompileJvm.gmk
index 136efbd756e..66ba5033562 100644
--- a/make/hotspot/lib/CompileJvm.gmk
+++ b/make/hotspot/lib/CompileJvm.gmk
@@ -59,6 +59,7 @@ JVM_CFLAGS_INCLUDES += \
-I$(TOPDIR)/src/hotspot/share/precompiled \
-I$(TOPDIR)/src/java.base/share/native/include \
-I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/include \
+ -I$(TOPDIR)/src/java.management/share/native/include \
-I$(TOPDIR)/src/java.base/share/native/libjimage \
#
diff --git a/make/jdk/src/classes/build/tools/jdwpgen/RootNode.java b/make/jdk/src/classes/build/tools/jdwpgen/RootNode.java
index 8fbd71036b8..2361f335949 100644
--- a/make/jdk/src/classes/build/tools/jdwpgen/RootNode.java
+++ b/make/jdk/src/classes/build/tools/jdwpgen/RootNode.java
@@ -40,6 +40,7 @@ class RootNode extends AbstractNamedNode {
}
void document(PrintWriter writer) {
+ writer.println("");
writer.println("" + comment() + " ");
writer.println("");
for (Node node : components) {
diff --git a/make/mapfiles/libjava/mapfile-vers b/make/mapfiles/libjava/mapfile-vers
index a2f7303f06e..ce5100c539e 100644
--- a/make/mapfiles/libjava/mapfile-vers
+++ b/make/mapfiles/libjava/mapfile-vers
@@ -74,7 +74,7 @@ SUNWprivate_1.1 {
JNU_ThrowStringIndexOutOfBoundsException;
JNU_ToString;
- Java_java_io_FileDescriptor_close;
+ Java_java_io_FileDescriptor_close0;
Java_java_io_FileDescriptor_initIDs;
Java_java_io_FileDescriptor_sync;
Java_java_io_FileDescriptor_getAppend;
diff --git a/make/test/JtregNativeHotspot.gmk b/make/test/JtregNativeHotspot.gmk
index 9c621f53410..8cf3b0ed6f3 100644
--- a/make/test/JtregNativeHotspot.gmk
+++ b/make/test/JtregNativeHotspot.gmk
@@ -79,6 +79,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
$(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/ModuleAwareAgents/ClassLoadPrepare \
$(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/ModuleAwareAgents/ThreadStart \
$(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/StartPhase/AllowedFunctions \
+ $(TOPDIR)/test/hotspot/jtreg/serviceability/dcmd/jvmti/AttachFailed \
#
# Add conditional directories here when needed.
@@ -110,6 +111,8 @@ ifeq ($(TOOLCHAIN_TYPE), solstudio)
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libAllowedFunctions := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libRedefineDoubleDelete := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libHandshakeTransitionTest := -lc
+ BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libHasNoEntryPoint := -lc
+ BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libReturnError := -lc
endif
ifeq ($(OPENJDK_TARGET_OS), linux)
diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
index ab1bcd55817..2a9766f5143 100644
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
@@ -985,12 +985,33 @@ public:
}
void hint(int imm) {
- system(0b00, 0b011, 0b0010, imm, 0b000);
+ system(0b00, 0b011, 0b0010, 0b0000, imm);
}
void nop() {
hint(0);
}
+
+ void yield() {
+ hint(1);
+ }
+
+ void wfe() {
+ hint(2);
+ }
+
+ void wfi() {
+ hint(3);
+ }
+
+ void sev() {
+ hint(4);
+ }
+
+ void sevl() {
+ hint(5);
+ }
+
// we only provide mrs and msr for the special purpose system
// registers where op1 (instr[20:19]) == 11 and, (currently) only
// use it for FPSR n.b msr has L (instr[21]) == 0 mrs has L == 1
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index 36fefc866aa..ecd4a8e19f2 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -494,42 +494,6 @@ void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
}
}
-// Rather than take a segfault when the polling page is protected,
-// explicitly check for a safepoint in progress and if there is one,
-// fake a call to the handler as if a segfault had been caught.
-void LIR_Assembler::poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info) {
- __ mov(rscratch1, SafepointSynchronize::address_of_state());
- __ ldrb(rscratch1, Address(rscratch1));
- Label nope, poll;
- __ cbz(rscratch1, nope);
- __ block_comment("safepoint");
- __ enter();
- __ push(0x3, sp); // r0 & r1
- __ push(0x3ffffffc, sp); // integer registers except lr & sp & r0 & r1
- __ adr(r0, poll);
- __ str(r0, Address(rthread, JavaThread::saved_exception_pc_offset()));
- __ mov(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::get_poll_stub));
- __ blrt(rscratch1, 1, 0, 1);
- __ maybe_isb();
- __ pop(0x3ffffffc, sp); // integer registers except lr & sp & r0 & r1
- __ mov(rscratch1, r0);
- __ pop(0x3, sp); // r0 & r1
- __ leave();
- __ br(rscratch1);
- address polling_page(os::get_polling_page());
- assert(os::is_poll_address(polling_page), "should be");
- unsigned long off;
- __ adrp(rscratch1, Address(polling_page, rtype), off);
- __ bind(poll);
- if (info)
- add_debug_info_for_branch(info); // This isn't just debug info:
- // it's the oop map
- else
- __ code_section()->relocate(pc(), rtype);
- __ ldrw(zr, Address(rscratch1, off));
- __ bind(nope);
-}
-
void LIR_Assembler::return_op(LIR_Opr result) {
assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
@@ -549,11 +513,9 @@ int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
address polling_page(os::get_polling_page());
guarantee(info != NULL, "Shouldn't be NULL");
assert(os::is_poll_address(polling_page), "should be");
- unsigned long off;
- __ adrp(rscratch1, Address(polling_page, relocInfo::poll_type), off);
- assert(off == 0, "must be");
+ __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type);
add_debug_info_for_branch(info); // This isn't just debug info:
- // it's the oop map
+ // it's the oop map
__ read_polling_page(rscratch1, relocInfo::poll_type);
return __ offset();
}
diff --git a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
index ad6b12de22d..5977764c2d2 100644
--- a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
@@ -51,4 +51,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#define SUPPORT_RESERVED_STACK_AREA
+#define THREAD_LOCAL_POLL
+
#endif // CPU_AARCH64_VM_GLOBALDEFINITIONS_AARCH64_HPP
diff --git a/src/hotspot/cpu/aarch64/globals_aarch64.hpp b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
index bf34c157c9e..dfd984e4fb0 100644
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
@@ -79,7 +79,7 @@ define_pd_global(bool, CompactStrings, true);
// Clear short arrays bigger than one word in an arch-specific way
define_pd_global(intx, InitArrayShortSize, BytesPerLong);
-define_pd_global(bool, ThreadLocalHandshakes, false);
+define_pd_global(bool, ThreadLocalHandshakes, true);
#if defined(COMPILER1) || defined(COMPILER2)
define_pd_global(intx, InlineSmallCode, 1000);
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
index b1b3c5e5273..550bf100764 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
@@ -30,12 +30,13 @@
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
-#include "oops/methodData.hpp"
#include "oops/method.hpp"
+#include "oops/methodData.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
@@ -438,13 +439,26 @@ void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
void InterpreterMacroAssembler::dispatch_base(TosState state,
address* table,
- bool verifyoop) {
+ bool verifyoop,
+ bool generate_poll) {
if (VerifyActivationFrameSize) {
Unimplemented();
}
if (verifyoop) {
verify_oop(r0, state);
}
+
+ Label safepoint;
+ address* const safepoint_table = Interpreter::safept_table(state);
+ bool needs_thread_local_poll = generate_poll &&
+ SafepointMechanism::uses_thread_local_poll() && table != safepoint_table;
+
+ if (needs_thread_local_poll) {
+ NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
+ ldr(rscratch2, Address(rthread, Thread::polling_page_offset()));
+ tbnz(rscratch2, exact_log2(SafepointMechanism::poll_bit()), safepoint);
+ }
+
if (table == Interpreter::dispatch_table(state)) {
addw(rscratch2, rscratch1, Interpreter::distance_from_dispatch_table(state));
ldr(rscratch2, Address(rdispatch, rscratch2, Address::uxtw(3)));
@@ -453,10 +467,17 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
ldr(rscratch2, Address(rscratch2, rscratch1, Address::uxtw(3)));
}
br(rscratch2);
+
+ if (needs_thread_local_poll) {
+ bind(safepoint);
+ lea(rscratch2, ExternalAddress((address)safepoint_table));
+ ldr(rscratch2, Address(rscratch2, rscratch1, Address::uxtw(3)));
+ br(rscratch2);
+ }
}
-void InterpreterMacroAssembler::dispatch_only(TosState state) {
- dispatch_base(state, Interpreter::dispatch_table(state));
+void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
+ dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}
void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
@@ -468,10 +489,10 @@ void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
}
-void InterpreterMacroAssembler::dispatch_next(TosState state, int step) {
+void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
// load next bytecode
ldrb(rscratch1, Address(pre(rbcp, step)));
- dispatch_base(state, Interpreter::dispatch_table(state));
+ dispatch_base(state, Interpreter::dispatch_table(state), generate_poll);
}
void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
@@ -1585,6 +1606,7 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
}
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
+ assert_different_registers(obj, rscratch1);
Label update, next, none;
verify_oop(obj);
@@ -1745,6 +1767,7 @@ void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret,
}
void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
+ assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
if (ProfileInterpreter && MethodData::profile_parameters()) {
Label profile_continue, done;
@@ -1752,8 +1775,8 @@ void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register t
// Load the offset of the area within the MDO used for
// parameters. If it's negative we're not profiling any parameters
- ldr(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
- tbnz(tmp1, 63, profile_continue); // i.e. sign bit set
+ ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
+ tbnz(tmp1, 31, profile_continue); // i.e. sign bit set
// Compute a pointer to the area for parameters from the offset
// and move the pointer to the slot for the last
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
index 637ae481f5b..59b33a17d22 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
@@ -55,7 +55,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool check_exceptions);
// base routine for all dispatches
- void dispatch_base(TosState state, address* table, bool verifyoop = true);
+ void dispatch_base(TosState state, address* table,
+ bool verifyoop = true, bool generate_poll = false);
public:
InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
@@ -165,12 +166,12 @@ class InterpreterMacroAssembler: public MacroAssembler {
void dispatch_prolog(TosState state, int step = 0);
void dispatch_epilog(TosState state, int step = 0);
// dispatch via rscratch1
- void dispatch_only(TosState state);
+ void dispatch_only(TosState state, bool generate_poll = false);
// dispatch normal table via rscratch1 (assume rscratch1 is loaded already)
void dispatch_only_normal(TosState state);
void dispatch_only_noverify(TosState state);
// load rscratch1 from [rbcp + step] and dispatch via rscratch1
- void dispatch_next(TosState state, int step = 0);
+ void dispatch_next(TosState state, int step = 0, bool generate_poll = false);
// load rscratch1 from [esi] and dispatch via rscratch1 and table
void dispatch_via (TosState state, address* table);
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 81c00ffd106..bef4fd4e573 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -287,6 +287,40 @@ void MacroAssembler::serialize_memory(Register thread, Register tmp) {
dsb(Assembler::SY);
}
+void MacroAssembler::safepoint_poll(Label& slow_path) {
+ if (SafepointMechanism::uses_thread_local_poll()) {
+ ldr(rscratch1, Address(rthread, Thread::polling_page_offset()));
+ tbnz(rscratch1, exact_log2(SafepointMechanism::poll_bit()), slow_path);
+ } else {
+ unsigned long offset;
+ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
+ ldrw(rscratch1, Address(rscratch1, offset));
+ assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
+ cbnz(rscratch1, slow_path);
+ }
+}
+
+// Just like safepoint_poll, but use an acquiring load for thread-
+// local polling.
+//
+// We need an acquire here to ensure that any subsequent load of the
+// global SafepointSynchronize::_state flag is ordered after this load
+// of the local Thread::_polling page. We don't want this poll to
+// return false (i.e. not safepointing) and a later poll of the global
+// SafepointSynchronize::_state spuriously to return true.
+//
+// This is to avoid a race when we're in a native->Java transition
+// racing the code which wakes up from a safepoint.
+//
+void MacroAssembler::safepoint_poll_acquire(Label& slow_path) {
+ if (SafepointMechanism::uses_thread_local_poll()) {
+ lea(rscratch1, Address(rthread, Thread::polling_page_offset()));
+ ldar(rscratch1, rscratch1);
+ tbnz(rscratch1, exact_log2(SafepointMechanism::poll_bit()), slow_path);
+ } else {
+ safepoint_poll(slow_path);
+ }
+}
void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
// we must set sp to zero to clear frame
@@ -4336,15 +4370,26 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
}
-address MacroAssembler::read_polling_page(Register r, address page, relocInfo::relocType rtype) {
- unsigned long off;
- adrp(r, Address(page, rtype), off);
- InstructionMark im(this);
- code_section()->relocate(inst_mark(), rtype);
- ldrw(zr, Address(r, off));
- return inst_mark();
+// Move the address of the polling page into dest.
+void MacroAssembler::get_polling_page(Register dest, address page, relocInfo::relocType rtype) {
+ if (SafepointMechanism::uses_thread_local_poll()) {
+ ldr(dest, Address(rthread, Thread::polling_page_offset()));
+ } else {
+ unsigned long off;
+ adrp(dest, Address(page, rtype), off);
+ assert(off == 0, "polling page must be page aligned");
+ }
}
+// Move the address of the polling page into r, then read the polling
+// page.
+address MacroAssembler::read_polling_page(Register r, address page, relocInfo::relocType rtype) {
+ get_polling_page(r, page, rtype);
+ return read_polling_page(r, rtype);
+}
+
+// Read the polling page. The address of the polling page must
+// already be in r.
address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
InstructionMark im(this);
code_section()->relocate(inst_mark(), rtype);
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index f5cab401535..94c8c037164 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -97,6 +97,9 @@ class MacroAssembler: public Assembler {
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
+ void safepoint_poll(Label& slow_path);
+ void safepoint_poll_acquire(Label& slow_path);
+
// Biased locking support
// lock_reg and obj_reg must be loaded up with the appropriate values.
// swap_reg is killed.
@@ -995,12 +998,12 @@ public:
void atomic_xchgalw(Register prev, Register newv, Register addr);
void orptr(Address adr, RegisterOrConstant src) {
- ldr(rscratch2, adr);
+ ldr(rscratch1, adr);
if (src.is_register())
- orr(rscratch2, rscratch2, src.as_register());
+ orr(rscratch1, rscratch1, src.as_register());
else
- orr(rscratch2, rscratch2, src.as_constant());
- str(rscratch2, adr);
+ orr(rscratch1, rscratch1, src.as_constant());
+ str(rscratch1, adr);
}
// A generic CAS; success or failure is in the EQ flag.
@@ -1199,6 +1202,7 @@ public:
address read_polling_page(Register r, address page, relocInfo::relocType rtype);
address read_polling_page(Register r, relocInfo::relocType rtype);
+ void get_polling_page(Register dest, address page, relocInfo::relocType rtype);
// CRC32 code for java.util.zip.CRC32::updateBytes() instrinsic.
void update_byte_crc32(Register crc, Register val, Register table);
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
index 88e1d3d1460..6a313273ea6 100644
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
@@ -245,6 +245,11 @@ bool NativeInstruction::is_safepoint_poll() {
// mov(reg, polling_page);
// ldr(zr, [reg, #offset]);
//
+ // or
+ //
+ // ldr(reg, [rthread, #offset]);
+ // ldr(zr, [reg, #offset]);
+ //
// however, we cannot rely on the polling page address load always
// directly preceding the read from the page. C1 does that but C2
// has to do the load and read as two independent instruction
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
index 9b67fe7a5f2..0a5b696b575 100644
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -1664,7 +1664,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// critical natives they are offset down.
GrowableArray<int> arg_order(2 * total_in_args);
VMRegPair tmp_vmreg;
- tmp_vmreg.set1(r19->as_VMReg());
+ tmp_vmreg.set2(r19->as_VMReg());
if (!is_critical_native) {
for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
@@ -1952,7 +1952,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
// Force this write out before the read below
- __ dmb(Assembler::SY);
+ __ dmb(Assembler::ISH);
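+      // (ISH: an inner-shareable barrier is enough to order accesses among Java threads;
+      // the full-system scope of SY is not required here.)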
} else {
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
__ stlrw(rscratch1, rscratch2);
@@ -1970,13 +1970,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// check for safepoint operation in progress and/or pending suspend requests
Label safepoint_in_progress, safepoint_in_progress_done;
{
- assert(SafepointSynchronize::_not_synchronized == 0, "fix this code");
- unsigned long offset;
- __ adrp(rscratch1,
- ExternalAddress((address)SafepointSynchronize::address_of_state()),
- offset);
- __ ldrw(rscratch1, Address(rscratch1, offset));
- __ cbnzw(rscratch1, safepoint_in_progress);
+ __ safepoint_poll_acquire(safepoint_in_progress);
__ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
__ cbnzw(rscratch1, safepoint_in_progress);
__ bind(safepoint_in_progress_done);
@@ -2932,8 +2926,11 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
if (!cause_return) {
// overwrite the return address pushed by save_live_registers
- __ ldr(c_rarg0, Address(rthread, JavaThread::saved_exception_pc_offset()));
- __ str(c_rarg0, Address(rfp, wordSize));
+ // Additionally, r20 is a callee-saved register so we can look at
+ // it later to determine if someone changed the return address for
+ // us!
+ __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
+ __ str(r20, Address(rfp, wordSize));
}
// Do the call
@@ -2968,11 +2965,40 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// No exception case
__ bind(noException);
+ Label no_adjust, bail;
+ if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
+ // If our stashed return pc was modified by the runtime we avoid touching it
+ __ ldr(rscratch1, Address(rfp, wordSize));
+ __ cmp(r20, rscratch1);
+ __ br(Assembler::NE, no_adjust);
+
+#ifdef ASSERT
+ // Verify the correct encoding of the poll we're about to skip.
+ // See NativeInstruction::is_ldrw_to_zr()
+ __ ldrw(rscratch1, Address(r20));
+ __ ubfx(rscratch2, rscratch1, 22, 10);
+ __ cmpw(rscratch2, 0b1011100101);
+ __ br(Assembler::NE, bail);
+ __ ubfx(rscratch2, rscratch1, 0, 5);
+ __ cmpw(rscratch2, 0b11111);
+ __ br(Assembler::NE, bail);
+#endif
+ // Adjust return pc forward to step over the safepoint poll instruction
+ __ add(r20, r20, NativeInstruction::instruction_size);
+ __ str(r20, Address(rfp, wordSize));
+ }
+
+ __ bind(no_adjust);
// Normal exit, restore registers and exit.
RegisterSaver::restore_live_registers(masm, save_vectors);
__ ret(lr);
+#ifdef ASSERT
+ __ bind(bail);
+ __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
+#endif
+
// Make sure all code is generated
masm->flush();
diff --git a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
index 9174c554bcc..f33c6e513a8 100644
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
@@ -414,6 +414,14 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ restore_constant_pool_cache();
__ get_method(rmethod);
+ if (state == atos) {
+ Register obj = r0;
+ Register mdp = r1;
+ Register tmp = r2;
+ __ ldr(mdp, Address(rmethod, Method::method_data_offset()));
+ __ profile_return_type(mdp, obj, tmp);
+ }
+
// Pop N words from the stack
__ get_cache_and_index_at_bcp(r1, r2, 1, index_size);
__ ldr(r1, Address(r1, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
@@ -967,12 +975,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
- ExternalAddress state(SafepointSynchronize::address_of_state());
- unsigned long offset;
- __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
- __ ldrw(rscratch1, Address(rscratch1, offset));
- assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
- __ cbnz(rscratch1, slow_path);
+ __ safepoint_poll(slow_path);
// We don't generate local frame and don't align stack because
// we call stub code and there is no safepoint on this path.
@@ -986,6 +989,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
__ ldrw(val, Address(esp, 0)); // byte value
__ ldrw(crc, Address(esp, wordSize)); // Initial CRC
+ unsigned long offset;
__ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
__ add(tbl, tbl, offset);
@@ -1020,12 +1024,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
- ExternalAddress state(SafepointSynchronize::address_of_state());
- unsigned long offset;
- __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
- __ ldrw(rscratch1, Address(rscratch1, offset));
- assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
- __ cbnz(rscratch1, slow_path);
+ __ safepoint_poll(slow_path);
// We don't generate local frame and don't align stack because
// we call stub code and there is no safepoint on this path.
@@ -1375,7 +1374,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
if (os::is_MP()) {
if (UseMembar) {
// Force this write out before the read below
- __ dsb(Assembler::SY);
+ __ dmb(Assembler::ISH);
} else {
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
@@ -1387,16 +1386,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// check for safepoint operation in progress and/or pending suspend requests
{
- Label Continue;
- {
- unsigned long offset;
- __ adrp(rscratch2, SafepointSynchronize::address_of_state(), offset);
- __ ldrw(rscratch2, Address(rscratch2, offset));
- }
- assert(SafepointSynchronize::_not_synchronized == 0,
- "SafepointSynchronize::_not_synchronized");
- Label L;
- __ cbnz(rscratch2, L);
+ Label L, Continue;
+ __ safepoint_poll_acquire(L);
__ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
__ cbz(rscratch2, Continue);
__ bind(L);
@@ -1671,6 +1662,14 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
__ mov(rscratch2, true);
__ strb(rscratch2, do_not_unlock_if_synchronized);
+ Label no_mdp;
+ Register mdp = r3;
+ __ ldr(mdp, Address(rmethod, Method::method_data_offset()));
+ __ cbz(mdp, no_mdp);
+ __ add(mdp, mdp, in_bytes(MethodData::data_offset()));
+ __ profile_parameters_type(mdp, r1, r2);
+ __ bind(no_mdp);
+
// increment invocation count & check for overflow
Label invocation_counter_overflow;
Label profile_method;
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index 2ba42035e7b..9390d38b1b8 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -1717,7 +1717,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
__ push_i(r1);
// Adjust the bcp by the 16-bit displacement in r2
__ add(rbcp, rbcp, r2);
- __ dispatch_only(vtos);
+ __ dispatch_only(vtos, /*generate_poll*/true);
return;
}
@@ -1833,7 +1833,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
// continue with the bytecode @ target
// rscratch1: target bytecode
// rbcp: target bcp
- __ dispatch_only(vtos);
+ __ dispatch_only(vtos, /*generate_poll*/true);
if (UseLoopCounter) {
if (ProfileInterpreter) {
@@ -1973,7 +1973,7 @@ void TemplateTable::ret() {
__ ldr(rbcp, Address(rmethod, Method::const_offset()));
__ lea(rbcp, Address(rbcp, r1));
__ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
- __ dispatch_next(vtos);
+ __ dispatch_next(vtos, 0, /*generate_poll*/true);
}
void TemplateTable::wide_ret() {
@@ -1984,7 +1984,7 @@ void TemplateTable::wide_ret() {
__ ldr(rbcp, Address(rmethod, Method::const_offset()));
__ lea(rbcp, Address(rbcp, r1));
__ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
- __ dispatch_next(vtos);
+ __ dispatch_next(vtos, 0, /*generate_poll*/true);
}
@@ -2014,7 +2014,7 @@ void TemplateTable::tableswitch() {
__ rev32(r3, r3);
__ load_unsigned_byte(rscratch1, Address(rbcp, r3, Address::sxtw(0)));
__ add(rbcp, rbcp, r3, ext::sxtw);
- __ dispatch_only(vtos);
+ __ dispatch_only(vtos, /*generate_poll*/true);
// handle default
__ bind(default_case);
__ profile_switch_default(r0);
@@ -2064,7 +2064,7 @@ void TemplateTable::fast_linearswitch() {
__ rev32(r3, r3);
__ add(rbcp, rbcp, r3, ext::sxtw);
__ ldrb(rscratch1, Address(rbcp, 0));
- __ dispatch_only(vtos);
+ __ dispatch_only(vtos, /*generate_poll*/true);
}
void TemplateTable::fast_binaryswitch() {
@@ -2162,7 +2162,7 @@ void TemplateTable::fast_binaryswitch() {
__ rev32(j, j);
__ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
__ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
- __ dispatch_only(vtos);
+ __ dispatch_only(vtos, /*generate_poll*/true);
// default case -> j = default offset
__ bind(default_case);
@@ -2171,7 +2171,7 @@ void TemplateTable::fast_binaryswitch() {
__ rev32(j, j);
__ load_unsigned_byte(rscratch1, Address(rbcp, j, Address::sxtw(0)));
__ lea(rbcp, Address(rbcp, j, Address::sxtw(0)));
- __ dispatch_only(vtos);
+ __ dispatch_only(vtos, /*generate_poll*/true);
}
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
index 76d750a6942..33e78cd8914 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
@@ -394,4 +394,6 @@ void VM_Version::initialize() {
g.generate_getPsrInfo());
get_processor_features();
+
+ UNSUPPORTED_OPTION(CriticalJNINatives);
}
diff --git a/src/hotspot/cpu/arm/stubGenerator_arm.cpp b/src/hotspot/cpu/arm/stubGenerator_arm.cpp
index 97ef93c2e4f..ec1a6d8279d 100644
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp
+++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp
@@ -2968,7 +2968,9 @@ class StubGenerator: public StubCodeGenerator {
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
- Label L_cardtable_loop;
+ Label L_cardtable_loop, L_done;
+
+ __ cbz_32(count, L_done); // zero count - nothing to do
__ add_ptr_scaled_int32(count, addr, count, LogBytesPerHeapOop);
__ sub(count, count, BytesPerHeapOop); // last addr
@@ -2987,6 +2989,7 @@ class StubGenerator: public StubCodeGenerator {
__ strb(zero, Address(addr, 1, post_indexed));
__ subs(count, count, 1);
__ b(L_cardtable_loop, ge);
+ __ BIND(L_done);
}
break;
case BarrierSet::ModRef:
diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
index 631b20b81a5..875c5dfdfdf 100644
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
@@ -41,20 +41,25 @@
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
const Register temp_reg = R12_scratch2;
+ Label Lmiss;
+
verify_oop(receiver);
+ MacroAssembler::null_check(receiver, oopDesc::klass_offset_in_bytes(), &Lmiss);
load_klass(temp_reg, receiver);
- if (TrapBasedICMissChecks) {
+
+ if (TrapBasedICMissChecks && TrapBasedNullChecks) {
trap_ic_miss_check(temp_reg, iCache);
} else {
- Label L;
+ Label Lok;
cmpd(CCR0, temp_reg, iCache);
- beq(CCR0, L);
+ beq(CCR0, Lok);
+ bind(Lmiss);
//load_const_optimized(temp_reg, SharedRuntime::get_ic_miss_stub(), R0);
calculate_address_from_global_toc(temp_reg, SharedRuntime::get_ic_miss_stub(), true, true, false);
mtctr(temp_reg);
bctr();
align(32, 12);
- bind(L);
+ bind(Lok);
}
}
diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
index 282ffeb218b..c7fdc9d7e23 100644
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
@@ -3371,7 +3371,7 @@ void TemplateTable::invokevirtual(int byte_no) {
__ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
__ bfalse(CCR0, LnotFinal);
- if (RewriteBytecodes && !UseSharedSpaces) {
+ if (RewriteBytecodes && !UseSharedSpaces && !DumpSharedSpaces) {
patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
}
invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
diff --git a/src/hotspot/cpu/s390/assembler_s390.hpp b/src/hotspot/cpu/s390/assembler_s390.hpp
index a839700259c..9900adbfebc 100644
--- a/src/hotspot/cpu/s390/assembler_s390.hpp
+++ b/src/hotspot/cpu/s390/assembler_s390.hpp
@@ -582,7 +582,11 @@ class Assembler : public AbstractAssembler {
#define LOC_ZOPC (unsigned long)(0xebL << 40 | 0xf2L) // z196
#define LOCG_ZOPC (unsigned long)(0xebL << 40 | 0xe2L) // z196
-#define LMG_ZOPC (unsigned long)(235L << 40 | 4L)
+
+// LOAD multiple registers at once
+#define LM_ZOPC (unsigned int)(0x98 << 24)
+#define LMY_ZOPC (unsigned long)(0xebL << 40 | 0x98L)
+#define LMG_ZOPC (unsigned long)(0xebL << 40 | 0x04L)
#define LE_ZOPC (unsigned int)(0x78 << 24)
#define LEY_ZOPC (unsigned long)(237L << 40 | 100L)
@@ -613,7 +617,10 @@ class Assembler : public AbstractAssembler {
#define STOC_ZOPC (unsigned long)(0xebL << 40 | 0xf3L) // z196
#define STOCG_ZOPC (unsigned long)(0xebL << 40 | 0xe3L) // z196
-#define STMG_ZOPC (unsigned long)(235L << 40 | 36L)
+// STORE multiple registers at once
+#define STM_ZOPC (unsigned int)(0x90 << 24)
+#define STMY_ZOPC (unsigned long)(0xebL << 40 | 0x90L)
+#define STMG_ZOPC (unsigned long)(0xebL << 40 | 0x24L)
#define STE_ZOPC (unsigned int)(0x70 << 24)
#define STEY_ZOPC (unsigned long)(237L << 40 | 102L)
@@ -874,15 +881,19 @@ class Assembler : public AbstractAssembler {
// Shift
// arithmetic
-#define SLA_ZOPC (unsigned int)(139 << 24)
-#define SLAG_ZOPC (unsigned long)(235L << 40 | 11L)
-#define SRA_ZOPC (unsigned int)(138 << 24)
-#define SRAG_ZOPC (unsigned long)(235L << 40 | 10L)
+#define SLA_ZOPC (unsigned int)(0x8b << 24)
+#define SLAK_ZOPC (unsigned long)(0xebL << 40 | 0xddL)
+#define SLAG_ZOPC (unsigned long)(0xebL << 40 | 0x0bL)
+#define SRA_ZOPC (unsigned int)(0x8a << 24)
+#define SRAK_ZOPC (unsigned long)(0xebL << 40 | 0xdcL)
+#define SRAG_ZOPC (unsigned long)(0xebL << 40 | 0x0aL)
// logical
-#define SLL_ZOPC (unsigned int)(137 << 24)
-#define SLLG_ZOPC (unsigned long)(235L << 40 | 13L)
-#define SRL_ZOPC (unsigned int)(136 << 24)
-#define SRLG_ZOPC (unsigned long)(235L << 40 | 12L)
+#define SLL_ZOPC (unsigned int)(0x89 << 24)
+#define SLLK_ZOPC (unsigned long)(0xebL << 40 | 0xdfL)
+#define SLLG_ZOPC (unsigned long)(0xebL << 40 | 0x0dL)
+#define SRL_ZOPC (unsigned int)(0x88 << 24)
+#define SRLK_ZOPC (unsigned long)(0xebL << 40 | 0xdeL)
+#define SRLG_ZOPC (unsigned long)(0xebL << 40 | 0x0cL)
// Rotate, then AND/XOR/OR/insert
// rotate
@@ -2262,12 +2273,16 @@ class Assembler : public AbstractAssembler {
// shift
inline void z_sla( Register r1, int64_t d2, Register b2=Z_R0); // shift left r1 = r1 << ((d2+b2)&0x3f) ; int32, only 31 bits shifted, sign preserved!
+ inline void z_slak(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift left r1 = r3 << ((d2+b2)&0x3f) ; int32, only 31 bits shifted, sign preserved!
inline void z_slag(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift left r1 = r3 << ((d2+b2)&0x3f) ; int64, only 63 bits shifted, sign preserved!
inline void z_sra( Register r1, int64_t d2, Register b2=Z_R0); // shift right r1 = r1 >> ((d2+b2)&0x3f) ; int32, sign extended
+ inline void z_srak(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift right r1 = r3 >> ((d2+b2)&0x3f) ; int32, sign extended
inline void z_srag(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift right r1 = r3 >> ((d2+b2)&0x3f) ; int64, sign extended
inline void z_sll( Register r1, int64_t d2, Register b2=Z_R0); // shift left r1 = r1 << ((d2+b2)&0x3f) ; int32, zeros added
+ inline void z_sllk(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift left r1 = r3 << ((d2+b2)&0x3f) ; int32, zeros added
inline void z_sllg(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift left r1 = r3 << ((d2+b2)&0x3f) ; int64, zeros added
inline void z_srl( Register r1, int64_t d2, Register b2=Z_R0); // shift right r1 = r1 >> ((d2+b2)&0x3f) ; int32, zero extended
+ inline void z_srlk(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift right r1 = r3 >> ((d2+b2)&0x3f) ; int32, zero extended
inline void z_srlg(Register r1, Register r3, int64_t d2, Register b2=Z_R0); // shift right r1 = r3 >> ((d2+b2)&0x3f) ; int64, zero extended
// rotate
@@ -3035,7 +3050,11 @@ class Assembler : public AbstractAssembler {
inline void z_tam();
inline void z_stckf(int64_t d2, Register b2);
+ inline void z_stm( Register r1, Register r3, int64_t d2, Register b2);
+ inline void z_stmy(Register r1, Register r3, int64_t d2, Register b2);
inline void z_stmg(Register r1, Register r3, int64_t d2, Register b2);
+ inline void z_lm( Register r1, Register r3, int64_t d2, Register b2);
+ inline void z_lmy(Register r1, Register r3, int64_t d2, Register b2);
inline void z_lmg(Register r1, Register r3, int64_t d2, Register b2);
inline void z_cs( Register r1, Register r3, int64_t d2, Register b2);
diff --git a/src/hotspot/cpu/s390/assembler_s390.inline.hpp b/src/hotspot/cpu/s390/assembler_s390.inline.hpp
index 19c472787c5..583d86e18b0 100644
--- a/src/hotspot/cpu/s390/assembler_s390.inline.hpp
+++ b/src/hotspot/cpu/s390/assembler_s390.inline.hpp
@@ -334,12 +334,16 @@ inline void Assembler::z_stfle(int64_t d2, Register b2) { emit_32(STFLE_ZOPC | u
// SHIFT/ROTATE OPERATIONS
//-----------------------------------
inline void Assembler::z_sla( Register r1, int64_t d2, Register b2) { emit_32( SLA_ZOPC | regt(r1, 8, 32) | uimm12(d2, 20, 32) | reg(b2, 16, 32)); }
+inline void Assembler::z_slak(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SLAK_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
inline void Assembler::z_slag(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SLAG_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
inline void Assembler::z_sra( Register r1, int64_t d2, Register b2) { emit_32( SRA_ZOPC | regt(r1, 8, 32) | uimm12(d2, 20, 32) | reg(b2, 16, 32)); }
+inline void Assembler::z_srak(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SRAK_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
inline void Assembler::z_srag(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SRAG_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
inline void Assembler::z_sll( Register r1, int64_t d2, Register b2) { emit_32( SLL_ZOPC | regt(r1, 8, 32) | uimm12(d2, 20, 32) | reg(b2, 16, 32)); }
+inline void Assembler::z_sllk(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SLLK_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
inline void Assembler::z_sllg(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SLLG_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
inline void Assembler::z_srl( Register r1, int64_t d2, Register b2) { emit_32( SRL_ZOPC | regt(r1, 8, 32) | uimm12(d2, 20, 32) | reg(b2, 16, 32)); }
+inline void Assembler::z_srlk(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SRLK_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
inline void Assembler::z_srlg(Register r1, Register r3, int64_t d2, Register b2) { emit_48( SRLG_ZOPC | regt(r1, 8, 48) | simm20(d2) | reg(b2, 16, 48) | reg(r3, 12, 48)); }
// rotate left
@@ -690,10 +694,14 @@ inline void Assembler::z_ahhlr(Register r1, Register r2, Register r3) { emit_32(
inline void Assembler::z_tam() { emit_16( TAM_ZOPC); }
inline void Assembler::z_stckf(int64_t d2, Register b2) { emit_32( STCKF_ZOPC | uimm12(d2, 20, 32) | regz(b2, 16, 32)); }
-inline void Assembler::z_stmg(Register r1, Register r3, int64_t d2, Register b2) { emit_48( STMG_ZOPC | simm20(d2) | reg(r1, 8, 48) | reg(r3,12,48)| reg(b2,16,48) ); }
-inline void Assembler::z_lmg(Register r1, Register r3, int64_t d2, Register b2) { emit_48( LMG_ZOPC | simm20(d2) | reg(r1, 8, 48) | reg(r3,12,48)| reg(b2,16,48) ); }
+inline void Assembler::z_stm( Register r1, Register r3, int64_t d2, Register b2) { emit_32( STM_ZOPC | reg(r1, 8, 32) | reg(r3,12,32)| reg(b2,16,32) | uimm12(d2, 20,32)); }
+inline void Assembler::z_stmy(Register r1, Register r3, int64_t d2, Register b2) { emit_48( STMY_ZOPC | reg(r1, 8, 48) | reg(r3,12,48)| reg(b2,16,48) | simm20(d2) ); }
+inline void Assembler::z_stmg(Register r1, Register r3, int64_t d2, Register b2) { emit_48( STMG_ZOPC | reg(r1, 8, 48) | reg(r3,12,48)| reg(b2,16,48) | simm20(d2) ); }
+inline void Assembler::z_lm( Register r1, Register r3, int64_t d2, Register b2) { emit_32( LM_ZOPC | reg(r1, 8, 32) | reg(r3,12,32)| reg(b2,16,32) | uimm12(d2, 20,32)); }
+inline void Assembler::z_lmy( Register r1, Register r3, int64_t d2, Register b2) { emit_48( LMY_ZOPC | reg(r1, 8, 48) | reg(r3,12,48)| reg(b2,16,48) | simm20(d2) ); }
+inline void Assembler::z_lmg( Register r1, Register r3, int64_t d2, Register b2) { emit_48( LMG_ZOPC | reg(r1, 8, 48) | reg(r3,12,48)| reg(b2,16,48) | simm20(d2) ); }
-inline void Assembler::z_cs(Register r1, Register r3, int64_t d2, Register b2) { emit_32( CS_ZOPC | regt(r1, 8, 32) | reg(r3, 12, 32) | reg(b2, 16, 32) | uimm12(d2, 20, 32)); }
+inline void Assembler::z_cs( Register r1, Register r3, int64_t d2, Register b2) { emit_32( CS_ZOPC | regt(r1, 8, 32) | reg(r3, 12, 32) | reg(b2, 16, 32) | uimm12(d2, 20, 32)); }
inline void Assembler::z_csy(Register r1, Register r3, int64_t d2, Register b2) { emit_48( CSY_ZOPC | regt(r1, 8, 48) | reg(r3, 12, 48) | reg(b2, 16, 48) | simm20(d2)); }
inline void Assembler::z_csg(Register r1, Register r3, int64_t d2, Register b2) { emit_48( CSG_ZOPC | regt(r1, 8, 48) | reg(r3, 12, 48) | reg(b2, 16, 48) | simm20(d2)); }
inline void Assembler::z_cs( Register r1, Register r3, const Address& a) { assert(!a.has_index(), "Cannot encode index"); z_cs( r1, r3, a.disp(), a.baseOrR0()); }
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
index af2c02934ff..afc7a7667e2 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
@@ -936,7 +936,7 @@ void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
// Some extra safety net.
if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
- guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "too far away");
+ guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
}
(this)->relocate(rspec, relocInfo::pcrel_addr_format);
@@ -956,7 +956,7 @@ void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
// Some extra safety net.
if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
- guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "too far away");
+    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_addr_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
}
(this)->relocate(rspec, relocInfo::pcrel_addr_format);
@@ -1025,6 +1025,13 @@ void MacroAssembler::testbit(Register r, unsigned int bitPos) {
}
}
+void MacroAssembler::prefetch_read(Address a) {
+ z_pfd(1, a.disp20(), a.indexOrR0(), a.base());
+}
+void MacroAssembler::prefetch_update(Address a) {
+ z_pfd(2, a.disp20(), a.indexOrR0(), a.base());
+}
+
// Clear a register, i.e. load const zero into reg.
// Return len (in bytes) of generated instruction(s).
// whole_reg: Clear 64 bits if true, 32 bits otherwise.
@@ -4896,77 +4903,296 @@ unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Reg
// Intrinsics for CompactStrings
-// Compress char[] to byte[]. odd_reg contains cnt. Kills dst. Early clobber: result
+// Compress char[] to byte[].
+// Restores: src, dst
+// Uses: cnt
+// Kills: tmp, Z_R0, Z_R1.
+// Early clobber: result.
+// Note:
+// cnt is signed int. Do not rely on high word!
+// counts # characters, not bytes.
// The result is the number of characters copied before the first incompatible character was found.
-// If tmp2 is provided and the compression fails, the compression stops exactly at this point and the result is precise.
+// If precise is true, the processing stops exactly at this point. Otherwise, the result may be off
+// by a few bytes. The result always indicates the number of copied characters.
//
// Note: Does not behave exactly like package private StringUTF16 compress java implementation in case of failure:
-// - Different number of characters may have been written to dead array (if tmp2 not provided).
+// - Different number of characters may have been written to dead array (if precise is false).
// - Returns a number ---
+    // Strings with 4 and 8 characters were found to occur very frequently.
+ // Therefore, we handle them right away with minimal overhead.
+ Label skipShortcut, skip4Shortcut, skip8Shortcut;
+ Register Rout = Z_R0;
+ z_chi(Rcnt, 4);
+ z_brne(skip4Shortcut); // 4 characters are very frequent
+ z_lg(Z_R0, 0, Rsrc); // Treat exactly 4 characters specially.
+ if (VM_Version::has_DistinctOpnds()) {
+ Rout = Z_R0;
+ z_ngrk(Rix, Z_R0, Rmask);
+ } else {
+ Rout = Rix;
+ z_lgr(Rix, Z_R0);
+ z_ngr(Z_R0, Rmask);
+ }
+ z_brnz(skipShortcut);
+ z_stcmh(Rout, 5, 0, Rdst);
+ z_stcm(Rout, 5, 2, Rdst);
+ z_lgfr(result, Rcnt);
+ z_bru(AllDone);
+ bind(skip4Shortcut);
+
+ z_chi(Rcnt, 8);
+ z_brne(skip8Shortcut); // There's more to do...
+ z_lmg(Z_R0, Z_R1, 0, Rsrc); // Treat exactly 8 characters specially.
+ if (VM_Version::has_DistinctOpnds()) {
+ Rout = Z_R0;
+ z_ogrk(Rix, Z_R0, Z_R1);
+ z_ngr(Rix, Rmask);
+ } else {
+ Rout = Rix;
+ z_lgr(Rix, Z_R0);
+ z_ogr(Z_R0, Z_R1);
+ z_ngr(Z_R0, Rmask);
+ }
+ z_brnz(skipShortcut);
+ z_stcmh(Rout, 5, 0, Rdst);
+ z_stcm(Rout, 5, 2, Rdst);
+ z_stcmh(Z_R1, 5, 4, Rdst);
+ z_stcm(Z_R1, 5, 6, Rdst);
+ z_lgfr(result, Rcnt);
+ z_bru(AllDone);
+
+ bind(skip8Shortcut);
+ clear_reg(Z_R0, true, false); // #characters already processed (none). Precond for scalar loop.
+ z_brl(ScalarShortcut); // Just a few characters
+
+ bind(skipShortcut);
+ }
+#endif
+ clear_reg(Z_R0); // make sure register is properly initialized.
+
+ if (VM_Version::has_VectorFacility()) {
+ const int min_vcnt = 32; // Minimum #characters required to use vector instructions.
+ // Otherwise just do nothing in vector mode.
+ // Must be multiple of 2*(vector register length in chars (8 HW = 128 bits)).
+ const int log_min_vcnt = exact_log2(min_vcnt);
+ Label VectorLoop, VectorDone, VectorBreak;
+
+ VectorRegister Vtmp1 = Z_V16;
+ VectorRegister Vtmp2 = Z_V17;
+ VectorRegister Vmask = Z_V18;
+ VectorRegister Vzero = Z_V19;
+ VectorRegister Vsrc_first = Z_V20;
+ VectorRegister Vsrc_last = Z_V23;
+
+ assert((Vsrc_last->encoding() - Vsrc_first->encoding() + 1) == min_vcnt/8, "logic error");
+ assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
+ z_srak(Rix, Rcnt, log_min_vcnt); // # vector loop iterations
+ z_brz(VectorDone); // not enough data for vector loop
+
+ z_vzero(Vzero); // all zeroes
+ z_vgmh(Vmask, 0, 7); // generate 0xff00 mask for all 2-byte elements
+ z_sllg(Z_R0, Rix, log_min_vcnt); // remember #chars that will be processed by vector loop
+
+ bind(VectorLoop);
+ z_vlm(Vsrc_first, Vsrc_last, 0, Rsrc);
+ add2reg(Rsrc, min_vcnt*2);
+
+ //---< check for incompatible character >---
+ z_vo(Vtmp1, Z_V20, Z_V21);
+ z_vo(Vtmp2, Z_V22, Z_V23);
+ z_vo(Vtmp1, Vtmp1, Vtmp2);
+ z_vn(Vtmp1, Vtmp1, Vmask);
+ z_vceqhs(Vtmp1, Vtmp1, Vzero); // high half of all chars must be zero for successful compress.
+ z_brne(VectorBreak); // break vector loop, incompatible character found.
+ // re-process data from current iteration in break handler.
+
+ //---< pack & store characters >---
+ z_vpkh(Vtmp1, Z_V20, Z_V21); // pack (src1, src2) -> tmp1
+ z_vpkh(Vtmp2, Z_V22, Z_V23); // pack (src3, src4) -> tmp2
+ z_vstm(Vtmp1, Vtmp2, 0, Rdst); // store packed string
+ add2reg(Rdst, min_vcnt);
+
+ z_brct(Rix, VectorLoop);
+
+ z_bru(VectorDone);
+
+ bind(VectorBreak);
+ add2reg(Rsrc, -min_vcnt*2); // Fix Rsrc. Rsrc was already updated, but Rdst and Rix are not.
+ z_sll(Rix, log_min_vcnt); // # chars processed so far in VectorLoop, excl. current iteration.
+ z_sr(Z_R0, Rix); // correct # chars processed in total.
+
+ bind(VectorDone);
+ }
+
+ {
+ const int min_cnt = 8; // Minimum #characters required to use unrolled loop.
+ // Otherwise just do nothing in unrolled loop.
+ // Must be multiple of 8.
+ const int log_min_cnt = exact_log2(min_cnt);
+ Label UnrolledLoop, UnrolledDone, UnrolledBreak;
+
if (VM_Version::has_DistinctOpnds()) {
- z_ogrk(tmp2, Z_R0, Z_R1);
+ z_srk(Rix, Rcnt, Z_R0); // remaining # chars to compress in unrolled loop
} else {
- z_lgr(tmp2, Z_R0);
- z_ogr(tmp2, Z_R1);
+ z_lr(Rix, Rcnt);
+ z_sr(Rix, Z_R0);
}
- z_ngr(tmp2, mask);
- z_brne(Lslow); // Failed fast case, retry slowly.
+ z_sra(Rix, log_min_cnt); // unrolled loop count
+ z_brz(UnrolledDone);
+
+ bind(UnrolledLoop);
+ z_lmg(Z_R0, Z_R1, 0, Rsrc);
+ if (precise) {
+ z_ogr(Z_R1, Z_R0); // check all 8 chars for incompatibility
+ z_ngr(Z_R1, Rmask);
+ z_brnz(UnrolledBreak);
+
+ z_lg(Z_R1, 8, Rsrc); // reload destroyed register
+ z_stcmh(Z_R0, 5, 0, Rdst);
+ z_stcm(Z_R0, 5, 2, Rdst);
+ } else {
+ z_stcmh(Z_R0, 5, 0, Rdst);
+ z_stcm(Z_R0, 5, 2, Rdst);
+
+ z_ogr(Z_R0, Z_R1);
+ z_ngr(Z_R0, Rmask);
+ z_brnz(UnrolledBreak);
+ }
+ z_stcmh(Z_R1, 5, 4, Rdst);
+ z_stcm(Z_R1, 5, 6, Rdst);
+
+ add2reg(Rsrc, min_cnt*2);
+ add2reg(Rdst, min_cnt);
+ z_brct(Rix, UnrolledLoop);
+
+ z_lgfr(Z_R0, Rcnt); // # chars processed in total after unrolled loop.
+ z_nilf(Z_R0, ~(min_cnt-1));
+ z_tmll(Rcnt, min_cnt-1);
+ z_brnaz(ScalarShortcut); // if all bits zero, there is nothing left to do for scalar loop.
+ // Rix == 0 in all cases.
+ z_lgfr(result, Rcnt); // all characters processed.
+ z_sgfr(Rdst, Rcnt); // restore ptr
+ z_sgfr(Rsrc, Rcnt); // restore ptr, double the element count for Rsrc restore
+ z_sgfr(Rsrc, Rcnt);
+ z_bru(AllDone);
+
+ bind(UnrolledBreak);
+ z_lgfr(Z_R0, Rcnt); // # chars processed in total after unrolled loop
+ z_nilf(Z_R0, ~(min_cnt-1));
+ z_sll(Rix, log_min_cnt); // # chars processed so far in UnrolledLoop, excl. current iteration.
+ z_sr(Z_R0, Rix); // correct # chars processed in total.
+ if (!precise) {
+ z_lgfr(result, Z_R0);
+ z_aghi(result, min_cnt/2); // min_cnt/2 characters have already been written
+ // but ptrs were not updated yet.
+ z_sgfr(Rdst, Z_R0); // restore ptr
+ z_sgfr(Rsrc, Z_R0); // restore ptr, double the element count for Rsrc restore
+ z_sgfr(Rsrc, Z_R0);
+ z_bru(AllDone);
+ }
+ bind(UnrolledDone);
}
- z_stcmh(Z_R0, 5, 0, addr2);
- z_stcm(Z_R0, 5, 2, addr2);
- if (!precise) { z_ogr(Z_R0, Z_R1); }
- z_stcmh(Z_R1, 5, 4, addr2);
- z_stcm(Z_R1, 5, 6, addr2);
- if (!precise) {
- z_ngr(Z_R0, mask);
- z_brne(Ldone); // Failed (more than needed was written).
+
+ {
+ Label ScalarLoop, ScalarDone, ScalarBreak;
+
+ bind(ScalarShortcut);
+ z_ltgfr(result, Rcnt);
+ z_brz(AllDone);
+
+#if 0 // Sacrifice shortcuts for code compactness
+ {
+ //---< Special treatment for very short strings (one or two characters) >---
+ // For these strings, we are sure that the above code was skipped.
+ // Thus, no registers were modified, register restore is not required.
+ Label ScalarDoit, Scalar2Char;
+ z_chi(Rcnt, 2);
+ z_brh(ScalarDoit);
+ z_llh(Z_R1, 0, Z_R0, Rsrc);
+ z_bre(Scalar2Char);
+ z_tmll(Z_R1, 0xff00);
+ z_lghi(result, 0); // cnt == 1, first char invalid, no chars successfully processed
+ z_brnaz(AllDone);
+ z_stc(Z_R1, 0, Z_R0, Rdst);
+ z_lghi(result, 1);
+ z_bru(AllDone);
+
+ bind(Scalar2Char);
+ z_llh(Z_R0, 2, Z_R0, Rsrc);
+ z_tmll(Z_R1, 0xff00);
+ z_lghi(result, 0); // cnt == 2, first char invalid, no chars successfully processed
+ z_brnaz(AllDone);
+ z_stc(Z_R1, 0, Z_R0, Rdst);
+ z_tmll(Z_R0, 0xff00);
+ z_lghi(result, 1); // cnt == 2, second char invalid, one char successfully processed
+ z_brnaz(AllDone);
+ z_stc(Z_R0, 1, Z_R0, Rdst);
+ z_lghi(result, 2);
+ z_bru(AllDone);
+
+ bind(ScalarDoit);
+ }
+#endif
+
+ if (VM_Version::has_DistinctOpnds()) {
+ z_srk(Rix, Rcnt, Z_R0); // remaining # chars to compress in unrolled loop
+ } else {
+ z_lr(Rix, Rcnt);
+ z_sr(Rix, Z_R0);
+ }
+ z_lgfr(result, Rcnt); // # processed characters (if all runs ok).
+ z_brz(ScalarDone);
+
+ bind(ScalarLoop);
+ z_llh(Z_R1, 0, Z_R0, Rsrc);
+ z_tmll(Z_R1, 0xff00);
+ z_brnaz(ScalarBreak);
+ z_stc(Z_R1, 0, Z_R0, Rdst);
+ add2reg(Rsrc, 2);
+ add2reg(Rdst, 1);
+ z_brct(Rix, ScalarLoop);
+
+ z_bru(ScalarDone);
+
+ bind(ScalarBreak);
+ z_sr(result, Rix);
+
+ bind(ScalarDone);
+ z_sgfr(Rdst, result); // restore ptr
+ z_sgfr(Rsrc, result); // restore ptr, double the element count for Rsrc restore
+ z_sgfr(Rsrc, result);
}
- z_aghi(addr2, 8);
- z_brxle(ind1, even_reg, Lloop1);
-
- bind(Lslow);
- // Compute index limit and skip if negative.
- z_ahi(odd_reg, 16-2); // Last possible index for slow loop.
- z_lhi(even_reg, 2);
- z_cr(ind1, odd_reg);
- z_brh(Ldone);
-
- bind(Lloop2); // 1 Character per iteration.
- z_llh(Z_R0, Address(src, ind1));
- z_tmll(Z_R0, 0xFF00);
- z_brnaz(Ldone); // Failed slow case: Return number of written characters.
- z_stc(Z_R0, Address(addr2));
- z_aghi(addr2, 1);
- z_brxle(ind1, even_reg, Lloop2);
-
- bind(Ldone); // result = ind1 = 2*cnt
- z_srl(ind1, 1);
-
- BLOCK_COMMENT("} string_compress");
+ bind(AllDone);
+ if (precise) {
+ BLOCK_COMMENT("} encode_iso_array");
+ } else {
+ BLOCK_COMMENT("} string_compress");
+ }
return offset() - block_start;
}
@@ -4997,53 +5223,432 @@ unsigned int MacroAssembler::string_inflate_trot(Register src, Register dst, Reg
return offset() - block_start;
}
-// Inflate byte[] to char[]. odd_reg contains cnt. Kills src.
-unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register odd_reg,
- Register even_reg, Register tmp) {
- int block_start = offset();
+// Inflate byte[] to char[].
+// Restores: src, dst
+// Uses: cnt
+// Kills: tmp, Z_R0, Z_R1.
+// Note:
+// cnt is signed int. Do not rely on high word!
+// counts # characters, not bytes.
+unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp) {
+ assert_different_registers(Z_R0, Z_R1, src, dst, cnt, tmp);
BLOCK_COMMENT("string_inflate {");
+ int block_start = offset();
- Label Lloop1, Lloop2, Lslow, Ldone;
- const Register addr1 = src, ind2 = tmp;
+ Register Rcnt = cnt; // # characters (src: bytes, dst: char (2-byte)), remaining after current loop.
+ Register Rix = tmp; // loop index
+ Register Rsrc = src; // addr(src array)
+ Register Rdst = dst; // addr(dst array)
+ Label ScalarShortcut, AllDone;
- z_sll(odd_reg, 1); // Number of bytes to write. (Must be a positive simm32.)
- clear_reg(ind2); // Index to write.
- z_ahi(odd_reg, -16); // Last possible index for fast loop.
- z_brl(Lslow);
+#if 0 // Sacrifice shortcuts for code compactness
+ {
+ //---< shortcuts for short strings (very frequent) >---
+ Label skipShortcut, skip4Shortcut;
+ z_ltr(Rcnt, Rcnt); // absolutely nothing to do for strings of len == 0.
+ z_brz(AllDone);
+ clear_reg(Z_R0); // make sure registers are properly initialized.
+ clear_reg(Z_R1);
+ z_chi(Rcnt, 4);
+ z_brne(skip4Shortcut); // 4 characters are very frequent
+ z_icm(Z_R0, 5, 0, Rsrc); // Treat exactly 4 characters specially.
+ z_icm(Z_R1, 5, 2, Rsrc);
+ z_stm(Z_R0, Z_R1, 0, Rdst);
+ z_bru(AllDone);
+ bind(skip4Shortcut);
- // ind2: index, even_reg: index increment, odd_reg: index limit
- clear_reg(Z_R0);
- clear_reg(Z_R1);
- z_lhi(even_reg, 16);
+ z_chi(Rcnt, 8);
+ z_brh(skipShortcut); // There's a lot to do...
+ z_lgfr(Z_R0, Rcnt); // remaining #characters (<= 8). Precond for scalar loop.
+ // This does not destroy the "register cleared" state of Z_R0.
+ z_brl(ScalarShortcut); // Just a few characters
+ z_icmh(Z_R0, 5, 0, Rsrc); // Treat exactly 8 characters specially.
+ z_icmh(Z_R1, 5, 4, Rsrc);
+ z_icm(Z_R0, 5, 2, Rsrc);
+ z_icm(Z_R1, 5, 6, Rsrc);
+ z_stmg(Z_R0, Z_R1, 0, Rdst);
+ z_bru(AllDone);
+ bind(skipShortcut);
+ }
+#endif
+ clear_reg(Z_R0); // make sure register is properly initialized.
- bind(Lloop1); // 8 Characters per iteration.
- z_icmh(Z_R0, 5, 0, addr1);
- z_icmh(Z_R1, 5, 4, addr1);
- z_icm(Z_R0, 5, 2, addr1);
- z_icm(Z_R1, 5, 6, addr1);
- z_aghi(addr1, 8);
- z_stg(Z_R0, Address(dst, ind2));
- z_stg(Z_R1, Address(dst, ind2, 8));
- z_brxle(ind2, even_reg, Lloop1);
+ if (VM_Version::has_VectorFacility()) {
+ const int min_vcnt = 32; // Minimum #characters required to use vector instructions.
+ // Otherwise just do nothing in vector mode.
+ // Must be multiple of vector register length (16 bytes = 128 bits).
+ const int log_min_vcnt = exact_log2(min_vcnt);
+ Label VectorLoop, VectorDone;
- bind(Lslow);
- // Compute index limit and skip if negative.
- z_ahi(odd_reg, 16-2); // Last possible index for slow loop.
- z_lhi(even_reg, 2);
- z_cr(ind2, odd_reg);
- z_brh(Ldone);
+ assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
+ z_srak(Rix, Rcnt, log_min_vcnt); // calculate # vector loop iterations
+ z_brz(VectorDone); // skip if none
- bind(Lloop2); // 1 Character per iteration.
- z_llc(Z_R0, Address(addr1));
- z_sth(Z_R0, Address(dst, ind2));
- z_aghi(addr1, 1);
- z_brxle(ind2, even_reg, Lloop2);
+ z_sllg(Z_R0, Rix, log_min_vcnt); // remember #chars that will be processed by vector loop
- bind(Ldone);
+ bind(VectorLoop);
+ z_vlm(Z_V20, Z_V21, 0, Rsrc); // get next 32 characters (single-byte)
+ add2reg(Rsrc, min_vcnt);
+
+ z_vuplhb(Z_V22, Z_V20); // V2 <- (expand) V0(high)
+ z_vupllb(Z_V23, Z_V20); // V3 <- (expand) V0(low)
+ z_vuplhb(Z_V24, Z_V21); // V4 <- (expand) V1(high)
+ z_vupllb(Z_V25, Z_V21); // V5 <- (expand) V1(low)
+ z_vstm(Z_V22, Z_V25, 0, Rdst); // store next 32 bytes
+ add2reg(Rdst, min_vcnt*2);
+
+ z_brct(Rix, VectorLoop);
+
+ bind(VectorDone);
+ }
+
+ const int min_cnt = 8; // Minimum #characters required to use unrolled scalar loop.
+ // Otherwise just do nothing in unrolled scalar mode.
+ // Must be multiple of 8.
+ {
+ const int log_min_cnt = exact_log2(min_cnt);
+ Label UnrolledLoop, UnrolledDone;
+
+
+ if (VM_Version::has_DistinctOpnds()) {
+ z_srk(Rix, Rcnt, Z_R0); // remaining # chars to process in unrolled loop
+ } else {
+ z_lr(Rix, Rcnt);
+ z_sr(Rix, Z_R0);
+ }
+ z_sra(Rix, log_min_cnt); // unrolled loop count
+ z_brz(UnrolledDone);
+
+ clear_reg(Z_R0);
+ clear_reg(Z_R1);
+
+ bind(UnrolledLoop);
+ z_icmh(Z_R0, 5, 0, Rsrc);
+ z_icmh(Z_R1, 5, 4, Rsrc);
+ z_icm(Z_R0, 5, 2, Rsrc);
+ z_icm(Z_R1, 5, 6, Rsrc);
+ add2reg(Rsrc, min_cnt);
+
+ z_stmg(Z_R0, Z_R1, 0, Rdst);
+
+ add2reg(Rdst, min_cnt*2);
+ z_brct(Rix, UnrolledLoop);
+
+ bind(UnrolledDone);
+ z_lgfr(Z_R0, Rcnt); // # chars left over after unrolled loop.
+ z_nilf(Z_R0, min_cnt-1);
+ z_brnz(ScalarShortcut); // if zero, there is nothing left to do for scalar loop.
+ // Rix == 0 in all cases.
+ z_sgfr(Z_R0, Rcnt); // negative # characters the ptrs have been advanced previously.
+ z_agr(Rdst, Z_R0); // restore ptr, double the element count for Rdst restore.
+ z_agr(Rdst, Z_R0);
+ z_agr(Rsrc, Z_R0); // restore ptr.
+ z_bru(AllDone);
+ }
+
+ {
+ bind(ScalarShortcut);
+ // Z_R0 must contain remaining # characters as 64-bit signed int here.
+ // register contents is preserved over scalar processing (for register fixup).
+
+#if 0 // Sacrifice shortcuts for code compactness
+ {
+ Label ScalarDefault;
+ z_chi(Rcnt, 2);
+ z_brh(ScalarDefault);
+ z_llc(Z_R0, 0, Z_R0, Rsrc); // 6 bytes
+ z_sth(Z_R0, 0, Z_R0, Rdst); // 4 bytes
+ z_brl(AllDone);
+ z_llc(Z_R0, 1, Z_R0, Rsrc); // 6 bytes
+ z_sth(Z_R0, 2, Z_R0, Rdst); // 4 bytes
+ z_bru(AllDone);
+ bind(ScalarDefault);
+ }
+#endif
+
+ Label CodeTable;
+ // Some comments on Rix calculation:
+ // - Rcnt is small, therefore no bits shifted out of low word (sll(g) instructions).
+ // - high word of both Rix and Rcnt may contain garbage
+ // - the final lngfr takes care of that garbage, extending the sign to high word
+ z_sllg(Rix, Z_R0, 2); // calculate 10*Rix = (4*Rix + Rix)*2
+ z_ar(Rix, Z_R0);
+ z_larl(Z_R1, CodeTable);
+ z_sll(Rix, 1);
+ z_lngfr(Rix, Rix); // ix range: [0..7], after inversion & mult: [-(7*12)..(0*12)].
+ z_bc(Assembler::bcondAlways, 0, Rix, Z_R1);
+
+ z_llc(Z_R1, 6, Z_R0, Rsrc); // 6 bytes
+ z_sth(Z_R1, 12, Z_R0, Rdst); // 4 bytes
+
+ z_llc(Z_R1, 5, Z_R0, Rsrc);
+ z_sth(Z_R1, 10, Z_R0, Rdst);
+
+ z_llc(Z_R1, 4, Z_R0, Rsrc);
+ z_sth(Z_R1, 8, Z_R0, Rdst);
+
+ z_llc(Z_R1, 3, Z_R0, Rsrc);
+ z_sth(Z_R1, 6, Z_R0, Rdst);
+
+ z_llc(Z_R1, 2, Z_R0, Rsrc);
+ z_sth(Z_R1, 4, Z_R0, Rdst);
+
+ z_llc(Z_R1, 1, Z_R0, Rsrc);
+ z_sth(Z_R1, 2, Z_R0, Rdst);
+
+ z_llc(Z_R1, 0, Z_R0, Rsrc);
+ z_sth(Z_R1, 0, Z_R0, Rdst);
+ bind(CodeTable);
+
+ z_chi(Rcnt, 8); // no fixup for small strings. Rdst, Rsrc were not modified.
+ z_brl(AllDone);
+
+ z_sgfr(Z_R0, Rcnt); // # characters the ptrs have been advanced previously.
+ z_agr(Rdst, Z_R0); // restore ptr, double the element count for Rdst restore.
+ z_agr(Rdst, Z_R0);
+ z_agr(Rsrc, Z_R0); // restore ptr.
+ }
+ bind(AllDone);
BLOCK_COMMENT("} string_inflate");
+ return offset() - block_start;
+}
+// Inflate byte[] to char[], length known at compile time.
+// Restores: src, dst
+// Kills: tmp, Z_R0, Z_R1.
+// Note:
+// len is signed int. Counts # characters, not bytes.
+unsigned int MacroAssembler::string_inflate_const(Register src, Register dst, Register tmp, int len) {
+ assert_different_registers(Z_R0, Z_R1, src, dst, tmp);
+
+ BLOCK_COMMENT("string_inflate_const {");
+ int block_start = offset();
+
+ Register Rix = tmp; // loop index
+ Register Rsrc = src; // addr(src array)
+ Register Rdst = dst; // addr(dst array)
+ Label ScalarShortcut, AllDone;
+ int nprocessed = 0;
+ int src_off = 0; // compensate for saved (optimized away) ptr advancement.
+ int dst_off = 0; // compensate for saved (optimized away) ptr advancement.
+ bool restore_inputs = false;
+ bool workreg_clear = false;
+
+ if ((len >= 32) && VM_Version::has_VectorFacility()) {
+ const int min_vcnt = 32; // Minimum #characters required to use vector instructions.
+ // Otherwise just do nothing in vector mode.
+ // Must be multiple of vector register length (16 bytes = 128 bits).
+ const int log_min_vcnt = exact_log2(min_vcnt);
+ const int iterations = (len - nprocessed) >> log_min_vcnt;
+ nprocessed += iterations << log_min_vcnt;
+ Label VectorLoop;
+
+ if (iterations == 1) {
+ z_vlm(Z_V20, Z_V21, 0+src_off, Rsrc); // get next 32 characters (single-byte)
+ z_vuplhb(Z_V22, Z_V20); // V2 <- (expand) V0(high)
+ z_vupllb(Z_V23, Z_V20); // V3 <- (expand) V0(low)
+ z_vuplhb(Z_V24, Z_V21); // V4 <- (expand) V1(high)
+ z_vupllb(Z_V25, Z_V21); // V5 <- (expand) V1(low)
+ z_vstm(Z_V22, Z_V25, 0+dst_off, Rdst); // store next 32 bytes
+
+ src_off += min_vcnt;
+ dst_off += min_vcnt*2;
+ } else {
+ restore_inputs = true;
+
+ z_lgfi(Rix, len>>log_min_vcnt);
+ bind(VectorLoop);
+ z_vlm(Z_V20, Z_V21, 0, Rsrc); // get next 32 characters (single-byte)
+ add2reg(Rsrc, min_vcnt);
+
+ z_vuplhb(Z_V22, Z_V20); // V2 <- (expand) V0(high)
+ z_vupllb(Z_V23, Z_V20); // V3 <- (expand) V0(low)
+ z_vuplhb(Z_V24, Z_V21); // V4 <- (expand) V1(high)
+ z_vupllb(Z_V25, Z_V21); // V5 <- (expand) V1(low)
+ z_vstm(Z_V22, Z_V25, 0, Rdst); // store next 32 bytes
+ add2reg(Rdst, min_vcnt*2);
+
+ z_brct(Rix, VectorLoop);
+ }
+ }
+
+ if (((len-nprocessed) >= 16) && VM_Version::has_VectorFacility()) {
+ const int min_vcnt = 16; // Minimum #characters required to use vector instructions.
+ // Otherwise just do nothing in vector mode.
+ // Must be multiple of vector register length (16 bytes = 128 bits).
+ const int log_min_vcnt = exact_log2(min_vcnt);
+ const int iterations = (len - nprocessed) >> log_min_vcnt;
+ nprocessed += iterations << log_min_vcnt;
+ assert(iterations == 1, "must be!");
+
+ z_vl(Z_V20, 0+src_off, Z_R0, Rsrc); // get next 16 characters (single-byte)
+ z_vuplhb(Z_V22, Z_V20); // V2 <- (expand) V0(high)
+ z_vupllb(Z_V23, Z_V20); // V3 <- (expand) V0(low)
+ z_vstm(Z_V22, Z_V23, 0+dst_off, Rdst); // store next 32 bytes
+
+ src_off += min_vcnt;
+ dst_off += min_vcnt*2;
+ }
+
+ if ((len-nprocessed) > 8) {
+ const int min_cnt = 8; // Minimum #characters required to use unrolled scalar loop.
+ // Otherwise just do nothing in unrolled scalar mode.
+ // Must be multiple of 8.
+ const int log_min_cnt = exact_log2(min_cnt);
+ const int iterations = (len - nprocessed) >> log_min_cnt;
+ nprocessed += iterations << log_min_cnt;
+
+ //---< avoid loop overhead/ptr increment for small # iterations >---
+ if (iterations <= 2) {
+ clear_reg(Z_R0);
+ clear_reg(Z_R1);
+ workreg_clear = true;
+
+ z_icmh(Z_R0, 5, 0+src_off, Rsrc);
+ z_icmh(Z_R1, 5, 4+src_off, Rsrc);
+ z_icm(Z_R0, 5, 2+src_off, Rsrc);
+ z_icm(Z_R1, 5, 6+src_off, Rsrc);
+ z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
+
+ src_off += min_cnt;
+ dst_off += min_cnt*2;
+ }
+
+ if (iterations == 2) {
+ z_icmh(Z_R0, 5, 0+src_off, Rsrc);
+ z_icmh(Z_R1, 5, 4+src_off, Rsrc);
+ z_icm(Z_R0, 5, 2+src_off, Rsrc);
+ z_icm(Z_R1, 5, 6+src_off, Rsrc);
+ z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
+
+ src_off += min_cnt;
+ dst_off += min_cnt*2;
+ }
+
+ if (iterations > 2) {
+ Label UnrolledLoop;
+ restore_inputs = true;
+
+ clear_reg(Z_R0);
+ clear_reg(Z_R1);
+ workreg_clear = true;
+
+ z_lgfi(Rix, iterations);
+ bind(UnrolledLoop);
+ z_icmh(Z_R0, 5, 0, Rsrc);
+ z_icmh(Z_R1, 5, 4, Rsrc);
+ z_icm(Z_R0, 5, 2, Rsrc);
+ z_icm(Z_R1, 5, 6, Rsrc);
+ add2reg(Rsrc, min_cnt);
+
+ z_stmg(Z_R0, Z_R1, 0, Rdst);
+ add2reg(Rdst, min_cnt*2);
+
+ z_brct(Rix, UnrolledLoop);
+ }
+ }
+
+ if ((len-nprocessed) > 0) {
+ switch (len-nprocessed) {
+ case 8:
+ if (!workreg_clear) {
+ clear_reg(Z_R0);
+ clear_reg(Z_R1);
+ }
+ z_icmh(Z_R0, 5, 0+src_off, Rsrc);
+ z_icmh(Z_R1, 5, 4+src_off, Rsrc);
+ z_icm(Z_R0, 5, 2+src_off, Rsrc);
+ z_icm(Z_R1, 5, 6+src_off, Rsrc);
+ z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
+ break;
+ case 7:
+ if (!workreg_clear) {
+ clear_reg(Z_R0);
+ clear_reg(Z_R1);
+ }
+ clear_reg(Rix);
+ z_icm(Z_R0, 5, 0+src_off, Rsrc);
+ z_icm(Z_R1, 5, 2+src_off, Rsrc);
+ z_icm(Rix, 5, 4+src_off, Rsrc);
+ z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
+ z_llc(Z_R0, 6+src_off, Z_R0, Rsrc);
+ z_st(Rix, 8+dst_off, Z_R0, Rdst);
+ z_sth(Z_R0, 12+dst_off, Z_R0, Rdst);
+ break;
+ case 6:
+ if (!workreg_clear) {
+ clear_reg(Z_R0);
+ clear_reg(Z_R1);
+ }
+ clear_reg(Rix);
+ z_icm(Z_R0, 5, 0+src_off, Rsrc);
+ z_icm(Z_R1, 5, 2+src_off, Rsrc);
+ z_icm(Rix, 5, 4+src_off, Rsrc);
+ z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
+ z_st(Rix, 8+dst_off, Z_R0, Rdst);
+ break;
+ case 5:
+ if (!workreg_clear) {
+ clear_reg(Z_R0);
+ clear_reg(Z_R1);
+ }
+ z_icm(Z_R0, 5, 0+src_off, Rsrc);
+ z_icm(Z_R1, 5, 2+src_off, Rsrc);
+ z_llc(Rix, 4+src_off, Z_R0, Rsrc);
+ z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
+ z_sth(Rix, 8+dst_off, Z_R0, Rdst);
+ break;
+ case 4:
+ if (!workreg_clear) {
+ clear_reg(Z_R0);
+ clear_reg(Z_R1);
+ }
+ z_icm(Z_R0, 5, 0+src_off, Rsrc);
+ z_icm(Z_R1, 5, 2+src_off, Rsrc);
+ z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
+ break;
+ case 3:
+ if (!workreg_clear) {
+ clear_reg(Z_R0);
+ }
+ z_llc(Z_R1, 2+src_off, Z_R0, Rsrc);
+ z_icm(Z_R0, 5, 0+src_off, Rsrc);
+ z_sth(Z_R1, 4+dst_off, Z_R0, Rdst);
+ z_st(Z_R0, 0+dst_off, Rdst);
+ break;
+ case 2:
+ z_llc(Z_R0, 0+src_off, Z_R0, Rsrc);
+ z_llc(Z_R1, 1+src_off, Z_R0, Rsrc);
+ z_sth(Z_R0, 0+dst_off, Z_R0, Rdst);
+ z_sth(Z_R1, 2+dst_off, Z_R0, Rdst);
+ break;
+ case 1:
+ z_llc(Z_R0, 0+src_off, Z_R0, Rsrc);
+ z_sth(Z_R0, 0+dst_off, Z_R0, Rdst);
+ break;
+ default:
+ guarantee(false, "Impossible");
+ break;
+ }
+ src_off += len-nprocessed;
+ dst_off += (len-nprocessed)*2;
+ nprocessed = len;
+ }
+
+ //---< restore modified input registers >---
+ if ((nprocessed > 0) && restore_inputs) {
+ z_agfi(Rsrc, -(nprocessed-src_off));
+ if (nprocessed < 1000000000) { // avoid int overflow
+ z_agfi(Rdst, -(nprocessed*2-dst_off));
+ } else {
+ z_agfi(Rdst, -(nprocessed-dst_off));
+ z_agfi(Rdst, -nprocessed);
+ }
+ }
+
+ BLOCK_COMMENT("} string_inflate_const");
return offset() - block_start;
}
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.hpp b/src/hotspot/cpu/s390/macroAssembler_s390.hpp
index 908ce8d98aa..8fb0731747d 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp
@@ -198,6 +198,9 @@ class MacroAssembler: public Assembler {
// Test a bit in a register. Result is reflected in CC.
void testbit(Register r, unsigned int bitPos);
+ void prefetch_read(Address a);
+ void prefetch_update(Address a);
+
// Clear a register, i.e. load const zero into reg. Return len (in bytes) of
// generated instruction(s).
// whole_reg: Clear 64 bits if true, 32 bits otherwise.
@@ -836,7 +839,7 @@ class MacroAssembler: public Assembler {
void load_mirror(Register mirror, Register method);
//--------------------------
- //--- perations on arrays.
+ //--- Operations on arrays.
//--------------------------
unsigned int Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len);
unsigned int Clear_Array_Const(long cnt, Register base);
@@ -849,20 +852,34 @@ class MacroAssembler: public Assembler {
// Special String Intrinsics Implementation.
//-------------------------------------------
// Intrinsics for CompactStrings
- // Compress char[] to byte[]. odd_reg contains cnt. tmp3 is only needed for precise behavior in failure case. Kills dst.
- unsigned int string_compress(Register result, Register src, Register dst, Register odd_reg,
- Register even_reg, Register tmp, Register tmp2 = noreg);
+ // Restores: src, dst
+ // Uses: cnt
+ // Kills: tmp, Z_R0, Z_R1.
+ // Early clobber: result.
+ // Boolean precise controls accuracy of result value.
+ unsigned int string_compress(Register result, Register src, Register dst, Register cnt,
+ Register tmp, bool precise);
+
+ // Inflate byte[] to char[].
+ unsigned int string_inflate_trot(Register src, Register dst, Register cnt, Register tmp);
+
+ // Inflate byte[] to char[].
+ // Restores: src, dst
+ // Uses: cnt
+ // Kills: tmp, Z_R0, Z_R1.
+ unsigned int string_inflate(Register src, Register dst, Register cnt, Register tmp);
+
+ // Inflate byte[] to char[], length known at compile time.
+ // Restores: src, dst
+ // Kills: tmp, Z_R0, Z_R1.
+ // Note:
+ // len is signed int. Counts # characters, not bytes.
+ unsigned int string_inflate_const(Register src, Register dst, Register tmp, int len);
// Kills src.
unsigned int has_negatives(Register result, Register src, Register cnt,
Register odd_reg, Register even_reg, Register tmp);
- // Inflate byte[] to char[].
- unsigned int string_inflate_trot(Register src, Register dst, Register cnt, Register tmp);
- // Odd_reg contains cnt. Kills src.
- unsigned int string_inflate(Register src, Register dst, Register odd_reg,
- Register even_reg, Register tmp);
-
unsigned int string_compare(Register str1, Register str2, Register cnt1, Register cnt2,
Register odd_reg, Register even_reg, Register result, int ae);
diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad
index 15902d9f7aa..fb876ba6180 100644
--- a/src/hotspot/cpu/s390/s390.ad
+++ b/src/hotspot/cpu/s390/s390.ad
@@ -10267,14 +10267,14 @@ instruct indexOf_UL(iRegP haystack, rarg2RegI haycnt, iRegP needle, rarg5RegI ne
%}
// char[] to byte[] compression
-instruct string_compress(iRegP src, rarg5RegP dst, iRegI result, roddRegI len, revenRegI evenReg, iRegI tmp, flagsReg cr) %{
+instruct string_compress(iRegP src, iRegP dst, iRegI result, iRegI len, iRegI tmp, flagsReg cr) %{
match(Set result (StrCompressedCopy src (Binary dst len)));
- effect(TEMP_DEF result, USE_KILL dst, USE_KILL len, TEMP evenReg, TEMP tmp, KILL cr); // R0, R1 are killed, too.
+ effect(TEMP_DEF result, TEMP tmp, KILL cr); // R0, R1 are killed, too.
ins_cost(300);
format %{ "String Compress $src->$dst($len) -> $result" %}
ins_encode %{
__ string_compress($result$$Register, $src$$Register, $dst$$Register, $len$$Register,
- $evenReg$$Register, $tmp$$Register);
+ $tmp$$Register, false);
%}
ins_pipe(pipe_class_dummy);
%}
@@ -10293,13 +10293,25 @@ instruct string_compress(iRegP src, rarg5RegP dst, iRegI result, roddRegI len, r
//%}
// byte[] to char[] inflation
-instruct string_inflate(Universe dummy, rarg5RegP src, iRegP dst, roddRegI len, revenRegI evenReg, iRegI tmp, flagsReg cr) %{
+instruct string_inflate(Universe dummy, iRegP src, iRegP dst, iRegI len, iRegI tmp, flagsReg cr) %{
match(Set dummy (StrInflatedCopy src (Binary dst len)));
- effect(USE_KILL src, USE_KILL len, TEMP evenReg, TEMP tmp, KILL cr); // R0, R1 are killed, too.
+ effect(TEMP tmp, KILL cr); // R0, R1 are killed, too.
ins_cost(300);
format %{ "String Inflate $src->$dst($len)" %}
ins_encode %{
- __ string_inflate($src$$Register, $dst$$Register, $len$$Register, $evenReg$$Register, $tmp$$Register);
+ __ string_inflate($src$$Register, $dst$$Register, $len$$Register, $tmp$$Register);
+ %}
+ ins_pipe(pipe_class_dummy);
+%}
+
+// byte[] to char[] inflation
+instruct string_inflate_const(Universe dummy, iRegP src, iRegP dst, iRegI tmp, immI len, flagsReg cr) %{
+ match(Set dummy (StrInflatedCopy src (Binary dst len)));
+ effect(TEMP tmp, KILL cr); // R0, R1 are killed, too.
+ ins_cost(300);
+ format %{ "String Inflate (constLen) $src->$dst($len)" %}
+ ins_encode %{
+ __ string_inflate_const($src$$Register, $dst$$Register, $tmp$$Register, $len$$constant);
%}
ins_pipe(pipe_class_dummy);
%}
@@ -10318,14 +10330,14 @@ instruct has_negatives(rarg5RegP ary1, iRegI len, iRegI result, roddRegI oddReg,
%}
// encode char[] to byte[] in ISO_8859_1
-instruct encode_iso_array(rarg5RegP src, iRegP dst, iRegI result, roddRegI len, revenRegI evenReg, iRegI tmp, iRegI tmp2, flagsReg cr) %{
+instruct encode_iso_array(iRegP src, iRegP dst, iRegI result, iRegI len, iRegI tmp, flagsReg cr) %{
match(Set result (EncodeISOArray src (Binary dst len)));
- effect(TEMP_DEF result, USE_KILL src, USE_KILL len, TEMP evenReg, TEMP tmp, TEMP tmp2, KILL cr); // R0, R1 are killed, too.
+ effect(TEMP_DEF result, TEMP tmp, KILL cr); // R0, R1 are killed, too.
ins_cost(300);
format %{ "Encode array $src->$dst($len) -> $result" %}
ins_encode %{
__ string_compress($result$$Register, $src$$Register, $dst$$Register, $len$$Register,
- $evenReg$$Register, $tmp$$Register, $tmp2$$Register);
+ $tmp$$Register, true);
%}
ins_pipe(pipe_class_dummy);
%}
diff --git a/src/hotspot/cpu/s390/templateTable_s390.cpp b/src/hotspot/cpu/s390/templateTable_s390.cpp
index 7c1e458b9d6..972bb7d9c59 100644
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp
@@ -2884,12 +2884,12 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
// ztos
BTB_BEGIN(is_Bool, bsize, "putfield_or_static:is_Bool");
__ pop(ztos);
- if (do_rewrite) {
+ if (!is_static) {
pop_and_check_object(obj);
}
__ z_nilf(Z_tos, 0x1);
__ z_stc(Z_tos, field);
- if (!is_static) {
+ if (do_rewrite) {
patch_bytecode(Bytecodes::_fast_zputfield, bc, Z_ARG5, true, byte_no);
}
__ z_bru(Done);
diff --git a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp
index 1f3dc374cce..c9b250eb06f 100644
--- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp
+++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp
@@ -398,8 +398,13 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) {
if (o == NULL) {
__ set(NULL_WORD, reg);
} else {
+#ifdef ASSERT
+ {
+ ThreadInVMfromNative tiv(JavaThread::current());
+ assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
+ }
+#endif
int oop_index = __ oop_recorder()->find_index(o);
- assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
RelocationHolder rspec = oop_Relocation::spec(oop_index);
__ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
}
diff --git a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp
index 351555dbe51..d93b294574b 100644
--- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp
+++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp
@@ -898,7 +898,9 @@ class StubGenerator: public StubCodeGenerator {
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
assert_different_registers(addr, count, tmp);
- Label L_loop;
+ Label L_loop, L_done;
+
+ __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_done); // zero count - nothing to do
__ sll_ptr(count, LogBytesPerHeapOop, count);
__ sub(count, BytesPerHeapOop, count);
@@ -914,6 +916,7 @@ class StubGenerator: public StubCodeGenerator {
__ subcc(count, 1, count);
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
__ delayed()->add(addr, 1, addr);
+ __ BIND(L_done);
}
break;
case BarrierSet::ModRef:
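
The stubGenerator change adds an early exit to the card-table post-barrier when count is zero. A plausible reading, sketched below in plain C++ rather than SPARC assembly: the code derives the byte offset of the last copied element as count*BytesPerHeapOop - BytesPerHeapOop, which goes negative for an empty copy, so the dirtying loop would otherwise reach below the destination range. All names here are illustrative.

#include <cstddef>

static void card_barrier_model(volatile signed char* cards, size_t addr,
                               size_t count_in_oops, int log_bytes_per_oop,
                               int card_shift) {
  if (count_in_oops == 0) return;                        // the new L_done fast path
  size_t last = addr + (count_in_oops << log_bytes_per_oop)
                     - (size_t(1) << log_bytes_per_oop); // last element, not one past
  size_t first_card = addr >> card_shift;
  for (size_t card = last >> card_shift; ; card--) {     // dirty from last card down
    cards[card] = 0;
    if (card == first_card) break;
  }
}
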
diff --git a/src/hotspot/cpu/x86/assembler_x86.cpp b/src/hotspot/cpu/x86/assembler_x86.cpp
index 3bdc0ed3c30..961ebb531be 100644
--- a/src/hotspot/cpu/x86/assembler_x86.cpp
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp
@@ -1256,7 +1256,7 @@ void Assembler::addr_nop_8() {
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_rex_vex_w_reverted();
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x58);
@@ -1266,7 +1266,7 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
void Assembler::addsd(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
attributes.set_rex_vex_w_reverted();
simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -1276,7 +1276,7 @@ void Assembler::addsd(XMMRegister dst, Address src) {
void Assembler::addss(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x58);
emit_int8((unsigned char)(0xC0 | encode));
@@ -1285,7 +1285,7 @@ void Assembler::addss(XMMRegister dst, XMMRegister src) {
void Assembler::addss(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x58);
@@ -1295,7 +1295,7 @@ void Assembler::addss(XMMRegister dst, Address src) {
void Assembler::aesdec(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xDE);
emit_operand(dst, src);
@@ -1303,7 +1303,7 @@ void Assembler::aesdec(XMMRegister dst, Address src) {
void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_aes(), "");
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xDE);
emit_int8(0xC0 | encode);
@@ -1312,7 +1312,7 @@ void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
void Assembler::aesdeclast(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xDF);
emit_operand(dst, src);
@@ -1320,7 +1320,7 @@ void Assembler::aesdeclast(XMMRegister dst, Address src) {
void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_aes(), "");
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xDF);
emit_int8((unsigned char)(0xC0 | encode));
@@ -1329,7 +1329,7 @@ void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
void Assembler::aesenc(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xDC);
emit_operand(dst, src);
@@ -1337,7 +1337,7 @@ void Assembler::aesenc(XMMRegister dst, Address src) {
void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_aes(), "");
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xDC);
emit_int8(0xC0 | encode);
@@ -1346,7 +1346,7 @@ void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
void Assembler::aesenclast(XMMRegister dst, Address src) {
assert(VM_Version::supports_aes(), "");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xDD);
emit_operand(dst, src);
@@ -1354,7 +1354,7 @@ void Assembler::aesenclast(XMMRegister dst, Address src) {
void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_aes(), "");
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xDD);
emit_int8((unsigned char)(0xC0 | encode));
@@ -1387,7 +1387,7 @@ void Assembler::andl(Register dst, Register src) {
void Assembler::andnl(Register dst, Register src1, Register src2) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF2);
emit_int8((unsigned char)(0xC0 | encode));
@@ -1396,7 +1396,7 @@ void Assembler::andnl(Register dst, Register src1, Register src2) {
void Assembler::andnl(Register dst, Register src1, Address src2) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF2);
emit_operand(dst, src2);
@@ -1424,7 +1424,7 @@ void Assembler::bswapl(Register reg) { // bswap
void Assembler::blsil(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
@@ -1433,7 +1433,7 @@ void Assembler::blsil(Register dst, Register src) {
void Assembler::blsil(Register dst, Address src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF3);
emit_operand(rbx, src);
@@ -1441,7 +1441,7 @@ void Assembler::blsil(Register dst, Address src) {
void Assembler::blsmskl(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
@@ -1450,7 +1450,7 @@ void Assembler::blsmskl(Register dst, Register src) {
void Assembler::blsmskl(Register dst, Address src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF3);
emit_operand(rdx, src);
@@ -1458,7 +1458,7 @@ void Assembler::blsmskl(Register dst, Address src) {
void Assembler::blsrl(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
@@ -1467,7 +1467,7 @@ void Assembler::blsrl(Register dst, Register src) {
void Assembler::blsrl(Register dst, Address src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF3);
emit_operand(rcx, src);
@@ -1753,7 +1753,7 @@ void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_rex_vex_w_reverted();
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x5A);
@@ -1763,7 +1763,7 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
attributes.set_rex_vex_w_reverted();
simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -1817,7 +1817,7 @@ void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x5A);
emit_int8((unsigned char)(0xC0 | encode));
@@ -1826,7 +1826,7 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
void Assembler::cvtss2sd(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x5A);
@@ -1870,7 +1870,7 @@ void Assembler::decl(Address dst) {
void Assembler::divsd(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
attributes.set_rex_vex_w_reverted();
simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -1880,7 +1880,7 @@ void Assembler::divsd(XMMRegister dst, Address src) {
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_rex_vex_w_reverted();
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x5E);
@@ -1890,7 +1890,7 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
void Assembler::divss(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x5E);
@@ -1899,7 +1899,7 @@ void Assembler::divss(XMMRegister dst, Address src) {
void Assembler::divss(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x5E);
emit_int8((unsigned char)(0xC0 | encode));
@@ -2105,7 +2105,7 @@ void Assembler::jmpb(Label& L) {
void Assembler::ldmxcsr( Address src) {
if (UseAVX > 0 ) {
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
vex_prefix(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
emit_int8((unsigned char)0xAE);
emit_operand(as_Register(2), src);
@@ -2784,7 +2784,7 @@ void Assembler::movsbl(Register dst, Register src) { // movsxb
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_rex_vex_w_reverted();
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x10);
@@ -2794,7 +2794,7 @@ void Assembler::movsd(XMMRegister dst, XMMRegister src) {
void Assembler::movsd(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
attributes.set_rex_vex_w_reverted();
simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -2805,7 +2805,7 @@ void Assembler::movsd(XMMRegister dst, Address src) {
void Assembler::movsd(Address dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
attributes.reset_is_clear_context();
attributes.set_rex_vex_w_reverted();
@@ -2816,7 +2816,7 @@ void Assembler::movsd(Address dst, XMMRegister src) {
void Assembler::movss(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x10);
emit_int8((unsigned char)(0xC0 | encode));
@@ -2825,7 +2825,7 @@ void Assembler::movss(XMMRegister dst, XMMRegister src) {
void Assembler::movss(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x10);
@@ -2835,7 +2835,7 @@ void Assembler::movss(XMMRegister dst, Address src) {
void Assembler::movss(Address dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
attributes.reset_is_clear_context();
simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
@@ -2931,7 +2931,7 @@ void Assembler::mull(Register src) {
void Assembler::mulsd(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
attributes.set_rex_vex_w_reverted();
simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -2941,7 +2941,7 @@ void Assembler::mulsd(XMMRegister dst, Address src) {
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_rex_vex_w_reverted();
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x59);
@@ -2951,7 +2951,7 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
void Assembler::mulss(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x59);
@@ -2960,7 +2960,7 @@ void Assembler::mulss(XMMRegister dst, Address src) {
void Assembler::mulss(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x59);
emit_int8((unsigned char)(0xC0 | encode));
@@ -4289,7 +4289,7 @@ void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int
void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8((unsigned char)0x0E);
emit_int8((unsigned char)(0xC0 | encode));
@@ -4388,7 +4388,7 @@ void Assembler::smovl() {
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_rex_vex_w_reverted();
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x51);
@@ -4398,7 +4398,7 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
void Assembler::sqrtsd(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
attributes.set_rex_vex_w_reverted();
simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -4408,7 +4408,7 @@ void Assembler::sqrtsd(XMMRegister dst, Address src) {
void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x51);
emit_int8((unsigned char)(0xC0 | encode));
@@ -4421,7 +4421,7 @@ void Assembler::std() {
void Assembler::sqrtss(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x51);
@@ -4484,7 +4484,7 @@ void Assembler::subl(Register dst, Register src) {
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_rex_vex_w_reverted();
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x5C);
@@ -4494,7 +4494,7 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) {
void Assembler::subsd(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
attributes.set_rex_vex_w_reverted();
simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -4504,7 +4504,7 @@ void Assembler::subsd(XMMRegister dst, Address src) {
void Assembler::subss(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false , /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true , /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x5C);
emit_int8((unsigned char)(0xC0 | encode));
@@ -4513,7 +4513,7 @@ void Assembler::subss(XMMRegister dst, XMMRegister src) {
void Assembler::subss(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x5C);
@@ -4735,7 +4735,7 @@ void Assembler::xorb(Register dst, Address src) {
void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
assert(VM_Version::supports_avx(), "");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
attributes.set_rex_vex_w_reverted();
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -4745,7 +4745,7 @@ void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_rex_vex_w_reverted();
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x58);
@@ -4755,7 +4755,7 @@ void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
assert(VM_Version::supports_avx(), "");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x58);
@@ -4764,7 +4764,7 @@ void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x58);
emit_int8((unsigned char)(0xC0 | encode));
@@ -4773,7 +4773,7 @@ void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
assert(VM_Version::supports_avx(), "");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
attributes.set_rex_vex_w_reverted();
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -4783,7 +4783,7 @@ void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_rex_vex_w_reverted();
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x5E);
@@ -4793,7 +4793,7 @@ void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
assert(VM_Version::supports_avx(), "");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x5E);
@@ -4802,7 +4802,7 @@ void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x5E);
emit_int8((unsigned char)(0xC0 | encode));
@@ -4810,7 +4810,7 @@ void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
void Assembler::vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
assert(VM_Version::supports_fma(), "");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xB9);
emit_int8((unsigned char)(0xC0 | encode));
@@ -4818,7 +4818,7 @@ void Assembler::vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void Assembler::vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
assert(VM_Version::supports_fma(), "");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xB9);
emit_int8((unsigned char)(0xC0 | encode));
@@ -4827,7 +4827,7 @@ void Assembler::vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
assert(VM_Version::supports_avx(), "");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
attributes.set_rex_vex_w_reverted();
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -4837,7 +4837,7 @@ void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_rex_vex_w_reverted();
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x59);
@@ -4847,7 +4847,7 @@ void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
assert(VM_Version::supports_avx(), "");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x59);
@@ -4856,7 +4856,7 @@ void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x59);
emit_int8((unsigned char)(0xC0 | encode));
@@ -4865,7 +4865,7 @@ void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
assert(VM_Version::supports_avx(), "");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
attributes.set_rex_vex_w_reverted();
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -4875,7 +4875,7 @@ void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_rex_vex_w_reverted();
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x5C);
@@ -4885,7 +4885,7 @@ void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
assert(VM_Version::supports_avx(), "");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x5C);
@@ -4894,7 +4894,7 @@ void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x5C);
emit_int8((unsigned char)(0xC0 | encode));
@@ -5203,6 +5203,24 @@ void Assembler::vsqrtpd(XMMRegister dst, Address src, int vector_len) {
emit_operand(dst, src);
}
+void Assembler::vsqrtps(XMMRegister dst, XMMRegister src, int vector_len) {
+ assert(VM_Version::supports_avx(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+ emit_int8(0x51);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::vsqrtps(XMMRegister dst, Address src, int vector_len) {
+ assert(VM_Version::supports_avx(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
+ vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+ emit_int8(0x51);
+ emit_operand(dst, src);
+}
+
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
@@ -5377,7 +5395,7 @@ void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector
void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
assert(VM_Version::supports_avx() && (vector_len == 0) ||
VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
- InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8(0x01);
emit_int8((unsigned char)(0xC0 | encode));
@@ -5436,7 +5454,7 @@ void Assembler::paddq(XMMRegister dst, XMMRegister src) {
void Assembler::phaddw(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_sse3(), "");
- InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8(0x01);
emit_int8((unsigned char)(0xC0 | encode));
@@ -6679,7 +6697,7 @@ void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, in
void Assembler::vzeroupper() {
if (VM_Version::supports_vzeroupper()) {
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
(void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
emit_int8(0x77);
}
@@ -7442,7 +7460,7 @@ void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int
void Assembler::shlxl(Register dst, Register src1, Register src2) {
assert(VM_Version::supports_bmi2(), "");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF7);
emit_int8((unsigned char)(0xC0 | encode));
@@ -7450,7 +7468,7 @@ void Assembler::shlxl(Register dst, Register src1, Register src2) {
void Assembler::shlxq(Register dst, Register src1, Register src2) {
assert(VM_Version::supports_bmi2(), "");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF7);
emit_int8((unsigned char)(0xC0 | encode));
@@ -7985,7 +8003,7 @@ void Assembler::andq(Register dst, Register src) {
void Assembler::andnq(Register dst, Register src1, Register src2) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF2);
emit_int8((unsigned char)(0xC0 | encode));
@@ -7994,7 +8012,7 @@ void Assembler::andnq(Register dst, Register src1, Register src2) {
void Assembler::andnq(Register dst, Register src1, Address src2) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF2);
emit_operand(dst, src2);
@@ -8022,7 +8040,7 @@ void Assembler::bswapq(Register reg) {
void Assembler::blsiq(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
@@ -8031,7 +8049,7 @@ void Assembler::blsiq(Register dst, Register src) {
void Assembler::blsiq(Register dst, Address src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF3);
emit_operand(rbx, src);
@@ -8039,7 +8057,7 @@ void Assembler::blsiq(Register dst, Address src) {
void Assembler::blsmskq(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
@@ -8048,7 +8066,7 @@ void Assembler::blsmskq(Register dst, Register src) {
void Assembler::blsmskq(Register dst, Address src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF3);
emit_operand(rdx, src);
@@ -8056,7 +8074,7 @@ void Assembler::blsmskq(Register dst, Address src) {
void Assembler::blsrq(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
@@ -8065,7 +8083,7 @@ void Assembler::blsrq(Register dst, Register src) {
void Assembler::blsrq(Register dst, Address src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
InstructionMark im(this);
- InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF3);
emit_operand(rcx, src);
@@ -8504,7 +8522,7 @@ void Assembler::mulq(Register src) {
void Assembler::mulxq(Register dst1, Register dst2, Register src) {
assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xF6);
emit_int8((unsigned char)(0xC0 | encode));
@@ -8667,7 +8685,7 @@ void Assembler::rorq(Register dst, int imm8) {
void Assembler::rorxq(Register dst, Register src, int imm8) {
assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
emit_int8((unsigned char)0xF0);
emit_int8((unsigned char)(0xC0 | encode));
@@ -8676,7 +8694,7 @@ void Assembler::rorxq(Register dst, Register src, int imm8) {
void Assembler::rorxd(Register dst, Register src, int imm8) {
assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
emit_int8((unsigned char)0xF0);
emit_int8((unsigned char)(0xC0 | encode));
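
Most of the assembler_x86.cpp hunks flip the no_mask_reg attribute to true for scalar SSE/AVX and BMI instructions. Read together with the k1 reset added to restore_cpu_control_state_after_jni further down, the intent appears to be that these encodings should never depend on an EVEX opmask register, since native code reached through JNI may clobber k1. A toy sketch of the EVEX detail involved (not HotSpot's encoder):

// The EVEX prefix carries a 3-bit opmask selector 'aaa'; aaa == 0 selects k0,
// which by definition means "no masking". An instruction flagged no_mask_reg
// can always be emitted with aaa == 0 and so never cares what k1..k7 hold.
struct EvexMaskBitsSketch {
  unsigned aaa : 3;   // opmask register selector, 0 => unmasked
  unsigned z   : 1;   // zeroing vs. merging, only meaningful when aaa != 0
};

static EvexMaskBitsSketch pick_mask_bits(bool no_mask_reg, unsigned requested_mask) {
  EvexMaskBitsSketch bits;
  bits.aaa = no_mask_reg ? 0u : (requested_mask & 0x7u);
  bits.z   = 0u;
  return bits;
}
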
diff --git a/src/hotspot/cpu/x86/assembler_x86.hpp b/src/hotspot/cpu/x86/assembler_x86.hpp
index c4e6645e319..2739cf3b5eb 100644
--- a/src/hotspot/cpu/x86/assembler_x86.hpp
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp
@@ -1919,9 +1919,11 @@ private:
void vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
void vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
- // Sqrt Packed Floating-Point Values - Double precision only
+ // Sqrt Packed Floating-Point Values
void vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len);
void vsqrtpd(XMMRegister dst, Address src, int vector_len);
+ void vsqrtps(XMMRegister dst, XMMRegister src, int vector_len);
+ void vsqrtps(XMMRegister dst, Address src, int vector_len);
// Bitwise Logical AND of Packed Floating-Point Values
void andpd(XMMRegister dst, XMMRegister src);
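
The header now exposes single-precision packed square root alongside the existing double-precision entry points. A hypothetical caller, just to show how the new overloads line up (the function and its arguments are illustrative and assume the assembler_x86.hpp types):

// Emit a packed sqrt of the requested width and precision.
static void emit_packed_sqrt(Assembler& a, XMMRegister dst, XMMRegister src,
                             bool is_double, int vector_len) {
  if (is_double) {
    a.vsqrtpd(dst, src, vector_len);   // already existed
  } else {
    a.vsqrtps(dst, src, vector_len);   // added by this change
  }
}
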
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index 3ac35d752e4..112321cce78 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -6630,6 +6630,13 @@ void MacroAssembler::restore_cpu_control_state_after_jni() {
}
// Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
vzeroupper();
+ // Reset k1 to 0xffff.
+ if (VM_Version::supports_evex()) {
+ push(rcx);
+ movl(rcx, 0xffff);
+ kmovwl(k1, rcx);
+ pop(rcx);
+ }
#ifndef _LP64
// Either restore the x87 floating pointer control word after returning
diff --git a/src/hotspot/cpu/x86/nativeInst_x86.hpp b/src/hotspot/cpu/x86/nativeInst_x86.hpp
index 436a48caf34..de4b448396b 100644
--- a/src/hotspot/cpu/x86/nativeInst_x86.hpp
+++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp
@@ -706,14 +706,11 @@ inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) =
inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64
if (SafepointMechanism::uses_thread_local_poll()) {
- // We know that the poll must have a REX_B prefix since we enforce its source to be
- // a rex-register and the destination to be rax.
const bool has_rex_prefix = ubyte_at(0) == NativeTstRegMem::instruction_rex_b_prefix;
- const bool is_test_opcode = ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl;
- const bool is_rax_target = (ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg;
- if (has_rex_prefix && is_test_opcode && is_rax_target) {
- return true;
- }
+ const int test_offset = has_rex_prefix ? 1 : 0;
+ const bool is_test_opcode = ubyte_at(test_offset) == NativeTstRegMem::instruction_code_memXregl;
+ const bool is_rax_target = (ubyte_at(test_offset + 1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg;
+ return is_test_opcode && is_rax_target;
}
// Try decoding a near safepoint first:
if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
index cf85d5807a2..b02015a08c2 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
@@ -3388,26 +3388,63 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// No exception case
__ bind(noException);
- Label no_adjust, bail;
+ Label no_adjust, bail, no_prefix, not_special;
if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
// If our stashed return pc was modified by the runtime we avoid touching it
__ cmpptr(rbx, Address(rbp, wordSize));
__ jccb(Assembler::notEqual, no_adjust);
+ // Skip over the poll instruction.
+ // See NativeInstruction::is_safepoint_poll()
+ // Possible encodings:
+ // 85 00 test %eax,(%rax)
+ // 85 01 test %eax,(%rcx)
+ // 85 02 test %eax,(%rdx)
+ // 85 03 test %eax,(%rbx)
+ // 85 06 test %eax,(%rsi)
+ // 85 07 test %eax,(%rdi)
+ //
+ // 41 85 00 test %eax,(%r8)
+ // 41 85 01 test %eax,(%r9)
+ // 41 85 02 test %eax,(%r10)
+ // 41 85 03 test %eax,(%r11)
+ // 41 85 06 test %eax,(%r14)
+ // 41 85 07 test %eax,(%r15)
+ //
+ // 85 04 24 test %eax,(%rsp)
+ // 41 85 04 24 test %eax,(%r12)
+ // 85 45 00 test %eax,0x0(%rbp)
+ // 41 85 45 00 test %eax,0x0(%r13)
+
+ __ cmpb(Address(rbx, 0), NativeTstRegMem::instruction_rex_b_prefix);
+ __ jcc(Assembler::notEqual, no_prefix);
+ __ addptr(rbx, 1);
+ __ bind(no_prefix);
+#ifdef ASSERT
+ __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
+#endif
+ // r12/r13/rsp/rbp base encoding takes 3 bytes with the following register values:
+ // r12/rsp 0x04
+ // r13/rbp 0x05
+ __ movzbq(rcx, Address(rbx, 1));
+ __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
+ __ subptr(rcx, 4); // looking for 0x00 .. 0x01
+ __ cmpptr(rcx, 1);
+ __ jcc(Assembler::above, not_special);
+ __ addptr(rbx, 1);
+ __ bind(not_special);
#ifdef ASSERT
// Verify the correct encoding of the poll we're about to skip.
- // See NativeInstruction::is_safepoint_poll()
- __ cmpb(Address(rbx, 0), NativeTstRegMem::instruction_rex_b_prefix);
- __ jcc(Assembler::notEqual, bail);
- __ cmpb(Address(rbx, 1), NativeTstRegMem::instruction_code_memXregl);
+ __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
__ jcc(Assembler::notEqual, bail);
// Mask out the modrm bits
- __ testb(Address(rbx, 2), NativeTstRegMem::modrm_mask);
+ __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
// rax encodes to 0, so if the bits are nonzero it's incorrect
__ jcc(Assembler::notZero, bail);
#endif
// Adjust return pc forward to step over the safepoint poll instruction
- __ addptr(Address(rbp, wordSize), 3);
+ __ addptr(rbx, 2);
+ __ movptr(Address(rbp, wordSize), rbx);
}
__ bind(no_adjust);
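As a rough standalone illustration (not HotSpot code) of the skip logic above: given the encodings listed in the comment, the poll instruction length can be derived from at most two bytes, which is what the stub computes with rbx/rcx. A hypothetical helper sketching that computation:

  #include <cstdint>
  #include <cstddef>

  // Compute the length of the "test %eax,(reg)" thread-local poll at 'pc',
  // mirroring the encoding table in the comment above.
  static size_t poll_instruction_length(const uint8_t* pc) {
    size_t len = 0;
    if (pc[0] == 0x41) {            // REX.B prefix: poll register is r8..r15
      len += 1;
    }
    len += 1;                       // the 0x85 test opcode itself
    uint8_t modrm = pc[len];
    len += 1;                       // the modrm byte
    uint8_t base = modrm & 0x07;    // low three bits select the base register
    if (base == 0x04) {             // rsp/r12 need a SIB byte
      len += 1;
    } else if (base == 0x05) {      // rbp/r13 need a zero disp8
      len += 1;
    }
    return len;                     // 2, 3, or 4 bytes, matching the table above
  }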
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
index 7db8ba32981..5b28a4e28d5 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
@@ -1264,9 +1264,12 @@ class StubGenerator: public StubCodeGenerator {
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
- Label L_loop;
+ Label L_loop, L_done;
const Register end = count;
+ __ testl(count, count);
+ __ jcc(Assembler::zero, L_done); // zero count - nothing to do
+
__ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
__ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
__ shrptr(start, CardTableModRefBS::card_shift);
@@ -1280,6 +1283,7 @@ class StubGenerator: public StubCodeGenerator {
__ movb(Address(start, count, Address::times_1), 0);
__ decrement(count);
__ jcc(Assembler::greaterEqual, L_loop);
+ __ BIND(L_done);
}
break;
default:
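A quick standalone sketch (assumed constants, not HotSpot code) of why the new zero-count check matters: with count == 0 the "inclusive end" is computed one element before start, so the card range wraps and the loop would dirty at least one card map byte it should not.

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uintptr_t heap_oop_size = 8;   // BytesPerHeapOop on 64-bit, assumed
    const unsigned card_shift = 9;       // 512-byte cards, assumed
    uintptr_t start = 0x10000;
    uintptr_t count = 0;                 // empty slice, nothing to mark

    uintptr_t end = start + count * heap_oop_size;  // == start
    end -= heap_oop_size;                            // now *below* start
    intptr_t cards = (intptr_t)((end >> card_shift) - (start >> card_shift));
    printf("cards to dirty without the zero check: %ld\n", (long)cards); // -1
    return 0;
  }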
diff --git a/src/hotspot/cpu/x86/vm_version_x86.cpp b/src/hotspot/cpu/x86/vm_version_x86.cpp
index f322d60a6ed..563ff1f111b 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp
@@ -629,18 +629,26 @@ void VM_Version::get_processor_features() {
_features &= ~CPU_SSE;
// first try initial setting and detect what we can support
+ int use_avx_limit = 0;
if (UseAVX > 0) {
if (UseAVX > 2 && supports_evex()) {
- UseAVX = 3;
+ use_avx_limit = 3;
} else if (UseAVX > 1 && supports_avx2()) {
- UseAVX = 2;
+ use_avx_limit = 2;
} else if (UseAVX > 0 && supports_avx()) {
- UseAVX = 1;
+ use_avx_limit = 1;
} else {
- UseAVX = 0;
+ use_avx_limit = 0;
}
+ }
+ if (FLAG_IS_DEFAULT(UseAVX)) {
+ FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
+ } else if (UseAVX > use_avx_limit) {
+ warning("UseAVX=%d is not supported on this CPU, setting it to UseAVX=%d", (int) UseAVX, use_avx_limit);
+ FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
} else if (UseAVX < 0) {
- UseAVX = 0;
+ warning("UseAVX=%d is not valid, setting it to UseAVX=0", (int) UseAVX);
+ FLAG_SET_DEFAULT(UseAVX, 0);
}
if (UseAVX < 3) {
@@ -710,16 +718,29 @@ void VM_Version::get_processor_features() {
// UseSSE is set to the smaller of what hardware supports and what
// the command line requires. I.e., you cannot set UseSSE to 2 on
// older Pentiums which do not support it.
- if (UseSSE > 4) UseSSE=4;
- if (UseSSE < 0) UseSSE=0;
- if (!supports_sse4_1()) // Drop to 3 if no SSE4 support
- UseSSE = MIN2((intx)3,UseSSE);
- if (!supports_sse3()) // Drop to 2 if no SSE3 support
- UseSSE = MIN2((intx)2,UseSSE);
- if (!supports_sse2()) // Drop to 1 if no SSE2 support
- UseSSE = MIN2((intx)1,UseSSE);
- if (!supports_sse ()) // Drop to 0 if no SSE support
- UseSSE = 0;
+ int use_sse_limit = 0;
+ if (UseSSE > 0) {
+ if (UseSSE > 3 && supports_sse4_1()) {
+ use_sse_limit = 4;
+ } else if (UseSSE > 2 && supports_sse3()) {
+ use_sse_limit = 3;
+ } else if (UseSSE > 1 && supports_sse2()) {
+ use_sse_limit = 2;
+ } else if (UseSSE > 0 && supports_sse()) {
+ use_sse_limit = 1;
+ } else {
+ use_sse_limit = 0;
+ }
+ }
+ if (FLAG_IS_DEFAULT(UseSSE)) {
+ FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
+ } else if (UseSSE > use_sse_limit) {
+ warning("UseSSE=%d is not supported on this CPU, setting it to UseSSE=%d", (int) UseSSE, use_sse_limit);
+ FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
+ } else if (UseSSE < 0) {
+ warning("UseSSE=%d is not valid, setting it to UseSSE=0", (int) UseSSE);
+ FLAG_SET_DEFAULT(UseSSE, 0);
+ }
// Use AES instructions if available.
if (supports_aes()) {
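For illustration, the reshaped UseAVX/UseSSE handling follows one pattern: derive a hardware limit first, then reconcile the user's request against it, warning only when an explicit value has to be lowered. A minimal standalone sketch of that pattern (hypothetical names, not VM code):

  #include <cstdio>

  struct Cpu { bool avx, avx2, evex; };

  static int clamp_use_avx(int requested, bool is_default, const Cpu& cpu) {
    int limit = 0;                              // what the hardware allows
    if (requested > 2 && cpu.evex)      limit = 3;
    else if (requested > 1 && cpu.avx2) limit = 2;
    else if (requested > 0 && cpu.avx)  limit = 1;

    if (is_default) return limit;               // default: silently use the best
    if (requested > limit) {                    // explicit but unsupported
      fprintf(stderr, "UseAVX=%d is not supported on this CPU, using %d\n",
              requested, limit);
      return limit;
    }
    if (requested < 0) {                        // explicit but invalid
      fprintf(stderr, "UseAVX=%d is not valid, using 0\n", requested);
      return 0;
    }
    return requested;                           // explicit and supported
  }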
diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad
index afaa2da23c7..124e95a4dcd 100644
--- a/src/hotspot/cpu/x86/x86.ad
+++ b/src/hotspot/cpu/x86/x86.ad
@@ -1252,6 +1252,7 @@ const bool Matcher::match_rule_supported(int opcode) {
ret_value = false;
break;
case Op_SqrtVD:
+ case Op_SqrtVF:
if (UseAVX < 1) // enabled for AVX only
ret_value = false;
break;
@@ -2580,7 +2581,7 @@ instruct negD_reg_reg(regD dst, regD src) %{
instruct sqrtF_reg(regF dst, regF src) %{
predicate(UseSSE>=1);
- match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
+ match(Set dst (SqrtF src));
format %{ "sqrtss $dst, $src" %}
ins_cost(150);
@@ -2592,7 +2593,7 @@ instruct sqrtF_reg(regF dst, regF src) %{
instruct sqrtF_mem(regF dst, memory src) %{
predicate(UseSSE>=1);
- match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src)))));
+ match(Set dst (SqrtF (LoadF src)));
format %{ "sqrtss $dst, $src" %}
ins_cost(150);
@@ -2604,7 +2605,8 @@ instruct sqrtF_mem(regF dst, memory src) %{
instruct sqrtF_imm(regF dst, immF con) %{
predicate(UseSSE>=1);
- match(Set dst (ConvD2F (SqrtD (ConvF2D con))));
+ match(Set dst (SqrtF con));
+
format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
@@ -8388,7 +8390,7 @@ instruct vshiftcnt(vecS dst, rRegI cnt) %{
// --------------------------------- Sqrt --------------------------------------
-// Floating point vector sqrt - double precision only
+// Floating point vector sqrt
instruct vsqrt2D_reg(vecX dst, vecX src) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (SqrtVD src));
@@ -8455,6 +8457,94 @@ instruct vsqrt8D_mem(vecZ dst, memory mem) %{
ins_pipe( pipe_slow );
%}
+instruct vsqrt2F_reg(vecD dst, vecD src) %{
+ predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+ match(Set dst (SqrtVF src));
+ format %{ "vsqrtps $dst,$src\t! sqrt packed2F" %}
+ ins_encode %{
+ int vector_len = 0;
+ __ vsqrtps($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vsqrt2F_mem(vecD dst, memory mem) %{
+ predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+ match(Set dst (SqrtVF (LoadVector mem)));
+ format %{ "vsqrtps $dst,$mem\t! sqrt packed2F" %}
+ ins_encode %{
+ int vector_len = 0;
+ __ vsqrtps($dst$$XMMRegister, $mem$$Address, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vsqrt4F_reg(vecX dst, vecX src) %{
+ predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
+ match(Set dst (SqrtVF src));
+ format %{ "vsqrtps $dst,$src\t! sqrt packed4F" %}
+ ins_encode %{
+ int vector_len = 0;
+ __ vsqrtps($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vsqrt4F_mem(vecX dst, memory mem) %{
+ predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
+ match(Set dst (SqrtVF (LoadVector mem)));
+ format %{ "vsqrtps $dst,$mem\t! sqrt packed4F" %}
+ ins_encode %{
+ int vector_len = 0;
+ __ vsqrtps($dst$$XMMRegister, $mem$$Address, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vsqrt8F_reg(vecY dst, vecY src) %{
+ predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
+ match(Set dst (SqrtVF src));
+ format %{ "vsqrtps $dst,$src\t! sqrt packed8F" %}
+ ins_encode %{
+ int vector_len = 1;
+ __ vsqrtps($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vsqrt8F_mem(vecY dst, memory mem) %{
+ predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
+ match(Set dst (SqrtVF (LoadVector mem)));
+ format %{ "vsqrtps $dst,$mem\t! sqrt packed8F" %}
+ ins_encode %{
+ int vector_len = 1;
+ __ vsqrtps($dst$$XMMRegister, $mem$$Address, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vsqrt16F_reg(vecZ dst, vecZ src) %{
+ predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
+ match(Set dst (SqrtVF src));
+ format %{ "vsqrtps $dst,$src\t! sqrt packed16F" %}
+ ins_encode %{
+ int vector_len = 2;
+ __ vsqrtps($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vsqrt16F_mem(vecZ dst, memory mem) %{
+ predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
+ match(Set dst (SqrtVF (LoadVector mem)));
+ format %{ "vsqrtps $dst,$mem\t! sqrt packed16F" %}
+ ins_encode %{
+ int vector_len = 2;
+ __ vsqrtps($dst$$XMMRegister, $mem$$Address, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
// ------------------------------ LeftShift -----------------------------------
// Shorts/Chars vector left shift
diff --git a/src/hotspot/os/aix/osThread_aix.cpp b/src/hotspot/os/aix/osThread_aix.cpp
index 0b13454ffef..6303dc27eb8 100644
--- a/src/hotspot/os/aix/osThread_aix.cpp
+++ b/src/hotspot/os/aix/osThread_aix.cpp
@@ -25,6 +25,7 @@
// no precompiled headers
+#include "memory/allocation.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index df5959b26d0..0325824e021 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -2490,6 +2490,22 @@ bool os::can_execute_large_page_memory() {
return false;
}
+char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
+ assert(file_desc >= 0, "file_desc is not valid");
+ char* result = NULL;
+
+ // Always round to os::vm_page_size(), which may be larger than 4K.
+ bytes = align_up(bytes, os::vm_page_size());
+ result = reserve_mmaped_memory(bytes, requested_addr, 0);
+
+ if (result != NULL) {
+ if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
+ vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
+ }
+ }
+ return result;
+}
+
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
diff --git a/src/hotspot/os/bsd/osThread_bsd.cpp b/src/hotspot/os/bsd/osThread_bsd.cpp
index de1383be848..c614f3825e3 100644
--- a/src/hotspot/os/bsd/osThread_bsd.cpp
+++ b/src/hotspot/os/bsd/osThread_bsd.cpp
@@ -23,6 +23,7 @@
*/
// no precompiled headers
+#include "memory/allocation.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index 6ebcb627fdc..64afc14318c 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -2350,6 +2350,17 @@ bool os::can_execute_large_page_memory() {
return UseHugeTLBFS;
}
+char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
+ assert(file_desc >= 0, "file_desc is not valid");
+ char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
+ if (result != NULL) {
+ if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
+ vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
+ }
+ }
+ return result;
+}
+
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
diff --git a/src/hotspot/os/linux/osContainer_linux.cpp b/src/hotspot/os/linux/osContainer_linux.cpp
index 03fd695cbc4..dacd53b27dc 100644
--- a/src/hotspot/os/linux/osContainer_linux.cpp
+++ b/src/hotspot/os/linux/osContainer_linux.cpp
@@ -323,7 +323,12 @@ void OSContainer::init() {
}
}
- if (mntinfo != NULL) fclose(mntinfo);
+ fclose(mntinfo);
+
+ if (memory == NULL || cpuset == NULL || cpu == NULL || cpuacct == NULL) {
+ log_debug(os, container)("Required cgroup subsystems not found");
+ return;
+ }
/*
* Read /proc/self/cgroup and map host mount point to
@@ -383,12 +388,7 @@ void OSContainer::init() {
}
}
- if (cgroup != NULL) fclose(cgroup);
-
- if (memory == NULL || cpuset == NULL || cpu == NULL) {
- log_debug(os, container)("Required cgroup subsystems not found");
- return;
- }
+ fclose(cgroup);
// We need to update the amount of physical memory now that
// command line arguments have been processed.
diff --git a/src/hotspot/os/linux/osThread_linux.cpp b/src/hotspot/os/linux/osThread_linux.cpp
index 381e7b0e7ba..6f7e074a522 100644
--- a/src/hotspot/os/linux/osThread_linux.cpp
+++ b/src/hotspot/os/linux/osThread_linux.cpp
@@ -23,6 +23,7 @@
*/
// no precompiled headers
+#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/osThread.hpp"
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index 548e7823f7f..a91fac1b9fe 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -59,6 +59,7 @@
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/timer.hpp"
#include "semaphore_posix.hpp"
#include "services/attachListener.hpp"
@@ -129,6 +130,7 @@
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
#define LARGEPAGES_BIT (1 << 6)
+#define DAX_SHARED_BIT (1 << 8)
////////////////////////////////////////////////////////////////////////////////
// global variables
julong os::Linux::_physical_memory = 0;
@@ -1646,7 +1648,10 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
//
// Dynamic loader will make all stacks executable after
// this function returns, and will not do that again.
- assert(Threads::first() == NULL, "no Java threads should exist yet.");
+#ifdef ASSERT
+ ThreadsListHandle tlh;
+ assert(tlh.length() == 0, "no Java threads should exist yet.");
+#endif
} else {
warning("You have loaded library %s which might have disabled stack guard. "
"The VM will try to fix the stack guard now.\n"
@@ -1874,16 +1879,13 @@ void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf,
// may have been queued at the same time.
if (!_stack_is_executable) {
- JavaThread *jt = Threads::first();
-
- while (jt) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
if (!jt->stack_guard_zone_unused() && // Stack not yet fully initialized
jt->stack_guards_enabled()) { // No pending stack overflow exceptions
if (!os::guard_memory((char *)jt->stack_end(), jt->stack_guard_zone_size())) {
warning("Attempt to reguard stack yellow zone failed.");
}
}
- jt = jt->next();
}
}
@@ -3369,10 +3371,13 @@ bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
// effective only if the bit 2 is cleared)
// - (bit 5) hugetlb private memory
// - (bit 6) hugetlb shared memory
+// - (bit 7) dax private memory
+// - (bit 8) dax shared memory
//
-static void set_coredump_filter(void) {
+static void set_coredump_filter(bool largepages, bool dax_shared) {
FILE *f;
long cdm;
+ bool filter_changed = false;
if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
return;
@@ -3385,8 +3390,15 @@ static void set_coredump_filter(void) {
rewind(f);
- if ((cdm & LARGEPAGES_BIT) == 0) {
+ if (largepages && (cdm & LARGEPAGES_BIT) == 0) {
cdm |= LARGEPAGES_BIT;
+ filter_changed = true;
+ }
+ if (dax_shared && (cdm & DAX_SHARED_BIT) == 0) {
+ cdm |= DAX_SHARED_BIT;
+ filter_changed = true;
+ }
+ if (filter_changed) {
fprintf(f, "%#lx", cdm);
}
@@ -3525,7 +3537,7 @@ void os::large_page_init() {
size_t large_page_size = Linux::setup_large_page_size();
UseLargePages = Linux::setup_large_page_type(large_page_size);
- set_coredump_filter();
+ set_coredump_filter(true /*largepages*/, false /*dax_shared*/);
}
#ifndef SHM_HUGETLB
@@ -3896,6 +3908,17 @@ bool os::can_execute_large_page_memory() {
return UseTransparentHugePages || UseHugeTLBFS;
}
+char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
+ assert(file_desc >= 0, "file_desc is not valid");
+ char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
+ if (result != NULL) {
+ if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
+ vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
+ }
+ }
+ return result;
+}
+
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
@@ -4947,25 +4970,20 @@ jint os::init_2(void) {
UseNUMA = false;
}
}
- // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
- // we can make the adaptive lgrp chunk resizing work. If the user specified
- // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
- // disable adaptive resizing.
- if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
- if (FLAG_IS_DEFAULT(UseNUMA)) {
- UseNUMA = false;
- } else {
- if (FLAG_IS_DEFAULT(UseLargePages) &&
- FLAG_IS_DEFAULT(UseSHM) &&
- FLAG_IS_DEFAULT(UseHugeTLBFS)) {
- UseLargePages = false;
- } else if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) {
- warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)");
- UseAdaptiveSizePolicy = false;
- UseAdaptiveNUMAChunkSizing = false;
- }
+
+ if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
+ // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
+ // we can make the adaptive lgrp chunk resizing work. If the user specified both
+ // UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn
+ // and disable adaptive resizing.
+ if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) {
+ warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, "
+ "disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)");
+ UseAdaptiveSizePolicy = false;
+ UseAdaptiveNUMAChunkSizing = false;
}
}
+
if (!UseNUMA && ForceNUMA) {
UseNUMA = true;
}
@@ -5012,6 +5030,9 @@ jint os::init_2(void) {
// initialize thread priority policy
prio_init();
+ if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
+ set_coredump_filter(false /*largepages*/, true /*dax_shared*/);
+ }
return JNI_OK;
}
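The coredump filter update above is a plain read-modify-write of /proc/self/coredump_filter, now done once for large pages and once more when AllocateHeapAt puts the heap on a DAX filesystem. A minimal sketch of that read-modify-write (not VM code; bit 6 is hugetlb shared, bit 8 is DAX shared, as listed in the comment above):

  #include <cstdio>

  static void set_coredump_bits(long extra_bits) {
    FILE* f = fopen("/proc/self/coredump_filter", "r+");
    if (f == NULL) return;
    long cdm = 0;
    if (fscanf(f, "%lx", &cdm) != 1) { fclose(f); return; }
    rewind(f);
    if ((cdm & extra_bits) != extra_bits) {   // write back only on change
      cdm |= extra_bits;
      fprintf(f, "%#lx", cdm);
    }
    fclose(f);
  }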
diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp
index fed874d30eb..8c7cf351ef0 100644
--- a/src/hotspot/os/posix/os_posix.cpp
+++ b/src/hotspot/os/posix/os_posix.cpp
@@ -40,6 +40,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -52,6 +53,20 @@
#endif
#define IS_VALID_PID(p) (p > 0 && p < MAX_PID)
+#ifndef MAP_ANONYMOUS
+ #define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#define check_with_errno(check_type, cond, msg) \
+ do { \
+ int err = errno; \
+ check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err), \
+ os::errno_name(err)); \
+} while (false)
+
+#define assert_with_errno(cond, msg) check_with_errno(assert, cond, msg)
+#define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg)
+
// Check core dump limit and report possible place where core can be found
void os::check_dump_limit(char* buffer, size_t bufferSize) {
if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
@@ -145,10 +160,124 @@ void os::wait_for_keypress_at_exit(void) {
return;
}
+int os::create_file_for_heap(const char* dir) {
+
+ const char name_template[] = "/jvmheap.XXXXXX";
+
+ char *fullname = (char*)os::malloc((strlen(dir) + strlen(name_template) + 1), mtInternal);
+ if (fullname == NULL) {
+ vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
+ return -1;
+ }
+ (void)strncpy(fullname, dir, strlen(dir)+1);
+ (void)strncat(fullname, name_template, strlen(name_template));
+
+ os::native_path(fullname);
+
+ sigset_t set, oldset;
+ int ret = sigfillset(&set);
+ assert_with_errno(ret == 0, "sigfillset returned error");
+
+ // set the file creation mask.
+ mode_t file_mode = S_IRUSR | S_IWUSR;
+
+ // create a new file.
+ int fd = mkstemp(fullname);
+
+ if (fd < 0) {
+ warning("Could not create file for heap with template %s", fullname);
+ os::free(fullname);
+ return -1;
+ }
+
+ // delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted.
+ ret = unlink(fullname);
+ assert_with_errno(ret == 0, "unlink returned error");
+
+ os::free(fullname);
+ return fd;
+}
+
+static char* reserve_mmapped_memory(size_t bytes, char* requested_addr) {
+ char * addr;
+ int flags = MAP_PRIVATE NOT_AIX( | MAP_NORESERVE ) | MAP_ANONYMOUS;
+ if (requested_addr != NULL) {
+ assert((uintptr_t)requested_addr % os::vm_page_size() == 0, "Requested address should be aligned to OS page size");
+ flags |= MAP_FIXED;
+ }
+
+ // Map reserved/uncommitted pages PROT_NONE so we fail early if we
+ // touch an uncommitted page. Otherwise, the read/write might
+ // succeed if we have enough swap space to back the physical page.
+ addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
+ flags, -1, 0);
+
+ if (addr != MAP_FAILED) {
+ MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC);
+ return addr;
+ }
+ return NULL;
+}
+
+static int util_posix_fallocate(int fd, off_t offset, off_t len) {
+#ifdef __APPLE__
+ fstore_t store = { F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, len };
+ // First we try to get a continuous chunk of disk space
+ int ret = fcntl(fd, F_PREALLOCATE, &store);
+ if (ret == -1) {
+ // Maybe we are too fragmented, try to allocate non-continuous range
+ store.fst_flags = F_ALLOCATEALL;
+ ret = fcntl(fd, F_PREALLOCATE, &store);
+ }
+ if(ret != -1) {
+ return ftruncate(fd, len);
+ }
+ return -1;
+#else
+ return posix_fallocate(fd, offset, len);
+#endif
+}
+
+// Map the given address range to the provided file descriptor.
+char* os::map_memory_to_file(char* base, size_t size, int fd) {
+ assert(fd != -1, "File descriptor is not valid");
+
+ // allocate space for the file
+ if (util_posix_fallocate(fd, 0, (off_t)size) != 0) {
+ vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory."));
+ return NULL;
+ }
+
+ int prot = PROT_READ | PROT_WRITE;
+ int flags = MAP_SHARED;
+ if (base != NULL) {
+ flags |= MAP_FIXED;
+ }
+ char* addr = (char*)mmap(base, size, prot, flags, fd, 0);
+
+ if (addr == MAP_FAILED) {
+ return NULL;
+ }
+ if (base != NULL && addr != base) {
+ if (!os::release_memory(addr, size)) {
+ warning("Could not release memory on unsuccessful file mapping");
+ }
+ return NULL;
+ }
+ return addr;
+}
+
+char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
+ assert(fd != -1, "File descriptor is not valid");
+ assert(base != NULL, "Base cannot be NULL");
+
+ return map_memory_to_file(base, size, fd);
+}
+
// Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
// so on posix, unmap the section at the start and at the end of the chunk that we mapped
// rather than unmapping and remapping the whole chunk to get requested alignment.
-char* os::reserve_memory_aligned(size_t size, size_t alignment) {
+char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
"Alignment must be a multiple of allocation granularity (page size)");
assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
@@ -156,7 +285,20 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment) {
size_t extra_size = size + alignment;
assert(extra_size >= size, "overflow, size is too large to allow alignment");
- char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
+ char* extra_base;
+ if (file_desc != -1) {
+ // For file mapping, we do not call os::reserve_memory(extra_size, NULL, alignment, file_desc) because
+ // we need to deal with shrinking of the file space later when we release extra memory after alignment.
+ // We also cannot call os::reserve_memory() with file_desc set to -1 because on AIX we might get SHM memory.
+ // So we call a helper function here to reserve the memory for us. After we have an aligned base,
+ // we will replace the anonymous mapping with a file mapping.
+ extra_base = reserve_mmapped_memory(extra_size, NULL);
+ if (extra_base != NULL) {
+ MemTracker::record_virtual_memory_reserve((address)extra_base, extra_size, CALLER_PC);
+ }
+ } else {
+ extra_base = os::reserve_memory(extra_size, NULL, alignment);
+ }
if (extra_base == NULL) {
return NULL;
@@ -183,6 +325,13 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment) {
os::release_memory(extra_base + begin_offset + size, end_offset);
}
+ if (file_desc != -1) {
+ // After we have an aligned address, we can replace anonymous mapping with file mapping
+ if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == NULL) {
+ vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
+ }
+ MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC);
+ }
return aligned_base;
}
@@ -478,8 +627,7 @@ int os::sleep(Thread* thread, jlong millis, bool interruptible) {
// interrupt support
void os::interrupt(Thread* thread) {
- assert(Thread::current() == thread || Threads_lock->owned_by_self(),
- "possibility of dangling Thread pointer");
+ debug_only(Thread::check_for_dangling_thread_pointer(thread);)
OSThread* osthread = thread->osthread();
@@ -499,12 +647,10 @@ void os::interrupt(Thread* thread) {
ParkEvent * ev = thread->_ParkEvent ;
if (ev != NULL) ev->unpark() ;
-
}
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
- assert(Thread::current() == thread || Threads_lock->owned_by_self(),
- "possibility of dangling Thread pointer");
+ debug_only(Thread::check_for_dangling_thread_pointer(thread);)
OSThread* osthread = thread->osthread();
@@ -1351,16 +1497,6 @@ void os::ThreadCrashProtection::check_crash_protection(int sig,
}
}
-#define check_with_errno(check_type, cond, msg) \
- do { \
- int err = errno; \
- check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err), \
- os::errno_name(err)); \
-} while (false)
-
-#define assert_with_errno(cond, msg) check_with_errno(assert, cond, msg)
-#define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg)
-
// POSIX unamed semaphores are not supported on OS X.
#ifndef __APPLE__
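Taken together, the POSIX pieces above implement one scheme: create an unlinked temporary file under the AllocateHeapAt directory, reserve an anonymous range to obtain alignment, then remap the aligned range onto the file with MAP_FIXED. A self-contained sketch of that scheme (simplified: ftruncate stands in for posix_fallocate, and the /tmp path is only an example):

  #include <sys/mman.h>
  #include <cstdio>
  #include <cstdlib>
  #include <cstring>
  #include <unistd.h>

  int main() {
    const size_t size = 16 * 1024 * 1024;
    char tmpl[] = "/tmp/jvmheap.XXXXXX";
    int fd = mkstemp(tmpl);
    if (fd < 0) { perror("mkstemp"); return 1; }
    unlink(tmpl);                               // space is released when fd closes

    if (ftruncate(fd, (off_t)size) != 0) { perror("ftruncate"); return 1; }

    // Reserve an anonymous range first (stand-in for reserve_mmapped_memory).
    void* base = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) { perror("mmap"); return 1; }

    // Replace the reservation with the file mapping at the same address.
    void* heap = mmap(base, size, PROT_READ | PROT_WRITE,
                      MAP_SHARED | MAP_FIXED, fd, 0);
    if (heap == MAP_FAILED) { perror("mmap MAP_FIXED"); return 1; }

    memset(heap, 0, 4096);                      // touch the first page
    printf("file-backed range at %p\n", heap);
    munmap(heap, size);
    close(fd);
    return 0;
  }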
diff --git a/src/hotspot/os/solaris/os_solaris.cpp b/src/hotspot/os/solaris/os_solaris.cpp
index f136cec8275..ef7c1deaea4 100644
--- a/src/hotspot/os/solaris/os_solaris.cpp
+++ b/src/hotspot/os/solaris/os_solaris.cpp
@@ -2585,6 +2585,17 @@ char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
return addr;
}
+char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
+ assert(file_desc >= 0, "file_desc is not valid");
+ char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
+ if (result != NULL) {
+ if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
+ vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
+ }
+ }
+ return result;
+}
+
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index c52cddd391c..656a119d4d4 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -2904,6 +2904,75 @@ void os::large_page_init() {
UseLargePages = success;
}
+int os::create_file_for_heap(const char* dir) {
+
+ const char name_template[] = "/jvmheap.XXXXXX";
+ char *fullname = (char*)os::malloc((strlen(dir) + strlen(name_template) + 1), mtInternal);
+ if (fullname == NULL) {
+ vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
+ return -1;
+ }
+
+ (void)strncpy(fullname, dir, strlen(dir)+1);
+ (void)strncat(fullname, name_template, strlen(name_template));
+
+ os::native_path(fullname);
+
+ char *path = _mktemp(fullname);
+ if (path == NULL) {
+ warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
+ os::free(fullname);
+ return -1;
+ }
+
+ int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
+
+ os::free(fullname);
+ if (fd < 0) {
+ warning("Problem opening file for heap (%s)", os::strerror(errno));
+ return -1;
+ }
+ return fd;
+}
+
+// If 'base' is not NULL, function will return NULL if it cannot get 'base'
+char* os::map_memory_to_file(char* base, size_t size, int fd) {
+ assert(fd != -1, "File descriptor is not valid");
+
+ HANDLE fh = (HANDLE)_get_osfhandle(fd);
+#ifdef _LP64
+ HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
+ (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
+#else
+ HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
+ 0, (DWORD)size, NULL);
+#endif
+ if (fileMapping == NULL) {
+ if (GetLastError() == ERROR_DISK_FULL) {
+ vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
+ }
+ else {
+ vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
+ }
+
+ return NULL;
+ }
+
+ LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
+
+ CloseHandle(fileMapping);
+
+ return (char*)addr;
+}
+
+char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
+ assert(fd != -1, "File descriptor is not valid");
+ assert(base != NULL, "Base address cannot be NULL");
+
+ release_memory(base, size);
+ return map_memory_to_file(base, size, fd);
+}
+
// On win32, one cannot release just a part of reserved memory, it's an
// all or nothing deal. When we split a reservation, we must break the
// reservation into two reservations.
@@ -2923,7 +2992,7 @@ void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
-char* os::reserve_memory_aligned(size_t size, size_t alignment) {
+char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
"Alignment must be a multiple of allocation granularity (page size)");
assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
@@ -2934,16 +3003,20 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment) {
char* aligned_base = NULL;
do {
- char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
+ char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
if (extra_base == NULL) {
return NULL;
}
// Do manual alignment
aligned_base = align_up(extra_base, alignment);
- os::release_memory(extra_base, extra_size);
+ if (file_desc != -1) {
+ os::unmap_memory(extra_base, extra_size);
+ } else {
+ os::release_memory(extra_base, extra_size);
+ }
- aligned_base = os::reserve_memory(size, aligned_base);
+ aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
} while (aligned_base == NULL);
@@ -2989,6 +3062,11 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
return reserve_memory(bytes, requested_addr);
}
+char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
+ assert(file_desc >= 0, "file_desc is not valid");
+ return map_memory_to_file(requested_addr, bytes, file_desc);
+}
+
size_t os::large_page_size() {
return _large_page_size;
}
@@ -3490,9 +3568,7 @@ OSReturn os::get_native_priority(const Thread* const thread,
void os::hint_no_preempt() {}
void os::interrupt(Thread* thread) {
- assert(!thread->is_Java_thread() || Thread::current() == thread ||
- Threads_lock->owned_by_self(),
- "possibility of dangling Thread pointer");
+ debug_only(Thread::check_for_dangling_thread_pointer(thread);)
OSThread* osthread = thread->osthread();
osthread->set_interrupted(true);
@@ -3513,8 +3589,7 @@ void os::interrupt(Thread* thread) {
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
- assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
- "possibility of dangling Thread pointer");
+ debug_only(Thread::check_for_dangling_thread_pointer(thread);)
OSThread* osthread = thread->osthread();
// There is no synchronization between the setting of the interrupt
diff --git a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
index 0713b6de460..af9eb3fb85c 100644
--- a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
+++ b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
@@ -30,74 +30,6 @@
// Implementation of class atomic
-#ifdef M68K
-
-/*
- * __m68k_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Returns newval on success and oldval if no exchange happened.
- * This implementation is processor specific and works on
- * 68020 68030 68040 and 68060.
- *
- * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
- * instruction.
- * Using a kernelhelper would be better for arch complete implementation.
- *
- */
-
-static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
- int ret;
- __asm __volatile ("cas%.l %0,%2,%1"
- : "=d" (ret), "+m" (*(ptr))
- : "d" (newval), "0" (oldval));
- return ret;
-}
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
- is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
- `*PTR' before the operation.*/
-static inline int m68k_compare_and_swap(int newval,
- volatile int *ptr,
- int oldval) {
- for (;;) {
- int prev = *ptr;
- if (prev != oldval)
- return prev;
-
- if (__m68k_cmpxchg (prev, newval, ptr) == newval)
- // Success.
- return prev;
-
- // We failed even though prev == oldval. Try again.
- }
-}
-
-/* Atomically add an int to memory. */
-static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
- for (;;) {
- // Loop until success.
-
- int prev = *ptr;
-
- if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
- return prev + add_value;
- }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
- contents of `*PTR'. */
-static inline int m68k_lock_test_and_set(int newval, volatile int *ptr) {
- for (;;) {
- // Loop until success.
- int prev = *ptr;
-
- if (__m68k_cmpxchg (prev, newval, ptr) == prev)
- return prev;
- }
-}
-#endif // M68K
-
#ifdef ARM
/*
@@ -175,12 +107,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
#ifdef ARM
return add_using_helper(arm_add_and_fetch, add_value, dest);
-#else
-#ifdef M68K
- return add_using_helper(m68k_add_and_fetch, add_value, dest);
#else
return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
#endif // ARM
}
@@ -200,9 +128,6 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
return xchg_using_helper(arm_lock_test_and_set, exchange_value, dest);
-#else
-#ifdef M68K
- return xchg_using_helper(m68k_lock_test_and_set, exchange_value, dest);
#else
// __sync_lock_test_and_set is a bizarrely named atomic exchange
// operation. Note that some platforms only support this with the
@@ -215,7 +140,6 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
// barrier.
__sync_synchronize();
return result;
-#endif // M68K
#endif // ARM
}
@@ -242,12 +166,8 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
return cmpxchg_using_helper(arm_compare_and_swap, exchange_value, dest, compare_value);
-#else
-#ifdef M68K
- return cmpxchg_using_helper(m68k_compare_and_swap, exchange_value, dest, compare_value);
#else
return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // M68K
#endif // ARM
}
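With the M68K helpers removed, the zero variant relies on the GCC __sync builtins everywhere except ARM. A tiny standalone sketch of those builtins (not VM code):

  #include <cstdint>
  #include <cassert>

  int main() {
    volatile int32_t v = 40;
    int32_t sum  = __sync_add_and_fetch(&v, 2);             // v == 42, returns 42
    int32_t old  = __sync_val_compare_and_swap(&v, 42, 7);  // returns 42, v == 7
    int32_t prev = __sync_lock_test_and_set(&v, 11);        // returns 7, v == 11
    __sync_synchronize();                                    // full memory barrier
    assert(sum == 42 && old == 42 && prev == 7 && v == 11);
    return 0;
  }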
diff --git a/src/hotspot/os_cpu/linux_zero/os_linux_zero.hpp b/src/hotspot/os_cpu/linux_zero/os_linux_zero.hpp
index 8e6dc325c4c..a36e4792efd 100644
--- a/src/hotspot/os_cpu/linux_zero/os_linux_zero.hpp
+++ b/src/hotspot/os_cpu/linux_zero/os_linux_zero.hpp
@@ -36,12 +36,18 @@
// Atomically copy 64 bits of data
static void atomic_copy64(const volatile void *src, volatile void *dst) {
-#if defined(PPC32)
+#if defined(PPC32) && !defined(__SPE__)
double tmp;
asm volatile ("lfd %0, %2\n"
"stfd %0, %1\n"
: "=&f"(tmp), "=Q"(*(volatile double*)dst)
: "Q"(*(volatile double*)src));
+#elif defined(PPC32) && defined(__SPE__)
+ long tmp;
+ asm volatile ("evldd %0, %2\n"
+ "evstdd %0, %1\n"
+ : "=&r"(tmp), "=Q"(*(volatile long*)dst)
+ : "Q"(*(volatile long*)src));
#elif defined(S390) && !defined(_LP64)
double tmp;
asm volatile ("ld %0, 0(%1)\n"
diff --git a/src/hotspot/share/adlc/formssel.cpp b/src/hotspot/share/adlc/formssel.cpp
index 73229920040..fafd0ce7b27 100644
--- a/src/hotspot/share/adlc/formssel.cpp
+++ b/src/hotspot/share/adlc/formssel.cpp
@@ -4034,6 +4034,7 @@ int MatchRule::is_expensive() const {
strcmp(opType,"ModF")==0 ||
strcmp(opType,"ModI")==0 ||
strcmp(opType,"SqrtD")==0 ||
+ strcmp(opType,"SqrtF")==0 ||
strcmp(opType,"TanD")==0 ||
strcmp(opType,"ConvD2F")==0 ||
strcmp(opType,"ConvD2I")==0 ||
@@ -4167,7 +4168,7 @@ bool MatchRule::is_vector() const {
"DivVF","DivVD",
"AbsVF","AbsVD",
"NegVF","NegVD",
- "SqrtVD",
+ "SqrtVD","SqrtVF",
"AndV" ,"XorV" ,"OrV",
"AddReductionVI", "AddReductionVL",
"AddReductionVF", "AddReductionVD",
diff --git a/src/hotspot/share/aot/aotCodeHeap.cpp b/src/hotspot/share/aot/aotCodeHeap.cpp
index 2a9b3982cd2..e22e228acf1 100644
--- a/src/hotspot/share/aot/aotCodeHeap.cpp
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp
@@ -167,6 +167,7 @@ void AOTLib::verify_config() {
verify_flag(_config->_compactFields, CompactFields, "CompactFields");
verify_flag(_config->_enableContended, EnableContended, "EnableContended");
verify_flag(_config->_restrictContended, RestrictContended, "RestrictContended");
+ verify_flag(_config->_threadLocalHandshakes, ThreadLocalHandshakes, "ThreadLocalHandshakes");
if (!TieredCompilation && _config->_tieredAOT) {
handle_config_error("Shared file %s error: Expected to run with tiered compilation on", _name);
diff --git a/src/hotspot/share/aot/aotCodeHeap.hpp b/src/hotspot/share/aot/aotCodeHeap.hpp
index 7bfd5c67531..ae6e9ff2556 100644
--- a/src/hotspot/share/aot/aotCodeHeap.hpp
+++ b/src/hotspot/share/aot/aotCodeHeap.hpp
@@ -92,7 +92,7 @@ typedef struct {
} AOTHeader;
typedef struct {
- enum { CONFIG_SIZE = 7 * jintSize + 11 };
+ enum { CONFIG_SIZE = 7 * jintSize + 12 };
// 7 int values
int _config_size;
int _narrowOopShift;
@@ -101,7 +101,7 @@ typedef struct {
int _fieldsAllocationStyle;
int _objectAlignment;
int _codeSegmentSize;
- // byte[11] array map to boolean values here
+ // byte[12] array map to boolean values here
bool _debug_VM;
bool _useCompressedOops;
bool _useCompressedClassPointers;
@@ -113,6 +113,7 @@ typedef struct {
bool _enableContended;
bool _restrictContended;
bool _omitAssertions;
+ bool _threadLocalHandshakes;
} AOTConfiguration;
class AOTLib : public CHeapObj<mtCode> {
diff --git a/src/hotspot/share/aot/aotLoader.cpp b/src/hotspot/share/aot/aotLoader.cpp
index c91c04b9b1b..9b77338e43d 100644
--- a/src/hotspot/share/aot/aotLoader.cpp
+++ b/src/hotspot/share/aot/aotLoader.cpp
@@ -146,15 +146,6 @@ void AOTLoader::initialize() {
return;
}
- const char* home = Arguments::get_java_home();
- const char* file_separator = os::file_separator();
-
- for (int i = 0; i < (int) (sizeof(modules) / sizeof(const char*)); i++) {
- char library[JVM_MAXPATHLEN];
- jio_snprintf(library, sizeof(library), "%s%slib%slib%s%s%s%s", home, file_separator, file_separator, modules[i], UseCompressedOops ? "-coop" : "", UseG1GC ? "" : "-nong1", os::dll_file_extension());
- load_library(library, false);
- }
-
// Scan the AOTLibrary option.
if (AOTLibrary != NULL) {
const int len = (int)strlen(AOTLibrary);
@@ -172,6 +163,16 @@ void AOTLoader::initialize() {
}
}
}
+
+ // Load well-known AOT libraries from the Java installation directory.
+ const char* home = Arguments::get_java_home();
+ const char* file_separator = os::file_separator();
+
+ for (int i = 0; i < (int) (sizeof(modules) / sizeof(const char*)); i++) {
+ char library[JVM_MAXPATHLEN];
+ jio_snprintf(library, sizeof(library), "%s%slib%slib%s%s%s%s", home, file_separator, file_separator, modules[i], UseCompressedOops ? "-coop" : "", UseG1GC ? "" : "-nong1", os::dll_file_extension());
+ load_library(library, false);
+ }
}
}
@@ -239,6 +240,21 @@ void AOTLoader::set_narrow_klass_shift() {
}
void AOTLoader::load_library(const char* name, bool exit_on_error) {
+ // Skip library if a library with the same name is already loaded.
+ const int file_separator = *os::file_separator();
+ const char* start = strrchr(name, file_separator);
+ const char* new_name = (start == NULL) ? name : (start + 1);
+ FOR_ALL_AOT_LIBRARIES(lib) {
+ const char* lib_name = (*lib)->name();
+ start = strrchr(lib_name, file_separator);
+ const char* old_name = (start == NULL) ? lib_name : (start + 1);
+ if (strcmp(old_name, new_name) == 0) {
+ if (PrintAOT) {
+ warning("AOT library %s is already loaded as %s.", name, lib_name);
+ }
+ return;
+ }
+ }
char ebuf[1024];
void* handle = os::dll_load(name, ebuf, sizeof ebuf);
if (handle == NULL) {
diff --git a/src/hotspot/share/c1/c1_LIR.hpp b/src/hotspot/share/c1/c1_LIR.hpp
index 757c5e79274..980cd4d07dc 100644
--- a/src/hotspot/share/c1/c1_LIR.hpp
+++ b/src/hotspot/share/c1/c1_LIR.hpp
@@ -196,8 +196,8 @@ class LIR_OprDesc: public CompilationResourceObj {
// data opr-type opr-kind
// +--------------+-------+-------+
// [max...........|7 6 5 4|3 2 1 0]
- // ^
- // is_pointer bit
+ // ^
+ // is_pointer bit
//
// lowest bit cleared, means it is a structure pointer
// we need 4 bits to represent types
diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp
index 5074ba4ca83..6326ca1e76a 100644
--- a/src/hotspot/share/classfile/classFileParser.cpp
+++ b/src/hotspot/share/classfile/classFileParser.cpp
@@ -86,7 +86,7 @@
#define JAVA_CLASSFILE_MAGIC 0xCAFEBABE
#define JAVA_MIN_SUPPORTED_VERSION 45
-#define JAVA_MAX_SUPPORTED_VERSION 53
+#define JAVA_MAX_SUPPORTED_VERSION 54
#define JAVA_MAX_SUPPORTED_MINOR_VERSION 0
// Used for two backward compatibility reasons:
@@ -108,6 +108,8 @@
#define JAVA_9_VERSION 53
+#define JAVA_10_VERSION 54
+
void ClassFileParser::set_class_bad_constant_seen(short bad_constant) {
assert((bad_constant == 19 || bad_constant == 20) && _major_version >= JAVA_9_VERSION,
"Unexpected bad constant pool entry");
diff --git a/src/hotspot/share/classfile/classListParser.cpp b/src/hotspot/share/classfile/classListParser.cpp
index 8f7be316b50..b1fa759ae10 100644
--- a/src/hotspot/share/classfile/classListParser.cpp
+++ b/src/hotspot/share/classfile/classListParser.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,13 +23,32 @@
*/
#include "precompiled.hpp"
+#include "jvm.h"
+#include "jimage.hpp"
#include "classfile/classListParser.hpp"
-#include "runtime/os.hpp"
-#include "runtime/java.hpp"
+#include "classfile/classLoaderExt.hpp"
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#include "memory/metaspaceShared.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/fieldType.hpp"
+#include "runtime/javaCalls.hpp"
+#include "utilities/defaultStream.hpp"
+#include "utilities/hashtable.inline.hpp"
+#include "utilities/macros.hpp"
+
+ClassListParser* ClassListParser::_instance = NULL;
ClassListParser::ClassListParser(const char* file) {
+ assert(_instance == NULL, "must be singleton");
+ _instance = this;
_classlist_file = file;
_file = fopen(file, "r");
+ _line_no = 0;
+ _interfaces = new (ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, true);
+
if (_file == NULL) {
char errmsg[JVM_MAXPATHLEN];
os::lasterror(errmsg, JVM_MAXPATHLEN);
@@ -41,6 +60,7 @@ ClassListParser::~ClassListParser() {
if (_file) {
fclose(_file);
}
+ _instance = NULL;
}
bool ClassListParser::parse_one_line() {
@@ -48,10 +68,10 @@ bool ClassListParser::parse_one_line() {
if (fgets(_line, sizeof(_line), _file) == NULL) {
return false;
}
- int line_len = (int)strlen(_line);
- if (line_len > _max_allowed_line_len) {
- tty->print_cr("input line too long (must be no longer than %d chars)", _max_allowed_line_len);
- vm_exit_during_initialization("Loading classlist failed");
+ ++ _line_no;
+ _line_len = (int)strlen(_line);
+ if (_line_len > _max_allowed_line_len) {
+ error("input line too long (must be no longer than %d chars)", _max_allowed_line_len);
}
if (*_line == '#') { // comment
continue;
@@ -59,8 +79,380 @@ bool ClassListParser::parse_one_line() {
break;
}
- // Remove trailing \r\n
- _line[strcspn(_line, "\r\n")] = 0;
+ _id = _unspecified;
+ _super = _unspecified;
+ _interfaces->clear();
+ _source = NULL;
+ _interfaces_specified = false;
+
+ {
+ int len = (int)strlen(_line);
+ int i;
+ // Replace \t\r\n with ' '
+ for (i=0; i<len; i++) {
+ if (_line[i] == '\t' || _line[i] == '\r' || _line[i] == '\n') {
+ _line[i] = ' ';
+ }
+ }
+
+ // Remove trailing newline/space
+ while (len > 0) {
+ if (_line[len-1] == ' ') {
+ _line[len-1] = '\0';
+ len --;
+ } else {
+ break;
+ }
+ }
+ _line_len = len;
+ _class_name = _line;
+ }
+
+ if ((_token = strchr(_line, ' ')) == NULL) {
+ // No optional arguments are specified.
+ return true;
+ }
+
+ // Mark the end of the name, and go to the next input char
+ *_token++ = '\0';
+
+ while (*_token) {
+ skip_whitespaces();
+
+ if (parse_int_option("id:", &_id)) {
+ continue;
+ } else if (parse_int_option("super:", &_super)) {
+ check_already_loaded("Super class", _super);
+ continue;
+ } else if (skip_token("interfaces:")) {
+ int i;
+ while (try_parse_int(&i)) {
+ check_already_loaded("Interface", i);
+ _interfaces->append(i);
+ }
+ } else if (skip_token("source:")) {
+ skip_whitespaces();
+ _source = _token;
+ char* s = strchr(_token, ' ');
+ if (s == NULL) {
+ break; // end of input line
+ } else {
+ *s = '\0'; // mark the end of _source
+ _token = s+1;
+ }
+ } else {
+ error("Unknown input");
+ }
+ }
+
+ // if src is specified
+ // id super interfaces must all be specified
+ // loader may be specified
+ // else
+ // # the class is loaded from classpath
+ // id may be specified
+ // super, interfaces, loader must not be specified
return true;
}
+void ClassListParser::skip_whitespaces() {
+ while (*_token == ' ' || *_token == '\t') {
+ _token ++;
+ }
+}
+
+void ClassListParser::skip_non_whitespaces() {
+ while (*_token && *_token != ' ' && *_token != '\t') {
+ _token ++;
+ }
+}
+
+void ClassListParser::parse_int(int* value) {
+ skip_whitespaces();
+ if (sscanf(_token, "%i", value) == 1) {
+ skip_non_whitespaces();
+ if (*value < 0) {
+ error("Error: negative integers not allowed (%d)", *value);
+ }
+ } else {
+ error("Error: expected integer");
+ }
+}
+
+bool ClassListParser::try_parse_int(int* value) {
+ skip_whitespaces();
+ if (sscanf(_token, "%i", value) == 1) {
+ skip_non_whitespaces();
+ return true;
+ }
+ return false;
+}
+
+bool ClassListParser::skip_token(const char* option_name) {
+ size_t len = strlen(option_name);
+ if (strncmp(_token, option_name, len) == 0) {
+ _token += len;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool ClassListParser::parse_int_option(const char* option_name, int* value) {
+ if (skip_token(option_name)) {
+ if (*value != _unspecified) {
+ error("%s specified twice", option_name);
+ } else {
+ parse_int(value);
+ return true;
+ }
+ }
+ return false;
+}
+
+void ClassListParser::print_specified_interfaces() {
+ const int n = _interfaces->length();
+ jio_fprintf(defaultStream::error_stream(), "Currently specified interfaces[%d] = {\n", n);
+ for (int i=0; i<n; i++) {
+ InstanceKlass* k = lookup_class_by_id(_interfaces->at(i));
+ jio_fprintf(defaultStream::error_stream(), " %4d = %s\n", _interfaces->at(i), k->name()->as_klass_external_name());
+ }
+ jio_fprintf(defaultStream::error_stream(), "}\n");
+}
+
+void ClassListParser::print_actual_interfaces(InstanceKlass *ik) {
+ int n = ik->local_interfaces()->length();
+ jio_fprintf(defaultStream::error_stream(), "Actual interfaces[%d] = {\n", n);
+ for (int i = 0; i < n; i++) {
+ InstanceKlass* e = InstanceKlass::cast(ik->local_interfaces()->at(i));
+ jio_fprintf(defaultStream::error_stream(), " %s\n", e->name()->as_klass_external_name());
+ }
+ jio_fprintf(defaultStream::error_stream(), "}\n");
+}
+
+void ClassListParser::error(const char *msg, ...) {
+ va_list ap;
+ va_start(ap, msg);
+ int error_index = _token - _line;
+ if (error_index >= _line_len) {
+ error_index = _line_len - 1;
+ }
+ if (error_index < 0) {
+ error_index = 0;
+ }
+
+ jio_fprintf(defaultStream::error_stream(),
+ "An error has occurred while processing class list file %s %d:%d.\n",
+ _classlist_file, _line_no, (error_index + 1));
+ jio_vfprintf(defaultStream::error_stream(), msg, ap);
+
+ if (_line_len <= 0) {
+ jio_fprintf(defaultStream::error_stream(), "\n");
+ } else {
+ jio_fprintf(defaultStream::error_stream(), ":\n");
+ for (int i=0; i<_line_len; i++) {
+ char c = _line[i];
+ if (c == '\0') {
+ jio_fprintf(defaultStream::error_stream(), "%s", " ");
+ } else {
+ jio_fprintf(defaultStream::error_stream(), "%c", c);
+ }
+ }
+ jio_fprintf(defaultStream::error_stream(), "\n");
+ for (int i=0; i<error_index; i++) {
+ jio_fprintf(defaultStream::error_stream(), " ");
+ }
+ jio_fprintf(defaultStream::error_stream(), "^\n");
+ }
+
+ vm_exit_during_initialization("class list format error.", NULL);
+ va_end(ap);
+}
+
+// This function is used for loading classes for customized class loaders
+// during archive dumping.
+InstanceKlass* ClassListParser::load_class_from_source(Symbol* class_name, TRAPS) {
+ assert(UseAppCDS, "must be");
+ if (!is_super_specified()) {
+ error("If source location is specified, super class must be also specified");
+ }
+ if (!is_id_specified()) {
+ error("If source location is specified, id must be also specified");
+ }
+ InstanceKlass* k = ClassLoaderExt::load_class(class_name, _source, THREAD);
+
+ if (k != NULL) {
+ if (k->local_interfaces()->length() != _interfaces->length()) {
+ print_specified_interfaces();
+ print_actual_interfaces(k);
+ error("The number of interfaces (%d) specified in class list does not match the class file (%d)",
+ _interfaces->length(), k->local_interfaces()->length());
+ }
+
+ if (!SystemDictionaryShared::add_non_builtin_klass(class_name, ClassLoaderData::the_null_class_loader_data(),
+ k, THREAD)) {
+ error("Duplicated class %s", _class_name);
+ }
+
+ // This tells JVM_FindLoadedClass to not find this class.
+ k->set_shared_classpath_index(UNREGISTERED_INDEX);
+ }
+
+ return k;
+}
+
+InstanceKlass* ClassListParser::load_current_class(TRAPS) {
+ TempNewSymbol class_name_symbol = SymbolTable::new_symbol(_class_name, THREAD);
+ guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol.");
+
+ InstanceKlass *klass = NULL;
+ if (!is_loading_from_source()) {
+ if (is_super_specified()) {
+ error("If source location is not specified, super class must not be specified");
+ }
+ if (are_interfaces_specified()) {
+ error("If source location is not specified, interface(s) must not be specified");
+ }
+
+ bool non_array = !FieldType::is_array(class_name_symbol);
+
+ Handle s = java_lang_String::create_from_symbol(class_name_symbol, CHECK_0);
+ // Translate to external class name format, i.e., convert '/' chars to '.'
+ Handle string = java_lang_String::externalize_classname(s, CHECK_0);
+ JavaValue result(T_OBJECT);
+ InstanceKlass* spec_klass = non_array ?
+ SystemDictionary::ClassLoader_klass() : SystemDictionary::Class_klass();
+ Symbol* method_name = non_array ?
+ vmSymbols::loadClass_name() : vmSymbols::forName_name();
+ Handle loader = Handle(THREAD, SystemDictionary::java_system_loader());
+
+ if (non_array) {
+ JavaCalls::call_virtual(&result,
+ loader, //SystemDictionary::java_system_loader(),
+ spec_klass,
+ method_name, //vmSymbols::loadClass_name(),
+ vmSymbols::string_class_signature(),
+ string,
+ THREAD);
+ } else {
+ JavaCalls::call_static(&result,
+ spec_klass,
+ method_name,
+ vmSymbols::string_class_signature(),
+ string,
+ CHECK_NULL);
+ }
+ assert(result.get_type() == T_OBJECT, "just checking");
+ oop obj = (oop) result.get_jobject();
+ if (!HAS_PENDING_EXCEPTION && (obj != NULL)) {
+ if (non_array) {
+ klass = InstanceKlass::cast(java_lang_Class::as_Klass(obj));
+ } else {
+ klass = static_cast<InstanceKlass*>(java_lang_Class::array_klass_acquire(obj));
+ }
+ } else { // load classes in bootclasspath/a
+ if (HAS_PENDING_EXCEPTION) {
+ CLEAR_PENDING_EXCEPTION;
+ }
+
+ if (non_array) {
+ Klass* k = SystemDictionary::resolve_or_null(class_name_symbol, CHECK_NULL);
+ if (k != NULL) {
+ klass = InstanceKlass::cast(k);
+ } else {
+ if (!HAS_PENDING_EXCEPTION) {
+ THROW_NULL(vmSymbols::java_lang_ClassNotFoundException());
+ }
+ }
+ }
+ }
+ } else {
+ // If "source:" tag is specified, all super class and super interfaces must be specified in the
+ // class list file.
+ if (UseAppCDS) {
+ klass = load_class_from_source(class_name_symbol, CHECK_NULL);
+ }
+ }
+
+ if (klass != NULL && is_id_specified()) {
+ int id = this->id();
+ SystemDictionaryShared::update_shared_entry(klass, id);
+ InstanceKlass* old = table()->lookup(id);
+ if (old != NULL && old != klass) {
+ error("Duplicated ID %d for class %s", id, _class_name);
+ }
+ table()->add(id, klass);
+ }
+
+ return klass;
+}
+
+bool ClassListParser::is_loading_from_source() {
+ return (_source != NULL);
+}
+
+InstanceKlass* ClassListParser::lookup_class_by_id(int id) {
+ InstanceKlass* klass = table()->lookup(id);
+ if (klass == NULL) {
+ error("Class ID %d has not been defined", id);
+ }
+ return klass;
+}
+
+
+InstanceKlass* ClassListParser::lookup_super_for_current_class(Symbol* super_name) {
+ if (!is_loading_from_source()) {
+ return NULL;
+ }
+
+ InstanceKlass* k = lookup_class_by_id(super());
+ if (super_name != k->name()) {
+ error("The specified super class %s (id %d) does not match actual super class %s",
+ k->name()->as_klass_external_name(), super(),
+ super_name->as_klass_external_name());
+ }
+ return k;
+}
+
+InstanceKlass* ClassListParser::lookup_interface_for_current_class(Symbol* interface_name) {
+ if (!is_loading_from_source()) {
+ return NULL;
+ }
+
+ const int n = _interfaces->length();
+ if (n == 0) {
+ error("Class %s implements the interface %s, but no interface has been specified in the input line",
+ _class_name, interface_name->as_klass_external_name());
+ ShouldNotReachHere();
+ }
+
+ int i;
+ for (i=0; i<n; i++) {
+ InstanceKlass* k = lookup_class_by_id(_interfaces->at(i));
+ if (interface_name == k->name()) {
+ return k;
+ }
+ }
+
+ // interface_name is not specified by the "interfaces:" keyword.
+ print_specified_interfaces();
+ error("The interface %s implemented by class %s does not match any of the specified interface IDs",
+ interface_name->as_klass_external_name(), _class_name);
+ ShouldNotReachHere();
+ return NULL;
+}
+
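
For context, ClassListParser consumes one class per line. Besides the class name, a line may carry the optional id:, super:, interfaces: and source: attributes that load_current_class() and load_class_from_source() act on above. A hypothetical sketch of such entries (class names, IDs and the JAR path are invented for illustration; the authoritative grammar is whatever parse_one_line() accepts):

  java/lang/Object id: 1
  java/lang/Runnable id: 2
  com/example/MyTask id: 100 super: 1 interfaces: 2 source: /tmp/app.jar

Lines without a source: attribute are resolved through the normal system/boot lookup path in load_current_class(). Lines with source: go through ClassLoaderExt::load_class() and, per the checks above, must also carry id:, super: and (if the class implements any) interfaces:, all referring to previously loaded classes by ID.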
diff --git a/src/hotspot/share/classfile/classListParser.hpp b/src/hotspot/share/classfile/classListParser.hpp
index 912ae3175a3..e6d48f41c8d 100644
--- a/src/hotspot/share/classfile/classListParser.hpp
+++ b/src/hotspot/share/classfile/classListParser.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,30 +27,122 @@
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
+#include "utilities/hashtable.hpp"
+
+class CDSClassInfo;
+
+// Look up from ID -> InstanceKlass*
+class ID2KlassTable : public Hashtable<InstanceKlass*, mtInternal> {
+public:
+ ID2KlassTable() : Hashtable<InstanceKlass*, mtInternal>(1987, sizeof(HashtableEntry<InstanceKlass*, mtInternal>)) { }
+ void add(int id, InstanceKlass* klass) {
+ unsigned int hash = (unsigned int)id;
+ HashtableEntry<InstanceKlass*, mtInternal>* entry = new_entry(hash, klass);
+ add_entry(hash_to_index(hash), entry);
+ }
+
+ InstanceKlass* lookup(int id) {
+ unsigned int hash = (unsigned int)id;
+ int index = hash_to_index(id);
+ for (HashtableEntry<InstanceKlass*, mtInternal>* e = bucket(index); e != NULL; e = e->next()) {
+ if (e->hash() == hash) {
+ return e->literal();
+ }
+ }
+ return NULL;
+ }
+};
class ClassListParser : public StackObj {
enum {
+ _unspecified = -999,
+
// Max number of bytes allowed per line in the classlist.
- // Theoretically Java class names could be 65535 bytes in length. In reality,
+ // Theoretically Java class names could be 65535 bytes in length. Also, an input line
+ // could have a very long path name up to JVM_MAXPATHLEN bytes in length. In reality,
// 4K bytes is more than enough.
_max_allowed_line_len = 4096,
_line_buf_extra = 10, // for detecting input too long
_line_buf_size = _max_allowed_line_len + _line_buf_extra
};
+ static ClassListParser* _instance; // the singleton.
const char* _classlist_file;
FILE* _file;
- char _line[_line_buf_size]; // The buffer that holds the current line.
+ ID2KlassTable _id2klass_table;
+
+ // The following fields contain information from the *current* line being
+ // parsed.
+ char _line[_line_buf_size]; // The buffer that holds the current line. Some characters in
+ // the buffer may be overwritten by '\0' during parsing.
+ int _line_len; // Original length of the input line.
+ int _line_no; // Line number for current line being parsed
+ const char* _class_name;
+ int _id;
+ int _super;
+ GrowableArray<int>* _interfaces;
+ bool _interfaces_specified;
+ const char* _source;
+
+ bool parse_int_option(const char* option_name, int* value);
+ InstanceKlass* load_class_from_source(Symbol* class_name, TRAPS);
+ ID2KlassTable *table() {
+ return &_id2klass_table;
+ }
+ InstanceKlass* lookup_class_by_id(int id);
+ void print_specified_interfaces();
+ void print_actual_interfaces(InstanceKlass *ik);
public:
ClassListParser(const char* file);
~ClassListParser();
+
+ static ClassListParser* instance() {
+ return _instance;
+ }
bool parse_one_line();
+ char* _token;
+ void error(const char* msg, ...);
+ void parse_int(int* value);
+ bool try_parse_int(int* value);
+ bool skip_token(const char* option_name);
+ void skip_whitespaces();
+ void skip_non_whitespaces();
+
+ bool is_id_specified() {
+ return _id != _unspecified;
+ }
+ bool is_super_specified() {
+ return _super != _unspecified;
+ }
+ bool are_interfaces_specified() {
+ return _interfaces->length() > 0;
+ }
+ int id() {
+ assert(is_id_specified(), "do not query unspecified id");
+ return _id;
+ }
+ int super() {
+ assert(is_super_specified(), "do not query unspecified super");
+ return _super;
+ }
+ void check_already_loaded(const char* which, int id) {
+ if (_id2klass_table.lookup(id) == NULL) {
+ error("%s id %d is not yet loaded", which, id);
+ }
+ }
const char* current_class_name() {
- return _line;
+ return _class_name;
}
+
+ InstanceKlass* load_current_class(TRAPS);
+
+ bool is_loading_from_source();
+
+ // Look up the super or interface of the current class being loaded
+ // (in this->load_current_class()).
+ InstanceKlass* lookup_super_for_current_class(Symbol* super_name);
+ InstanceKlass* lookup_interface_for_current_class(Symbol* interface_name);
};
-
-
-#endif // SHARE_VM_MEMORY_CLASSLISTPARSER_HPP
+#endif
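
Conceptually, ID2KlassTable is just an integer-keyed map from the id: value in the class list to the InstanceKlass* that was loaded for that line, so that later lines can refer to earlier classes by ID (see lookup_super_for_current_class() and lookup_interface_for_current_class()). A minimal standalone sketch of the same idea using the C++ standard library rather than HotSpot's Hashtable (purely illustrative, not VM code; FakeKlass stands in for InstanceKlass):

  #include <cstdio>
  #include <unordered_map>

  // Stand-in for InstanceKlass*; in the VM the mapped value is the loaded class.
  struct FakeKlass { const char* name; };

  int main() {
    std::unordered_map<int, FakeKlass*> id2klass;   // analogous to ID2KlassTable
    FakeKlass object_klass{"java/lang/Object"};
    id2klass[1] = &object_klass;                    // "java/lang/Object id: 1"

    // Later, "super: 1" on another line resolves through the table:
    auto it = id2klass.find(1);
    if (it == id2klass.end()) {
      std::puts("Class ID 1 has not been defined"); // mirrors lookup_class_by_id() error
      return 1;
    }
    std::printf("super = %s\n", it->second->name);
    return 0;
  }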
diff --git a/src/hotspot/share/classfile/classLoader.hpp b/src/hotspot/share/classfile/classLoader.hpp
index 3ba612ec747..202a170979f 100644
--- a/src/hotspot/share/classfile/classLoader.hpp
+++ b/src/hotspot/share/classfile/classLoader.hpp
@@ -26,6 +26,7 @@
#define SHARE_VM_CLASSFILE_CLASSLOADER_HPP
#include "jimage.hpp"
+#include "runtime/handles.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"
@@ -42,6 +43,7 @@
class JImageFile;
class ClassFileStream;
class PackageEntry;
+template <typename T> class GrowableArray;
class ClassPathEntry : public CHeapObj<mtClass> {
private:
diff --git a/src/hotspot/share/classfile/classLoaderExt.cpp b/src/hotspot/share/classfile/classLoaderExt.cpp
index 44efabec083..a7256bb8e72 100644
--- a/src/hotspot/share/classfile/classLoaderExt.cpp
+++ b/src/hotspot/share/classfile/classLoaderExt.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,14 +23,329 @@
*/
#include "precompiled.hpp"
+#include "classfile/classFileParser.hpp"
+#include "classfile/classFileStream.hpp"
#include "classfile/classListParser.hpp"
+#include "classfile/classLoader.hpp"
#include "classfile/classLoaderExt.hpp"
-#include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
+#include "classfile/classLoaderData.inline.hpp"
+#include "classfile/klassFactory.hpp"
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#include "classfile/vmSymbols.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/filemap.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/symbol.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/java.hpp"
+#include "runtime/javaCalls.hpp"
+#include "runtime/os.hpp"
+#include "services/threadService.hpp"
+#include "utilities/stringUtils.hpp"
+jshort ClassLoaderExt::_app_paths_start_index = ClassLoaderExt::max_classpath_index;
+bool ClassLoaderExt::_has_app_classes = false;
+bool ClassLoaderExt::_has_platform_classes = false;
+
+void ClassLoaderExt::setup_app_search_path() {
+ assert(DumpSharedSpaces, "this function is only used with -Xshare:dump and -XX:+UseAppCDS");
+ _app_paths_start_index = ClassLoader::num_boot_classpath_entries();
+ char* app_class_path = os::strdup(Arguments::get_appclasspath());
+
+ if (strcmp(app_class_path, ".") == 0) {
+ // This doesn't make any sense, even for AppCDS, so let's skip it. We
+ // don't want to throw an error here because -cp "." is usually assigned
+ // by the launcher when classpath is not specified.
+ trace_class_path("app loader class path (skipped)=", app_class_path);
+ } else {
+ trace_class_path("app loader class path=", app_class_path);
+ shared_paths_misc_info()->add_app_classpath(app_class_path);
+ ClassLoader::setup_app_search_path(app_class_path);
+ }
+}
+
+char* ClassLoaderExt::read_manifest(ClassPathEntry* entry, jint *manifest_size, bool clean_text, TRAPS) {
+ const char* name = "META-INF/MANIFEST.MF";
+ char* manifest;
+ jint size;
+
+ assert(entry->is_jar_file(), "must be");
+ manifest = (char*) ((ClassPathZipEntry*)entry )->open_entry(name, &size, true, CHECK_NULL);
+
+ if (manifest == NULL) { // No Manifest
+ *manifest_size = 0;
+ return NULL;
+ }
+
+
+ if (clean_text) {
+ // See http://docs.oracle.com/javase/6/docs/technotes/guides/jar/jar.html#JAR%20Manifest
+ // (1): replace all CR/LF and CR with LF
+ StringUtils::replace_no_expand(manifest, "\r\n", "\n");
+
+ // (2) remove all new-line continuation (remove all "\n " substrings)
+ StringUtils::replace_no_expand(manifest, "\n ", "");
+ }
+
+ *manifest_size = (jint)strlen(manifest);
+ return manifest;
+}
+
+char* ClassLoaderExt::get_class_path_attr(const char* jar_path, char* manifest, jint manifest_size) {
+ const char* tag = "Class-Path: ";
+ const int tag_len = (int)strlen(tag);
+ char* found = NULL;
+ char* line_start = manifest;
+ char* end = manifest + manifest_size;
+
+ assert(*end == 0, "must be nul-terminated");
+
+ while (line_start < end) {
+ char* line_end = strchr(line_start, '\n');
+ if (line_end == NULL) {
+ // The JAR spec requires the manifest file to be terminated by a new line.
+ break;
+ }
+ if (strncmp(tag, line_start, tag_len) == 0) {
+ if (found != NULL) {
+ // Same behavior as jdk/src/share/classes/java/util/jar/Attributes.java
+ // If duplicated entries are found, the last one is used.
+ tty->print_cr("Warning: Duplicate name in Manifest: %s.\n"
+ "Ensure that the manifest does not have duplicate entries, and\n"
+ "that blank lines separate individual sections in both your\n"
+ "manifest and in the META-INF/MANIFEST.MF entry in the jar file:\n%s\n", tag, jar_path);
+ }
+ found = line_start + tag_len;
+ assert(found <= line_end, "sanity");
+ *line_end = '\0';
+ }
+ line_start = line_end + 1;
+ }
+ return found;
+}
+
+void ClassLoaderExt::process_jar_manifest(ClassPathEntry* entry,
+ bool check_for_duplicates) {
+ Thread* THREAD = Thread::current();
+ ResourceMark rm(THREAD);
+ jint manifest_size;
+ char* manifest = read_manifest(entry, &manifest_size, CHECK);
+
+ if (manifest == NULL) {
+ return;
+ }
+
+ if (strstr(manifest, "Extension-List:") != NULL) {
+ tty->print_cr("-Xshare:dump does not support Extension-List in JAR manifest: %s", entry->name());
+ vm_exit(1);
+ }
+
+ char* cp_attr = get_class_path_attr(entry->name(), manifest, manifest_size);
+
+ if (cp_attr != NULL && strlen(cp_attr) > 0) {
+ trace_class_path("found Class-Path: ", cp_attr);
+
+ char sep = os::file_separator()[0];
+ const char* dir_name = entry->name();
+ const char* dir_tail = strrchr(dir_name, sep);
+ int dir_len;
+ if (dir_tail == NULL) {
+ dir_len = 0;
+ } else {
+ dir_len = dir_tail - dir_name + 1;
+ }
+
+ // Split the cp_attr by spaces, and add each file
+ char* file_start = cp_attr;
+ char* end = file_start + strlen(file_start);
+
+ while (file_start < end) {
+ char* file_end = strchr(file_start, ' ');
+ if (file_end != NULL) {
+ *file_end = 0;
+ file_end += 1;
+ } else {
+ file_end = end;
+ }
+
+ int name_len = (int)strlen(file_start);
+ if (name_len > 0) {
+ ResourceMark rm(THREAD);
+ char* libname = NEW_RESOURCE_ARRAY(char, dir_len + name_len + 1);
+ *libname = 0;
+ strncat(libname, dir_name, dir_len);
+ strncat(libname, file_start, name_len);
+ trace_class_path("library = ", libname);
+ ClassLoader::update_class_path_entry_list(libname, true, false);
+ }
+
+ file_start = file_end;
+ }
+ }
+}
+
+void ClassLoaderExt::setup_search_paths() {
+ if (UseAppCDS) {
+ shared_paths_misc_info()->record_app_offset();
+ ClassLoaderExt::setup_app_search_path();
+ }
+}
+
+Thread* ClassLoaderExt::Context::_dump_thread = NULL;
+
+bool ClassLoaderExt::check(ClassLoaderExt::Context *context,
+ const ClassFileStream* stream,
+ const int classpath_index) {
+ if (stream != NULL) {
+ // Ignore any App classes from signed JAR file during CDS archiving
+ // dumping
+ if (DumpSharedSpaces &&
+ SharedClassUtil::is_classpath_entry_signed(classpath_index) &&
+ classpath_index >= _app_paths_start_index) {
+ tty->print_cr("Preload Warning: Skipping %s from signed JAR",
+ context->class_name());
+ return false;
+ }
+ if (classpath_index >= _app_paths_start_index) {
+ _has_app_classes = true;
+ _has_platform_classes = true;
+ }
+ }
+
+ return true;
+}
+
+void ClassLoaderExt::record_result(ClassLoaderExt::Context *context,
+ Symbol* class_name,
+ const s2 classpath_index,
+ InstanceKlass* result,
+ TRAPS) {
+ assert(DumpSharedSpaces, "Sanity");
+
+ // We need to remember where the class comes from during dumping.
+ oop loader = result->class_loader();
+ s2 classloader_type = ClassLoader::BOOT_LOADER;
+ if (SystemDictionary::is_system_class_loader(loader)) {
+ classloader_type = ClassLoader::APP_LOADER;
+ ClassLoaderExt::set_has_app_classes();
+ } else if (SystemDictionary::is_platform_class_loader(loader)) {
+ classloader_type = ClassLoader::PLATFORM_LOADER;
+ ClassLoaderExt::set_has_platform_classes();
+ }
+ result->set_shared_classpath_index(classpath_index);
+ result->set_class_loader_type(classloader_type);
+}
+
+void ClassLoaderExt::finalize_shared_paths_misc_info() {
+ if (UseAppCDS) {
+ if (!_has_app_classes) {
+ shared_paths_misc_info()->pop_app();
+ }
+ }
+}
+
+// Load the class of the given name from the location given by path. The path is specified by
+// the "source:" in the class list file (see classListParser.cpp), and can be a directory or
+// a JAR file.
+InstanceKlass* ClassLoaderExt::load_class(Symbol* name, const char* path, TRAPS) {
+
+ assert(name != NULL, "invariant");
+ assert(DumpSharedSpaces && UseAppCDS, "this function is only used with -Xshare:dump and -XX:+UseAppCDS");
+ ResourceMark rm(THREAD);
+ const char* class_name = name->as_C_string();
+
+ const char* file_name = file_name_for_class_name(class_name,
+ name->utf8_length());
+ assert(file_name != NULL, "invariant");
+
+ // Lookup stream for parsing .class file
+ ClassFileStream* stream = NULL;
+ ClassPathEntry* e = find_classpath_entry_from_cache(path, CHECK_NULL);
+ if (e == NULL) {
+ return NULL;
+ }
+ {
+ PerfClassTraceTime vmtimer(perf_sys_class_lookup_time(),
+ ((JavaThread*) THREAD)->get_thread_stat()->perf_timers_addr(),
+ PerfClassTraceTime::CLASS_LOAD);
+ stream = e->open_stream(file_name, CHECK_NULL);
+ }
+
+ if (NULL == stream) {
+ tty->print_cr("Preload Warning: Cannot find %s", class_name);
+ return NULL;
+ }
+
+ assert(stream != NULL, "invariant");
+ stream->set_verify(true);
+
+ ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+ Handle protection_domain;
+
+ InstanceKlass* result = KlassFactory::create_from_stream(stream,
+ name,
+ loader_data,
+ protection_domain,
+ NULL, // host_klass
+ NULL, // cp_patches
+ THREAD);
+
+ if (HAS_PENDING_EXCEPTION) {
+ tty->print_cr("Preload Error: Failed to load %s", class_name);
+ return NULL;
+ }
+ result->set_shared_classpath_index(UNREGISTERED_INDEX);
+ SystemDictionaryShared::set_shared_class_misc_info(result, stream);
+ return result;
+}
+
+struct CachedClassPathEntry {
+ const char* _path;
+ ClassPathEntry* _entry;
+};
+
+static GrowableArray<CachedClassPathEntry>* cached_path_entries = NULL;
+
+ClassPathEntry* ClassLoaderExt::find_classpath_entry_from_cache(const char* path, TRAPS) {
+ // This is called from dump time so it's single threaded and there's no need for a lock.
+ assert(DumpSharedSpaces && UseAppCDS, "this function is only used with -Xshare:dump and -XX:+UseAppCDS");
+ if (cached_path_entries == NULL) {
+ cached_path_entries = new (ResourceObj::C_HEAP, mtClass) GrowableArray<CachedClassPathEntry>(20, /*c heap*/ true);
+ }
+ CachedClassPathEntry ccpe;
+ for (int i=0; i<cached_path_entries->length(); i++) {
+ ccpe = cached_path_entries->at(i);
+ if (strcmp(ccpe._path, path) == 0) {
+ if (i != 0) {
+ // Put recent entries at the beginning to speed up searches.
+ cached_path_entries->remove_at(i);
+ cached_path_entries->insert_before(0, ccpe);
+ }
+ return ccpe._entry;
+ }
+ }
+
+ struct stat st;
+ if (os::stat(path, &st) != 0) {
+ // File or directory not found
+ return NULL;
+ }
+ ClassPathEntry* new_entry = NULL;
+
+ new_entry = create_class_path_entry(path, &st, false, false, CHECK_NULL);
+ if (new_entry == NULL) {
+ return NULL;
+ }
+ ccpe._path = strdup(path);
+ ccpe._entry = new_entry;
+ cached_path_entries->insert_before(0, ccpe);
+ return new_entry;
+}
Klass* ClassLoaderExt::load_one_class(ClassListParser* parser, TRAPS) {
- TempNewSymbol class_name_symbol = SymbolTable::new_symbol(parser->current_class_name(), THREAD);
- guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol.");
- return SystemDictionary::resolve_or_null(class_name_symbol, THREAD);
+ return parser->load_current_class(THREAD);
}
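
As a concrete illustration of what process_jar_manifest() consumes: per the JAR specification, the Class-Path attribute lists space-separated, JAR-relative paths, and long header lines are wrapped at 72 bytes with a leading space on the continuation line, which read_manifest() strips when clean_text is true. A hypothetical manifest fragment (file names invented for illustration):

  Manifest-Version: 1.0
  Class-Path: lib/util.jar lib/oth
   er.jar

After (1) CR/LF normalization and (2) removal of "\n " continuations, the value seen by get_class_path_attr() is "lib/util.jar lib/other.jar"; each space-separated entry is then resolved relative to the directory containing the JAR and appended via ClassLoader::update_class_path_entry_list().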
diff --git a/src/hotspot/share/classfile/classLoaderExt.hpp b/src/hotspot/share/classfile/classLoaderExt.hpp
index 09cb592b0d6..27e9ce25ef5 100644
--- a/src/hotspot/share/classfile/classLoaderExt.hpp
+++ b/src/hotspot/share/classfile/classLoaderExt.hpp
@@ -26,65 +26,152 @@
#define SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
#include "classfile/classLoader.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "oops/instanceKlass.hpp"
-#include "runtime/handles.hpp"
+#include "utilities/macros.hpp"
-class ClassListParser;
+CDS_ONLY(class SharedPathsMiscInfoExt;)
+CDS_ONLY(class ClassListParser;)
class ClassLoaderExt: public ClassLoader { // AllStatic
public:
-
+ enum SomeConstants {
+ max_classpath_index = 0x7fff
+ };
+ // ClassLoaderExt::Context --
+ //
+ // This is used by DumpSharedSpaces only - it enforces the same classloader
+ // delegation model as would be in run-time. I.e.,
+ // + classes defined by the NULL class loader cannot load classes in the PLATFORM or APP paths.
+ // + classes defined by the PLATFORM class loader cannot load classes in the APP paths.
class Context {
+ static Thread* _dump_thread;
+ const char* _class_name;
const char* _file_name;
public:
+ const char* class_name() {
+ return _class_name;
+ }
+ const char* file_name() {
+ return _file_name;
+ }
+
Context(const char* class_name, const char* file_name, TRAPS) {
+ _class_name = class_name;
_file_name = file_name;
+#if INCLUDE_CDS
+ if (!DumpSharedSpaces && !UseSharedSpaces) {
+ // Must not modify _app_paths_start_index if we're not using CDS.
+ assert(_app_paths_start_index == ClassLoaderExt::max_classpath_index, "must be");
+ }
+#endif
}
bool check(const ClassFileStream* stream, const int classpath_index) {
- return true;
+ CDS_ONLY(return ClassLoaderExt::check(this, stream, classpath_index);)
+ NOT_CDS(return true;)
}
bool should_verify(int classpath_index) {
- return false;
+ CDS_ONLY(return (classpath_index >= _app_paths_start_index);)
+ NOT_CDS(return false;)
}
void record_result(Symbol* class_name,
const s2 classpath_index,
- InstanceKlass* result, TRAPS) {
+ InstanceKlass* result,
+ TRAPS) {
#if INCLUDE_CDS
- assert(DumpSharedSpaces, "Sanity");
- oop loader = result->class_loader();
- s2 classloader_type = ClassLoader::BOOT_LOADER;
- if (SystemDictionary::is_system_class_loader(loader)) {
- classloader_type = ClassLoader::APP_LOADER;
- ClassLoaderExt::set_has_app_classes();
- } else if (SystemDictionary::is_platform_class_loader(loader)) {
- classloader_type = ClassLoader::PLATFORM_LOADER;
- ClassLoaderExt::set_has_platform_classes();
- }
- result->set_shared_classpath_index(classpath_index);
- result->set_class_loader_type(classloader_type);
+ ClassLoaderExt::record_result(this, class_name, classpath_index, result, THREAD);
#endif
}
- };
+ ~Context() {
+#if INCLUDE_CDS
+ if (!DumpSharedSpaces && !UseSharedSpaces) {
+ // Must not modify app_paths_start_index if we're not using CDS.
+ assert(_app_paths_start_index == ClassLoaderExt::max_classpath_index, "must be");
+ }
+#endif
+ }
+ }; // end ClassLoaderExt::Context
+
+private:
+#if INCLUDE_CDS
+ static char* get_class_path_attr(const char* jar_path, char* manifest, jint manifest_size);
+ static void setup_app_search_path(); // Only when -Xshare:dump
+ static SharedPathsMiscInfoExt* shared_paths_misc_info() {
+ return (SharedPathsMiscInfoExt*)_shared_paths_misc_info;
+ }
+ static jshort _app_paths_start_index; // index of first app JAR in shared classpath entry table
+ static bool _has_app_classes;
+ static bool _has_platform_classes;
+#endif
+
+public:
+ CDS_ONLY(static void process_jar_manifest(ClassPathEntry* entry, bool check_for_duplicates);)
+
+ // Called by JVMTI code to add boot classpath
static void append_boot_classpath(ClassPathEntry* new_entry) {
+#if INCLUDE_CDS
+ if (UseAppCDS) {
+ warning("UseAppCDS is disabled because bootstrap classpath has been appended");
+ UseAppCDS = false;
+ }
+#endif
ClassLoader::add_to_boot_append_entries(new_entry);
}
- static void setup_search_paths() {}
- static bool is_boot_classpath(int classpath_index) {
- return true;
- }
- static Klass* load_one_class(ClassListParser* parser, TRAPS);
+
+ static void setup_search_paths() NOT_CDS_RETURN;
+
#if INCLUDE_CDS
- static void set_has_app_classes() {}
- static void set_has_platform_classes() {}
+private:
+ static char* read_manifest(ClassPathEntry* entry, jint *manifest_size, bool clean_text, TRAPS);
+ static ClassPathEntry* find_classpath_entry_from_cache(const char* path, TRAPS);
+
+public:
static char* read_manifest(ClassPathEntry* entry, jint *manifest_size, TRAPS) {
- return NULL;
+ // Remove all the new-line continuations (which wrap long lines at 72 characters, see
+ // http://docs.oracle.com/javase/6/docs/technotes/guides/jar/jar.html#JAR%20Manifest), so
+ // that the manifest is easier to parse.
+ return read_manifest(entry, manifest_size, true, THREAD);
+ }
+ static char* read_raw_manifest(ClassPathEntry* entry, jint *manifest_size, TRAPS) {
+ // Do not remove new-line continuations, so we can easily pass it as an argument to
+ // java.util.jar.Manifest.getManifest() at run-time.
+ return read_manifest(entry, manifest_size, false, THREAD);
+ }
+
+ static void finalize_shared_paths_misc_info();
+
+ static jshort app_paths_start_index() { return _app_paths_start_index; }
+
+ static void init_paths_start_index(jshort app_start) {
+ _app_paths_start_index = app_start;
+ }
+
+ static bool is_boot_classpath(int classpath_index) {
+ return classpath_index < _app_paths_start_index;
+ }
+
+ static bool has_platform_or_app_classes() {
+ return _has_app_classes || _has_platform_classes;
+ }
+
+ static bool check(class ClassLoaderExt::Context *context,
+ const ClassFileStream* stream,
+ const int classpath_index);
+
+ static void record_result(class ClassLoaderExt::Context *context,
+ Symbol* class_name,
+ const s2 classpath_index,
+ InstanceKlass* result, TRAPS);
+ static InstanceKlass* load_class(Symbol* h_name, const char* path, TRAPS);
+ static Klass* load_one_class(ClassListParser* parser, TRAPS);
+ static void set_has_app_classes() {
+ _has_app_classes = true;
+ }
+ static void set_has_platform_classes() {
+ _has_platform_classes = true;
}
- static void process_jar_manifest(ClassPathEntry* entry, bool check_for_duplicates) {}
#endif
};
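
For orientation, the dump/run flow that exercises these paths looks roughly like the following. This is a hedged sketch: the flag spellings match those used in this change, but the exact set of required flags (for example whether -XX:+UnlockDiagnosticVMOptions is needed for UseAppCDS) depends on the build, and HelloWorld, hello.classlist, hello.jsa and app.jar are hypothetical names:

  # 1. Dump: parse the class list, load each class, and archive it
  java -Xshare:dump -XX:+UseAppCDS \
       -XX:SharedClassListFile=hello.classlist \
       -XX:SharedArchiveFile=hello.jsa -cp app.jar

  # 2. Run: map the archive and let the app/platform loaders find shared classes
  java -Xshare:on -XX:+UseAppCDS \
       -XX:SharedArchiveFile=hello.jsa -cp app.jar HelloWorld

ClassLoaderExt::setup_app_search_path() records the -cp value during step 1, and SharedPathsMiscInfoExt::check() later verifies at run time that the dump-time class path is a prefix of the run-time class path.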
diff --git a/src/hotspot/share/classfile/klassFactory.hpp b/src/hotspot/share/classfile/klassFactory.hpp
index cb3ed851dd9..c08f8b9a119 100644
--- a/src/hotspot/share/classfile/klassFactory.hpp
+++ b/src/hotspot/share/classfile/klassFactory.hpp
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_CLASSFILE_KLASSFACTORY_HPP
#define SHARE_VM_CLASSFILE_KLASSFACTORY_HPP
-#include "memory/allocation.inline.hpp"
+#include "memory/allocation.hpp"
#include "runtime/handles.hpp"
class ClassFileStream;
diff --git a/src/hotspot/share/classfile/sharedClassUtil.cpp b/src/hotspot/share/classfile/sharedClassUtil.cpp
new file mode 100644
index 00000000000..2080472e2f1
--- /dev/null
+++ b/src/hotspot/share/classfile/sharedClassUtil.cpp
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoader.hpp"
+#include "classfile/classLoaderExt.hpp"
+#include "classfile/dictionary.hpp"
+#include "classfile/javaClasses.hpp"
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/stringTable.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#include "memory/filemap.hpp"
+#include "memory/metadataFactory.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/instanceKlass.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/java.hpp"
+#include "runtime/os.hpp"
+
+class ManifestStream: public ResourceObj {
+ private:
+ u1* _buffer_start; // Buffer bottom
+ u1* _buffer_end; // Buffer top (one past last element)
+ u1* _current; // Current buffer position
+
+ public:
+ // Constructor
+ ManifestStream(u1* buffer, int length) : _buffer_start(buffer),
+ _current(buffer) {
+ _buffer_end = buffer + length;
+ }
+
+ static bool is_attr(u1* attr, const char* name) {
+ return strncmp((const char*)attr, name, strlen(name)) == 0;
+ }
+
+ static char* copy_attr(u1* value, size_t len) {
+ char* buf = NEW_RESOURCE_ARRAY(char, len + 1);
+ strncpy(buf, (char*)value, len);
+ buf[len] = 0;
+ return buf;
+ }
+
+ // The return value indicates if the JAR is signed or not
+ bool check_is_signed() {
+ u1* attr = _current;
+ bool isSigned = false;
+ while (_current < _buffer_end) {
+ if (*_current == '\n') {
+ *_current = '\0';
+ u1* value = (u1*)strchr((char*)attr, ':');
+ if (value != NULL) {
+ assert(*(value+1) == ' ', "Unrecognized format" );
+ if (strstr((char*)attr, "-Digest") != NULL) {
+ isSigned = true;
+ break;
+ }
+ }
+ *_current = '\n'; // restore
+ attr = _current + 1;
+ }
+ _current ++;
+ }
+ return isSigned;
+ }
+};
+
+void SharedPathsMiscInfoExt::print_path(outputStream* out, int type, const char* path) {
+ switch(type) {
+ case APP:
+ ClassLoader::trace_class_path("Expecting -Djava.class.path=", path);
+ break;
+ default:
+ SharedPathsMiscInfo::print_path(out, type, path);
+ }
+}
+
+bool SharedPathsMiscInfoExt::check(jint type, const char* path) {
+
+ switch (type) {
+ case APP:
+ {
+ // Prefix is OK: E.g., dump with -cp foo.jar, but run with -cp foo.jar:bar.jar
+ size_t len = strlen(path);
+ const char *appcp = Arguments::get_appclasspath();
+ assert(appcp != NULL, "NULL app classpath");
+ size_t appcp_len = strlen(appcp);
+ if (appcp_len < len) {
+ return fail("Run time APP classpath is shorter than the one at dump time: ", appcp);
+ }
+ ResourceMark rm;
+ char* tmp_path;
+ if (len == appcp_len) {
+ tmp_path = (char*)appcp;
+ } else {
+ tmp_path = NEW_RESOURCE_ARRAY(char, len + 1);
+ strncpy(tmp_path, appcp, len);
+ tmp_path[len] = 0;
+ }
+ if (os::file_name_strcmp(path, tmp_path) != 0) {
+ return fail("[APP classpath mismatch, actual: -Djava.class.path=", appcp);
+ }
+ if (appcp[len] != '\0' && appcp[len] != os::path_separator()[0]) {
+ return fail("Dump time APP classpath is not a proper prefix of run time APP classpath: ", appcp);
+ }
+ }
+ break;
+ default:
+ return SharedPathsMiscInfo::check(type, path);
+ }
+
+ return true;
+}
+
+void SharedClassUtil::update_shared_classpath(ClassPathEntry *cpe, SharedClassPathEntry* e, TRAPS) {
+ ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+ SharedClassPathEntryExt* ent = (SharedClassPathEntryExt*)e;
+ ResourceMark rm(THREAD);
+ jint manifest_size;
+ bool isSigned;
+
+ if (cpe->is_jar_file()) {
+ char* manifest = ClassLoaderExt::read_manifest(cpe, &manifest_size, CHECK);
+ if (manifest != NULL) {
+ ManifestStream* stream = new ManifestStream((u1*)manifest,
+ manifest_size);
+ isSigned = stream->check_is_signed();
+ if (isSigned) {
+ ent->_is_signed = true;
+ } else {
+ // Copy the manifest into the shared archive
+ manifest = ClassLoaderExt::read_raw_manifest(cpe, &manifest_size, CHECK);
+ Array<u1>* buf = MetadataFactory::new_array<u1>(loader_data,
+ manifest_size,
+ THREAD);
+ char* p = (char*)(buf->data());
+ memcpy(p, manifest, manifest_size);
+ ent->set_manifest(buf);
+ ent->_is_signed = false;
+ }
+ }
+ }
+}
+
+void SharedClassUtil::initialize(TRAPS) {
+ if (UseSharedSpaces) {
+ int size = FileMapInfo::get_number_of_share_classpaths();
+ if (size > 0) {
+ SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
+ if (!DumpSharedSpaces) {
+ FileMapHeaderExt* header = (FileMapHeaderExt*)FileMapInfo::current_info()->header();
+ ClassLoaderExt::init_paths_start_index(header->_app_paths_start_index);
+ }
+ }
+ }
+
+ if (DumpSharedSpaces) {
+ if (SharedArchiveConfigFile) {
+ read_extra_data(SharedArchiveConfigFile, THREAD);
+ }
+ }
+}
+
+void SharedClassUtil::read_extra_data(const char* filename, TRAPS) {
+ HashtableTextDump reader(filename);
+ reader.check_version("VERSION: 1.0");
+
+ while (reader.remain() > 0) {
+ int utf8_length;
+ int prefix_type = reader.scan_prefix(&utf8_length);
+ ResourceMark rm(THREAD);
+ char* utf8_buffer = NEW_RESOURCE_ARRAY(char, utf8_length + 1); // +1 for the NUL appended below in the string case
+ reader.get_utf8(utf8_buffer, utf8_length);
+
+ if (prefix_type == HashtableTextDump::SymbolPrefix) {
+ SymbolTable::new_symbol(utf8_buffer, utf8_length, THREAD);
+ } else{
+ assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
+ utf8_buffer[utf8_length] = '\0';
+ oop s = StringTable::intern(utf8_buffer, THREAD);
+ }
+ }
+}
+
+bool SharedClassUtil::is_classpath_entry_signed(int classpath_index) {
+ assert(classpath_index >= 0, "Sanity");
+ SharedClassPathEntryExt* ent = (SharedClassPathEntryExt*)
+ FileMapInfo::shared_classpath(classpath_index);
+ return ent->_is_signed;
+}
+
+void FileMapHeaderExt::populate(FileMapInfo* mapinfo, size_t alignment) {
+ FileMapInfo::FileMapHeader::populate(mapinfo, alignment);
+
+ ClassLoaderExt::finalize_shared_paths_misc_info();
+ _app_paths_start_index = ClassLoaderExt::app_paths_start_index();
+
+ _verify_local = BytecodeVerificationLocal;
+ _verify_remote = BytecodeVerificationRemote;
+ _has_platform_or_app_classes = ClassLoaderExt::has_platform_or_app_classes();
+}
+
+bool FileMapHeaderExt::validate() {
+ if (UseAppCDS) {
+ const char* prop = Arguments::get_property("java.system.class.loader");
+ if (prop != NULL) {
+ warning("UseAppCDS is disabled because the java.system.class.loader property is specified (value = \"%s\"). "
+ "To enable UseAppCDS, this property must not be set", prop);
+ UseAppCDS = false;
+ }
+ }
+
+ if (!FileMapInfo::FileMapHeader::validate()) {
+ return false;
+ }
+
+ // For backwards compatibility, we don't check the verification setting
+ // if the archive only contains system classes.
+ if (_has_platform_or_app_classes &&
+ ((!_verify_local && BytecodeVerificationLocal) ||
+ (!_verify_remote && BytecodeVerificationRemote))) {
+ FileMapInfo::fail_continue("The shared archive file was created with less restrictive "
+ "verification setting than the current setting.");
+ return false;
+ }
+
+ return true;
+}
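
To make the APP path check above concrete: SharedPathsMiscInfoExt::check() requires that the class path recorded at dump time be a prefix (compared with os::file_name_strcmp) of the run-time class path, ending at a path-separator boundary. With a hypothetical dump-time setting of -cp /app/foo.jar (paths invented for illustration):

  run time -cp /app/foo.jar               -> accepted (identical)
  run time -cp /app/foo.jar:/app/bar.jar  -> accepted (dump-time path is a proper prefix)
  run time -cp /app/bar.jar:/app/foo.jar  -> rejected ("APP classpath mismatch")
  run time -cp /app/foo.jarx              -> rejected (prefix does not end at a separator)

The separator shown is the Unix ':'; on Windows os::path_separator() yields ';'.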
diff --git a/src/hotspot/share/classfile/sharedClassUtil.hpp b/src/hotspot/share/classfile/sharedClassUtil.hpp
index 236087f1871..c3b7f603466 100644
--- a/src/hotspot/share/classfile/sharedClassUtil.hpp
+++ b/src/hotspot/share/classfile/sharedClassUtil.hpp
@@ -27,37 +27,108 @@
#include "classfile/sharedPathsMiscInfo.hpp"
#include "memory/filemap.hpp"
+#include "classfile/classLoaderExt.hpp"
+#include "classfile/dictionary.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#include "oops/klass.hpp"
-class SharedClassUtil : AllStatic {
+class FileMapHeaderExt: public FileMapInfo::FileMapHeader {
public:
+ jshort _app_paths_start_index; // Index of first app classpath entry
+ bool _verify_local; // BytecodeVerificationLocal setting
+ bool _verify_remote; // BytecodeVerificationRemote setting
+ bool _has_platform_or_app_classes; // Archive contains app classes
- static SharedPathsMiscInfo* allocate_shared_paths_misc_info() {
- return new SharedPathsMiscInfo();
+ FileMapHeaderExt() {
+ _has_platform_or_app_classes = true;
+ }
+ virtual void populate(FileMapInfo* mapinfo, size_t alignment);
+ virtual bool validate();
+};
+
+// In addition to SharedPathsMiscInfo, the following information is also stored
+//
+//
+// + The value of Arguments::get_appclasspath() used during dumping.
+//
+class SharedPathsMiscInfoExt : public SharedPathsMiscInfo {
+private:
+ int _app_offset;
+public:
+ enum {
+ APP = 5
+ };
+
+ virtual const char* type_name(int type) {
+ switch (type) {
+ case APP: return "APP";
+ default: return SharedPathsMiscInfo::type_name(type);
+ }
}
- static SharedPathsMiscInfo* allocate_shared_paths_misc_info(char* buf, int size) {
- return new SharedPathsMiscInfo(buf, size);
+ virtual void print_path(outputStream* out, int type, const char* path);
+
+ SharedPathsMiscInfoExt() : SharedPathsMiscInfo() {
+ _app_offset = 0;
+ }
+ SharedPathsMiscInfoExt(char* buf, int size) : SharedPathsMiscInfo(buf, size) {
+ _app_offset = 0;
}
- static FileMapInfo::FileMapHeader* allocate_file_map_header() {
- return new FileMapInfo::FileMapHeader();
+ virtual bool check(jint type, const char* path);
+
+ void add_app_classpath(const char* path) {
+ add_path(path, APP);
}
- static size_t file_map_header_size() {
- return sizeof(FileMapInfo::FileMapHeader);
+ void record_app_offset() {
+ _app_offset = get_used_bytes();
}
-
- static size_t shared_class_path_entry_size() {
- return sizeof(SharedClassPathEntry);
- }
-
- static void update_shared_classpath(ClassPathEntry *cpe,
- SharedClassPathEntry* ent, TRAPS) {}
- static void initialize(TRAPS) {}
-
- inline static bool is_shared_boot_class(Klass* klass) {
- return (klass->_shared_class_path_index >= 0);
+ void pop_app() {
+ _cur_ptr = _buf_start + _app_offset;
+ write_jint(0);
}
};
+class SharedClassPathEntryExt: public SharedClassPathEntry {
+public:
+ // Manifest attributes
+ bool _is_signed;
+ void set_manifest(Array<u1>* manifest) {
+ _manifest = manifest;
+ }
+};
+
+class SharedClassUtil : AllStatic {
+public:
+ static SharedPathsMiscInfo* allocate_shared_paths_misc_info() {
+ return new SharedPathsMiscInfoExt();
+ }
+
+ static SharedPathsMiscInfo* allocate_shared_paths_misc_info(char* buf, int size) {
+ return new SharedPathsMiscInfoExt(buf, size);
+ }
+
+ static FileMapInfo::FileMapHeader* allocate_file_map_header() {
+ return new FileMapHeaderExt();
+ }
+
+ static size_t file_map_header_size() {
+ return sizeof(FileMapHeaderExt);
+ }
+
+ static size_t shared_class_path_entry_size() {
+ return sizeof(SharedClassPathEntryExt);
+ }
+
+ static void update_shared_classpath(ClassPathEntry *cpe, SharedClassPathEntry* ent, TRAPS);
+ static void initialize(TRAPS);
+
+private:
+ static void read_extra_data(const char* filename, TRAPS);
+
+public:
+ static bool is_classpath_entry_signed(int classpath_index);
+};
+
#endif // SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
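
For reference, ManifestStream::check_is_signed() marks the JAR as signed when any manifest attribute name contains "-Digest", which is what jarsigner adds per signed entry. A hypothetical fragment from a signed JAR's manifest (entry name invented, digest value omitted):

  Name: com/example/Foo.class
  SHA-256-Digest: (base64 digest of the entry)

When SharedClassPathEntryExt::_is_signed is set, app classes from that JAR are skipped at dump time (see ClassLoaderExt::check(), which prints "Preload Warning: Skipping ... from signed JAR"), presumably so that classes loaded from the archive cannot bypass signature verification.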
diff --git a/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp b/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp
index 7f9314ea63c..98a76d1455d 100644
--- a/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp
+++ b/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp
@@ -34,6 +34,18 @@
#include "runtime/arguments.hpp"
#include "utilities/ostream.hpp"
+SharedPathsMiscInfo::SharedPathsMiscInfo() {
+ _buf_size = INITIAL_BUF_SIZE;
+ _cur_ptr = _buf_start = NEW_C_HEAP_ARRAY(char, _buf_size, mtClass);
+ _allocated = true;
+}
+
+SharedPathsMiscInfo::~SharedPathsMiscInfo() {
+ if (_allocated) {
+ FREE_C_HEAP_ARRAY(char, _buf_start);
+ }
+}
+
void SharedPathsMiscInfo::add_path(const char* path, int type) {
log_info(class, path)("type=%s ", type_name(type));
ClassLoader::trace_class_path("add misc shared path ", path);
@@ -127,7 +139,8 @@ bool SharedPathsMiscInfo::check() {
bool SharedPathsMiscInfo::check(jint type, const char* path) {
switch (type) {
case BOOT:
- if (os::file_name_strcmp(path, Arguments::get_sysclasspath()) != 0) {
+ // In the future we should perform the check based on the content of the mapped archive.
+ if (UseAppCDS && os::file_name_strcmp(path, Arguments::get_sysclasspath()) != 0) {
return fail("[BOOT classpath mismatch, actual =", Arguments::get_sysclasspath());
}
break;
diff --git a/src/hotspot/share/classfile/sharedPathsMiscInfo.hpp b/src/hotspot/share/classfile/sharedPathsMiscInfo.hpp
index 2099dc24881..e5576156e67 100644
--- a/src/hotspot/share/classfile/sharedPathsMiscInfo.hpp
+++ b/src/hotspot/share/classfile/sharedPathsMiscInfo.hpp
@@ -74,11 +74,7 @@ public:
INITIAL_BUF_SIZE = 128
};
// This constructor is used when creating the misc information (during dump)
- SharedPathsMiscInfo() {
- _buf_size = INITIAL_BUF_SIZE;
- _cur_ptr = _buf_start = NEW_C_HEAP_ARRAY(char, _buf_size, mtClass);
- _allocated = true;
- }
+ SharedPathsMiscInfo();
// This constructor is used when validating the misc info (during run time)
SharedPathsMiscInfo(char *buff, int size) {
_cur_ptr = _buf_start = buff;
@@ -86,11 +82,8 @@ public:
_buf_size = size;
_allocated = false;
}
- ~SharedPathsMiscInfo() {
- if (_allocated) {
- FREE_C_HEAP_ARRAY(char, _buf_start);
- }
- }
+ ~SharedPathsMiscInfo();
+
int get_used_bytes() {
return _cur_ptr - _buf_start;
}
diff --git a/src/hotspot/share/classfile/stringTable.hpp b/src/hotspot/share/classfile/stringTable.hpp
index a08bd225277..77538a53e5c 100644
--- a/src/hotspot/share/classfile/stringTable.hpp
+++ b/src/hotspot/share/classfile/stringTable.hpp
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_CLASSFILE_STRINGTABLE_HPP
#define SHARE_VM_CLASSFILE_STRINGTABLE_HPP
-#include "memory/allocation.inline.hpp"
+#include "memory/allocation.hpp"
#include "utilities/hashtable.hpp"
template <class T, class N> class CompactHashtable;
diff --git a/src/hotspot/share/classfile/symbolTable.hpp b/src/hotspot/share/classfile/symbolTable.hpp
index 931669650b5..bf3c36257da 100644
--- a/src/hotspot/share/classfile/symbolTable.hpp
+++ b/src/hotspot/share/classfile/symbolTable.hpp
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
#define SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
-#include "memory/allocation.inline.hpp"
+#include "memory/allocation.hpp"
#include "oops/symbol.hpp"
#include "utilities/hashtable.hpp"
diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp
index df4a38f0306..f760c758273 100644
--- a/src/hotspot/share/classfile/systemDictionary.cpp
+++ b/src/hotspot/share/classfile/systemDictionary.cpp
@@ -1087,7 +1087,7 @@ InstanceKlass* SystemDictionary::resolve_from_stream(Symbol* class_name,
#if INCLUDE_CDS
ResourceMark rm(THREAD);
if (DumpSharedSpaces && !class_loader.is_null() &&
- !ArgumentsExt::using_AppCDS() && strcmp(class_name->as_C_string(), "Unnamed") != 0) {
+ !UseAppCDS && strcmp(class_name->as_C_string(), "Unnamed") != 0) {
// If AppCDS is not enabled, don't define the class at dump time (except for the "Unnamed"
// class, which is used by MethodHandles).
THROW_MSG_NULL(vmSymbols::java_lang_ClassNotFoundException(), class_name->as_C_string());
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp
new file mode 100644
index 00000000000..692ba891823
--- /dev/null
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp
@@ -0,0 +1,1086 @@
+/*
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classFileStream.hpp"
+#include "classfile/classListParser.hpp"
+#include "classfile/classLoader.hpp"
+#include "classfile/classLoaderData.inline.hpp"
+#include "classfile/classLoaderExt.hpp"
+#include "classfile/compactHashtable.inline.hpp"
+#include "classfile/dictionary.hpp"
+#include "classfile/javaClasses.hpp"
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#include "classfile/verificationType.hpp"
+#include "classfile/vmSymbols.hpp"
+#include "logging/log.hpp"
+#include "memory/allocation.hpp"
+#include "memory/filemap.hpp"
+#include "memory/metadataFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
+#include "memory/oopFactory.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/klass.inline.hpp"
+#include "oops/objArrayOop.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/java.hpp"
+#include "runtime/javaCalls.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "utilities/hashtable.inline.hpp"
+#include "utilities/stringUtils.hpp"
+
+
+objArrayOop SystemDictionaryShared::_shared_protection_domains = NULL;
+objArrayOop SystemDictionaryShared::_shared_jar_urls = NULL;
+objArrayOop SystemDictionaryShared::_shared_jar_manifests = NULL;
+
+static Mutex* SharedDictionary_lock = NULL;
+
+void SystemDictionaryShared::initialize(TRAPS) {
+ if (_java_system_loader != NULL) {
+ SharedDictionary_lock = new Mutex(Mutex::leaf, "SharedDictionary_lock", true);
+
+ // These classes need to be initialized before calling get_shared_jar_manifest(), etc.
+ SystemDictionary::ByteArrayInputStream_klass()->initialize(CHECK);
+ SystemDictionary::File_klass()->initialize(CHECK);
+ SystemDictionary::Jar_Manifest_klass()->initialize(CHECK);
+ SystemDictionary::CodeSource_klass()->initialize(CHECK);
+ }
+}
+
+oop SystemDictionaryShared::shared_protection_domain(int index) {
+ return _shared_protection_domains->obj_at(index);
+}
+
+oop SystemDictionaryShared::shared_jar_url(int index) {
+ return _shared_jar_urls->obj_at(index);
+}
+
+oop SystemDictionaryShared::shared_jar_manifest(int index) {
+ return _shared_jar_manifests->obj_at(index);
+}
+
+
+Handle SystemDictionaryShared::get_shared_jar_manifest(int shared_path_index, TRAPS) {
+ Handle empty;
+ Handle manifest ;
+ if (shared_jar_manifest(shared_path_index) == NULL) {
+ SharedClassPathEntryExt* ent = (SharedClassPathEntryExt*)FileMapInfo::shared_classpath(shared_path_index);
+ long size = ent->manifest_size();
+ if (size <= 0) {
+ return empty; // No manifest - return NULL handle
+ }
+
+ // ByteArrayInputStream bais = new ByteArrayInputStream(buf);
+ InstanceKlass* bais_klass = SystemDictionary::ByteArrayInputStream_klass();
+ Handle bais = bais_klass->allocate_instance_handle(CHECK_(empty));
+ {
+ const char* src = ent->manifest();
+ assert(src != NULL, "No Manifest data");
+ typeArrayOop buf = oopFactory::new_byteArray(size, CHECK_(empty));
+ typeArrayHandle bufhandle(THREAD, buf);
+ char* dst = (char*)(buf->byte_at_addr(0));
+ memcpy(dst, src, (size_t)size);
+
+ JavaValue result(T_VOID);
+ JavaCalls::call_special(&result, bais, bais_klass,
+ vmSymbols::object_initializer_name(),
+ vmSymbols::byte_array_void_signature(),
+ bufhandle, CHECK_(empty));
+ }
+
+ // manifest = new Manifest(bais)
+ InstanceKlass* manifest_klass = SystemDictionary::Jar_Manifest_klass();
+ manifest = manifest_klass->allocate_instance_handle(CHECK_(empty));
+ {
+ JavaValue result(T_VOID);
+ JavaCalls::call_special(&result, manifest, manifest_klass,
+ vmSymbols::object_initializer_name(),
+ vmSymbols::input_stream_void_signature(),
+ bais, CHECK_(empty));
+ }
+ atomic_set_shared_jar_manifest(shared_path_index, manifest());
+ }
+
+ manifest = Handle(THREAD, shared_jar_manifest(shared_path_index));
+ assert(manifest.not_null(), "sanity");
+ return manifest;
+}
+
+Handle SystemDictionaryShared::get_shared_jar_url(int shared_path_index, TRAPS) {
+ Handle url_h;
+ if (shared_jar_url(shared_path_index) == NULL) {
+ JavaValue result(T_OBJECT);
+ const char* path = FileMapInfo::shared_classpath_name(shared_path_index);
+ Handle path_string = java_lang_String::create_from_str(path, CHECK_(url_h));
+ Klass* classLoaders_klass =
+ SystemDictionary::jdk_internal_loader_ClassLoaders_klass();
+ JavaCalls::call_static(&result, classLoaders_klass,
+ vmSymbols::toFileURL_name(),
+ vmSymbols::toFileURL_signature(),
+ path_string, CHECK_(url_h));
+
+ atomic_set_shared_jar_url(shared_path_index, (oop)result.get_jobject());
+ }
+
+ url_h = Handle(THREAD, shared_jar_url(shared_path_index));
+ assert(url_h.not_null(), "sanity");
+ return url_h;
+}
+
+Handle SystemDictionaryShared::get_package_name(Symbol* class_name, TRAPS) {
+ ResourceMark rm(THREAD);
+ Handle pkgname_string;
+ char* pkgname = (char*) ClassLoader::package_from_name((const char*) class_name->as_C_string());
+ if (pkgname != NULL) { // Package prefix found
+ StringUtils::replace_no_expand(pkgname, "/", ".");
+ pkgname_string = java_lang_String::create_from_str(pkgname,
+ CHECK_(pkgname_string));
+ }
+ return pkgname_string;
+}
+
+// Define Package for shared app classes from JAR file and also checks for
+// package sealing (all done in Java code)
+// See http://docs.oracle.com/javase/tutorial/deployment/jar/sealman.html
+void SystemDictionaryShared::define_shared_package(Symbol* class_name,
+ Handle class_loader,
+ Handle manifest,
+ Handle url,
+ TRAPS) {
+ assert(class_loader == _java_system_loader, "unexpected class loader");
+ // get_package_name() returns a NULL handle if the class is in unnamed package
+ Handle pkgname_string = get_package_name(class_name, CHECK);
+ if (pkgname_string.not_null()) {
+ Klass* app_classLoader_klass = SystemDictionary::jdk_internal_loader_ClassLoaders_AppClassLoader_klass();
+ JavaValue result(T_OBJECT);
+ JavaCallArguments args(3);
+ args.set_receiver(class_loader);
+ args.push_oop(pkgname_string);
+ args.push_oop(manifest);
+ args.push_oop(url);
+ JavaCalls::call_virtual(&result, app_classLoader_klass,
+ vmSymbols::defineOrCheckPackage_name(),
+ vmSymbols::defineOrCheckPackage_signature(),
+ &args,
+ CHECK);
+ }
+}
+
+// Define Package for shared app/platform classes from named module
+void SystemDictionaryShared::define_shared_package(Symbol* class_name,
+ Handle class_loader,
+ ModuleEntry* mod_entry,
+ TRAPS) {
+ assert(mod_entry != NULL, "module_entry should not be NULL");
+ Handle module_handle(THREAD, mod_entry->module());
+
+ Handle pkg_name = get_package_name(class_name, CHECK);
+ assert(pkg_name.not_null(), "Package should not be null for class in named module");
+
+ Klass* classLoader_klass;
+ if (SystemDictionary::is_system_class_loader(class_loader())) {
+ classLoader_klass = SystemDictionary::jdk_internal_loader_ClassLoaders_AppClassLoader_klass();
+ } else {
+ assert(SystemDictionary::is_platform_class_loader(class_loader()), "unexpected classloader");
+ classLoader_klass = SystemDictionary::jdk_internal_loader_ClassLoaders_PlatformClassLoader_klass();
+ }
+
+ JavaValue result(T_OBJECT);
+ JavaCallArguments args(2);
+ args.set_receiver(class_loader);
+ args.push_oop(pkg_name);
+ args.push_oop(module_handle);
+ JavaCalls::call_virtual(&result, classLoader_klass,
+ vmSymbols::definePackage_name(),
+ vmSymbols::definePackage_signature(),
+ &args,
+ CHECK);
+}
+
+// Get the ProtectionDomain associated with the CodeSource from the classloader.
+Handle SystemDictionaryShared::get_protection_domain_from_classloader(Handle class_loader,
+ Handle url, TRAPS) {
+ // CodeSource cs = new CodeSource(url, null);
+ InstanceKlass* cs_klass = SystemDictionary::CodeSource_klass();
+ Handle cs = cs_klass->allocate_instance_handle(CHECK_NH);
+ JavaValue void_result(T_VOID);
+ JavaCalls::call_special(&void_result, cs, cs_klass,
+ vmSymbols::object_initializer_name(),
+ vmSymbols::url_code_signer_array_void_signature(),
+ url, Handle(), CHECK_NH);
+
+ // protection_domain = SecureClassLoader.getProtectionDomain(cs);
+ Klass* secureClassLoader_klass = SystemDictionary::SecureClassLoader_klass();
+ JavaValue obj_result(T_OBJECT);
+ JavaCalls::call_virtual(&obj_result, class_loader, secureClassLoader_klass,
+ vmSymbols::getProtectionDomain_name(),
+ vmSymbols::getProtectionDomain_signature(),
+ cs, CHECK_NH);
+ return Handle(THREAD, (oop)obj_result.get_jobject());
+}
+
+// Returns the ProtectionDomain associated with the JAR file identified by the url.
+Handle SystemDictionaryShared::get_shared_protection_domain(Handle class_loader,
+ int shared_path_index,
+ Handle url,
+ TRAPS) {
+ Handle protection_domain;
+ if (shared_protection_domain(shared_path_index) == NULL) {
+ Handle pd = get_protection_domain_from_classloader(class_loader, url, THREAD);
+ atomic_set_shared_protection_domain(shared_path_index, pd());
+ }
+
+ // Acquire from the cache because if another thread beats the current one to
+ // set the shared protection_domain and the atomic_set fails, the current thread
+ // needs to get the updated protection_domain from the cache.
+ protection_domain = Handle(THREAD, shared_protection_domain(shared_path_index));
+ assert(protection_domain.not_null(), "sanity");
+ return protection_domain;
+}
+
+// Returns the ProtectionDomain associated with the moduleEntry.
+Handle SystemDictionaryShared::get_shared_protection_domain(Handle class_loader,
+ ModuleEntry* mod, TRAPS) {
+ ClassLoaderData *loader_data = mod->loader_data();
+ Handle protection_domain;
+ if (mod->shared_protection_domain() == NULL) {
+ Symbol* location = mod->location();
+ if (location != NULL) {
+ Handle url_string = java_lang_String::create_from_symbol(
+ location, CHECK_(protection_domain));
+ JavaValue result(T_OBJECT);
+ Klass* classLoaders_klass =
+ SystemDictionary::jdk_internal_loader_ClassLoaders_klass();
+ JavaCalls::call_static(&result, classLoaders_klass, vmSymbols::toFileURL_name(),
+ vmSymbols::toFileURL_signature(),
+ url_string, CHECK_(protection_domain));
+ Handle url = Handle(THREAD, (oop)result.get_jobject());
+
+ Handle pd = get_protection_domain_from_classloader(class_loader, url, THREAD);
+ mod->set_shared_protection_domain(loader_data, pd);
+ }
+ }
+
+ protection_domain = Handle(THREAD, mod->shared_protection_domain());
+ assert(protection_domain.not_null(), "sanity");
+ return protection_domain;
+}
+
+// Initializes the java.lang.Package and java.security.ProtectionDomain objects associated with
+// the given InstanceKlass.
+// Returns the ProtectionDomain for the InstanceKlass.
+Handle SystemDictionaryShared::init_security_info(Handle class_loader, InstanceKlass* ik, TRAPS) {
+ Handle pd;
+
+ if (ik != NULL) {
+ int index = ik->shared_classpath_index();
+ assert(index >= 0, "Sanity");
+ SharedClassPathEntryExt* ent =
+ (SharedClassPathEntryExt*)FileMapInfo::shared_classpath(index);
+ Symbol* class_name = ik->name();
+
+ if (ent->is_modules_image()) {
+ // For shared app/platform classes originated from the run-time image:
+ // The ProtectionDomains are cached in the corresponding ModuleEntries
+ // for fast access by the VM.
+ ResourceMark rm;
+ ClassLoaderData *loader_data =
+ ClassLoaderData::class_loader_data(class_loader());
+ PackageEntryTable* pkgEntryTable = loader_data->packages();
+ TempNewSymbol pkg_name = InstanceKlass::package_from_name(class_name, CHECK_(pd));
+ if (pkg_name != NULL) {
+ PackageEntry* pkg_entry = pkgEntryTable->lookup_only(pkg_name);
+ if (pkg_entry != NULL) {
+ ModuleEntry* mod_entry = pkg_entry->module();
+ pd = get_shared_protection_domain(class_loader, mod_entry, THREAD);
+ define_shared_package(class_name, class_loader, mod_entry, CHECK_(pd));
+ }
+ }
+ } else {
+ // For shared app/platform classes originated from JAR files on the class path:
+ // Each of the 3 SystemDictionaryShared::_shared_xxx arrays has the same length
+ // as the shared classpath table in the shared archive (see
+ // FileMap::_classpath_entry_table in filemap.hpp for details).
+ //
+ // If a shared InstanceKlass k is loaded from the class path, let
+ //
+ // index = k->shared_classpath_index():
+ //
+ // FileMap::_classpath_entry_table[index] identifies the JAR file that contains k.
+ //
+ // k's protection domain is:
+ //
+ // ProtectionDomain pd = _shared_protection_domains[index];
+ //
+ // and k's Package is initialized using
+ //
+ // manifest = _shared_jar_manifests[index];
+ // url = _shared_jar_urls[index];
+ // define_shared_package(class_name, class_loader, manifest, url, CHECK_(pd));
+ //
+ // Note that if an element of these 3 _shared_xxx arrays is NULL, it will be initialized by
+ // the corresponding SystemDictionaryShared::get_shared_xxx() function.
+ Handle manifest = get_shared_jar_manifest(index, CHECK_(pd));
+ Handle url = get_shared_jar_url(index, CHECK_(pd));
+ define_shared_package(class_name, class_loader, manifest, url, CHECK_(pd));
+ pd = get_shared_protection_domain(class_loader, index, url, CHECK_(pd));
+ }
+ }
+ return pd;
+}
+
+// Currently AppCDS only archives classes from the run-time image, the
+// -Xbootclasspath/a path, and the class path. The following rules need to be
+// revised when AppCDS is changed to archive classes from other code sources
+// in the future, for example the module path (specified by -p).
+//
+// Check if a shared class can be loaded by the specific classloader. Following
+// are the "visible" archived classes for different classloaders.
+//
+// NULL classloader:
+// - see SystemDictionary::is_shared_class_visible()
+// Platform classloader:
+// - Module class from "modules" jimage. ModuleEntry must be defined in the
+// classloader.
+// App Classloader:
+// - Module class from "modules" jimage. ModuleEntry must be defined in the
+// classloader.
+// - Class from -cp. The class must have no PackageEntry defined in any of the
+// boot/platform/app classloader, or must be in the unnamed module defined in the
+// AppClassLoader.
+bool SystemDictionaryShared::is_shared_class_visible_for_classloader(
+ InstanceKlass* ik,
+ Handle class_loader,
+ const char* pkg_string,
+ Symbol* pkg_name,
+ PackageEntry* pkg_entry,
+ ModuleEntry* mod_entry,
+ TRAPS) {
+ assert(class_loader.not_null(), "Class loader should not be NULL");
+ assert(Universe::is_module_initialized(), "Module system is not initialized");
+
+ int path_index = ik->shared_classpath_index();
+ SharedClassPathEntry* ent =
+ (SharedClassPathEntry*)FileMapInfo::shared_classpath(path_index);
+
+ if (SystemDictionary::is_platform_class_loader(class_loader())) {
+ assert(ent != NULL, "shared class for PlatformClassLoader should have valid SharedClassPathEntry");
+ // The PlatformClassLoader can only load archived class originated from the
+ // run-time image. The class' PackageEntry/ModuleEntry must be
+ // defined by the PlatformClassLoader.
+ if (mod_entry != NULL) {
+ // PackageEntry/ModuleEntry is found in the classloader. Check if the
+ // ModuleEntry's location agrees with the archived class' origination.
+ if (ent->is_modules_image() && mod_entry->location()->starts_with("jrt:")) {
+ return true; // Module class from the "modules" jimage
+ }
+ }
+ } else if (SystemDictionary::is_system_class_loader(class_loader())) {
+ assert(ent != NULL, "shared class for system loader should have valid SharedClassPathEntry");
+ if (pkg_string == NULL) {
+ // The archived class is in the unnamed package. Currently, the boot image
+ // does not contain any class in the unnamed package.
+ assert(!ent->is_modules_image(), "Class in the unnamed package must be from the classpath");
+ if (path_index >= ClassLoaderExt::app_paths_start_index()) {
+ return true;
+ }
+ } else {
+ // Check if this is from a PackageEntry/ModuleEntry defined in the AppClassloader.
+ if (pkg_entry == NULL) {
+ // It's not guaranteed that the class is from the classpath if the
+ // PackageEntry cannot be found from the AppClassloader. Need to check
+ // the boot and platform classloader as well.
+ if (get_package_entry(pkg_name, ClassLoaderData::class_loader_data_or_null(SystemDictionary::java_platform_loader())) == NULL &&
+ get_package_entry(pkg_name, ClassLoaderData::the_null_class_loader_data()) == NULL) {
+ // The PackageEntry is not defined in any of the boot/platform/app classloaders.
+          // The archived class must be from the -cp path and not from the run-time image.
+ if (!ent->is_modules_image() && path_index >= ClassLoaderExt::app_paths_start_index()) {
+ return true;
+ }
+ }
+ } else if (mod_entry != NULL) {
+ // The package/module is defined in the AppClassLoader. Currently we only
+ // support archiving application module class from the run-time image.
+ // Packages from the -cp path are in the unnamed_module.
+ if ((ent->is_modules_image() && mod_entry->location()->starts_with("jrt:")) ||
+ (pkg_entry->in_unnamed_module() && path_index >= ClassLoaderExt::app_paths_start_index())) {
+ DEBUG_ONLY( \
+ ClassLoaderData* loader_data = class_loader_data(class_loader); \
+ if (pkg_entry->in_unnamed_module()) \
+ assert(mod_entry == loader_data->unnamed_module(), "the unnamed module is not defined in the classloader");)
+
+ return true;
+ }
+ }
+ }
+ } else {
+ // TEMP: if a shared class can be found by a custom loader, consider it visible now.
+ // FIXME: is this actually correct?
+ return true;
+ }
+ return false;
+}
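+
+// Illustrative example of the rules above (a sketch drawn from this comment, not new logic):
+// an archived class such as a hypothetical com/example/Foo whose shared_classpath_index()
+// points at a -cp JAR, and whose package is not defined in the boot, platform or app loader,
+// is visible to the AppClassLoader (the pkg_entry == NULL branch above), while the same class
+// is never visible to the PlatformClassLoader, which only accepts classes from the run-time image.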
+
+// The following stack shows how this code is reached:
+//
+// [0] SystemDictionaryShared::find_or_load_shared_class()
+// [1] JVM_FindLoadedClass
+// [2] java.lang.ClassLoader.findLoadedClass0()
+// [3] java.lang.ClassLoader.findLoadedClass()
+// [4] java.lang.ClassLoader.loadClass()
+// [5] jdk.internal.loader.ClassLoaders$AppClassLoader.loadClass()
+//
+// Because AppCDS supports only the PlatformClassLoader and AppClassLoader, we make the following
+// assumptions (based on the JDK 8.0 source code):
+//
+// [a] these two loaders use the default implementation of
+// ClassLoader.loadClass(String name, boolean resolve), which
+// [b] calls findLoadedClass(name), immediately followed by parent.loadClass(),
+// immediately followed by findClass(name).
+// [c] If the requested class is a shared class of the current class loader, parent.loadClass()
+// always returns null, and
+// [d] if AppCDS is not enabled, the class would be loaded by findClass(), which decodes it
+//     from a JAR file and then parses it.
+//
+// Given these assumptions, we intercept the findLoadedClass() call to invoke
+// SystemDictionaryShared::find_or_load_shared_class() to load the shared class from
+// the archive. The reasons are:
+//
+// + Because AppCDS is a commercial feature, we want to hide the implementation. There
+// is currently no easy way to hide Java code, so we did it with native code.
+// + Start-up is improved because we avoid decoding the JAR file, and avoid delegating
+// to the parent (since we know the parent will not find this class).
+//
+// NOTE: there are a lot of assumptions about the Java code. If any of that changes, this
+// needs to be redesigned.
+//
+// An alternative is to modify the Java code of AppClassLoader.loadClass().
+//
+InstanceKlass* SystemDictionaryShared::find_or_load_shared_class(
+ Symbol* name, Handle class_loader, TRAPS) {
+ if (DumpSharedSpaces) {
+ return NULL;
+ }
+
+ InstanceKlass* k = NULL;
+ if (shared_dictionary() != NULL &&
+ UseAppCDS && (SystemDictionary::is_system_class_loader(class_loader()) ||
+ SystemDictionary::is_platform_class_loader(class_loader()))) {
+
+ // Fix for 4474172; see evaluation for more details
+ class_loader = Handle(
+ THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
+ ClassLoaderData *loader_data = register_loader(class_loader, CHECK_NULL);
+ Dictionary* dictionary = loader_data->dictionary();
+
+ unsigned int d_hash = dictionary->compute_hash(name);
+
+ bool DoObjectLock = true;
+ if (is_parallelCapable(class_loader)) {
+ DoObjectLock = false;
+ }
+
+ // Make sure we are synchronized on the class loader before we proceed
+ //
+ // Note: currently, find_or_load_shared_class is called only from
+ // JVM_FindLoadedClass and used for PlatformClassLoader and AppClassLoader,
+ // which are parallel-capable loaders, so this lock is NOT taken.
+ Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
+ check_loader_lock_contention(lockObject, THREAD);
+ ObjectLocker ol(lockObject, THREAD, DoObjectLock);
+
+ {
+ MutexLocker mu(SystemDictionary_lock, THREAD);
+ Klass* check = find_class(d_hash, name, dictionary);
+ if (check != NULL) {
+ return InstanceKlass::cast(check);
+ }
+ }
+
+ k = load_shared_class_for_builtin_loader(name, class_loader, THREAD);
+ if (k != NULL) {
+ define_instance_class(k, CHECK_NULL);
+ }
+ }
+
+ return k;
+}
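+
+// A minimal illustrative sketch of how the interception described above is meant to be used
+// (assumed call site, not part of this change): the native implementation behind
+// java.lang.ClassLoader.findLoadedClass0() would try the archive first, e.g.
+//
+//   InstanceKlass* ik = SystemDictionaryShared::find_or_load_shared_class(name, loader, THREAD);
+//   if (ik == NULL) {
+//     // fall back to the regular "find loaded class" lookup in the system dictionary
+//   }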
+
+InstanceKlass* SystemDictionaryShared::load_shared_class_for_builtin_loader(
+ Symbol* class_name, Handle class_loader, TRAPS) {
+ assert(UseAppCDS && shared_dictionary() != NULL, "already checked");
+ Klass* k = shared_dictionary()->find_class_for_builtin_loader(class_name);
+
+ if (k != NULL) {
+ InstanceKlass* ik = InstanceKlass::cast(k);
+ if ((ik->is_shared_app_class() &&
+ SystemDictionary::is_system_class_loader(class_loader())) ||
+ (ik->is_shared_platform_class() &&
+ SystemDictionary::is_platform_class_loader(class_loader()))) {
+ Handle protection_domain =
+ SystemDictionaryShared::init_security_info(class_loader, ik, CHECK_NULL);
+ return load_shared_class(ik, class_loader, protection_domain, THREAD);
+ }
+ }
+
+ return NULL;
+}
+
+void SystemDictionaryShared::oops_do(OopClosure* f) {
+ f->do_oop((oop*)&_shared_protection_domains);
+ f->do_oop((oop*)&_shared_jar_urls);
+ f->do_oop((oop*)&_shared_jar_manifests);
+}
+
+void SystemDictionaryShared::allocate_shared_protection_domain_array(int size, TRAPS) {
+ if (_shared_protection_domains == NULL) {
+ _shared_protection_domains = oopFactory::new_objArray(
+ SystemDictionary::ProtectionDomain_klass(), size, CHECK);
+ }
+}
+
+void SystemDictionaryShared::allocate_shared_jar_url_array(int size, TRAPS) {
+ if (_shared_jar_urls == NULL) {
+ _shared_jar_urls = oopFactory::new_objArray(
+ SystemDictionary::URL_klass(), size, CHECK);
+ }
+}
+
+void SystemDictionaryShared::allocate_shared_jar_manifest_array(int size, TRAPS) {
+ if (_shared_jar_manifests == NULL) {
+ _shared_jar_manifests = oopFactory::new_objArray(
+ SystemDictionary::Jar_Manifest_klass(), size, CHECK);
+ }
+}
+
+void SystemDictionaryShared::allocate_shared_data_arrays(int size, TRAPS) {
+ allocate_shared_protection_domain_array(size, CHECK);
+ allocate_shared_jar_url_array(size, CHECK);
+ allocate_shared_jar_manifest_array(size, CHECK);
+}
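+
+// Illustrative sketch (an assumption about the intended call pattern, not new code): the three
+// arrays are expected to be sized to the shared classpath table described earlier in this file,
+// so a caller would do something like
+//
+//   int num_entries = ...; // length of FileMap::_classpath_entry_table (see filemap.hpp)
+//   SystemDictionaryShared::allocate_shared_data_arrays(num_entries, CHECK);
+//
+// after which _shared_protection_domains, _shared_jar_urls and _shared_jar_manifests are
+// filled in lazily, indexed by each klass' shared_classpath_index().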
+
+
+InstanceKlass* SystemDictionaryShared::lookup_from_stream(const Symbol* class_name,
+ Handle class_loader,
+ Handle protection_domain,
+ const ClassFileStream* cfs,
+ TRAPS) {
+ if (!UseAppCDS || shared_dictionary() == NULL) {
+ return NULL;
+ }
+ if (class_name == NULL) { // don't do this for anonymous classes
+ return NULL;
+ }
+ if (class_loader.is_null() ||
+ SystemDictionary::is_system_class_loader(class_loader()) ||
+ SystemDictionary::is_platform_class_loader(class_loader())) {
+ // This function is called for loading only UNREGISTERED classes.
+ // Do nothing for the BUILTIN loaders.
+ return NULL;
+ }
+
+ ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
+ Klass* k;
+
+ { // UNREGISTERED loader
+ if (!shared_dictionary()->class_exists_for_unregistered_loader(class_name)) {
+ // No classes of this name for unregistered loaders.
+ return NULL;
+ }
+
+ int clsfile_size = cfs->length();
+ int clsfile_crc32 = ClassLoader::crc32(0, (const char*)cfs->buffer(), cfs->length());
+
+ k = shared_dictionary()->find_class_for_unregistered_loader(class_name,
+ clsfile_size, clsfile_crc32);
+ }
+
+ if (k == NULL) { // not archived
+ return NULL;
+ }
+
+ return acquire_class_for_current_thread(InstanceKlass::cast(k), class_loader,
+ protection_domain, THREAD);
+}
+
+InstanceKlass* SystemDictionaryShared::acquire_class_for_current_thread(
+ InstanceKlass *ik,
+ Handle class_loader,
+ Handle protection_domain,
+ TRAPS) {
+ ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
+
+ {
+ MutexLocker mu(SharedDictionary_lock, THREAD);
+ if (ik->class_loader_data() != NULL) {
+ // ik is already loaded (by this loader or by a different loader)
+ // or ik is being loaded by a different thread (by this loader or by a different loader)
+ return NULL;
+ }
+
+ // No other thread has acquired this yet, so give it to *this thread*
+ ik->set_class_loader_data(loader_data);
+ }
+
+ // No longer holding SharedDictionary_lock
+ // No need to lock, as can be held only by a single thread.
+ loader_data->add_class(ik);
+
+ // Load and check super/interfaces, restore unsharable info
+ InstanceKlass* shared_klass = load_shared_class(ik, class_loader, protection_domain, THREAD);
+ if (shared_klass == NULL || HAS_PENDING_EXCEPTION) {
+ // TODO: clean up so it can be used again
+ return NULL;
+ }
+
+ return shared_klass;
+}
+
+bool SystemDictionaryShared::add_non_builtin_klass(Symbol* name, ClassLoaderData* loader_data,
+ InstanceKlass* k,
+ TRAPS) {
+ assert(DumpSharedSpaces, "only when dumping");
+ assert(UseAppCDS && boot_loader_dictionary() != NULL, "must be");
+
+ if (boot_loader_dictionary()->add_non_builtin_klass(name, loader_data, k)) {
+ MutexLocker mu_r(Compile_lock, THREAD); // not really necessary, but add_to_hierarchy asserts this.
+ add_to_hierarchy(k, CHECK_0);
+ return true;
+ }
+ return false;
+}
+
+// This function is called to resolve the super/interfaces of shared classes for
+// non-built-in loaders. E.g., ChildClass in the below example
+// where "super:" (and optionally "interface:") have been specified.
+//
+// java/lang/Object id: 0
+// Interface id: 2 super: 0 source: cust.jar
+// ChildClass id: 4 super: 0 interfaces: 2 source: cust.jar
+Klass* SystemDictionaryShared::dump_time_resolve_super_or_fail(
+ Symbol* child_name, Symbol* class_name, Handle class_loader,
+ Handle protection_domain, bool is_superclass, TRAPS) {
+
+ assert(DumpSharedSpaces, "only when dumping");
+
+ ClassListParser* parser = ClassListParser::instance();
+ if (parser == NULL) {
+ // We're still loading the well-known classes, before the ClassListParser is created.
+ return NULL;
+ }
+ if (child_name->equals(parser->current_class_name())) {
+ // When this function is called, all the numbered super and interface types
+ // must have already been loaded. Hence this function is never recursively called.
+ if (is_superclass) {
+ return parser->lookup_super_for_current_class(class_name);
+ } else {
+ return parser->lookup_interface_for_current_class(class_name);
+ }
+ } else {
+ // The VM is not trying to resolve a super type of parser->current_class_name().
+ // Instead, it's resolving an error class (because parser->current_class_name() has
+ // failed parsing or verification). Don't do anything here.
+ return NULL;
+ }
+}
+
+struct SharedMiscInfo {
+ Klass* _klass;
+ int _clsfile_size;
+ int _clsfile_crc32;
+};
+
+static GrowableArray<SharedMiscInfo>* misc_info_array = NULL;
+
+void SystemDictionaryShared::set_shared_class_misc_info(Klass* k, ClassFileStream* cfs) {
+ assert(DumpSharedSpaces, "only when dumping");
+ int clsfile_size = cfs->length();
+ int clsfile_crc32 = ClassLoader::crc32(0, (const char*)cfs->buffer(), cfs->length());
+
+ if (misc_info_array == NULL) {
+    misc_info_array = new (ResourceObj::C_HEAP, mtClass) GrowableArray<SharedMiscInfo>(20, /*c heap*/ true);
+ }
+
+ SharedMiscInfo misc_info;
+ DEBUG_ONLY({
+    for (int i=0; i<misc_info_array->length(); i++) {
+ misc_info = misc_info_array->at(i);
+ assert(misc_info._klass != k, "cannot call set_shared_class_misc_info twice for the same class");
+ }
+ });
+
+ misc_info._klass = k;
+ misc_info._clsfile_size = clsfile_size;
+ misc_info._clsfile_crc32 = clsfile_crc32;
+
+ misc_info_array->append(misc_info);
+}
+
+void SystemDictionaryShared::init_shared_dictionary_entry(Klass* k, DictionaryEntry* ent) {
+ SharedDictionaryEntry* entry = (SharedDictionaryEntry*)ent;
+ entry->_id = -1;
+ entry->_clsfile_size = -1;
+ entry->_clsfile_crc32 = -1;
+ entry->_verifier_constraints = NULL;
+ entry->_verifier_constraint_flags = NULL;
+
+ if (misc_info_array != NULL) {
+    for (int i=0; i<misc_info_array->length(); i++) {
+ SharedMiscInfo misc_info = misc_info_array->at(i);
+ if (misc_info._klass == k) {
+ entry->_clsfile_size = misc_info._clsfile_size;
+ entry->_clsfile_crc32 = misc_info._clsfile_crc32;
+ misc_info_array->remove_at(i);
+ return;
+ }
+ }
+ }
+}
+
+bool SystemDictionaryShared::add_verification_constraint(Klass* k, Symbol* name,
+ Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object) {
+ assert(DumpSharedSpaces, "called at dump time only");
+
+ // Skip anonymous classes, which are not archived as they are not in
+ // dictionary (see assert_no_anonymoys_classes_in_dictionaries() in
+ // VM_PopulateDumpSharedSpace::doit()).
+ if (k->class_loader_data()->is_anonymous()) {
+ return true; // anonymous classes are not archived, skip
+ }
+
+ SharedDictionaryEntry* entry = ((SharedDictionary*)(k->class_loader_data()->dictionary()))->find_entry_for(k);
+ ResourceMark rm;
+ // Lambda classes are not archived and will be regenerated at runtime.
+ if (entry == NULL && strstr(k->name()->as_C_string(), "Lambda$") != NULL) {
+ return true;
+ }
+ assert(entry != NULL, "class should be in dictionary before being verified");
+ entry->add_verification_constraint(name, from_name, from_field_is_protected,
+ from_is_array, from_is_object);
+ if (entry->is_builtin()) {
+ // For builtin class loaders, we can try to complete the verification check at dump time,
+ // because we can resolve all the constraint classes.
+ return false;
+ } else {
+ // For non-builtin class loaders, we cannot complete the verification check at dump time,
+ // because at dump time we don't know how to resolve classes for such loaders.
+ return true;
+ }
+}
+
+void SystemDictionaryShared::finalize_verification_constraints() {
+ boot_loader_dictionary()->finalize_verification_constraints();
+}
+
+void SystemDictionaryShared::check_verification_constraints(InstanceKlass* klass,
+ TRAPS) {
+ assert(!DumpSharedSpaces && UseSharedSpaces, "called at run time with CDS enabled only");
+ SharedDictionaryEntry* entry = shared_dictionary()->find_entry_for(klass);
+ assert(entry != NULL, "call this only for shared classes");
+ entry->check_verification_constraints(klass, THREAD);
+}
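+
+// Putting the pieces together, a sketch of the intended constraint flow (using only the
+// functions above; the exact call sites are assumptions):
+//
+//   // dump time, while verifying a class that will be archived:
+//   SystemDictionaryShared::add_verification_constraint(k, name, from_name, ...);
+//   // dump time, just before writing the archive:
+//   SystemDictionaryShared::finalize_verification_constraints();
+//   // run time, when the archived class is loaded again:
+//   SystemDictionaryShared::check_verification_constraints(klass, CHECK);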
+
+SharedDictionaryEntry* SharedDictionary::find_entry_for(Klass* klass) {
+ Symbol* class_name = klass->name();
+ unsigned int hash = compute_hash(class_name);
+ int index = hash_to_index(hash);
+
+ for (SharedDictionaryEntry* entry = bucket(index);
+ entry != NULL;
+ entry = entry->next()) {
+ if (entry->hash() == hash && entry->literal() == klass) {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+void SharedDictionary::finalize_verification_constraints() {
+ int bytes = 0, count = 0;
+ for (int index = 0; index < table_size(); index++) {
+ for (SharedDictionaryEntry *probe = bucket(index);
+ probe != NULL;
+ probe = probe->next()) {
+ int n = probe->finalize_verification_constraints();
+ if (n > 0) {
+ bytes += n;
+ count ++;
+ }
+ }
+ }
+ if (log_is_enabled(Info, cds, verification)) {
+ double avg = 0;
+ if (count > 0) {
+ avg = double(bytes) / double(count);
+ }
+ log_info(cds, verification)("Recorded verification constraints for %d classes = %d bytes (avg = %.2f bytes) ", count, bytes, avg);
+ }
+}
+
+void SharedDictionaryEntry::add_verification_constraint(Symbol* name,
+ Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object) {
+ if (_verifier_constraints == NULL) {
+    _verifier_constraints = new(ResourceObj::C_HEAP, mtClass) GrowableArray<Symbol*>(8, true, mtClass);
+ }
+ if (_verifier_constraint_flags == NULL) {
+    _verifier_constraint_flags = new(ResourceObj::C_HEAP, mtClass) GrowableArray<char>(4, true, mtClass);
+ }
+  GrowableArray<Symbol*>* vc_array = (GrowableArray<Symbol*>*)_verifier_constraints;
+  for (int i=0; i<vc_array->length(); i+= 2) {
+ if (name == vc_array->at(i) &&
+ from_name == vc_array->at(i+1)) {
+ return;
+ }
+ }
+ vc_array->append(name);
+ vc_array->append(from_name);
+
+  GrowableArray<char>* vcflags_array = (GrowableArray<char>*)_verifier_constraint_flags;
+ char c = 0;
+ c |= from_field_is_protected ? FROM_FIELD_IS_PROTECTED : 0;
+ c |= from_is_array ? FROM_IS_ARRAY : 0;
+ c |= from_is_object ? FROM_IS_OBJECT : 0;
+ vcflags_array->append(c);
+
+ if (log_is_enabled(Trace, cds, verification)) {
+ ResourceMark rm;
+ log_trace(cds, verification)("add_verification_constraint: %s: %s must be subclass of %s",
+ instance_klass()->external_name(), from_name->as_klass_external_name(),
+ name->as_klass_external_name());
+ }
+}
+
+int SharedDictionaryEntry::finalize_verification_constraints() {
+ assert(DumpSharedSpaces, "called at dump time only");
+ Thread* THREAD = Thread::current();
+ ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+  GrowableArray<Symbol*>* vc_array = (GrowableArray<Symbol*>*)_verifier_constraints;
+  GrowableArray<char>* vcflags_array = (GrowableArray<char>*)_verifier_constraint_flags;
+
+ if (vc_array != NULL) {
+ if (log_is_enabled(Trace, cds, verification)) {
+ ResourceMark rm;
+ log_trace(cds, verification)("finalize_verification_constraint: %s",
+ literal()->external_name());
+ }
+
+ // Copy the constraints from C_HEAP-alloced GrowableArrays to Metaspace-alloced
+ // Arrays
+ int size = 0;
+ {
+ // FIXME: change this to be done after relocation, so we can use symbol offset??
+ int length = vc_array->length();
+      Array<Symbol*>* out = MetadataFactory::new_array<Symbol*>(loader_data, length, 0, THREAD);
+ assert(out != NULL, "Dump time allocation failure would have aborted VM");
+      for (int i = 0; i < length; i++) {
+        out->at_put(i, vc_array->at(i));
+ }
+ _verifier_constraints = out;
+ size += out->size() * BytesPerWord;
+ delete vc_array;
+ }
+ {
+ int length = vcflags_array->length();
+      Array<char>* out = MetadataFactory::new_array<char>(loader_data, length, 0, THREAD);
+ assert(out != NULL, "Dump time allocation failure would have aborted VM");
+      for (int i = 0; i < length; i++) {
+        out->at_put(i, vcflags_array->at(i));
+ }
+ _verifier_constraint_flags = out;
+ size += out->size() * BytesPerWord;
+ delete vcflags_array;
+ }
+
+ return size;
+ }
+ return 0;
+}
+
+void SharedDictionaryEntry::check_verification_constraints(InstanceKlass* klass, TRAPS) {
+  Array<Symbol*>* vc_array = (Array<Symbol*>*)_verifier_constraints;
+  Array<char>* vcflags_array = (Array<char>*)_verifier_constraint_flags;
+
+ if (vc_array != NULL) {
+ int length = vc_array->length();
+    for (int i = 0; i < length; i += 2) {
+      Symbol* name      = vc_array->at(i);
+ Symbol* from_name = vc_array->at(i+1);
+ char c = vcflags_array->at(i/2);
+
+ bool from_field_is_protected = (c & FROM_FIELD_IS_PROTECTED) ? true : false;
+ bool from_is_array = (c & FROM_IS_ARRAY) ? true : false;
+ bool from_is_object = (c & FROM_IS_OBJECT) ? true : false;
+
+ bool ok = VerificationType::resolve_and_check_assignability(klass, name,
+ from_name, from_field_is_protected, from_is_array, from_is_object, CHECK);
+ if (!ok) {
+ ResourceMark rm(THREAD);
+ stringStream ss;
+
+ ss.print_cr("Bad type on operand stack");
+ ss.print_cr("Exception Details:");
+ ss.print_cr(" Location:\n %s", klass->name()->as_C_string());
+ ss.print_cr(" Reason:\n Type '%s' is not assignable to '%s'",
+ from_name->as_quoted_ascii(), name->as_quoted_ascii());
+ THROW_MSG(vmSymbols::java_lang_VerifyError(), ss.as_string());
+ }
+ }
+ }
+}
+
+void SharedDictionaryEntry::metaspace_pointers_do(MetaspaceClosure* it) {
+  it->push((Array<Symbol*>**)&_verifier_constraints);
+  it->push((Array<char>**)&_verifier_constraint_flags);
+}
+
+bool SharedDictionary::add_non_builtin_klass(const Symbol* class_name,
+ ClassLoaderData* loader_data,
+ InstanceKlass* klass) {
+
+ assert(DumpSharedSpaces, "supported only when dumping");
+ assert(klass != NULL, "adding NULL klass");
+ assert(klass->name() == class_name, "sanity check on name");
+ assert(klass->shared_classpath_index() < 0,
+ "the shared classpath index should not be set for shared class loaded by the custom loaders");
+
+ // Add an entry for a non-builtin class.
+ // For a shared class for custom class loaders, SystemDictionary::resolve_or_null will
+ // not find this class, because is_builtin() is false.
+ unsigned int hash = compute_hash(class_name);
+ int index = hash_to_index(hash);
+
+ for (SharedDictionaryEntry* entry = bucket(index);
+ entry != NULL;
+ entry = entry->next()) {
+ if (entry->hash() == hash) {
+ Klass* klass = (Klass*)entry->literal();
+ if (klass->name() == class_name && klass->class_loader_data() == loader_data) {
+ // There is already a class defined with the same name
+ return false;
+ }
+ }
+ }
+
+ assert(Dictionary::entry_size() >= sizeof(SharedDictionaryEntry), "must be big enough");
+ SharedDictionaryEntry* entry = (SharedDictionaryEntry*)new_entry(hash, klass);
+ add_entry(index, entry);
+
+ assert(entry->is_unregistered(), "sanity");
+ assert(!entry->is_builtin(), "sanity");
+ return true;
+}
+
+
+//-----------------
+// SharedDictionary
+//-----------------
+
+
+Klass* SharedDictionary::find_class_for_builtin_loader(const Symbol* name) const {
+ SharedDictionaryEntry* entry = get_entry_for_builtin_loader(name);
+ return entry != NULL ? entry->instance_klass() : (Klass*)NULL;
+}
+
+Klass* SharedDictionary::find_class_for_unregistered_loader(const Symbol* name,
+ int clsfile_size,
+ int clsfile_crc32) const {
+
+ const SharedDictionaryEntry* entry = get_entry_for_unregistered_loader(name,
+ clsfile_size,
+ clsfile_crc32);
+ return entry != NULL ? entry->instance_klass() : (Klass*)NULL;
+}
+
+void SharedDictionary::update_entry(Klass* klass, int id) {
+ assert(DumpSharedSpaces, "supported only when dumping");
+ Symbol* class_name = klass->name();
+ unsigned int hash = compute_hash(class_name);
+ int index = hash_to_index(hash);
+
+ for (SharedDictionaryEntry* entry = bucket(index);
+ entry != NULL;
+ entry = entry->next()) {
+ if (entry->hash() == hash && entry->literal() == klass) {
+ entry->_id = id;
+ return;
+ }
+ }
+
+ ShouldNotReachHere();
+}
+
+SharedDictionaryEntry* SharedDictionary::get_entry_for_builtin_loader(const Symbol* class_name) const {
+ assert(!DumpSharedSpaces, "supported only when at runtime");
+ unsigned int hash = compute_hash(class_name);
+ const int index = hash_to_index(hash);
+
+ for (SharedDictionaryEntry* entry = bucket(index);
+ entry != NULL;
+ entry = entry->next()) {
+ if (entry->hash() == hash && entry->equals(class_name)) {
+ if (entry->is_builtin()) {
+ return entry;
+ }
+ }
+ }
+ return NULL;
+}
+
+SharedDictionaryEntry* SharedDictionary::get_entry_for_unregistered_loader(const Symbol* class_name,
+ int clsfile_size,
+ int clsfile_crc32) const {
+ assert(!DumpSharedSpaces, "supported only when at runtime");
+ unsigned int hash = compute_hash(class_name);
+ int index = hash_to_index(hash);
+
+ for (SharedDictionaryEntry* entry = bucket(index);
+ entry != NULL;
+ entry = entry->next()) {
+ if (entry->hash() == hash && entry->equals(class_name)) {
+ if (entry->is_unregistered()) {
+ if (clsfile_size == -1) {
+ // We're called from class_exists_for_unregistered_loader. At run time, we want to
+ // compute the CRC of a ClassFileStream only if there is an UNREGISTERED class
+ // with the matching name.
+ return entry;
+ } else {
+ // We're called from find_class_for_unregistered_loader
+        if (clsfile_size == entry->_clsfile_size && clsfile_crc32 == entry->_clsfile_crc32) {
+ return entry;
+ }
+ }
+
+ // There can be only 1 class with this name for unregistered loaders.
+ return NULL;
+ }
+ }
+ }
+ return NULL;
+}
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.hpp b/src/hotspot/share/classfile/systemDictionaryShared.hpp
index 244e98e5d74..c1b87348a5a 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.hpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp
@@ -25,75 +25,362 @@
#ifndef SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
#define SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
-#include "classfile/systemDictionary.hpp"
+#include "oops/klass.hpp"
#include "classfile/dictionary.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "memory/filemap.hpp"
+
+
+/*===============================================================================
+
+ Handling of the classes in the AppCDS archive
+
+ To ensure safety and to simplify the implementation, archived classes are
+ "segregated" into several types. The following rules describe how they
+ are stored and looked up.
+
+[1] Category of archived classes
+
+   There are two disjoint groups of classes stored in the AppCDS archive. They are
+   categorized by their SharedDictionaryEntry::loader_type()
+
+ BUILTIN: These classes may be defined ONLY by the BOOT/PLATFORM/APP
+ loaders.
+
+ UNREGISTERED: These classes may be defined ONLY by a ClassLoader
+ instance that's not listed above (using fingerprint matching)
+
+[2] How classes from different categories are specified in the classlist:
+
+ Starting from JDK9, each class in the classlist may be specified with
+ these keywords: "id", "super", "interfaces", "loader" and "source".
+
+
+ BUILTIN Only the "id" keyword may be (optionally) specified. All other
+ keywords are forbidden.
+
+ The named class is looked up from the jimage and from
+ Xbootclasspath/a and CLASSPATH.
+
+ UNREGISTERED: The "id", "super", and "source" keywords must all be
+ specified.
+
+ The "interfaces" keyword must be specified if the class implements
+ one or more local interfaces. The "interfaces" keyword must not be
+ specified if the class does not implement local interfaces.
+
+ The named class is looked up from the location specified in the
+ "source" keyword.
+
+ Example classlist:
+
+ # BUILTIN
+ java/lang/Object id: 0
+ java/lang/Cloneable id: 1
+ java/lang/String
+
+ # UNREGISTERED
+ Bar id: 3 super: 0 interfaces: 1 source: /foo.jar
+
+
+[3] Identifying the loader_type of archived classes in the shared dictionary
+
+ Each archived Klass* C is associated with a SharedDictionaryEntry* E
+
+ BUILTIN: (C->shared_classpath_index() >= 0)
+ UNREGISTERED: (C->shared_classpath_index() < 0)
+
+[4] Lookup of archived classes at run time:
+
+ (a) BUILTIN loaders:
+
+      Search the shared dictionary for a BUILTIN class with a matching name.
+
+ (b) UNREGISTERED loaders:
+
+ The search originates with SystemDictionaryShared::lookup_from_stream().
+
+      Search the shared dictionary for a UNREGISTERED class with a matching
+ (name, clsfile_len, clsfile_crc32) tuple.
+
+===============================================================================*/
+#define UNREGISTERED_INDEX -9999
class ClassFileStream;
-class SystemDictionaryShared: public SystemDictionary {
+// Archived classes need extra information not needed by traditionally loaded classes.
+// To keep footprint small, we add these in the dictionary entry instead of the InstanceKlass.
+class SharedDictionaryEntry : public DictionaryEntry {
+
public:
- static void initialize(TRAPS) {}
- static InstanceKlass* find_or_load_shared_class(Symbol* class_name,
- Handle class_loader,
- TRAPS) {
- return NULL;
- }
- static void roots_oops_do(OopClosure* blk) {}
- static void oops_do(OopClosure* f) {}
- static bool is_sharing_possible(ClassLoaderData* loader_data) {
- oop class_loader = loader_data->class_loader();
- return (class_loader == NULL);
- }
- static bool is_shared_class_visible_for_classloader(
- InstanceKlass* ik,
- Handle class_loader,
- const char* pkg_string,
- Symbol* pkg_name,
- PackageEntry* pkg_entry,
- ModuleEntry* mod_entry,
- TRAPS) {
- return false;
+ enum LoaderType {
+ LT_BUILTIN,
+ LT_UNREGISTERED
+ };
+
+ enum {
+ FROM_FIELD_IS_PROTECTED = 1 << 0,
+ FROM_IS_ARRAY = 1 << 1,
+ FROM_IS_OBJECT = 1 << 2
+ };
+
+ int _id;
+ int _clsfile_size;
+ int _clsfile_crc32;
+ void* _verifier_constraints; // FIXME - use a union here to avoid type casting??
+ void* _verifier_constraint_flags;
+
+ // See "Identifying the loader_type of archived classes" comments above.
+ LoaderType loader_type() const {
+ Klass* k = (Klass*)literal();
+
+ if ((k->shared_classpath_index() != UNREGISTERED_INDEX)) {
+ return LT_BUILTIN;
+ } else {
+ return LT_UNREGISTERED;
+ }
}
+ SharedDictionaryEntry* next() {
+ return (SharedDictionaryEntry*)(DictionaryEntry::next());
+ }
+
+ bool is_builtin() const {
+ return loader_type() == LT_BUILTIN;
+ }
+ bool is_unregistered() const {
+ return loader_type() == LT_UNREGISTERED;
+ }
+
+ void add_verification_constraint(Symbol* name,
+ Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object);
+ int finalize_verification_constraints();
+ void check_verification_constraints(InstanceKlass* klass, TRAPS);
+ void metaspace_pointers_do(MetaspaceClosure* it) NOT_CDS_RETURN;
+};
+
+class SharedDictionary : public Dictionary {
+ SharedDictionaryEntry* get_entry_for_builtin_loader(const Symbol* name) const;
+ SharedDictionaryEntry* get_entry_for_unregistered_loader(const Symbol* name,
+ int clsfile_size,
+ int clsfile_crc32) const;
+
+ // Convenience functions
+ SharedDictionaryEntry* bucket(int index) const {
+ return (SharedDictionaryEntry*)(Dictionary::bucket(index));
+ }
+
+public:
+ SharedDictionaryEntry* find_entry_for(Klass* klass);
+ void finalize_verification_constraints();
+
+ bool add_non_builtin_klass(const Symbol* class_name,
+ ClassLoaderData* loader_data,
+ InstanceKlass* obj);
+
+ void update_entry(Klass* klass, int id);
+
+ Klass* find_class_for_builtin_loader(const Symbol* name) const;
+ Klass* find_class_for_unregistered_loader(const Symbol* name,
+ int clsfile_size,
+ int clsfile_crc32) const;
+ bool class_exists_for_unregistered_loader(const Symbol* name) {
+ return (get_entry_for_unregistered_loader(name, -1, -1) != NULL);
+ }
+};
+
+class SystemDictionaryShared: public SystemDictionary {
+private:
+  // These _shared_xxx arrays are used to initialize the java.lang.Package and
+ // java.security.ProtectionDomain objects associated with each shared class.
+ //
+ // See SystemDictionaryShared::init_security_info for more info.
+ static objArrayOop _shared_protection_domains;
+ static objArrayOop _shared_jar_urls;
+ static objArrayOop _shared_jar_manifests;
+
+ static InstanceKlass* load_shared_class_for_builtin_loader(
+ Symbol* class_name,
+ Handle class_loader,
+ TRAPS);
+ static Handle get_package_name(Symbol* class_name, TRAPS);
+
+
+ // Package handling:
+ //
+ // 1. For named modules in the runtime image
+ // BOOT classes: Reuses the existing JVM_GetSystemPackage(s) interfaces
+ // to get packages in named modules for shared classes.
+  //                     Packages for non-shared classes in named modules are also
+ // handled using JVM_GetSystemPackage(s).
+ //
+ // APP classes: VM calls ClassLoaders.AppClassLoader::definePackage(String, Module)
+ // to define package for shared app classes from named
+ // modules.
+ //
+ // PLATFORM classes: VM calls ClassLoaders.PlatformClassLoader::definePackage(String, Module)
+ // to define package for shared platform classes from named
+ // modules.
+ //
+ // 2. For unnamed modules
+ // BOOT classes: Reuses the existing JVM_GetSystemPackage(s) interfaces to
+ // get packages for shared boot classes in unnamed modules.
+ //
+ // APP classes: VM calls ClassLoaders.AppClassLoader::defineOrCheckPackage()
+  //                     with the manifest and url from archived data.
+ //
+ // PLATFORM classes: No package is defined.
+ //
+ // The following two define_shared_package() functions are used to define
+ // package for shared APP and PLATFORM classes.
+ static void define_shared_package(Symbol* class_name,
+ Handle class_loader,
+ Handle manifest,
+ Handle url,
+ TRAPS);
+ static void define_shared_package(Symbol* class_name,
+ Handle class_loader,
+ ModuleEntry* mod_entry,
+ TRAPS);
+
+ static Handle get_shared_jar_manifest(int shared_path_index, TRAPS);
+ static Handle get_shared_jar_url(int shared_path_index, TRAPS);
+ static Handle get_protection_domain_from_classloader(Handle class_loader,
+ Handle url, TRAPS);
+ static Handle get_shared_protection_domain(Handle class_loader,
+ int shared_path_index,
+ Handle url,
+ TRAPS);
+ static Handle get_shared_protection_domain(Handle class_loader,
+ ModuleEntry* mod, TRAPS);
+ static Handle init_security_info(Handle class_loader, InstanceKlass* ik, TRAPS);
+
+ static void atomic_set_array_index(objArrayOop array, int index, oop o) {
+ // Benign race condition: array.obj_at(index) may already be filled in.
+ // The important thing here is that all threads pick up the same result.
+ // It doesn't matter which racing thread wins, as long as only one
+ // result is used by all threads, and all future queries.
+ array->atomic_compare_exchange_oop(index, o, NULL);
+ }
+
+ static oop shared_protection_domain(int index);
+ static void atomic_set_shared_protection_domain(int index, oop pd) {
+ atomic_set_array_index(_shared_protection_domains, index, pd);
+ }
+ static void allocate_shared_protection_domain_array(int size, TRAPS);
+ static oop shared_jar_url(int index);
+ static void atomic_set_shared_jar_url(int index, oop url) {
+ atomic_set_array_index(_shared_jar_urls, index, url);
+ }
+ static void allocate_shared_jar_url_array(int size, TRAPS);
+ static oop shared_jar_manifest(int index);
+ static void atomic_set_shared_jar_manifest(int index, oop man) {
+ atomic_set_array_index(_shared_jar_manifests, index, man);
+ }
+ static void allocate_shared_jar_manifest_array(int size, TRAPS);
+ static InstanceKlass* acquire_class_for_current_thread(
+ InstanceKlass *ik,
+ Handle class_loader,
+ Handle protection_domain,
+ TRAPS);
+
+public:
+ static void initialize(TRAPS);
+
+ // Called by PLATFORM/APP loader only
+ static InstanceKlass* find_or_load_shared_class(Symbol* class_name,
+ Handle class_loader,
+ TRAPS);
+
+
+ static void allocate_shared_data_arrays(int size, TRAPS);
+ static void oops_do(OopClosure* f);
+ static void roots_oops_do(OopClosure* f) {
+ oops_do(f);
+ }
+
+ // Check if sharing is supported for the class loader.
+ static bool is_sharing_possible(ClassLoaderData* loader_data) {
+ oop class_loader = loader_data->class_loader();
+ return (class_loader == NULL ||
+ (UseAppCDS && (SystemDictionary::is_system_class_loader(class_loader) ||
+ SystemDictionary::is_platform_class_loader(class_loader)))
+ );
+ }
+ static bool is_shared_class_visible_for_classloader(InstanceKlass* ik,
+ Handle class_loader,
+ const char* pkg_string,
+ Symbol* pkg_name,
+ PackageEntry* pkg_entry,
+ ModuleEntry* mod_entry,
+ TRAPS);
+ static PackageEntry* get_package_entry(Symbol* pkg,
+ ClassLoaderData *loader_data) {
+ if (loader_data != NULL) {
+ PackageEntryTable* pkgEntryTable = loader_data->packages();
+ return pkgEntryTable->lookup_only(pkg);
+ }
+ return NULL;
+ }
+
+ static bool add_non_builtin_klass(Symbol* class_name, ClassLoaderData* loader_data,
+ InstanceKlass* k, TRAPS);
static Klass* dump_time_resolve_super_or_fail(Symbol* child_name,
Symbol* class_name,
Handle class_loader,
Handle protection_domain,
bool is_superclass,
- TRAPS) {
- return NULL;
- }
+ TRAPS);
static size_t dictionary_entry_size() {
- return sizeof(DictionaryEntry);
+ return (DumpSharedSpaces) ? sizeof(SharedDictionaryEntry) : sizeof(DictionaryEntry);
+ }
+ static void init_shared_dictionary_entry(Klass* k, DictionaryEntry* entry) NOT_CDS_RETURN;
+ static bool is_builtin(DictionaryEntry* ent) {
+    // Can't use the virtual function is_builtin because DictionaryEntry doesn't initialize
+    // its vtable, as the entry is not constructed properly.
+ SharedDictionaryEntry* entry = (SharedDictionaryEntry*)ent;
+ return entry->is_builtin();
}
- static void init_shared_dictionary_entry(Klass* k, DictionaryEntry* entry) {}
- static bool is_builtin(DictionaryEntry* entry) { return true; }
+ // For convenient access to the SharedDictionaryEntry's of the archived classes.
+ static SharedDictionary* shared_dictionary() {
+ assert(!DumpSharedSpaces, "not for dumping");
+ return (SharedDictionary*)SystemDictionary::shared_dictionary();
+ }
- static InstanceKlass* lookup_from_stream(Symbol* class_name,
+ static SharedDictionary* boot_loader_dictionary() {
+ return (SharedDictionary*)ClassLoaderData::the_null_class_loader_data()->dictionary();
+ }
+
+ static void update_shared_entry(Klass* klass, int id) {
+ assert(DumpSharedSpaces, "sanity");
+ assert((SharedDictionary*)(klass->class_loader_data()->dictionary()) != NULL, "sanity");
+ ((SharedDictionary*)(klass->class_loader_data()->dictionary()))->update_entry(klass, id);
+ }
+
+ static void set_shared_class_misc_info(Klass* k, ClassFileStream* cfs);
+
+ static InstanceKlass* lookup_from_stream(const Symbol* class_name,
Handle class_loader,
Handle protection_domain,
const ClassFileStream* st,
- TRAPS) {
- return NULL;
- }
-
- // The (non-application) CDS implementation supports only classes in the boot
- // class loader, which ensures that the verification constraints are the same
- // during archive creation time and runtime. Thus we can do the constraint checks
- // entirely during archive creation time.
+ TRAPS);
+ // "verification_constraints" are a set of checks performed by
+ // VerificationType::is_reference_assignable_from when verifying a shared class during
+ // dump time.
+ //
+ // With AppCDS, it is possible to override archived classes by calling
+ // ClassLoader.defineClass() directly. SystemDictionary::load_shared_class() already
+ // ensures that you cannot load a shared class if its super type(s) are changed. However,
+ // we need an additional check to ensure that the verification_constraints did not change
+ // between dump time and runtime.
static bool add_verification_constraint(Klass* k, Symbol* name,
Symbol* from_name, bool from_field_is_protected,
- bool from_is_array, bool from_is_object) {return false;}
- static void finalize_verification_constraints() {}
+ bool from_is_array, bool from_is_object) NOT_CDS_RETURN_(false);
+ static void finalize_verification_constraints() NOT_CDS_RETURN;
static void check_verification_constraints(InstanceKlass* klass,
- TRAPS) {}
-};
-
-class SharedDictionaryEntry : public DictionaryEntry {
-public:
- void metaspace_pointers_do(MetaspaceClosure* it) {}
+ TRAPS) NOT_CDS_RETURN;
};
#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
diff --git a/src/hotspot/share/classfile/systemDictionary_ext.hpp b/src/hotspot/share/classfile/systemDictionary_ext.hpp
index 698805b657d..6d257cd09e9 100644
--- a/src/hotspot/share/classfile/systemDictionary_ext.hpp
+++ b/src/hotspot/share/classfile/systemDictionary_ext.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,17 @@
#ifndef SHARE_VM_CLASSFILE_SYSTEMDICTIONARY_EXT_HPP
#define SHARE_VM_CLASSFILE_SYSTEMDICTIONARY_EXT_HPP
+#if INCLUDE_CDS
+
+#define WK_KLASSES_DO_EXT(do_klass) \
+ /* well-known classes */ \
+ do_klass(jdk_internal_loader_ClassLoaders_klass, jdk_internal_loader_ClassLoaders, Pre ) \
+ /*end*/
+
+#else
+
#define WK_KLASSES_DO_EXT(do_klass)
+#endif // INCLUDE_CDS
+
#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARY_EXT_HPP
diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp
index 73fb9296772..65246e04bae 100644
--- a/src/hotspot/share/classfile/vmSymbols.hpp
+++ b/src/hotspot/share/classfile/vmSymbols.hpp
@@ -26,7 +26,6 @@
#define SHARE_VM_CLASSFILE_VMSYMBOLS_HPP
#include "classfile/moduleEntry.hpp"
-#include "classfile/vmSymbols_ext.hpp"
#include "oops/symbol.hpp"
#include "memory/iterator.hpp"
#include "trace/traceMacros.hpp"
@@ -673,8 +672,12 @@
/* trace signatures */ \
TRACE_TEMPLATES(template) \
\
- /* extensions */ \
- VM_SYMBOLS_DO_EXT(template, do_alias) \
+ /* cds */ \
+ template(jdk_internal_loader_ClassLoaders, "jdk/internal/loader/ClassLoaders") \
+ template(jdk_vm_cds_SharedClassInfo, "jdk/vm/cds/SharedClassInfo") \
+ template(url_void_signature, "(Ljava/net/URL;)V") \
+ template(toFileURL_name, "toFileURL") \
+ template(toFileURL_signature, "(Ljava/lang/String;)Ljava/net/URL;") \
\
/*end*/
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
index b49892b1bf0..cf37f8db29c 100644
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -259,12 +259,12 @@ void CodeCache::initialize_heaps() {
}
// We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
- if(!heap_available(CodeBlobType::MethodProfiled)) {
+ if (!heap_available(CodeBlobType::MethodProfiled)) {
non_profiled_size += profiled_size;
profiled_size = 0;
}
// We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
- if(!heap_available(CodeBlobType::MethodNonProfiled)) {
+ if (!heap_available(CodeBlobType::MethodNonProfiled)) {
non_nmethod_size += non_profiled_size;
non_profiled_size = 0;
}
@@ -282,10 +282,11 @@ void CodeCache::initialize_heaps() {
FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
- // Align CodeHeaps
- size_t alignment = heap_alignment();
+ // If large page support is enabled, align code heaps according to large
+ // page size to make sure that code cache is covered by large pages.
+ const size_t alignment = MAX2(page_size(false), (size_t) os::vm_allocation_granularity());
non_nmethod_size = align_up(non_nmethod_size, alignment);
- profiled_size = align_down(profiled_size, alignment);
+ profiled_size = align_down(profiled_size, alignment);
// Reserve one continuous chunk of memory for CodeHeaps and split it into
// parts for the individual heaps. The memory layout looks like this:
@@ -308,37 +309,29 @@ void CodeCache::initialize_heaps() {
add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
-size_t CodeCache::heap_alignment() {
- // If large page support is enabled, align code heaps according to large
- // page size to make sure that code cache is covered by large pages.
- const size_t page_size = os::can_execute_large_page_memory() ?
- os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) :
- os::vm_page_size();
- return MAX2(page_size, (size_t) os::vm_allocation_granularity());
+size_t CodeCache::page_size(bool aligned) {
+ if (os::can_execute_large_page_memory()) {
+ return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, 8) :
+ os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8);
+ } else {
+ return os::vm_page_size();
+ }
}
ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
- // Determine alignment
- const size_t page_size = os::can_execute_large_page_memory() ?
- MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
- os::page_size_for_region_aligned(size, 8)) :
- os::vm_page_size();
- const size_t granularity = os::vm_allocation_granularity();
- const size_t r_align = MAX2(page_size, granularity);
- const size_t r_size = align_up(size, r_align);
- const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
- MAX2(page_size, granularity);
-
- ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
-
+ // Align and reserve space for code cache
+ const size_t rs_ps = page_size();
+ const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
+ const size_t rs_size = align_up(size, rs_align);
+ ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size());
if (!rs.is_reserved()) {
- vm_exit_during_initialization("Could not reserve enough space for code cache");
+ vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
+ rs_size/K));
}
// Initialize bounds
_low_bound = (address)rs.base();
_high_bound = _low_bound + rs.size();
-
return rs;
}
@@ -415,7 +408,8 @@ void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type)
size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
size_initial = align_up(size_initial, os::vm_page_size());
if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
- vm_exit_during_initialization("Could not reserve enough space for code cache");
+ vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
+ heap->name(), size_initial/K));
}
// Register the CodeHeap
diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp
index 2749acd05b7..e8a098d0279 100644
--- a/src/hotspot/share/code/codeCache.hpp
+++ b/src/hotspot/share/code/codeCache.hpp
@@ -107,7 +107,7 @@ class CodeCache : AllStatic {
static CodeHeap* get_code_heap(int code_blob_type); // Returns the CodeHeap for the given CodeBlobType
// Returns the name of the VM option to set the size of the corresponding CodeHeap
static const char* get_code_heap_flag_name(int code_blob_type);
- static size_t heap_alignment(); // Returns the alignment of the CodeHeaps in bytes
+ static size_t page_size(bool aligned = true); // Returns the page size used by the CodeCache
static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps
// Iteration
diff --git a/src/hotspot/share/code/debugInfo.cpp b/src/hotspot/share/code/debugInfo.cpp
index 76a92387528..c81ef90e9d0 100644
--- a/src/hotspot/share/code/debugInfo.cpp
+++ b/src/hotspot/share/code/debugInfo.cpp
@@ -28,6 +28,8 @@
#include "code/nmethod.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/thread.hpp"
// Constructors
@@ -209,14 +211,24 @@ void ConstantDoubleValue::print_on(outputStream* st) const {
// ConstantOopWriteValue
void ConstantOopWriteValue::write_on(DebugInfoWriteStream* stream) {
- assert(JNIHandles::resolve(value()) == NULL ||
- Universe::heap()->is_in_reserved(JNIHandles::resolve(value())),
- "Should be in heap");
+#ifdef ASSERT
+ {
+ // cannot use ThreadInVMfromNative here since in case of JVMCI compiler,
+ // thread is already in VM state.
+ ThreadInVMfromUnknown tiv;
+ assert(JNIHandles::resolve(value()) == NULL ||
+ Universe::heap()->is_in_reserved(JNIHandles::resolve(value())),
+ "Should be in heap");
+ }
+#endif
stream->write_int(CONSTANT_OOP_CODE);
stream->write_handle(value());
}
void ConstantOopWriteValue::print_on(outputStream* st) const {
+ // using ThreadInVMfromUnknown here since in case of JVMCI compiler,
+ // thread is already in VM state.
+ ThreadInVMfromUnknown tiv;
JNIHandles::resolve(value())->print_value_on(st);
}
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index 82757c0e2ff..83cc30d79b3 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -438,14 +438,14 @@ nmethod* nmethod::new_native_nmethod(const methodHandle& method,
basic_lock_sp_offset, oop_maps);
NOT_PRODUCT(if (nm != NULL) native_nmethod_stats.note_native_nmethod(nm));
}
- // verify nmethod
- debug_only(if (nm) nm->verify();) // might block
if (nm != NULL) {
- nm->log_new_nmethod();
- }
+ // verify nmethod
+ debug_only(nm->verify();) // might block
- nm->make_in_use();
+ nm->log_new_nmethod();
+ nm->make_in_use();
+ }
return nm;
}
diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp
index 4ac6ab9226d..c2862c84a0b 100644
--- a/src/hotspot/share/code/nmethod.hpp
+++ b/src/hotspot/share/code/nmethod.hpp
@@ -124,7 +124,7 @@ class nmethod : public CompiledMethod {
bool _unload_reported;
// Protected by Patching_lock
- volatile char _state; // {not_installed, in_use, not_entrant, zombie, unloaded}
+ volatile signed char _state; // {not_installed, in_use, not_entrant, zombie, unloaded}
#ifdef ASSERT
bool _oops_are_stale; // indicates that it's no longer safe to access oops section
diff --git a/src/hotspot/share/code/stubs.cpp b/src/hotspot/share/code/stubs.cpp
index 56883bc623d..81717b919ba 100644
--- a/src/hotspot/share/code/stubs.cpp
+++ b/src/hotspot/share/code/stubs.cpp
@@ -78,7 +78,6 @@ StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
_queue_begin = 0;
_queue_end = 0;
_number_of_stubs = 0;
- register_queue(this);
}
@@ -205,36 +204,6 @@ void StubQueue::remove_all(){
}
-enum { StubQueueLimit = 10 }; // there are only a few in the world
-static StubQueue* registered_stub_queues[StubQueueLimit];
-
-void StubQueue::register_queue(StubQueue* sq) {
- for (int i = 0; i < StubQueueLimit; i++) {
- if (registered_stub_queues[i] == NULL) {
- registered_stub_queues[i] = sq;
- return;
- }
- }
- ShouldNotReachHere();
-}
-
-
-void StubQueue::queues_do(void f(StubQueue* sq)) {
- for (int i = 0; i < StubQueueLimit; i++) {
- if (registered_stub_queues[i] != NULL) {
- f(registered_stub_queues[i]);
- }
- }
-}
-
-
-void StubQueue::stubs_do(void f(Stub* s)) {
- debug_only(verify();)
- MutexLockerEx lock(_mutex);
- for (Stub* s = first(); s != NULL; s = next(s)) f(s);
-}
-
-
void StubQueue::verify() {
// verify only if initialized
if (_stub_buffer == NULL) return;
diff --git a/src/hotspot/share/code/stubs.hpp b/src/hotspot/share/code/stubs.hpp
index ab84ffaada9..d50e30d1bd3 100644
--- a/src/hotspot/share/code/stubs.hpp
+++ b/src/hotspot/share/code/stubs.hpp
@@ -172,8 +172,6 @@ class StubQueue: public CHeapObj<mtCode> {
void stub_verify(Stub* s) { _stub_interface->verify(s); }
void stub_print(Stub* s) { _stub_interface->print(s); }
- static void register_queue(StubQueue*);
-
public:
StubQueue(StubInterface* stub_interface, int buffer_size, Mutex* lock,
const char* name);
@@ -204,8 +202,6 @@ class StubQueue: public CHeapObj<mtCode> {
void deallocate_unused_tail(); // deallocate the unused tail of the underlying CodeBlob
// only used from TemplateInterpreter::initialize()
// Iteration
- static void queues_do(void f(StubQueue* s)); // call f with each StubQueue
- void stubs_do(void f(Stub* s)); // call f with all stubs
Stub* first() const { return number_of_stubs() > 0 ? stub_at(_queue_begin) : NULL; }
Stub* next(Stub* s) const { int i = index_of(s) + stub_size(s);
// Only wrap around in the non-contiguous case (see stubss.cpp)
@@ -213,9 +209,6 @@ class StubQueue: public CHeapObj<mtCode> {
return (i == _queue_end) ? NULL : stub_at(i);
}
- address stub_code_begin(Stub* s) const { return _stub_interface->code_begin(s); }
- address stub_code_end(Stub* s) const { return _stub_interface->code_end(s); }
-
// Debugging/printing
void verify(); // verifies the stub queue
void print(); // prints information about the stub queue
diff --git a/src/hotspot/share/compiler/compileTask.hpp b/src/hotspot/share/compiler/compileTask.hpp
index c7329c9ba45..3522ee4f10e 100644
--- a/src/hotspot/share/compiler/compileTask.hpp
+++ b/src/hotspot/share/compiler/compileTask.hpp
@@ -25,10 +25,10 @@
#ifndef SHARE_VM_COMPILER_COMPILETASK_HPP
#define SHARE_VM_COMPILER_COMPILETASK_HPP
-#include "code/nmethod.hpp"
#include "ci/ciMethod.hpp"
+#include "code/nmethod.hpp"
#include "compiler/compileLog.hpp"
-#include "memory/allocation.inline.hpp"
+#include "memory/allocation.hpp"
#include "utilities/xmlstream.hpp"
// CompileTask
diff --git a/src/hotspot/share/compiler/methodMatcher.hpp b/src/hotspot/share/compiler/methodMatcher.hpp
index 546af4d5ed7..4adf6587417 100644
--- a/src/hotspot/share/compiler/methodMatcher.hpp
+++ b/src/hotspot/share/compiler/methodMatcher.hpp
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_COMPILER_METHODMATCHER_HPP
#define SHARE_VM_COMPILER_METHODMATCHER_HPP
-#include "memory/allocation.inline.hpp"
+#include "memory/allocation.hpp"
#include "runtime/handles.inline.hpp"
#include "memory/resourceArea.hpp"
diff --git a/src/hotspot/share/compiler/oopMap.hpp b/src/hotspot/share/compiler/oopMap.hpp
index 6c9fe4ee299..853f42a6f0d 100644
--- a/src/hotspot/share/compiler/oopMap.hpp
+++ b/src/hotspot/share/compiler/oopMap.hpp
@@ -28,6 +28,7 @@
#include "code/compressedStream.hpp"
#include "code/vmreg.hpp"
#include "memory/allocation.hpp"
+#include "oops/oopsHierarchy.hpp"
#include "utilities/growableArray.hpp"
// Interface for generating the frame map for compiled code. A frame map
@@ -42,6 +43,7 @@
class frame;
class RegisterMap;
class DerivedPointerEntry;
+class OopClosure;
class OopMapValue: public StackObj {
friend class VMStructs;
diff --git a/src/hotspot/share/gc/cms/allocationStats.cpp b/src/hotspot/share/gc/cms/allocationStats.cpp
index f23fa37c313..3bc7074a46e 100644
--- a/src/hotspot/share/gc/cms/allocationStats.cpp
+++ b/src/hotspot/share/gc/cms/allocationStats.cpp
@@ -30,3 +30,20 @@
// Technically this should be derived from machine speed, and
// ideally it would be dynamically adjusted.
float AllocationStats::_threshold = ((float)CMS_SweepTimerThresholdMillis)/1000;
+
+void AllocationStats::initialize(bool split_birth) {
+ AdaptivePaddedAverage* dummy =
+ new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
+ CMS_FLSPadding);
+ _desired = 0;
+ _coal_desired = 0;
+ _surplus = 0;
+ _bfr_surp = 0;
+ _prev_sweep = 0;
+ _before_sweep = 0;
+ _coal_births = 0;
+ _coal_deaths = 0;
+ _split_births = (split_birth ? 1 : 0);
+ _split_deaths = 0;
+ _returned_bytes = 0;
+}
diff --git a/src/hotspot/share/gc/cms/allocationStats.hpp b/src/hotspot/share/gc/cms/allocationStats.hpp
index 747b2904bed..c98b23f6918 100644
--- a/src/hotspot/share/gc/cms/allocationStats.hpp
+++ b/src/hotspot/share/gc/cms/allocationStats.hpp
@@ -64,22 +64,7 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
ssize_t _split_deaths; // loss from splitting
size_t _returned_bytes; // number of bytes returned to list.
public:
- void initialize(bool split_birth = false) {
- AdaptivePaddedAverage* dummy =
- new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
- CMS_FLSPadding);
- _desired = 0;
- _coal_desired = 0;
- _surplus = 0;
- _bfr_surp = 0;
- _prev_sweep = 0;
- _before_sweep = 0;
- _coal_births = 0;
- _coal_deaths = 0;
- _split_births = (split_birth ? 1 : 0);
- _split_deaths = 0;
- _returned_bytes = 0;
- }
+ void initialize(bool split_birth = false);
AllocationStats() {
initialize();
diff --git a/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp b/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp
index 0333b4f433e..e7160fb622a 100644
--- a/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp
+++ b/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp
@@ -71,6 +71,6 @@ void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
}
void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
- // initialize the policy counters - 2 collectors, 3 generations
- _gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 3);
+ // initialize the policy counters - 2 collectors, 2 generations
+ _gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 2);
}
diff --git a/src/hotspot/share/gc/cms/cmsHeap.cpp b/src/hotspot/share/gc/cms/cmsHeap.cpp
index da344cce24d..8b44d900312 100644
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp
@@ -23,17 +23,48 @@
*/
#include "precompiled.hpp"
+#include "gc/cms/compactibleFreeListSpace.hpp"
+#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/cmsHeap.hpp"
+#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/vmCMSOperations.hpp"
+#include "gc/shared/genMemoryPools.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/workgroup.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/vmThread.hpp"
+#include "services/memoryManager.hpp"
#include "utilities/stack.inline.hpp"
-CMSHeap::CMSHeap(GenCollectorPolicy *policy) : GenCollectedHeap(policy) {
+class CompactibleFreeListSpacePool : public CollectedMemoryPool {
+private:
+ CompactibleFreeListSpace* _space;
+public:
+ CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
+ const char* name,
+ size_t max_size,
+ bool support_usage_threshold) :
+ CollectedMemoryPool(name, space->capacity(), max_size, support_usage_threshold),
+ _space(space) {
+ }
+
+ MemoryUsage get_memory_usage() {
+ size_t max_heap_size = (available_for_allocation() ? max_size() : 0);
+ size_t used = used_in_bytes();
+ size_t committed = _space->capacity();
+
+ return MemoryUsage(initial_size(), used, committed, max_heap_size);
+ }
+
+ size_t used_in_bytes() {
+ return _space->used();
+ }
+};
+
+CMSHeap::CMSHeap(GenCollectorPolicy *policy) :
+ GenCollectedHeap(policy), _eden_pool(NULL), _survivor_pool(NULL), _old_pool(NULL) {
_workers = new WorkGang("GC Thread", ParallelGCThreads,
/* are_GC_task_threads */true,
/* are_ConcurrentGC_threads */false);
@@ -54,6 +85,38 @@ jint CMSHeap::initialize() {
return JNI_OK;
}
+void CMSHeap::initialize_serviceability() {
+ _young_manager = new GCMemoryManager("ParNew", "end of minor GC");
+ _old_manager = new GCMemoryManager("ConcurrentMarkSweep", "end of major GC");
+
+ ParNewGeneration* young = (ParNewGeneration*) young_gen();
+ _eden_pool = new ContiguousSpacePool(young->eden(),
+ "Par Eden Space",
+ young->max_eden_size(),
+ false);
+
+ _survivor_pool = new SurvivorContiguousSpacePool(young,
+ "Par Survivor Space",
+ young->max_survivor_size(),
+ false);
+
+ ConcurrentMarkSweepGeneration* old = (ConcurrentMarkSweepGeneration*) old_gen();
+ _old_pool = new CompactibleFreeListSpacePool(old->cmsSpace(),
+ "CMS Old Gen",
+ old->reserved().byte_size(),
+ true);
+
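+  // Eden and survivor are tracked by the minor GC manager; the major GC manager
+  // below is registered with all three pools, since a CMS collection can change
+  // the usage of each of them.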
+ _young_manager->add_pool(_eden_pool);
+ _young_manager->add_pool(_survivor_pool);
+ young->set_gc_manager(_young_manager);
+
+ _old_manager->add_pool(_eden_pool);
+ _old_manager->add_pool(_survivor_pool);
+ _old_manager->add_pool(_old_pool);
+ old ->set_gc_manager(_old_manager);
+
+}
+
void CMSHeap::check_gen_kinds() {
assert(young_gen()->kind() == Generation::ParNew,
"Wrong youngest generation type");
@@ -183,3 +246,18 @@ void CMSHeap::gc_epilogue(bool full) {
GenCollectedHeap::gc_epilogue(full);
always_do_update_barrier = true;
};
+
+GrowableArray<GCMemoryManager*> CMSHeap::memory_managers() {
+  GrowableArray<GCMemoryManager*> memory_managers(2);
+ memory_managers.append(_young_manager);
+ memory_managers.append(_old_manager);
+ return memory_managers;
+}
+
+GrowableArray<MemoryPool*> CMSHeap::memory_pools() {
+  GrowableArray<MemoryPool*> memory_pools(3);
+ memory_pools.append(_eden_pool);
+ memory_pools.append(_survivor_pool);
+ memory_pools.append(_old_pool);
+ return memory_pools;
+}
diff --git a/src/hotspot/share/gc/cms/cmsHeap.hpp b/src/hotspot/share/gc/cms/cmsHeap.hpp
index bcd30cba859..93079d7cf2b 100644
--- a/src/hotspot/share/gc/cms/cmsHeap.hpp
+++ b/src/hotspot/share/gc/cms/cmsHeap.hpp
@@ -29,9 +29,12 @@
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/genCollectedHeap.hpp"
+#include "utilities/growableArray.hpp"
class CLDClosure;
class GenCollectorPolicy;
+class GCMemoryManager;
+class MemoryPool;
class OopsInGenClosure;
class outputStream;
class StrongRootsScope;
@@ -80,6 +83,9 @@ public:
void safepoint_synchronize_begin();
void safepoint_synchronize_end();
+  virtual GrowableArray<GCMemoryManager*> memory_managers();
+  virtual GrowableArray<MemoryPool*> memory_pools();
+
// If "young_gen_as_roots" is false, younger generations are
// not scanned as roots; in this case, the caller must be arranging to
// scan the younger generations itself. (For example, a generation might
@@ -92,12 +98,19 @@ public:
OopsInGenClosure* root_closure,
CLDClosure* cld_closure);
+ GCMemoryManager* old_manager() const { return _old_manager; }
+
private:
WorkGang* _workers;
+ MemoryPool* _eden_pool;
+ MemoryPool* _survivor_pool;
+ MemoryPool* _old_pool;
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
+ virtual void initialize_serviceability();
+
// Accessor for memory state verification support
NOT_PRODUCT(
virtual size_t skip_header_HeapWords() { return CMSCollector::skip_header_HeapWords(); }
diff --git a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
index c49112957b0..4f8f7836550 100644
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
@@ -8116,42 +8116,42 @@ size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
}
TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
-
+ GCMemoryManager* manager = CMSHeap::heap()->old_manager();
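+  // Every CMS collector phase is reported against the old generation (major GC)
+  // memory manager.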
switch (phase) {
case CMSCollector::InitialMarking:
- initialize(true /* fullGC */ ,
- cause /* cause of the GC */,
- true /* recordGCBeginTime */,
- true /* recordPreGCUsage */,
- false /* recordPeakUsage */,
- false /* recordPostGCusage */,
- true /* recordAccumulatedGCTime */,
- false /* recordGCEndTime */,
- false /* countCollection */ );
+ initialize(manager /* GC manager */ ,
+ cause /* cause of the GC */,
+ true /* recordGCBeginTime */,
+ true /* recordPreGCUsage */,
+ false /* recordPeakUsage */,
+ false /* recordPostGCusage */,
+ true /* recordAccumulatedGCTime */,
+ false /* recordGCEndTime */,
+ false /* countCollection */ );
break;
case CMSCollector::FinalMarking:
- initialize(true /* fullGC */ ,
- cause /* cause of the GC */,
- false /* recordGCBeginTime */,
- false /* recordPreGCUsage */,
- false /* recordPeakUsage */,
- false /* recordPostGCusage */,
- true /* recordAccumulatedGCTime */,
- false /* recordGCEndTime */,
- false /* countCollection */ );
+ initialize(manager /* GC manager */ ,
+ cause /* cause of the GC */,
+ false /* recordGCBeginTime */,
+ false /* recordPreGCUsage */,
+ false /* recordPeakUsage */,
+ false /* recordPostGCusage */,
+ true /* recordAccumulatedGCTime */,
+ false /* recordGCEndTime */,
+ false /* countCollection */ );
break;
case CMSCollector::Sweeping:
- initialize(true /* fullGC */ ,
- cause /* cause of the GC */,
- false /* recordGCBeginTime */,
- false /* recordPreGCUsage */,
- true /* recordPeakUsage */,
- true /* recordPostGCusage */,
- false /* recordAccumulatedGCTime */,
- true /* recordGCEndTime */,
- true /* countCollection */ );
+ initialize(manager /* GC manager */ ,
+ cause /* cause of the GC */,
+ false /* recordGCBeginTime */,
+ false /* recordPreGCUsage */,
+ true /* recordPeakUsage */,
+ true /* recordPostGCusage */,
+ false /* recordAccumulatedGCTime */,
+ true /* recordGCEndTime */,
+ true /* countCollection */ );
break;
default:
diff --git a/src/hotspot/share/gc/cms/gSpaceCounters.cpp b/src/hotspot/share/gc/cms/gSpaceCounters.cpp
index dce1f39f6d5..36776c52945 100644
--- a/src/hotspot/share/gc/cms/gSpaceCounters.cpp
+++ b/src/hotspot/share/gc/cms/gSpaceCounters.cpp
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/cms/gSpaceCounters.hpp"
#include "gc/shared/generation.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "utilities/macros.hpp"
@@ -71,3 +72,7 @@ GSpaceCounters::GSpaceCounters(const char* name, int ordinal, size_t max_size,
_gen->capacity(), CHECK);
}
}
+
+GSpaceCounters::~GSpaceCounters() {
+ if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+}
diff --git a/src/hotspot/share/gc/cms/gSpaceCounters.hpp b/src/hotspot/share/gc/cms/gSpaceCounters.hpp
index 64ae6c60b26..40f181f4c4c 100644
--- a/src/hotspot/share/gc/cms/gSpaceCounters.hpp
+++ b/src/hotspot/share/gc/cms/gSpaceCounters.hpp
@@ -52,9 +52,7 @@ class GSpaceCounters: public CHeapObj<mtGC> {
GSpaceCounters(const char* name, int ordinal, size_t max_size, Generation* g,
GenerationCounters* gc, bool sampled=true);
- ~GSpaceCounters() {
- if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
- }
+ ~GSpaceCounters();
inline void update_capacity() {
_capacity->set_value(_gen->capacity());
diff --git a/src/hotspot/share/gc/g1/dirtyCardQueue.cpp b/src/hotspot/share/gc/g1/dirtyCardQueue.cpp
index 11c2da764e4..ae6a476ff59 100644
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.cpp
+++ b/src/hotspot/share/gc/g1/dirtyCardQueue.cpp
@@ -32,6 +32,7 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
// Closure used for updating remembered sets and recording references that
// point into the collection set while the mutator is running.
@@ -319,7 +320,7 @@ void DirtyCardQueueSet::abandon_logs() {
clear();
// Since abandon is done only at safepoints, we can safely manipulate
// these queues.
- for (JavaThread* t = Threads::first(); t; t = t->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
t->dirty_card_queue().reset();
}
shared_dirty_card_queue()->reset();
@@ -338,7 +339,7 @@ void DirtyCardQueueSet::concatenate_logs() {
int save_max_completed_queue = _max_completed_queue;
_max_completed_queue = max_jint;
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
- for (JavaThread* t = Threads::first(); t; t = t->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
concatenate_log(t->dirty_card_queue());
}
concatenate_log(_shared_dirty_card_queue);
diff --git a/src/hotspot/share/gc/g1/g1Arguments.cpp b/src/hotspot/share/gc/g1/g1Arguments.cpp
index bfe27a1cfae..b9ad5a02951 100644
--- a/src/hotspot/share/gc/g1/g1Arguments.cpp
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp
@@ -26,6 +26,7 @@
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "runtime/globals.hpp"
@@ -92,6 +93,22 @@ void G1Arguments::initialize_flags() {
}
log_trace(gc)("MarkStackSize: %uk MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
+
+#ifdef COMPILER2
+ // Enable loop strip mining to offer better pause time guarantees
+ if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
+ FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
+ }
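+  // LoopStripMiningIter bounds how many iterations of a counted loop run between
+  // safepoint checks once loop strip mining is enabled.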
+ if (UseCountedLoopSafepoints && FLAG_IS_DEFAULT(LoopStripMiningIter)) {
+ FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
+ }
+#endif
+}
+
+bool G1Arguments::parse_verification_type(const char* type) {
+ G1CollectedHeap::heap()->verifier()->parse_verification_type(type);
+ // Always return true because we want to parse all values.
+ return true;
}
CollectedHeap* G1Arguments::create_heap() {
diff --git a/src/hotspot/share/gc/g1/g1Arguments.hpp b/src/hotspot/share/gc/g1/g1Arguments.hpp
index 3f87c638c49..2dd753eb2a5 100644
--- a/src/hotspot/share/gc/g1/g1Arguments.hpp
+++ b/src/hotspot/share/gc/g1/g1Arguments.hpp
@@ -32,6 +32,7 @@ class CollectedHeap;
class G1Arguments : public GCArguments {
public:
virtual void initialize_flags();
+ virtual bool parse_verification_type(const char* type);
virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();
};
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index a62a48eeac5..7245bf55388 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -39,12 +39,12 @@
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1FullCollector.hpp"
-#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1HotCardCache.hpp"
+#include "gc/g1/g1MemoryPool.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1Policy.hpp"
@@ -81,6 +81,7 @@
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -1083,7 +1084,6 @@ void G1CollectedHeap::print_hrm_post_compaction() {
PostCompactionPrinterClosure cl(hr_printer());
heap_region_iterate(&cl);
}
-
}
void G1CollectedHeap::abort_concurrent_cycle() {
@@ -1132,7 +1132,7 @@ void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
assert(used() == recalculate_used(), "Should be equal");
_verifier->verify_region_sets_optional();
- _verifier->verify_before_gc();
+ _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
_verifier->check_bitmaps("Full GC Start");
}
@@ -1173,7 +1173,7 @@ void G1CollectedHeap::verify_after_full_collection() {
check_gc_time_stamps();
_hrm.verify_optional();
_verifier->verify_region_sets_optional();
- _verifier->verify_after_gc();
+ _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
// Clear the previous marking bitmap, if needed for bitmap verification.
// Note we cannot do this when we clear the next marking bitmap in
// G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
@@ -1217,34 +1217,6 @@ void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_tr
#endif
}
-void G1CollectedHeap::do_full_collection_inner(G1FullGCScope* scope) {
- GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
- g1_policy()->record_full_collection_start();
-
- print_heap_before_gc();
- print_heap_regions();
-
- abort_concurrent_cycle();
- verify_before_full_collection(scope->is_explicit_gc());
-
- gc_prologue(true);
- prepare_heap_for_full_collection();
-
- G1FullCollector collector(scope, ref_processor_stw(), concurrent_mark()->next_mark_bitmap(), workers()->active_workers());
- collector.prepare_collection();
- collector.collect();
- collector.complete_collection();
-
- prepare_heap_for_mutators();
-
- g1_policy()->record_full_collection_end();
- gc_epilogue(true);
-
- verify_after_full_collection();
-
- print_heap_after_full_collection(scope->heap_transition());
-}
-
bool G1CollectedHeap::do_full_collection(bool explicit_gc,
bool clear_all_soft_refs) {
assert_at_safepoint(true /* should_be_vm_thread */);
@@ -1257,8 +1229,12 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
const bool do_clear_all_soft_refs = clear_all_soft_refs ||
collector_policy()->should_clear_all_soft_refs();
- G1FullGCScope scope(explicit_gc, do_clear_all_soft_refs);
- do_full_collection_inner(&scope);
+ G1FullCollector collector(this, &_full_gc_memory_manager, explicit_gc, do_clear_all_soft_refs);
+ GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
+
+ collector.prepare_collection();
+ collector.collect();
+ collector.complete_collection();
// Full collection was successfully completed.
return true;
@@ -1550,6 +1526,11 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
CollectedHeap(),
_young_gen_sampling_thread(NULL),
_collector_policy(collector_policy),
+ _memory_manager("G1 Young Generation", "end of minor GC"),
+ _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
+ _eden_pool(NULL),
+ _survivor_pool(NULL),
+ _old_pool(NULL),
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
_g1_policy(create_g1_policy(_gc_timer_stw)),
@@ -1854,6 +1835,20 @@ jint G1CollectedHeap::initialize() {
return JNI_OK;
}
+void G1CollectedHeap::initialize_serviceability() {
+ _eden_pool = new G1EdenPool(this);
+ _survivor_pool = new G1SurvivorPool(this);
+ _old_pool = new G1OldGenPool(this);
+
+ _full_gc_memory_manager.add_pool(_eden_pool);
+ _full_gc_memory_manager.add_pool(_survivor_pool);
+ _full_gc_memory_manager.add_pool(_old_pool);
+
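+  // The minor GC manager only covers eden and survivor; full collections report
+  // against all three pools through the manager above.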
+ _memory_manager.add_pool(_eden_pool);
+ _memory_manager.add_pool(_survivor_pool);
+
+}
+
void G1CollectedHeap::stop() {
// Stop all concurrent threads. We do this to make sure these threads
// do not continue to execute and access resources (e.g. logging)
@@ -1879,6 +1874,7 @@ size_t G1CollectedHeap::conservative_max_heap_alignment() {
}
void G1CollectedHeap::post_initialize() {
+ CollectedHeap::post_initialize();
ref_processing_init();
}
@@ -2653,11 +2649,9 @@ G1CollectedHeap::doConcurrentMark() {
size_t G1CollectedHeap::pending_card_num() {
size_t extra_cards = 0;
- JavaThread *curr = Threads::first();
- while (curr != NULL) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {
DirtyCardQueue& dcq = curr->dirty_card_queue();
extra_cards += dcq.size();
- curr = curr->next();
}
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
size_t buffer_size = dcqs.buffer_size();
@@ -2963,13 +2957,17 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
GCTraceCPUTime tcpu;
+ G1HeapVerifier::G1VerifyType verify_type;
FormatBuffer<> gc_string("Pause ");
if (collector_state()->during_initial_mark_pause()) {
gc_string.append("Initial Mark");
+ verify_type = G1HeapVerifier::G1VerifyInitialMark;
} else if (collector_state()->gcs_are_young()) {
gc_string.append("Young");
+ verify_type = G1HeapVerifier::G1VerifyYoungOnly;
} else {
gc_string.append("Mixed");
+ verify_type = G1HeapVerifier::G1VerifyMixed;
}
GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
@@ -2980,7 +2978,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
- TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
+ TraceMemoryManagerStats tms(&_memory_manager, gc_cause());
// If the secondary_free_list is not empty, append it to the
// free_list. No need to wait for the cleanup operation to finish;
@@ -3010,7 +3008,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
heap_region_iterate(&v_cl);
}
- _verifier->verify_before_gc();
+ _verifier->verify_before_gc(verify_type);
_verifier->check_bitmaps("GC Start");
@@ -3170,7 +3168,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
heap_region_iterate(&v_cl);
}
- _verifier->verify_after_gc();
+ _verifier->verify_after_gc(verify_type);
_verifier->check_bitmaps("GC End");
assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
@@ -5394,3 +5392,18 @@ void G1CollectedHeap::rebuild_strong_code_roots() {
RebuildStrongCodeRootClosure blob_cl(this);
CodeCache::blobs_do(&blob_cl);
}
+
+GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
+  GrowableArray<GCMemoryManager*> memory_managers(2);
+ memory_managers.append(&_memory_manager);
+ memory_managers.append(&_full_gc_memory_manager);
+ return memory_managers;
+}
+
+GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
+  GrowableArray<MemoryPool*> memory_pools(3);
+ memory_pools.append(_eden_pool);
+ memory_pools.append(_survivor_pool);
+ memory_pools.append(_old_pool);
+ return memory_pools;
+}
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index 7467a5493f9..d268e2ef7be 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -42,14 +42,15 @@
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/g1SurvivorRegions.hpp"
#include "gc/g1/g1YCTypes.hpp"
-#include "gc/g1/hSpaceCounters.hpp"
#include "gc/g1/heapRegionManager.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "memory/memRegion.hpp"
+#include "services/memoryManager.hpp"
#include "utilities/stack.hpp"
// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
@@ -64,6 +65,7 @@ class GenerationSpec;
class G1ParScanThreadState;
class G1ParScanThreadStateSet;
class G1ParScanThreadState;
+class MemoryPool;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
@@ -126,6 +128,7 @@ class G1CollectedHeap : public CollectedHeap {
friend class VM_G1IncCollectionPause;
friend class VMStructs;
friend class MutatorAllocRegion;
+ friend class G1FullCollector;
friend class G1GCAllocRegion;
friend class G1HeapVerifier;
@@ -148,6 +151,13 @@ private:
WorkGang* _workers;
G1CollectorPolicy* _collector_policy;
+ GCMemoryManager _memory_manager;
+ GCMemoryManager _full_gc_memory_manager;
+
+ MemoryPool* _eden_pool;
+ MemoryPool* _survivor_pool;
+ MemoryPool* _old_pool;
+
static size_t _humongous_object_threshold_in_words;
// The secondary free list which contains regions that have been
@@ -161,6 +171,8 @@ private:
// It keeps track of the humongous regions.
HeapRegionSet _humongous_set;
+ virtual void initialize_serviceability();
+
void eagerly_reclaim_humongous_regions();
// Start a new incremental collection set for the next pause.
void start_new_collection_set();
@@ -517,7 +529,6 @@ protected:
private:
// Internal helpers used during full GC to split it up to
// increase readability.
- void do_full_collection_inner(G1FullGCScope* scope);
void abort_concurrent_cycle();
void verify_before_full_collection(bool explicit_gc);
void prepare_heap_for_full_collection();
@@ -1006,6 +1017,9 @@ public:
// Adaptive size policy. No such thing for g1.
virtual AdaptiveSizePolicy* size_policy() { return NULL; }
+  virtual GrowableArray<GCMemoryManager*> memory_managers();
+  virtual GrowableArray<MemoryPool*> memory_pools();
+
// The rem set and barrier set.
G1RemSet* g1_rem_set() const { return _g1_rem_set; }
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index 9073616c33f..a63013bdec4 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -1015,9 +1015,7 @@ void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {
SvcGCMarker sgcm(SvcGCMarker::OTHER);
if (VerifyDuringGC) {
- HandleMark hm; // handle scope
- g1h->prepare_for_verify();
- Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
+ g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (before)");
}
g1h->verifier()->check_bitmaps("Remark Start");
@@ -1038,9 +1036,7 @@ void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {
// Verify the heap w.r.t. the previous marking bitmap.
if (VerifyDuringGC) {
- HandleMark hm; // handle scope
- g1h->prepare_for_verify();
- Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
+ g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (overflow)");
}
// Clear the marking state because we will be restarting
@@ -1055,9 +1051,7 @@ void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {
true /* expected_active */);
if (VerifyDuringGC) {
- HandleMark hm; // handle scope
- g1h->prepare_for_verify();
- Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
+ g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "During GC (after)");
}
g1h->verifier()->check_bitmaps("Remark End");
assert(!restart_for_overflow(), "sanity");
@@ -1189,9 +1183,7 @@ void G1ConcurrentMark::cleanup() {
g1h->verifier()->verify_region_sets_optional();
if (VerifyDuringGC) {
- HandleMark hm; // handle scope
- g1h->prepare_for_verify();
- Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
+ g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (before)");
}
g1h->verifier()->check_bitmaps("Cleanup Start");
@@ -1263,9 +1255,7 @@ void G1ConcurrentMark::cleanup() {
Universe::update_heap_info_at_gc();
if (VerifyDuringGC) {
- HandleMark hm; // handle scope
- g1h->prepare_for_verify();
- Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
+ g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (after)");
}
g1h->verifier()->check_bitmaps("Cleanup End");
@@ -1756,28 +1746,24 @@ private:
G1ConcurrentMark* _cm;
public:
void work(uint worker_id) {
- // Since all available tasks are actually started, we should
- // only proceed if we're supposed to be active.
- if (worker_id < _cm->active_tasks()) {
- G1CMTask* task = _cm->task(worker_id);
- task->record_start_time();
- {
- ResourceMark rm;
- HandleMark hm;
+ G1CMTask* task = _cm->task(worker_id);
+ task->record_start_time();
+ {
+ ResourceMark rm;
+ HandleMark hm;
- G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
- Threads::threads_do(&threads_f);
- }
-
- do {
- task->do_marking_step(1000000000.0 /* something very large */,
- true /* do_termination */,
- false /* is_serial */);
- } while (task->has_aborted() && !_cm->has_overflown());
- // If we overflow, then we do not want to restart. We instead
- // want to abort remark and do concurrent marking again.
- task->record_end_time();
+ G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
+ Threads::threads_do(&threads_f);
}
+
+ do {
+ task->do_marking_step(1000000000.0 /* something very large */,
+ true /* do_termination */,
+ false /* is_serial */);
+ } while (task->has_aborted() && !_cm->has_overflown());
+ // If we overflow, then we do not want to restart. We instead
+ // want to abort remark and do concurrent marking again.
+ task->record_end_time();
}
G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp
index 8e39ff06698..31d09e9428b 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp
@@ -26,6 +26,7 @@
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "logging/log.hpp"
+#include "memory/allocation.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
@@ -33,6 +34,107 @@
#include "utilities/pair.hpp"
#include
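+
+// Refinement threads are created lazily: with UseDynamicNumberOfGCThreads only
+// thread 0 is created up front, later threads are created on demand when more
+// refinement work shows up. Outside of initialization a failed creation is
+// tolerated (InjectGCWorkerCreationFailure forces such a failure for testing).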
+G1ConcurrentRefineThread* G1ConcurrentRefineThreadControl::create_refinement_thread(uint worker_id, bool initializing) {
+ G1ConcurrentRefineThread* result = NULL;
+ if (initializing || !InjectGCWorkerCreationFailure) {
+ result = new G1ConcurrentRefineThread(_cr, worker_id);
+ }
+ if (result == NULL || result->osthread() == NULL) {
+ log_warning(gc)("Failed to create refinement thread %u, no more %s",
+ worker_id,
+ result == NULL ? "memory" : "OS threads");
+ }
+ return result;
+}
+
+G1ConcurrentRefineThreadControl::G1ConcurrentRefineThreadControl() :
+ _cr(NULL),
+ _threads(NULL),
+ _num_max_threads(0)
+{
+}
+
+G1ConcurrentRefineThreadControl::~G1ConcurrentRefineThreadControl() {
+ for (uint i = 0; i < _num_max_threads; i++) {
+ G1ConcurrentRefineThread* t = _threads[i];
+ if (t != NULL) {
+ delete t;
+ }
+ }
+ FREE_C_HEAP_ARRAY(G1ConcurrentRefineThread*, _threads);
+}
+
+jint G1ConcurrentRefineThreadControl::initialize(G1ConcurrentRefine* cr, uint num_max_threads) {
+ assert(cr != NULL, "G1ConcurrentRefine must not be NULL");
+ _cr = cr;
+ _num_max_threads = num_max_threads;
+
+ _threads = NEW_C_HEAP_ARRAY_RETURN_NULL(G1ConcurrentRefineThread*, num_max_threads, mtGC);
+ if (_threads == NULL) {
+ vm_shutdown_during_initialization("Could not allocate thread holder array.");
+ return JNI_ENOMEM;
+ }
+
+ for (uint i = 0; i < num_max_threads; i++) {
+ if (UseDynamicNumberOfGCThreads && i != 0 /* Always start first thread. */) {
+ _threads[i] = NULL;
+ } else {
+ _threads[i] = create_refinement_thread(i, true);
+ if (_threads[i] == NULL) {
+ vm_shutdown_during_initialization("Could not allocate refinement threads.");
+ return JNI_ENOMEM;
+ }
+ }
+ }
+ return JNI_OK;
+}
+
+void G1ConcurrentRefineThreadControl::maybe_activate_next(uint cur_worker_id) {
+ assert(cur_worker_id < _num_max_threads,
+ "Activating another thread from %u not allowed since there can be at most %u",
+ cur_worker_id, _num_max_threads);
+ if (cur_worker_id == (_num_max_threads - 1)) {
+ // Already the last thread, there is no more thread to activate.
+ return;
+ }
+
+ uint worker_id = cur_worker_id + 1;
+ G1ConcurrentRefineThread* thread_to_activate = _threads[worker_id];
+ if (thread_to_activate == NULL) {
+ // Still need to create the thread...
+ _threads[worker_id] = create_refinement_thread(worker_id, false);
+ thread_to_activate = _threads[worker_id];
+ }
+ if (thread_to_activate != NULL && !thread_to_activate->is_active()) {
+ thread_to_activate->activate();
+ }
+}
+
+void G1ConcurrentRefineThreadControl::print_on(outputStream* st) const {
+ for (uint i = 0; i < _num_max_threads; ++i) {
+ if (_threads[i] != NULL) {
+ _threads[i]->print_on(st);
+ st->cr();
+ }
+ }
+}
+
+void G1ConcurrentRefineThreadControl::worker_threads_do(ThreadClosure* tc) {
+ for (uint i = 0; i < _num_max_threads; i++) {
+ if (_threads[i] != NULL) {
+ tc->do_thread(_threads[i]);
+ }
+ }
+}
+
+void G1ConcurrentRefineThreadControl::stop() {
+ for (uint i = 0; i < _num_max_threads; i++) {
+ if (_threads[i] != NULL) {
+ _threads[i]->stop();
+ }
+ }
+}
+
// Arbitrary but large limits, to simplify some of the zone calculations.
// The general idea is to allow expressions like
// MIN2(x OP y, max_XXX_zone)
@@ -96,7 +198,7 @@ static Thresholds calc_thresholds(size_t green_zone,
size_t yellow_zone,
uint worker_i) {
double yellow_size = yellow_zone - green_zone;
- double step = yellow_size / G1ConcurrentRefine::thread_num();
+ double step = yellow_size / G1ConcurrentRefine::max_num_threads();
if (worker_i == 0) {
// Potentially activate worker 0 more aggressively, to keep
// available buffers near green_zone value. When yellow_size is
@@ -115,8 +217,7 @@ G1ConcurrentRefine::G1ConcurrentRefine(size_t green_zone,
size_t yellow_zone,
size_t red_zone,
size_t min_yellow_zone_size) :
- _threads(NULL),
- _n_worker_threads(thread_num()),
+ _thread_control(),
_green_zone(green_zone),
_yellow_zone(yellow_zone),
_red_zone(red_zone),
@@ -125,9 +226,13 @@ G1ConcurrentRefine::G1ConcurrentRefine(size_t green_zone,
assert_zone_constraints_gyr(green_zone, yellow_zone, red_zone);
}
+jint G1ConcurrentRefine::initialize() {
+ return _thread_control.initialize(this, max_num_threads());
+}
+
static size_t calc_min_yellow_zone_size() {
size_t step = G1ConcRefinementThresholdStep;
- uint n_workers = G1ConcurrentRefine::thread_num();
+ uint n_workers = G1ConcurrentRefine::max_num_threads();
if ((max_yellow_zone / step) < n_workers) {
return max_yellow_zone;
} else {
@@ -191,77 +296,27 @@ G1ConcurrentRefine* G1ConcurrentRefine::create(jint* ecode) {
return NULL;
}
- cr->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(G1ConcurrentRefineThread*, cr->_n_worker_threads, mtGC);
- if (cr->_threads == NULL) {
- *ecode = JNI_ENOMEM;
- vm_shutdown_during_initialization("Could not allocate an array for G1ConcurrentRefineThread");
- return NULL;
- }
-
- uint worker_id_offset = DirtyCardQueueSet::num_par_ids();
-
- G1ConcurrentRefineThread *next = NULL;
- for (uint i = cr->_n_worker_threads - 1; i != UINT_MAX; i--) {
- Thresholds thresholds = calc_thresholds(green_zone, yellow_zone, i);
- G1ConcurrentRefineThread* t =
- new G1ConcurrentRefineThread(cr,
- next,
- worker_id_offset,
- i,
- activation_level(thresholds),
- deactivation_level(thresholds));
- assert(t != NULL, "Conc refine should have been created");
- if (t->osthread() == NULL) {
- *ecode = JNI_ENOMEM;
- vm_shutdown_during_initialization("Could not create G1ConcurrentRefineThread");
- return NULL;
- }
-
- assert(t->cr() == cr, "Conc refine thread should refer to this");
- cr->_threads[i] = t;
- next = t;
- }
-
- *ecode = JNI_OK;
+ *ecode = cr->initialize();
return cr;
}
void G1ConcurrentRefine::stop() {
- for (uint i = 0; i < _n_worker_threads; i++) {
- _threads[i]->stop();
- }
-}
-
-void G1ConcurrentRefine::update_thread_thresholds() {
- for (uint i = 0; i < _n_worker_threads; i++) {
- Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, i);
- _threads[i]->update_thresholds(activation_level(thresholds),
- deactivation_level(thresholds));
- }
+ _thread_control.stop();
}
G1ConcurrentRefine::~G1ConcurrentRefine() {
- for (uint i = 0; i < _n_worker_threads; i++) {
- delete _threads[i];
- }
- FREE_C_HEAP_ARRAY(G1ConcurrentRefineThread*, _threads);
}
void G1ConcurrentRefine::threads_do(ThreadClosure *tc) {
- for (uint i = 0; i < _n_worker_threads; i++) {
- tc->do_thread(_threads[i]);
- }
+ _thread_control.worker_threads_do(tc);
}
-uint G1ConcurrentRefine::thread_num() {
+uint G1ConcurrentRefine::max_num_threads() {
return G1ConcRefinementThreads;
}
void G1ConcurrentRefine::print_threads_on(outputStream* st) const {
- for (uint i = 0; i < _n_worker_threads; ++i) {
- _threads[i]->print_on(st);
- st->cr();
- }
+ _thread_control.print_on(st);
}
static size_t calc_new_green_zone(size_t green,
@@ -326,16 +381,15 @@ void G1ConcurrentRefine::adjust(double update_rs_time,
if (G1UseAdaptiveConcRefinement) {
update_zones(update_rs_time, update_rs_processed_buffers, goal_ms);
- update_thread_thresholds();
// Change the barrier params
- if (_n_worker_threads == 0) {
+ if (max_num_threads() == 0) {
// Disable dcqs notification when there are no threads to notify.
dcqs.set_process_completed_threshold(INT_MAX);
} else {
// Worker 0 is the primary; wakeup is via dcqs notification.
STATIC_ASSERT(max_yellow_zone <= INT_MAX);
- size_t activate = _threads[0]->activation_threshold();
+ size_t activate = activation_threshold(0);
dcqs.set_process_completed_threshold((int)activate);
}
dcqs.set_max_completed_queue((int)red_zone());
@@ -349,3 +403,42 @@ void G1ConcurrentRefine::adjust(double update_rs_time,
}
dcqs.notify_if_necessary();
}
+
+size_t G1ConcurrentRefine::activation_threshold(uint worker_id) const {
+ Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, worker_id);
+ return activation_level(thresholds);
+}
+
+size_t G1ConcurrentRefine::deactivation_threshold(uint worker_id) const {
+ Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, worker_id);
+ return deactivation_level(thresholds);
+}
+
+uint G1ConcurrentRefine::worker_id_offset() {
+ return DirtyCardQueueSet::num_par_ids();
+}
+
+void G1ConcurrentRefine::maybe_activate_more_threads(uint worker_id, size_t num_cur_buffers) {
+ if (num_cur_buffers > activation_threshold(worker_id + 1)) {
+ _thread_control.maybe_activate_next(worker_id);
+ }
+}
+
+bool G1ConcurrentRefine::do_refinement_step(uint worker_id) {
+ DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+
+ size_t curr_buffer_num = dcqs.completed_buffers_num();
+ // If the number of the buffers falls down into the yellow zone,
+ // that means that the transition period after the evacuation pause has ended.
+ // Since the value written to the DCQS is the same for all threads, there is no
+ // need to synchronize.
+ if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= yellow_zone()) {
+ dcqs.set_completed_queue_padding(0);
+ }
+
+ maybe_activate_more_threads(worker_id, curr_buffer_num);
+
+ // Process the next buffer, if there are enough left.
+ return dcqs.refine_completed_buffer_concurrently(worker_id + worker_id_offset(),
+ deactivation_threshold(worker_id));
+}
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp b/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp
index b64d4e3ee80..52783881b9e 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp
@@ -30,30 +30,63 @@
// Forward decl
class CardTableEntryClosure;
+class G1ConcurrentRefine;
class G1ConcurrentRefineThread;
class outputStream;
class ThreadClosure;
-class G1ConcurrentRefine : public CHeapObj<mtGC> {
+// Helper class for refinement thread management. Used to start, stop and
+// iterate over them.
+class G1ConcurrentRefineThreadControl VALUE_OBJ_CLASS_SPEC {
+ G1ConcurrentRefine* _cr;
+
G1ConcurrentRefineThread** _threads;
- uint _n_worker_threads;
- /*
- * The value of the update buffer queue length falls into one of 3 zones:
- * green, yellow, red. If the value is in [0, green) nothing is
- * done, the buffers are left unprocessed to enable the caching effect of the
- * dirtied cards. In the yellow zone [green, yellow) the concurrent refinement
- * threads are gradually activated. In [yellow, red) all threads are
- * running. If the length becomes red (max queue length) the mutators start
- * processing the buffers.
- *
- * There are some interesting cases (when G1UseAdaptiveConcRefinement
- * is turned off):
- * 1) green = yellow = red = 0. In this case the mutator will process all
- * buffers. Except for those that are created by the deferred updates
- * machinery during a collection.
- * 2) green = 0. Means no caching. Can be a good way to minimize the
- * amount of time spent updating rsets during a collection.
- */
+ uint _num_max_threads;
+
+ // Create the refinement thread for the given worker id.
+ // If initializing is true, ignore InjectGCWorkerCreationFailure.
+ G1ConcurrentRefineThread* create_refinement_thread(uint worker_id, bool initializing);
+public:
+ G1ConcurrentRefineThreadControl();
+ ~G1ConcurrentRefineThreadControl();
+
+ jint initialize(G1ConcurrentRefine* cr, uint num_max_threads);
+
+ // If there is a "successor" thread that can be activated given the current id,
+ // activate it.
+ void maybe_activate_next(uint cur_worker_id);
+
+ void print_on(outputStream* st) const;
+ void worker_threads_do(ThreadClosure* tc);
+ void stop();
+};
+
+// Controls refinement threads and their activation based on the number of completed
+// buffers currently available in the global dirty card queue.
+// Refinement threads pick work from the queue based on these thresholds. They are activated
+// gradually based on the amount of work to do.
+// Refinement thread n activates thread n+1 if the instance of this class determines there
+// is enough work available. Threads deactivate themselves if the current amount of
+// completed buffers falls below their individual threshold.
+class G1ConcurrentRefine : public CHeapObj<mtGC> {
+ G1ConcurrentRefineThreadControl _thread_control;
+ /*
+ * The value of the completed dirty card queue length falls into one of 3 zones:
+ * green, yellow, red. If the value is in [0, green) nothing is
+ * done, the buffers are left unprocessed to enable the caching effect of the
+ * dirtied cards. In the yellow zone [green, yellow) the concurrent refinement
+ * threads are gradually activated. In [yellow, red) all threads are
+ * running. If the length becomes red (max queue length) the mutators start
+ * processing the buffers.
+ *
+ * There are some interesting cases (when G1UseAdaptiveConcRefinement
+ * is turned off):
+ * 1) green = yellow = red = 0. In this case the mutator will process all
+ * buffers. Except for those that are created by the deferred updates
+ * machinery during a collection.
+ * 2) green = 0. Means no caching. Can be a good way to minimize the
+ * amount of time spent updating remembered sets during a collection.
+ */
size_t _green_zone;
size_t _yellow_zone;
size_t _red_zone;
@@ -69,24 +102,32 @@ class G1ConcurrentRefine : public CHeapObj<mtGC> {
size_t update_rs_processed_buffers,
double goal_ms);
- // Update thread thresholds to account for updated zone values.
- void update_thread_thresholds();
+ static uint worker_id_offset();
+ void maybe_activate_more_threads(uint worker_id, size_t num_cur_buffers);
- public:
+ jint initialize();
+public:
~G1ConcurrentRefine();
- // Returns a G1ConcurrentRefine instance if succeeded to create/initialize G1ConcurrentRefine and G1ConcurrentRefineThreads.
- // Otherwise, returns NULL with error code.
+  // Returns a G1ConcurrentRefine instance if it was successfully created and
+  // initialized. Otherwise returns NULL, with the error code stored in *ecode.
static G1ConcurrentRefine* create(jint* ecode);
void stop();
+ // Adjust refinement thresholds based on work done during the pause and the goal time.
void adjust(double update_rs_time, size_t update_rs_processed_buffers, double goal_ms);
+ size_t activation_threshold(uint worker_id) const;
+ size_t deactivation_threshold(uint worker_id) const;
+ // Perform a single refinement step. Called by the refinement threads when woken up.
+ bool do_refinement_step(uint worker_id);
+
// Iterate over all concurrent refinement threads applying the given closure.
void threads_do(ThreadClosure *tc);
- static uint thread_num();
+ // Maximum number of refinement threads.
+ static uint max_num_threads();
void print_threads_on(outputStream* st) const;
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp
index f62e0c369e8..e1d1f8f4d01 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp
@@ -25,32 +25,20 @@
#include "precompiled.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1RemSet.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
-G1ConcurrentRefineThread::G1ConcurrentRefineThread(G1ConcurrentRefine* cr,
- G1ConcurrentRefineThread *next,
- uint worker_id_offset,
- uint worker_id,
- size_t activate,
- size_t deactivate) :
+G1ConcurrentRefineThread::G1ConcurrentRefineThread(G1ConcurrentRefine* cr, uint worker_id) :
ConcurrentGCThread(),
- _worker_id_offset(worker_id_offset),
_worker_id(worker_id),
_active(false),
- _next(next),
_monitor(NULL),
_cr(cr),
- _vtime_accum(0.0),
- _activation_threshold(activate),
- _deactivation_threshold(deactivate)
+ _vtime_accum(0.0)
{
-
// Each thread has its own monitor. The i-th thread is responsible for signaling
// to thread i+1 if the number of buffers in the queue exceeds a threshold for this
// thread. Monitors are also used to wake up the threads during termination.
@@ -67,13 +55,6 @@ G1ConcurrentRefineThread::G1ConcurrentRefineThread(G1ConcurrentRefine* cr,
create_and_start();
}
-void G1ConcurrentRefineThread::update_thresholds(size_t activate,
- size_t deactivate) {
- assert(deactivate < activate, "precondition");
- _activation_threshold = activate;
- _deactivation_threshold = deactivate;
-}
-
void G1ConcurrentRefineThread::wait_for_completed_buffers() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
while (!should_terminate() && !is_active()) {
@@ -118,9 +99,9 @@ void G1ConcurrentRefineThread::run_service() {
}
size_t buffers_processed = 0;
- DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
- log_debug(gc, refine)("Activated %d, on threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
- _worker_id, _activation_threshold, dcqs.completed_buffers_num());
+ log_debug(gc, refine)("Activated worker %d, on threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
+ _worker_id, _cr->activation_threshold(_worker_id),
+ JavaThread::dirty_card_queue_set().completed_buffers_num());
{
SuspendibleThreadSetJoiner sts_join;
@@ -131,33 +112,18 @@ void G1ConcurrentRefineThread::run_service() {
continue; // Re-check for termination after yield delay.
}
- size_t curr_buffer_num = dcqs.completed_buffers_num();
- // If the number of the buffers falls down into the yellow zone,
- // that means that the transition period after the evacuation pause has ended.
- if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cr()->yellow_zone()) {
- dcqs.set_completed_queue_padding(0);
- }
-
- // Check if we need to activate the next thread.
- if ((_next != NULL) &&
- !_next->is_active() &&
- (curr_buffer_num > _next->_activation_threshold)) {
- _next->activate();
- }
-
- // Process the next buffer, if there are enough left.
- if (!dcqs.refine_completed_buffer_concurrently(_worker_id + _worker_id_offset, _deactivation_threshold)) {
- break; // Deactivate, number of buffers fell below threshold.
+ if (!_cr->do_refinement_step(_worker_id)) {
+ break;
}
++buffers_processed;
}
}
deactivate();
- log_debug(gc, refine)("Deactivated %d, off threshold: " SIZE_FORMAT
+ log_debug(gc, refine)("Deactivated worker %d, off threshold: " SIZE_FORMAT
", current: " SIZE_FORMAT ", processed: " SIZE_FORMAT,
- _worker_id, _deactivation_threshold,
- dcqs.completed_buffers_num(),
+ _worker_id, _cr->deactivation_threshold(_worker_id),
+ JavaThread::dirty_card_queue_set().completed_buffers_num(),
buffers_processed);
if (os::supports_vtime()) {
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp
index fbc10fcfb4c..8b3694411fa 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp
@@ -43,43 +43,29 @@ class G1ConcurrentRefineThread: public ConcurrentGCThread {
uint _worker_id;
uint _worker_id_offset;
- // The refinement threads collection is linked list. A predecessor can activate a successor
- // when the number of the rset update buffer crosses a certain threshold. A successor
- // would self-deactivate when the number of the buffers falls below the threshold.
bool _active;
- G1ConcurrentRefineThread* _next;
Monitor* _monitor;
G1ConcurrentRefine* _cr;
- // This thread's activation/deactivation thresholds
- size_t _activation_threshold;
- size_t _deactivation_threshold;
-
void wait_for_completed_buffers();
void set_active(bool x) { _active = x; }
- bool is_active();
- void activate();
+ // Deactivate this thread.
void deactivate();
bool is_primary() { return (_worker_id == 0); }
void run_service();
void stop_service();
-
public:
- // Constructor
- G1ConcurrentRefineThread(G1ConcurrentRefine* cr, G1ConcurrentRefineThread* next,
- uint worker_id_offset, uint worker_id,
- size_t activate, size_t deactivate);
+ G1ConcurrentRefineThread(G1ConcurrentRefine* cg1r, uint worker_id);
- void update_thresholds(size_t activate, size_t deactivate);
- size_t activation_threshold() const { return _activation_threshold; }
+ bool is_active();
+ // Activate this thread.
+ void activate();
// Total virtual time so far.
double vtime_accum() { return _vtime_accum; }
-
- G1ConcurrentRefine* cr() { return _cr; }
};
#endif // SHARE_VM_GC_G1_G1CONCURRENTREFINETHREAD_HPP
diff --git a/src/hotspot/share/gc/g1/g1DefaultPolicy.cpp b/src/hotspot/share/gc/g1/g1DefaultPolicy.cpp
index 409c8ba99aa..4c480ca5827 100644
--- a/src/hotspot/share/gc/g1/g1DefaultPolicy.cpp
+++ b/src/hotspot/share/gc/g1/g1DefaultPolicy.cpp
@@ -52,7 +52,7 @@ G1DefaultPolicy::G1DefaultPolicy(STWGCTimer* gc_timer) :
_analytics(new G1Analytics(&_predictor)),
_mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
_ihop_control(create_ihop_control(&_predictor)),
- _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 3)),
+ _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
_young_list_fixed_length(0),
_short_lived_surv_rate_group(new SurvRateGroup()),
_survivor_surv_rate_group(new SurvRateGroup()),
diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp
index 81ec9a05e25..ffe54d28332 100644
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp
@@ -35,6 +35,7 @@
#include "gc/g1/g1FullGCReferenceProcessorExecutor.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1OopClosures.hpp"
+#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.hpp"
@@ -62,20 +63,24 @@ static void update_derived_pointers() {
#endif
}
-G1FullCollector::G1FullCollector(G1FullGCScope* scope,
- ReferenceProcessor* reference_processor,
- G1CMBitMap* bitmap,
- uint workers) :
- _scope(scope),
- _num_workers(workers),
- _mark_bitmap(bitmap),
+G1CMBitMap* G1FullCollector::mark_bitmap() {
+ return _heap->concurrent_mark()->next_mark_bitmap();
+}
+
+ReferenceProcessor* G1FullCollector::reference_processor() {
+ return _heap->ref_processor_stw();
+}
+
+G1FullCollector::G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft_refs) :
+ _heap(heap),
+ _scope(memory_manager, explicit_gc, clear_soft_refs),
+ _num_workers(heap->workers()->active_workers()),
_oop_queue_set(_num_workers),
_array_queue_set(_num_workers),
_preserved_marks_set(true),
- _reference_processor(reference_processor),
_serial_compaction_point(),
- _is_alive(_mark_bitmap),
- _is_alive_mutator(_reference_processor, &_is_alive) {
+ _is_alive(heap->concurrent_mark()->next_mark_bitmap()),
+ _is_alive_mutator(heap->ref_processor_stw(), &_is_alive) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
_preserved_marks_set.init(_num_workers);
@@ -99,8 +104,19 @@ G1FullCollector::~G1FullCollector() {
}
void G1FullCollector::prepare_collection() {
- _reference_processor->enable_discovery();
- _reference_processor->setup_policy(scope()->should_clear_soft_refs());
+ _heap->g1_policy()->record_full_collection_start();
+
+ _heap->print_heap_before_gc();
+ _heap->print_heap_regions();
+
+ _heap->abort_concurrent_cycle();
+ _heap->verify_before_full_collection(scope()->is_explicit_gc());
+
+ _heap->gc_prologue(true);
+ _heap->prepare_heap_for_full_collection();
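+
+  // prepare_collection() now performs the heap preparation that used to be done in
+  // G1CollectedHeap::do_full_collection_inner(), so the collector object drives the
+  // whole full GC sequence.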
+
+ reference_processor()->enable_discovery();
+ reference_processor()->setup_policy(scope()->should_clear_soft_refs());
// When collecting the permanent generation Method*s may be moving,
// so we either have to flush all bcp data or convert it into bci.
@@ -139,6 +155,15 @@ void G1FullCollector::complete_collection() {
BiasedLocking::restore_marks();
CodeCache::gc_epilogue();
JvmtiExport::gc_epilogue();
+
+ _heap->prepare_heap_for_mutators();
+
+ _heap->g1_policy()->record_full_collection_end();
+ _heap->gc_epilogue(true);
+
+ _heap->verify_after_full_collection();
+
+ _heap->print_heap_after_full_collection(scope()->heap_transition());
}
void G1FullCollector::phase1_mark_live_objects() {
@@ -164,11 +189,11 @@ void G1FullCollector::phase1_mark_live_objects() {
GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&_is_alive, scope()->timer());
- G1CollectedHeap::heap()->complete_cleaning(&_is_alive, purged_class);
+ _heap->complete_cleaning(&_is_alive, purged_class);
} else {
GCTraceTime(Debug, gc, phases) debug("Phase 1: String and Symbol Tables Cleanup", scope()->timer());
// If no class unloading just clean out strings and symbols.
- G1CollectedHeap::heap()->partial_cleaning(&_is_alive, true, true, G1StringDedup::is_enabled());
+ _heap->partial_cleaning(&_is_alive, true, true, G1StringDedup::is_enabled());
}
scope()->tracer()->report_object_count_after_gc(&_is_alive);
@@ -210,18 +235,18 @@ void G1FullCollector::phase4_do_compaction() {
}
void G1FullCollector::restore_marks() {
- SharedRestorePreservedMarksTaskExecutor task_executor(G1CollectedHeap::heap()->workers());
+ SharedRestorePreservedMarksTaskExecutor task_executor(_heap->workers());
_preserved_marks_set.restore(&task_executor);
_preserved_marks_set.reclaim();
}
void G1FullCollector::run_task(AbstractGangTask* task) {
- G1CollectedHeap::heap()->workers()->run_task(task, _num_workers);
+ _heap->workers()->run_task(task, _num_workers);
}
void G1FullCollector::verify_after_marking() {
- if (!VerifyDuringGC) {
- //Only do verification if VerifyDuringGC is set.
+ if (!VerifyDuringGC || !_heap->verifier()->should_verify(G1HeapVerifier::G1VerifyFull)) {
+    // Only do verification if both VerifyDuringGC and G1VerifyFull are set.
return;
}
@@ -229,7 +254,7 @@ void G1FullCollector::verify_after_marking() {
#if COMPILER2_OR_JVMCI
DerivedPointerTableDeactivate dpt_deact;
#endif
- G1CollectedHeap::heap()->prepare_for_verify();
+ _heap->prepare_for_verify();
// Note: we can verify only the heap here. When an object is
// marked, the previous value of the mark word (including
// identity hash values, ages, etc) is preserved, and the mark
@@ -240,6 +265,6 @@ void G1FullCollector::verify_after_marking() {
// fail. At the end of the GC, the original mark word values
// (including hash values) are restored to the appropriate
// objects.
- GCTraceTime(Info, gc, verify)("During GC (full)");
- G1CollectedHeap::heap()->verify(VerifyOption_G1UseFullMarking);
+ GCTraceTime(Info, gc, verify)("Verifying During GC (full)");
+ _heap->verify(VerifyOption_G1UseFullMarking);
}
diff --git a/src/hotspot/share/gc/g1/g1FullCollector.hpp b/src/hotspot/share/gc/g1/g1FullCollector.hpp
index 947304596ee..576aba5c8c6 100644
--- a/src/hotspot/share/gc/g1/g1FullCollector.hpp
+++ b/src/hotspot/share/gc/g1/g1FullCollector.hpp
@@ -28,6 +28,7 @@
#include "gc/g1/g1FullGCCompactionPoint.hpp"
#include "gc/g1/g1FullGCMarker.hpp"
#include "gc/g1/g1FullGCOopClosures.hpp"
+#include "gc/g1/g1FullGCScope.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/taskqueue.hpp"
@@ -38,45 +39,41 @@ class G1CMBitMap;
class G1FullGCMarker;
class G1FullGCScope;
class G1FullGCCompactionPoint;
+class GCMemoryManager;
class ReferenceProcessor;
// The G1FullCollector holds data associated with the current Full GC.
class G1FullCollector : StackObj {
- G1FullGCScope* _scope;
+ G1CollectedHeap* _heap;
+ G1FullGCScope _scope;
uint _num_workers;
G1FullGCMarker** _markers;
G1FullGCCompactionPoint** _compaction_points;
- G1CMBitMap* _mark_bitmap;
OopQueueSet _oop_queue_set;
ObjArrayTaskQueueSet _array_queue_set;
PreservedMarksSet _preserved_marks_set;
- ReferenceProcessor* _reference_processor;
G1FullGCCompactionPoint _serial_compaction_point;
-
G1IsAliveClosure _is_alive;
ReferenceProcessorIsAliveMutator _is_alive_mutator;
public:
- G1FullCollector(G1FullGCScope* scope,
- ReferenceProcessor* reference_processor,
- G1CMBitMap* mark_bitmap,
- uint workers);
+ G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft_refs);
~G1FullCollector();
void prepare_collection();
void collect();
void complete_collection();
- G1FullGCScope* scope() { return _scope; }
+ G1FullGCScope* scope() { return &_scope; }
uint workers() { return _num_workers; }
G1FullGCMarker* marker(uint id) { return _markers[id]; }
G1FullGCCompactionPoint* compaction_point(uint id) { return _compaction_points[id]; }
- G1CMBitMap* mark_bitmap() { return _mark_bitmap; }
OopQueueSet* oop_queue_set() { return &_oop_queue_set; }
ObjArrayTaskQueueSet* array_queue_set() { return &_array_queue_set; }
PreservedMarksSet* preserved_mark_set() { return &_preserved_marks_set; }
- ReferenceProcessor* reference_processor() { return _reference_processor; }
G1FullGCCompactionPoint* serial_compaction_point() { return &_serial_compaction_point; }
+ G1CMBitMap* mark_bitmap();
+ ReferenceProcessor* reference_processor();
private:
void phase1_mark_live_objects();
diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.hpp b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.hpp
index 95f01be62c6..0ec0b324aab 100644
--- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.hpp
@@ -26,6 +26,7 @@
#define SHARE_GC_G1_G1FULLGCCOMPACTIONPOINT_HPP
#include "memory/allocation.hpp"
+#include "oops/oopsHierarchy.hpp"
#include "utilities/growableArray.hpp"
class HeapRegion;
diff --git a/src/hotspot/share/gc/g1/g1FullGCScope.cpp b/src/hotspot/share/gc/g1/g1FullGCScope.cpp
index 87dd42b93f3..2430451df1c 100644
--- a/src/hotspot/share/gc/g1/g1FullGCScope.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCScope.cpp
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1FullGCScope.hpp"
-G1FullGCScope::G1FullGCScope(bool explicit_gc, bool clear_soft) :
+G1FullGCScope::G1FullGCScope(GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft) :
_rm(),
_explicit_gc(explicit_gc),
_g1h(G1CollectedHeap::heap()),
@@ -36,7 +36,7 @@ G1FullGCScope::G1FullGCScope(bool explicit_gc, bool clear_soft) :
_active(),
_cpu_time(),
_soft_refs(clear_soft, _g1h->collector_policy()),
- _memory_stats(true, _g1h->gc_cause()),
+ _memory_stats(memory_manager, _g1h->gc_cause()),
_collector_stats(_g1h->g1mm()->full_collection_counters()),
_heap_transition(_g1h) {
_timer.register_gc_start();
diff --git a/src/hotspot/share/gc/g1/g1FullGCScope.hpp b/src/hotspot/share/gc/g1/g1FullGCScope.hpp
index 20dc8d21643..850ee0aea0f 100644
--- a/src/hotspot/share/gc/g1/g1FullGCScope.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCScope.hpp
@@ -37,6 +37,8 @@
#include "memory/allocation.hpp"
#include "services/memoryService.hpp"
+class GCMemoryManager;
+
// Class used to group scoped objects used in the Full GC together.
class G1FullGCScope : public StackObj {
ResourceMark _rm;
@@ -54,7 +56,7 @@ class G1FullGCScope : public StackObj {
G1HeapTransition _heap_transition;
public:
- G1FullGCScope(bool explicit_gc, bool clear_soft);
+ G1FullGCScope(GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft);
~G1FullGCScope();
bool is_explicit_gc();
diff --git a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp
index 70e5ed29c65..ca1a22c78e0 100644
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp
@@ -376,6 +376,37 @@ public:
}
};
+void G1HeapVerifier::parse_verification_type(const char* type) {
+ if (strcmp(type, "young-only") == 0) {
+ enable_verification_type(G1VerifyYoungOnly);
+ } else if (strcmp(type, "initial-mark") == 0) {
+ enable_verification_type(G1VerifyInitialMark);
+ } else if (strcmp(type, "mixed") == 0) {
+ enable_verification_type(G1VerifyMixed);
+ } else if (strcmp(type, "remark") == 0) {
+ enable_verification_type(G1VerifyRemark);
+ } else if (strcmp(type, "cleanup") == 0) {
+ enable_verification_type(G1VerifyCleanup);
+ } else if (strcmp(type, "full") == 0) {
+ enable_verification_type(G1VerifyFull);
+ } else {
+ log_warning(gc, verify)("VerifyGCType: '%s' is unknown. Available types are: "
+ "young-only, initial-mark, mixed, remark, cleanup and full", type);
+ }
+}
+
+void G1HeapVerifier::enable_verification_type(G1VerifyType type) {
+ // First enable will clear _enabled_verification_types.
+ if (_enabled_verification_types == G1VerifyAll) {
+ _enabled_verification_types = type;
+ } else {
+ _enabled_verification_types |= type;
+ }
+}
+
+bool G1HeapVerifier::should_verify(G1VerifyType type) {
+ return (_enabled_verification_types & type) == type;
+}
void G1HeapVerifier::verify(VerifyOption vo) {
if (!SafepointSynchronize::is_at_safepoint()) {
@@ -541,28 +572,32 @@ void G1HeapVerifier::prepare_for_verify() {
}
}
-double G1HeapVerifier::verify(bool guard, const char* msg) {
+double G1HeapVerifier::verify(G1VerifyType type, VerifyOption vo, const char* msg) {
double verify_time_ms = 0.0;
- if (guard && _g1h->total_collections() >= VerifyGCStartAt) {
+ if (should_verify(type) && _g1h->total_collections() >= VerifyGCStartAt) {
double verify_start = os::elapsedTime();
HandleMark hm; // Discard invalid handles created during verification
prepare_for_verify();
- Universe::verify(VerifyOption_G1UsePrevMarking, msg);
+ Universe::verify(vo, msg);
verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
}
return verify_time_ms;
}
-void G1HeapVerifier::verify_before_gc() {
- double verify_time_ms = verify(VerifyBeforeGC, "Before GC");
- _g1h->g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
+void G1HeapVerifier::verify_before_gc(G1VerifyType type) {
+ if (VerifyBeforeGC) {
+ double verify_time_ms = verify(type, VerifyOption_G1UsePrevMarking, "Before GC");
+ _g1h->g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
+ }
}
-void G1HeapVerifier::verify_after_gc() {
- double verify_time_ms = verify(VerifyAfterGC, "After GC");
- _g1h->g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
+void G1HeapVerifier::verify_after_gc(G1VerifyType type) {
+ if (VerifyAfterGC) {
+ double verify_time_ms = verify(type, VerifyOption_G1UsePrevMarking, "After GC");
+ _g1h->g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
+ }
}
diff --git a/src/hotspot/share/gc/g1/g1HeapVerifier.hpp b/src/hotspot/share/gc/g1/g1HeapVerifier.hpp
index 6d3bb3899f8..ee7cb0a316b 100644
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.hpp
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.hpp
@@ -34,6 +34,7 @@ class G1CollectedHeap;
class G1HeapVerifier : public CHeapObj<mtGC> {
private:
G1CollectedHeap* _g1h;
+ int _enabled_verification_types;
// verify_region_sets() performs verification over the region
// lists. It will be compiled in the product code to be used when
@@ -41,8 +42,21 @@ private:
void verify_region_sets();
public:
+ enum G1VerifyType {
+ G1VerifyYoungOnly = 1, // -XX:VerifyGCType=young-only
+ G1VerifyInitialMark = 2, // -XX:VerifyGCType=initial-mark
+ G1VerifyMixed = 4, // -XX:VerifyGCType=mixed
+ G1VerifyRemark = 8, // -XX:VerifyGCType=remark
+ G1VerifyCleanup = 16, // -XX:VerifyGCType=cleanup
+ G1VerifyFull = 32, // -XX:VerifyGCType=full
+ G1VerifyAll = -1
+ };
- G1HeapVerifier(G1CollectedHeap* heap) : _g1h(heap) { }
+ G1HeapVerifier(G1CollectedHeap* heap) : _g1h(heap), _enabled_verification_types(G1VerifyAll) { }
+
+ void parse_verification_type(const char* type);
+ void enable_verification_type(G1VerifyType type);
+ bool should_verify(G1VerifyType type);
// Perform verification.
@@ -73,9 +87,9 @@ public:
#endif // HEAP_REGION_SET_FORCE_VERIFY
void prepare_for_verify();
- double verify(bool guard, const char* msg);
- void verify_before_gc();
- void verify_after_gc();
+ double verify(G1VerifyType type, VerifyOption vo, const char* msg);
+ void verify_before_gc(G1VerifyType type);
+ void verify_after_gc(G1VerifyType type);
#ifndef PRODUCT
// Make sure that the given bitmap has no marked objects in the
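A minimal standalone sketch (simplified names and values, not the HotSpot sources themselves) of how the G1VerifyType flags above behave: the first explicitly requested type replaces the G1VerifyAll default, later ones are OR-ed in, and should_verify() is a plain mask test.

#include <cstdio>

// Hypothetical, simplified mirror of the G1VerifyType bit-flag scheme.
enum VerifyType {
  VerifyYoungOnly = 1,
  VerifyRemark    = 8,
  VerifyFull      = 32,
  VerifyAll       = -1   // default: every bit set
};

static int enabled_types = VerifyAll;

// First explicit enable replaces the "all" default; later enables OR in.
static void enable_verification_type(VerifyType type) {
  if (enabled_types == VerifyAll) {
    enabled_types = type;
  } else {
    enabled_types |= type;
  }
}

static bool should_verify(VerifyType type) {
  return (enabled_types & type) == type;
}

int main() {
  enable_verification_type(VerifyRemark);   // now only remark pauses verify
  enable_verification_type(VerifyFull);     // ... plus full GCs
  printf("remark: %d, young-only: %d\n",
         should_verify(VerifyRemark), should_verify(VerifyYoungOnly));
  return 0;
}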
diff --git a/src/hotspot/share/services/g1MemoryPool.cpp b/src/hotspot/share/gc/g1/g1MemoryPool.cpp
similarity index 95%
rename from src/hotspot/share/services/g1MemoryPool.cpp
rename to src/hotspot/share/gc/g1/g1MemoryPool.cpp
index 92daa7b726d..f658c0d2a27 100644
--- a/src/hotspot/share/services/g1MemoryPool.cpp
+++ b/src/hotspot/share/gc/g1/g1MemoryPool.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,8 @@
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1MemoryPool.hpp"
#include "gc/g1/heapRegion.hpp"
-#include "services/g1MemoryPool.hpp"
G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h,
const char* name,
@@ -33,7 +33,6 @@ G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h,
size_t max_size,
bool support_usage_threshold) :
_g1mm(g1h->g1mm()), CollectedMemoryPool(name,
- MemoryPool::Heap,
init_size,
max_size,
support_usage_threshold) {
diff --git a/src/hotspot/share/services/g1MemoryPool.hpp b/src/hotspot/share/gc/g1/g1MemoryPool.hpp
similarity index 92%
rename from src/hotspot/share/services/g1MemoryPool.hpp
rename to src/hotspot/share/gc/g1/g1MemoryPool.hpp
index fddc439e899..a6771c3fd79 100644
--- a/src/hotspot/share/services/g1MemoryPool.hpp
+++ b/src/hotspot/share/gc/g1/g1MemoryPool.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,15 +22,12 @@
*
*/
-#ifndef SHARE_VM_SERVICES_G1MEMORYPOOL_HPP
-#define SHARE_VM_SERVICES_G1MEMORYPOOL_HPP
+#ifndef SHARE_VM_GC_G1_G1MEMORYPOOL_HPP
+#define SHARE_VM_GC_G1_G1MEMORYPOOL_HPP
-#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
#include "gc/g1/g1MonitoringSupport.hpp"
#include "services/memoryPool.hpp"
#include "services/memoryUsage.hpp"
-#endif // INCLUDE_ALL_GCS
// This file contains the three classes that represent the memory
// pools of the G1 spaces: G1EdenPool, G1SurvivorPool, and
@@ -50,6 +47,8 @@
// on this model.
//
+class G1CollectedHeap;
+
// This class is shared by the three G1 memory pool classes
// (G1EdenPool, G1SurvivorPool, G1OldGenPool).
class G1MemoryPoolSuper : public CollectedMemoryPool {
@@ -107,4 +106,4 @@ public:
MemoryUsage get_memory_usage();
};
-#endif // SHARE_VM_SERVICES_G1MEMORYPOOL_HPP
+#endif // SHARE_VM_GC_G1_G1MEMORYPOOL_HPP
diff --git a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp
index a0cf7c2c3cf..0a566b3afc0 100644
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp
@@ -26,6 +26,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
#include "gc/g1/g1Policy.hpp"
+#include "gc/shared/hSpaceCounters.hpp"
G1GenerationCounters::G1GenerationCounters(G1MonitoringSupport* g1mm,
const char* name,
@@ -128,10 +129,10 @@ G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) :
// name "generation.1.space.0"
// Counters are created from maxCapacity, capacity, initCapacity,
// and used.
- _old_space_counters = new HSpaceCounters("space", 0 /* ordinal */,
+ _old_space_counters = new HSpaceCounters(_old_collection_counters->name_space(),
+ "space", 0 /* ordinal */,
pad_capacity(overall_reserved()) /* max_capacity */,
- pad_capacity(old_space_committed()) /* init_capacity */,
- _old_collection_counters);
+ pad_capacity(old_space_committed()) /* init_capacity */);
// Young collection set
// name "generation.0". This is logically the young generation.
@@ -139,27 +140,29 @@ G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) :
// See _old_collection_counters for additional counters
_young_collection_counters = new G1YoungGenerationCounters(this, "young");
+ const char* young_collection_name_space = _young_collection_counters->name_space();
+
// name "generation.0.space.0"
// See _old_space_counters for additional counters
- _eden_counters = new HSpaceCounters("eden", 0 /* ordinal */,
+ _eden_counters = new HSpaceCounters(young_collection_name_space,
+ "eden", 0 /* ordinal */,
pad_capacity(overall_reserved()) /* max_capacity */,
- pad_capacity(eden_space_committed()) /* init_capacity */,
- _young_collection_counters);
+ pad_capacity(eden_space_committed()) /* init_capacity */);
// name "generation.0.space.1"
// See _old_space_counters for additional counters
// Set the arguments to indicate that this survivor space is not used.
- _from_counters = new HSpaceCounters("s0", 1 /* ordinal */,
+ _from_counters = new HSpaceCounters(young_collection_name_space,
+ "s0", 1 /* ordinal */,
pad_capacity(0) /* max_capacity */,
- pad_capacity(0) /* init_capacity */,
- _young_collection_counters);
+ pad_capacity(0) /* init_capacity */);
// name "generation.0.space.2"
// See _old_space_counters for additional counters
- _to_counters = new HSpaceCounters("s1", 2 /* ordinal */,
+ _to_counters = new HSpaceCounters(young_collection_name_space,
+ "s1", 2 /* ordinal */,
pad_capacity(overall_reserved()) /* max_capacity */,
- pad_capacity(survivor_space_committed()) /* init_capacity */,
- _young_collection_counters);
+ pad_capacity(survivor_space_committed()) /* init_capacity */);
if (UsePerfData) {
// Given that this survivor space is not used, we update it here
diff --git a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp
index 3c1d7444108..ee103237009 100644
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp
@@ -25,9 +25,11 @@
#ifndef SHARE_VM_GC_G1_G1MONITORINGSUPPORT_HPP
#define SHARE_VM_GC_G1_G1MONITORINGSUPPORT_HPP
-#include "gc/g1/hSpaceCounters.hpp"
+#include "gc/shared/generationCounters.hpp"
+class CollectorCounters;
class G1CollectedHeap;
+class HSpaceCounters;
// Class for monitoring logical spaces in G1. It provides data for
// both G1's jstat counters as well as G1's memory pools.
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
index 32b405d4226..03e75f1989d 100644
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
@@ -32,6 +32,7 @@
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/taskqueue.inline.hpp"
+#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
@@ -390,3 +391,21 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
}
}
+G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length) :
+ _g1h(g1h),
+ _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
+ _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length, mtGC)),
+ _young_cset_length(young_cset_length),
+ _n_workers(n_workers),
+ _flushed(false) {
+ for (uint i = 0; i < n_workers; ++i) {
+ _states[i] = NULL;
+ }
+ memset(_surviving_young_words_total, 0, young_cset_length * sizeof(size_t));
+}
+
+G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
+ assert(_flushed, "thread local state from the per thread states should have been flushed");
+ FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
+ FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
+}
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
index 9bbb9a90a62..310b4270b02 100644
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
@@ -204,24 +204,8 @@ class G1ParScanThreadStateSet : public StackObj {
bool _flushed;
public:
- G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length) :
- _g1h(g1h),
- _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
- _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length, mtGC)),
- _young_cset_length(young_cset_length),
- _n_workers(n_workers),
- _flushed(false) {
- for (uint i = 0; i < n_workers; ++i) {
- _states[i] = NULL;
- }
- memset(_surviving_young_words_total, 0, young_cset_length * sizeof(size_t));
- }
-
- ~G1ParScanThreadStateSet() {
- assert(_flushed, "thread local state from the per thread states should have been flushed");
- FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
- FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
- }
+ G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length);
+ ~G1ParScanThreadStateSet();
void flush();
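This hunk is one instance of a pattern repeated through the patch (SpaceCounters, CSpaceCounters, CollectorCounters and gcUtil below): bodies that call NEW_C_HEAP_ARRAY / FREE_C_HEAP_ARRAY move out of the header so the header no longer needs allocation.inline.hpp. A generic sketch of the intended shape, with made-up names:

// widget.hpp -- declarations only; no allocator headers needed here.
class Widget {
  int*     _data;
  unsigned _len;
public:
  explicit Widget(unsigned len);
  ~Widget();
};

// widget.cpp -- the only translation unit that needs the allocator include
// (stand-in for g1ParScanThreadState.cpp pulling in allocation.inline.hpp).
#include <cstring>
Widget::Widget(unsigned len) : _data(new int[len]), _len(len) {
  memset(_data, 0, len * sizeof(int));   // like the memset of _surviving_young_words_total
}
Widget::~Widget() {
  delete[] _data;
}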
diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp
index da0281c6130..c20a41e66f0 100644
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp
@@ -298,7 +298,7 @@ G1RemSet::~G1RemSet() {
}
uint G1RemSet::num_par_rem_sets() {
- return MAX2(DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::thread_num(), ParallelGCThreads);
+ return MAX2(DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads(), ParallelGCThreads);
}
void G1RemSet::initialize(size_t capacity, uint max_regions) {
diff --git a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp
index 2ddabe75d03..93a8f713836 100644
--- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp
+++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp
@@ -86,7 +86,7 @@ G1RemSetSummary::G1RemSetSummary() :
_num_processed_buf_mutator(0),
_num_processed_buf_rs_threads(0),
_num_coarsenings(0),
- _num_vtimes(G1ConcurrentRefine::thread_num()),
+ _num_vtimes(G1ConcurrentRefine::max_num_threads()),
_rs_threads_vtimes(NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC)),
_sampling_thread_vtime(0.0f) {
@@ -99,7 +99,7 @@ G1RemSetSummary::G1RemSetSummary(G1RemSet* rem_set) :
_num_processed_buf_mutator(0),
_num_processed_buf_rs_threads(0),
_num_coarsenings(0),
- _num_vtimes(G1ConcurrentRefine::thread_num()),
+ _num_vtimes(G1ConcurrentRefine::max_num_threads()),
_rs_threads_vtimes(NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC)),
_sampling_thread_vtime(0.0f) {
update();
diff --git a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp
index 9db03fdfc5f..5c1bc491e5d 100644
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp
@@ -175,6 +175,9 @@ void G1SATBCardTableLoggingModRefBS::write_ref_field_post_slow(volatile jbyte* b
void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
+ if (mr.is_empty()) {
+ return;
+ }
volatile jbyte* byte = byte_for(mr.start());
jbyte* last_byte = byte_for(mr.last());
Thread* thr = Thread::current();
diff --git a/src/hotspot/share/gc/g1/satbMarkQueue.cpp b/src/hotspot/share/gc/g1/satbMarkQueue.cpp
index ddca44b038c..178cb450c9b 100644
--- a/src/hotspot/share/gc/g1/satbMarkQueue.cpp
+++ b/src/hotspot/share/gc/g1/satbMarkQueue.cpp
@@ -32,6 +32,7 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
SATBMarkQueue::SATBMarkQueue(SATBMarkQueueSet* qset, bool permanent) :
@@ -214,7 +215,7 @@ void SATBMarkQueueSet::dump_active_states(bool expected_active) {
log_error(gc, verify)("Expected SATB active state: %s", expected_active ? "ACTIVE" : "INACTIVE");
log_error(gc, verify)("Actual SATB active states:");
log_error(gc, verify)(" Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE");
- for (JavaThread* t = Threads::first(); t; t = t->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
log_error(gc, verify)(" Thread \"%s\" queue: %s", t->name(), t->satb_mark_queue().is_active() ? "ACTIVE" : "INACTIVE");
}
log_error(gc, verify)(" Shared queue: %s", shared_satb_queue()->is_active() ? "ACTIVE" : "INACTIVE");
@@ -228,7 +229,7 @@ void SATBMarkQueueSet::verify_active_states(bool expected_active) {
}
// Verify thread queue states
- for (JavaThread* t = Threads::first(); t; t = t->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
if (t->satb_mark_queue().is_active() != expected_active) {
dump_active_states(expected_active);
guarantee(false, "Thread SATB queue has an unexpected active state");
@@ -249,14 +250,14 @@ void SATBMarkQueueSet::set_active_all_threads(bool active, bool expected_active)
verify_active_states(expected_active);
#endif // ASSERT
_all_active = active;
- for (JavaThread* t = Threads::first(); t; t = t->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
t->satb_mark_queue().set_active(active);
}
shared_satb_queue()->set_active(active);
}
void SATBMarkQueueSet::filter_thread_buffers() {
- for(JavaThread* t = Threads::first(); t; t = t->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
t->satb_mark_queue().filter();
}
shared_satb_queue()->filter();
@@ -309,7 +310,7 @@ void SATBMarkQueueSet::print_all(const char* msg) {
i += 1;
}
- for (JavaThread* t = Threads::first(); t; t = t->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Thread: %s", t->name());
t->satb_mark_queue().print(buffer);
}
@@ -341,8 +342,8 @@ void SATBMarkQueueSet::abandon_partial_marking() {
}
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
// So we can safely manipulate these queues.
- for (JavaThread* t = Threads::first(); t; t = t->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
t->satb_mark_queue().reset();
}
- shared_satb_queue()->reset();
+ shared_satb_queue()->reset();
}
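The loop rewrites in this file (and in mutableNUMASpace.cpp, collectedHeap.cpp and gcLocker.cpp below) replace the raw Threads::first()/next() walk with JavaThreadIteratorWithHandle, which iterates over a protected snapshot of the thread list. A rough, purely illustrative sketch of the snapshot idea in plain C++ (hypothetical types, not HotSpot's Thread-SMR code):

#include <cstdio>
#include <memory>
#include <vector>

struct Thread { int id; };

// Hypothetical illustration only: the iterator owns a snapshot of the thread
// list, so the loop stays valid even if threads are added or removed while it
// runs. shared_ptr stands in for the reference protocol keeping it alive.
class ThreadListSnapshot {
  std::shared_ptr<const std::vector<Thread*>> _list;
  size_t _next;
public:
  explicit ThreadListSnapshot(std::shared_ptr<const std::vector<Thread*>> list)
    : _list(std::move(list)), _next(0) {}
  Thread* next() { return _next < _list->size() ? (*_list)[_next++] : nullptr; }
};

int main() {
  Thread t0{0}, t1{1};
  auto list = std::make_shared<const std::vector<Thread*>>(
      std::vector<Thread*>{&t0, &t1});
  // Same loop shape as the new code above:
  for (ThreadListSnapshot it(list); Thread* t = it.next(); ) {
    printf("thread %d\n", t->id);
  }
  return 0;
}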
diff --git a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
index 120ff95bae5..7d5fd87d5c1 100644
--- a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
+++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
@@ -29,6 +29,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "utilities/align.hpp"
MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment), _must_use_large_pages(false) {
@@ -90,14 +91,14 @@ void MutableNUMASpace::ensure_parsability() {
MutableSpace *s = ls->space();
if (s->top() < top()) { // For all spaces preceding the one containing top()
if (s->free_in_words() > 0) {
- intptr_t cur_top = (intptr_t)s->top();
+ HeapWord* cur_top = s->top();
size_t words_left_to_fill = pointer_delta(s->end(), s->top());;
while (words_left_to_fill > 0) {
size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
assert(words_to_fill >= CollectedHeap::min_fill_size(),
"Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size());
- CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
+ CollectedHeap::fill_with_object(cur_top, words_to_fill);
if (!os::numa_has_static_binding()) {
size_t touched_words = words_to_fill;
#ifndef ASSERT
@@ -107,19 +108,19 @@ void MutableNUMASpace::ensure_parsability() {
}
#endif
MemRegion invalid;
- HeapWord *crossing_start = align_up((HeapWord*)cur_top, os::vm_page_size());
- HeapWord *crossing_end = align_down((HeapWord*)(cur_top + touched_words), os::vm_page_size());
+ HeapWord *crossing_start = align_up(cur_top, os::vm_page_size());
+ HeapWord *crossing_end = align_down(cur_top + touched_words, os::vm_page_size());
if (crossing_start != crossing_end) {
// If object header crossed a small page boundary we mark the area
// as invalid rounding it to a page_size().
- HeapWord *start = MAX2(align_down((HeapWord*)cur_top, page_size()), s->bottom());
- HeapWord *end = MIN2(align_up((HeapWord*)(cur_top + touched_words), page_size()), s->end());
+ HeapWord *start = MAX2(align_down(cur_top, page_size()), s->bottom());
+ HeapWord *end = MIN2(align_up(cur_top + touched_words, page_size()), s->end());
invalid = MemRegion(start, end);
}
ls->add_invalid_region(invalid);
}
- cur_top = cur_top + (words_to_fill * HeapWordSize);
+ cur_top += words_to_fill;
words_left_to_fill -= words_to_fill;
}
}
@@ -287,7 +288,7 @@ bool MutableNUMASpace::update_layout(bool force) {
FREE_C_HEAP_ARRAY(int, lgrp_ids);
if (changed) {
- for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
thread->set_lgrp_id(-1);
}
}
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index 27fcf5c1adc..4fc4bc76e3b 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -33,6 +33,7 @@
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
+#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
@@ -45,6 +46,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
+#include "services/memoryManager.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"
@@ -105,9 +107,9 @@ jint ParallelScavengeHeap::initialize() {
(old_gen()->virtual_space()->high_boundary() ==
young_gen()->virtual_space()->low_boundary()),
"Boundaries must meet");
- // initialize the policy counters - 2 collectors, 3 generations
+ // initialize the policy counters - 2 collectors, 2 generations
_gc_policy_counters =
- new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
+ new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);
// Set up the GCTaskManager
_gc_task_manager = GCTaskManager::create(ParallelGCThreads);
@@ -119,7 +121,35 @@ jint ParallelScavengeHeap::initialize() {
return JNI_OK;
}
+void ParallelScavengeHeap::initialize_serviceability() {
+
+ _eden_pool = new EdenMutableSpacePool(_young_gen,
+ _young_gen->eden_space(),
+ "PS Eden Space",
+ false /* support_usage_threshold */);
+
+ _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
+ "PS Survivor Space",
+ false /* support_usage_threshold */);
+
+ _old_pool = new PSGenerationPool(_old_gen,
+ "PS Old Gen",
+ true /* support_usage_threshold */);
+
+ _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
+ _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");
+
+ _old_manager->add_pool(_eden_pool);
+ _old_manager->add_pool(_survivor_pool);
+ _old_manager->add_pool(_old_pool);
+
+ _young_manager->add_pool(_eden_pool);
+ _young_manager->add_pool(_survivor_pool);
+
+}
+
void ParallelScavengeHeap::post_initialize() {
+ CollectedHeap::post_initialize();
// Need to init the tenuring threshold
PSScavenge::initialize();
if (UseParallelOldGC) {
@@ -674,3 +704,19 @@ void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
CodeCache::verify_scavenge_root_nmethod(nm);
}
+
+GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
+ GrowableArray<GCMemoryManager*> memory_managers(2);
+ memory_managers.append(_young_manager);
+ memory_managers.append(_old_manager);
+ return memory_managers;
+}
+
+GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
+ GrowableArray<MemoryPool*> memory_pools(3);
+ memory_pools.append(_eden_pool);
+ memory_pools.append(_survivor_pool);
+ memory_pools.append(_old_pool);
+ return memory_pools;
+}
+
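initialize_serviceability() above is where each heap now builds its own memory pools and GC memory managers instead of relying on defaults in the services code. A toy sketch of the wiring it performs (stand-in types, not the services/* classes): the minor-GC manager reports only against the young pools, while the full-GC manager covers every pool.

#include <string>
#include <vector>

// Simplified stand-ins for MemoryPool / GCMemoryManager.
struct Pool { std::string name; };

struct Manager {
  std::string name;
  std::vector<Pool*> pools;
  void add_pool(Pool* p) { pools.push_back(p); }
};

int main() {
  Pool eden{"Eden"}, survivor{"Survivor"}, old_gen{"Old"};
  Manager young{"Minor GC"}, full{"Full GC"};

  // Minor collections touch only the young pools...
  young.add_pool(&eden);
  young.add_pool(&survivor);

  // ...while full collections are reported against every pool.
  full.add_pool(&eden);
  full.add_pool(&survivor);
  full.add_pool(&old_gen);
  return 0;
}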
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
index bfdc55f07f7..2eca2b12dcd 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,11 +36,14 @@
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "memory/metaspace.hpp"
+#include "utilities/growableArray.hpp"
#include "utilities/ostream.hpp"
class AdjoiningGenerations;
class GCHeapSummary;
class GCTaskManager;
+class MemoryManager;
+class MemoryPool;
class PSAdaptiveSizePolicy;
class PSHeapSummary;
@@ -64,6 +67,15 @@ class ParallelScavengeHeap : public CollectedHeap {
// The task manager
static GCTaskManager* _gc_task_manager;
+ GCMemoryManager* _young_manager;
+ GCMemoryManager* _old_manager;
+
+ MemoryPool* _eden_pool;
+ MemoryPool* _survivor_pool;
+ MemoryPool* _old_pool;
+
+ virtual void initialize_serviceability();
+
void trace_heap(GCWhen::Type when, const GCTracer* tracer);
protected:
@@ -94,6 +106,9 @@ class ParallelScavengeHeap : public CollectedHeap {
virtual CollectorPolicy* collector_policy() const { return _collector_policy; }
+ virtual GrowableArray<GCMemoryManager*> memory_managers();
+ virtual GrowableArray<MemoryPool*> memory_pools();
+
static PSYoungGen* young_gen() { return _young_gen; }
static PSOldGen* old_gen() { return _old_gen; }
@@ -244,6 +259,9 @@ class ParallelScavengeHeap : public CollectedHeap {
ParStrongRootsScope();
~ParStrongRootsScope();
};
+
+ GCMemoryManager* old_gc_manager() const { return _old_manager; }
+ GCMemoryManager* young_gc_manager() const { return _young_manager; }
};
// Simple class for storing info about the heap at the start of GC, to be used
diff --git a/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp b/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp
index d4350f7db6c..437356866df 100644
--- a/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp
+++ b/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp
@@ -29,6 +29,7 @@
#include "gc/parallel/psScavenge.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
+#include "gc/shared/gcUtil.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "logging/log.hpp"
#include "runtime/timer.hpp"
diff --git a/src/hotspot/share/gc/parallel/psGenerationCounters.cpp b/src/hotspot/share/gc/parallel/psGenerationCounters.cpp
index d200258acdf..1b0e8d320a9 100644
--- a/src/hotspot/share/gc/parallel/psGenerationCounters.cpp
+++ b/src/hotspot/share/gc/parallel/psGenerationCounters.cpp
@@ -25,9 +25,9 @@
#include "precompiled.hpp"
#include "gc/parallel/psGenerationCounters.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
-
PSGenerationCounters::PSGenerationCounters(const char* name,
int ordinal, int spaces,
size_t min_capacity,
diff --git a/src/hotspot/share/gc/parallel/psMarkSweep.cpp b/src/hotspot/share/gc/parallel/psMarkSweep.cpp
index 185cbb2f62d..2ac25afc016 100644
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp
@@ -172,7 +172,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
heap->pre_full_gc_dump(_gc_timer);
TraceCollectorStats tcs(counters());
- TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
+ TraceMemoryManagerStats tms(heap->old_gc_manager(),gc_cause);
if (log_is_enabled(Debug, gc, heap, exit)) {
accumulated_time()->start();
diff --git a/src/hotspot/share/services/psMemoryPool.cpp b/src/hotspot/share/gc/parallel/psMemoryPool.cpp
similarity index 81%
rename from src/hotspot/share/services/psMemoryPool.cpp
rename to src/hotspot/share/gc/parallel/psMemoryPool.cpp
index 8e356cea27b..92e94efdfa2 100644
--- a/src/hotspot/share/services/psMemoryPool.cpp
+++ b/src/hotspot/share/gc/parallel/psMemoryPool.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,21 +23,12 @@
*/
#include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "classfile/vmSymbols.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/javaCalls.hpp"
-#include "services/lowMemoryDetector.hpp"
-#include "services/management.hpp"
-#include "services/memoryManager.hpp"
-#include "services/psMemoryPool.hpp"
+#include "gc/parallel/psMemoryPool.hpp"
PSGenerationPool::PSGenerationPool(PSOldGen* old_gen,
const char* name,
- PoolType type,
bool support_usage_threshold) :
- CollectedMemoryPool(name, type, old_gen->capacity_in_bytes(),
+ CollectedMemoryPool(name, old_gen->capacity_in_bytes(),
old_gen->reserved().byte_size(), support_usage_threshold), _old_gen(old_gen) {
}
@@ -58,9 +49,8 @@ MemoryUsage PSGenerationPool::get_memory_usage() {
EdenMutableSpacePool::EdenMutableSpacePool(PSYoungGen* young_gen,
MutableSpace* space,
const char* name,
- PoolType type,
bool support_usage_threshold) :
- CollectedMemoryPool(name, type, space->capacity_in_bytes(),
+ CollectedMemoryPool(name, space->capacity_in_bytes(),
(young_gen->max_size() - young_gen->from_space()->capacity_in_bytes() - young_gen->to_space()->capacity_in_bytes()),
support_usage_threshold),
_young_gen(young_gen),
@@ -82,9 +72,8 @@ MemoryUsage EdenMutableSpacePool::get_memory_usage() {
//
SurvivorMutableSpacePool::SurvivorMutableSpacePool(PSYoungGen* young_gen,
const char* name,
- PoolType type,
bool support_usage_threshold) :
- CollectedMemoryPool(name, type, young_gen->from_space()->capacity_in_bytes(),
+ CollectedMemoryPool(name, young_gen->from_space()->capacity_in_bytes(),
young_gen->from_space()->capacity_in_bytes(),
support_usage_threshold), _young_gen(young_gen) {
}
diff --git a/src/hotspot/share/services/psMemoryPool.hpp b/src/hotspot/share/gc/parallel/psMemoryPool.hpp
similarity index 82%
rename from src/hotspot/share/services/psMemoryPool.hpp
rename to src/hotspot/share/gc/parallel/psMemoryPool.hpp
index f1a900caa0f..522799d8336 100644
--- a/src/hotspot/share/services/psMemoryPool.hpp
+++ b/src/hotspot/share/gc/parallel/psMemoryPool.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,28 +25,22 @@
#ifndef SHARE_VM_SERVICES_PSMEMORYPOOL_HPP
#define SHARE_VM_SERVICES_PSMEMORYPOOL_HPP
-#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
#include "gc/parallel/mutableSpace.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psYoungGen.hpp"
-#include "gc/serial/defNewGeneration.hpp"
-#include "gc/shared/space.hpp"
-#include "memory/heap.hpp"
#include "services/memoryPool.hpp"
#include "services/memoryUsage.hpp"
-#endif // INCLUDE_ALL_GCS
class PSGenerationPool : public CollectedMemoryPool {
private:
PSOldGen* _old_gen;
public:
- PSGenerationPool(PSOldGen* pool, const char* name, PoolType type, bool support_usage_threshold);
+ PSGenerationPool(PSOldGen* pool, const char* name, bool support_usage_threshold);
MemoryUsage get_memory_usage();
- size_t used_in_bytes() { return _old_gen->used_in_bytes(); }
- size_t max_size() const { return _old_gen->reserved().byte_size(); }
+ size_t used_in_bytes() { return _old_gen->used_in_bytes(); }
+ size_t max_size() const { return _old_gen->reserved().byte_size(); }
};
class EdenMutableSpacePool : public CollectedMemoryPool {
@@ -58,7 +52,6 @@ public:
EdenMutableSpacePool(PSYoungGen* young_gen,
MutableSpace* space,
const char* name,
- PoolType type,
bool support_usage_threshold);
MutableSpace* space() { return _space; }
@@ -77,7 +70,6 @@ private:
public:
SurvivorMutableSpacePool(PSYoungGen* young_gen,
const char* name,
- PoolType type,
bool support_usage_threshold);
MemoryUsage get_memory_usage();
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
index c46f0d41aa0..be23ea77681 100644
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -1772,7 +1772,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
heap->pre_full_gc_dump(&_gc_timer);
TraceCollectorStats tcs(counters());
- TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
+ TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause);
if (log_is_enabled(Debug, gc, heap, exit)) {
accumulated_time()->start();
diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp
index 64c0b7486ab..5bcb21bf9ff 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp
@@ -305,7 +305,7 @@ bool PSScavenge::invoke_no_policy() {
GCTraceCPUTime tcpu;
GCTraceTime(Info, gc) tm("Pause Young", NULL, gc_cause, true);
TraceCollectorStats tcs(counters());
- TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);
+ TraceMemoryManagerStats tms(heap->young_gc_manager(), gc_cause);
if (log_is_enabled(Debug, gc, heap, exit)) {
accumulated_time()->start();
diff --git a/src/hotspot/share/gc/parallel/spaceCounters.cpp b/src/hotspot/share/gc/parallel/spaceCounters.cpp
index 36cb8099523..3a7231d8b25 100644
--- a/src/hotspot/share/gc/parallel/spaceCounters.cpp
+++ b/src/hotspot/share/gc/parallel/spaceCounters.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/parallel/spaceCounters.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "utilities/macros.hpp"
@@ -63,3 +64,7 @@ SpaceCounters::SpaceCounters(const char* name, int ordinal, size_t max_size,
_object_space->capacity_in_bytes(), CHECK);
}
}
+
+SpaceCounters::~SpaceCounters() {
+ if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+}
diff --git a/src/hotspot/share/gc/parallel/spaceCounters.hpp b/src/hotspot/share/gc/parallel/spaceCounters.hpp
index 38e6542085f..e2cf1621b3d 100644
--- a/src/hotspot/share/gc/parallel/spaceCounters.hpp
+++ b/src/hotspot/share/gc/parallel/spaceCounters.hpp
@@ -53,9 +53,7 @@ class SpaceCounters: public CHeapObj<mtGC> {
SpaceCounters(const char* name, int ordinal, size_t max_size,
MutableSpace* m, GenerationCounters* gc);
- ~SpaceCounters() {
- if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
- }
+ ~SpaceCounters();
inline void update_capacity() {
_capacity->set_value(_object_space->capacity_in_bytes());
diff --git a/src/hotspot/share/gc/serial/cSpaceCounters.cpp b/src/hotspot/share/gc/serial/cSpaceCounters.cpp
index 84e9c96bd4e..076902d1dca 100644
--- a/src/hotspot/share/gc/serial/cSpaceCounters.cpp
+++ b/src/hotspot/share/gc/serial/cSpaceCounters.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/serial/cSpaceCounters.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
@@ -64,6 +65,10 @@ CSpaceCounters::CSpaceCounters(const char* name, int ordinal, size_t max_size,
}
}
+CSpaceCounters::~CSpaceCounters() {
+ if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+}
+
void CSpaceCounters::update_capacity() {
_capacity->set_value(_space->capacity());
}
diff --git a/src/hotspot/share/gc/serial/cSpaceCounters.hpp b/src/hotspot/share/gc/serial/cSpaceCounters.hpp
index ffe43ab14a2..da0c4cfbf31 100644
--- a/src/hotspot/share/gc/serial/cSpaceCounters.hpp
+++ b/src/hotspot/share/gc/serial/cSpaceCounters.hpp
@@ -52,9 +52,7 @@ class CSpaceCounters: public CHeapObj<mtGC> {
CSpaceCounters(const char* name, int ordinal, size_t max_size,
ContiguousSpace* s, GenerationCounters* gc);
- ~CSpaceCounters() {
- if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
- }
+ ~CSpaceCounters();
virtual void update_capacity();
virtual void update_used();
diff --git a/src/hotspot/share/gc/serial/serialHeap.cpp b/src/hotspot/share/gc/serial/serialHeap.cpp
index c4c17a8a386..c395e11fd27 100644
--- a/src/hotspot/share/gc/serial/serialHeap.cpp
+++ b/src/hotspot/share/gc/serial/serialHeap.cpp
@@ -23,9 +23,44 @@
*/
#include "precompiled.hpp"
+#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/serialHeap.hpp"
+#include "gc/shared/genMemoryPools.hpp"
+#include "services/memoryManager.hpp"
-SerialHeap::SerialHeap(GenCollectorPolicy* policy) : GenCollectedHeap(policy) {}
+SerialHeap::SerialHeap(GenCollectorPolicy* policy) :
+ GenCollectedHeap(policy), _eden_pool(NULL), _survivor_pool(NULL), _old_pool(NULL) {
+ _young_manager = new GCMemoryManager("Copy", "end of minor GC");
+ _old_manager = new GCMemoryManager("MarkSweepCompact", "end of major GC");
+}
+
+void SerialHeap::initialize_serviceability() {
+
+ DefNewGeneration* young = (DefNewGeneration*) young_gen();
+
+ // Add a memory pool for each space and young gen doesn't
+ // support low memory detection as it is expected to get filled up.
+ _eden_pool = new ContiguousSpacePool(young->eden(),
+ "Eden Space",
+ young->max_eden_size(),
+ false /* support_usage_threshold */);
+ _survivor_pool = new SurvivorContiguousSpacePool(young,
+ "Survivor Space",
+ young->max_survivor_size(),
+ false /* support_usage_threshold */);
+ Generation* old = old_gen();
+ _old_pool = new GenerationPool(old, "Tenured Gen", true);
+
+ _young_manager->add_pool(_eden_pool);
+ _young_manager->add_pool(_survivor_pool);
+ young->set_gc_manager(_young_manager);
+
+ _old_manager->add_pool(_eden_pool);
+ _old_manager->add_pool(_survivor_pool);
+ _old_manager->add_pool(_old_pool);
+ old->set_gc_manager(_old_manager);
+
+}
void SerialHeap::check_gen_kinds() {
assert(young_gen()->kind() == Generation::DefNew,
@@ -33,3 +68,18 @@ void SerialHeap::check_gen_kinds() {
assert(old_gen()->kind() == Generation::MarkSweepCompact,
"Wrong generation kind");
}
+
+GrowableArray<GCMemoryManager*> SerialHeap::memory_managers() {
+ GrowableArray<GCMemoryManager*> memory_managers(2);
+ memory_managers.append(_young_manager);
+ memory_managers.append(_old_manager);
+ return memory_managers;
+}
+
+GrowableArray<MemoryPool*> SerialHeap::memory_pools() {
+ GrowableArray<MemoryPool*> memory_pools(3);
+ memory_pools.append(_eden_pool);
+ memory_pools.append(_survivor_pool);
+ memory_pools.append(_old_pool);
+ return memory_pools;
+}
diff --git a/src/hotspot/share/gc/serial/serialHeap.hpp b/src/hotspot/share/gc/serial/serialHeap.hpp
index 8b96006638a..8df86f2e4bf 100644
--- a/src/hotspot/share/gc/serial/serialHeap.hpp
+++ b/src/hotspot/share/gc/serial/serialHeap.hpp
@@ -26,10 +26,20 @@
#define SHARE_VM_GC_SERIAL_SERIALHEAP_HPP
#include "gc/shared/genCollectedHeap.hpp"
+#include "utilities/growableArray.hpp"
class GenCollectorPolicy;
+class GCMemoryManager;
+class MemoryPool;
class SerialHeap : public GenCollectedHeap {
+private:
+ MemoryPool* _eden_pool;
+ MemoryPool* _survivor_pool;
+ MemoryPool* _old_pool;
+
+ virtual void initialize_serviceability();
+
protected:
virtual void check_gen_kinds();
@@ -44,6 +54,9 @@ public:
return "Serial";
}
+ virtual GrowableArray<GCMemoryManager*> memory_managers();
+ virtual GrowableArray<MemoryPool*> memory_pools();
+
// override
virtual bool is_in_closed_subset(const void* p) const {
return is_in(p);
@@ -52,7 +65,6 @@ public:
virtual bool card_mark_must_follow_store() const {
return false;
}
-
};
#endif // SHARE_VM_GC_CMS_CMSHEAP_HPP
diff --git a/src/hotspot/share/gc/shared/adaptiveSizePolicy.cpp b/src/hotspot/share/gc/shared/adaptiveSizePolicy.cpp
index 9aa3f79c08d..830835f19dc 100644
--- a/src/hotspot/share/gc/shared/adaptiveSizePolicy.cpp
+++ b/src/hotspot/share/gc/shared/adaptiveSizePolicy.cpp
@@ -26,6 +26,7 @@
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
+#include "gc/shared/gcUtil.inline.hpp"
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
#include "runtime/timer.hpp"
diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp
index b2a0dc53ceb..a32fd69a3fb 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp
@@ -40,6 +40,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
@@ -540,10 +541,11 @@ void CollectedHeap::ensure_parsability(bool retire_tlabs) {
const bool deferred = _defer_initial_card_mark;
// The main thread starts allocating via a TLAB even before it
// has added itself to the threads list at vm boot-up.
- assert(!use_tlab || Threads::first() != NULL,
+ JavaThreadIteratorWithHandle jtiwh;
+ assert(!use_tlab || jtiwh.length() > 0,
"Attempt to fill tlabs before main thread has been added"
" to threads list is doomed to failure!");
- for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
+ for (; JavaThread *thread = jtiwh.next(); ) {
if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#if COMPILER2_OR_JVMCI
// The deferred store barriers must all have been flushed to the
@@ -609,3 +611,7 @@ void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
_reserved.set_start(start);
_reserved.set_end(end);
}
+
+void CollectedHeap::post_initialize() {
+ initialize_serviceability();
+}
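The new CollectedHeap::post_initialize() gives the serviceability changes their shape: the base class owns a concrete post_initialize() that calls a pure virtual initialize_serviceability() hook, and each concrete heap chains to the base from its own override (as the ParallelScavengeHeap and GenCollectedHeap hunks do). A generic sketch of that hook pattern, with toy names:

#include <cstdio>

// Base class owns the non-pure post_initialize() and delegates to a pure
// virtual hook; the subclass overrides the hook and chains its own
// post_initialize() back to the base, mirroring the hunks above.
class Heap {
public:
  virtual ~Heap() {}
  virtual void post_initialize() { initialize_serviceability(); }
private:
  virtual void initialize_serviceability() = 0;
};

class ToyHeap : public Heap {
public:
  void post_initialize() override {
    Heap::post_initialize();            // like CollectedHeap::post_initialize()
    printf("collector-specific setup\n");
  }
private:
  void initialize_serviceability() override {
    printf("create pools and managers\n");
  }
};

int main() {
  ToyHeap h;
  h.post_initialize();
  return 0;
}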
diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp
index 197eaf9b438..7be834d815c 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp
@@ -34,6 +34,7 @@
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
+#include "utilities/growableArray.hpp"
// A "CollectedHeap" is an implementation of a java heap for HotSpot. This
// is an abstract class: there may be many different kinds of heaps. This
@@ -46,6 +47,8 @@ class CollectorPolicy;
class GCHeapSummary;
class GCTimer;
class GCTracer;
+class GCMemoryManager;
+class MemoryPool;
class MetaspaceSummary;
class Thread;
class ThreadClosure;
@@ -217,7 +220,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// In many heaps, there will be a need to perform some initialization activities
// after the Universe is fully formed, but before general heap allocation is allowed.
// This is the correct place to place such initialization methods.
- virtual void post_initialize() = 0;
+ virtual void post_initialize();
// Stop any onging concurrent work and prepare for exit.
virtual void stop() {}
@@ -485,6 +488,9 @@ class CollectedHeap : public CHeapObj {
// Return the CollectorPolicy for the heap
virtual CollectorPolicy* collector_policy() const = 0;
+ virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
+ virtual GrowableArray<MemoryPool*> memory_pools() = 0;
+
// Iterate over all objects, calling "cl.do_object" on each.
virtual void object_iterate(ObjectClosure* cl) = 0;
@@ -529,6 +535,9 @@ class CollectedHeap : public CHeapObj {
// Generate any dumps preceding or following a full gc
private:
void full_gc_dump(GCTimer* timer, bool before);
+
+ virtual void initialize_serviceability() = 0;
+
public:
void pre_full_gc_dump(GCTimer* timer);
void post_full_gc_dump(GCTimer* timer);
diff --git a/src/hotspot/share/gc/shared/collectorCounters.cpp b/src/hotspot/share/gc/shared/collectorCounters.cpp
index b204a02e272..2a2e60f99b3 100644
--- a/src/hotspot/share/gc/shared/collectorCounters.cpp
+++ b/src/hotspot/share/gc/shared/collectorCounters.cpp
@@ -24,7 +24,9 @@
#include "precompiled.hpp"
#include "gc/shared/collectorCounters.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
+#include "runtime/os.hpp"
CollectorCounters::CollectorCounters(const char* name, int ordinal) {
@@ -59,3 +61,24 @@ CollectorCounters::CollectorCounters(const char* name, int ordinal) {
CHECK);
}
}
+
+CollectorCounters::~CollectorCounters() {
+ if (_name_space != NULL) {
+ FREE_C_HEAP_ARRAY(char, _name_space);
+ }
+}
+
+TraceCollectorStats::TraceCollectorStats(CollectorCounters* c) :
+ PerfTraceTimedEvent(c->time_counter(), c->invocation_counter()),
+ _c(c) {
+
+ if (UsePerfData) {
+ _c->last_entry_counter()->set_value(os::elapsed_counter());
+ }
+}
+
+TraceCollectorStats::~TraceCollectorStats() {
+ if (UsePerfData) {
+ _c->last_exit_counter()->set_value(os::elapsed_counter());
+ }
+}
diff --git a/src/hotspot/share/gc/shared/collectorCounters.hpp b/src/hotspot/share/gc/shared/collectorCounters.hpp
index 554d749e0cc..e9b272f03a9 100644
--- a/src/hotspot/share/gc/shared/collectorCounters.hpp
+++ b/src/hotspot/share/gc/shared/collectorCounters.hpp
@@ -49,9 +49,7 @@ class CollectorCounters: public CHeapObj<mtGC> {
CollectorCounters(const char* name, int ordinal);
- ~CollectorCounters() {
- if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
- }
+ ~CollectorCounters();
inline PerfCounter* invocation_counter() const { return _invocations; }
@@ -70,18 +68,9 @@ class TraceCollectorStats: public PerfTraceTimedEvent {
CollectorCounters* _c;
public:
- inline TraceCollectorStats(CollectorCounters* c) :
- PerfTraceTimedEvent(c->time_counter(), c->invocation_counter()),
- _c(c) {
+ TraceCollectorStats(CollectorCounters* c);
- if (UsePerfData) {
- _c->last_entry_counter()->set_value(os::elapsed_counter());
- }
- }
-
- inline ~TraceCollectorStats() {
- if (UsePerfData) _c->last_exit_counter()->set_value(os::elapsed_counter());
- }
+ ~TraceCollectorStats();
};
#endif // SHARE_VM_GC_SHARED_COLLECTORCOUNTERS_HPP
diff --git a/src/hotspot/share/gc/shared/collectorPolicy.cpp b/src/hotspot/share/gc/shared/collectorPolicy.cpp
index 2d21024a3c7..7542662ee76 100644
--- a/src/hotspot/share/gc/shared/collectorPolicy.cpp
+++ b/src/hotspot/share/gc/shared/collectorPolicy.cpp
@@ -911,7 +911,7 @@ void MarkSweepPolicy::initialize_generations() {
}
void MarkSweepPolicy::initialize_gc_policy_counters() {
- // Initialize the policy counters - 2 collectors, 3 generations.
- _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
+ // Initialize the policy counters - 2 collectors, 2 generations.
+ _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 2);
}
diff --git a/src/hotspot/share/gc/shared/gcArguments.cpp b/src/hotspot/share/gc/shared/gcArguments.cpp
index 90432c5f48b..8162faf0c67 100644
--- a/src/hotspot/share/gc/shared/gcArguments.cpp
+++ b/src/hotspot/share/gc/shared/gcArguments.cpp
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/serial/serialArguments.hpp"
+#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
@@ -84,6 +85,12 @@ void GCArguments::select_gc_ergonomically() {
#endif // INCLUDE_ALL_GCS
}
+bool GCArguments::parse_verification_type(const char* type) {
+ log_warning(gc, verify)("VerifyGCType is not supported by this collector.");
+ // Return false to avoid multiple warnings.
+ return false;
+}
+
void GCArguments::initialize_flags() {
#if INCLUDE_ALL_GCS
if (MinHeapFreeRatio == 100) {
@@ -99,6 +106,24 @@ void GCArguments::initialize_flags() {
#endif // INCLUDE_ALL_GCS
}
+void GCArguments::post_heap_initialize() {
+ if (strlen(VerifyGCType) > 0) {
+ const char delimiter[] = " ,\n";
+ size_t length = strlen(VerifyGCType);
+ char* type_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
+ strncpy(type_list, VerifyGCType, length + 1);
+ char* token = strtok(type_list, delimiter);
+ while (token != NULL) {
+ bool success = parse_verification_type(token);
+ if (!success) {
+ break;
+ }
+ token = strtok(NULL, delimiter);
+ }
+ FREE_C_HEAP_ARRAY(char, type_list);
+ }
+}
+
jint GCArguments::initialize() {
assert(!is_initialized(), "GC arguments already initialized");
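post_heap_initialize() above splits VerifyGCType on spaces, commas and newlines and feeds each token to the collector-specific parse_verification_type(), stopping at the first failure. A standalone sketch of just that tokenizing loop (parse_one is a stand-in for the virtual call, and plain malloc replaces the C-heap array macros):

#include <cstdio>
#include <cstdlib>
#include <cstring>

static bool parse_one(const char* token) {
  printf("enable verification for: %s\n", token);
  return true;  // return false to stop parsing, as the default GCArguments does
}

int main() {
  const char* VerifyGCType = "young-only, remark\nfull";
  const char delimiter[] = " ,\n";
  size_t length = strlen(VerifyGCType);
  char* type_list = (char*)malloc(length + 1);
  strncpy(type_list, VerifyGCType, length + 1);
  for (char* token = strtok(type_list, delimiter); token != NULL;
       token = strtok(NULL, delimiter)) {
    if (!parse_one(token)) {
      break;
    }
  }
  free(type_list);
  return 0;
}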
diff --git a/src/hotspot/share/gc/shared/gcArguments.hpp b/src/hotspot/share/gc/shared/gcArguments.hpp
index b04c12d4a85..3a3f89be136 100644
--- a/src/hotspot/share/gc/shared/gcArguments.hpp
+++ b/src/hotspot/share/gc/shared/gcArguments.hpp
@@ -46,8 +46,16 @@ public:
static bool is_initialized();
static GCArguments* arguments();
+ void post_heap_initialize();
+
virtual void initialize_flags();
+ // Collector specific function to allow finer grained verification
+ // through VerifyGCType. If not overridden the default version will
+ // warn that the flag is not supported for the given collector.
+ // Returns true if parsing should continue, false otherwise.
+ virtual bool parse_verification_type(const char* type);
+
virtual size_t conservative_max_heap_alignment() = 0;
virtual CollectedHeap* create_heap() = 0;
diff --git a/src/hotspot/share/gc/shared/gcLocker.cpp b/src/hotspot/share/gc/shared/gcLocker.cpp
index 4b72cd80bfd..644d91116ba 100644
--- a/src/hotspot/share/gc/shared/gcLocker.cpp
+++ b/src/hotspot/share/gc/shared/gcLocker.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
volatile jint GCLocker::_jni_lock_count = 0;
volatile bool GCLocker::_needs_gc = false;
@@ -45,14 +46,16 @@ void GCLocker::verify_critical_count() {
assert(!needs_gc() || _debug_jni_lock_count == _jni_lock_count, "must agree");
int count = 0;
// Count the number of threads with critical operations in progress
- for (JavaThread* thr = Threads::first(); thr; thr = thr->next()) {
+ JavaThreadIteratorWithHandle jtiwh;
+ for (; JavaThread *thr = jtiwh.next(); ) {
if (thr->in_critical()) {
count++;
}
}
if (_jni_lock_count != count) {
log_error(gc, verify)("critical counts don't match: %d != %d", _jni_lock_count, count);
- for (JavaThread* thr = Threads::first(); thr; thr = thr->next()) {
+ jtiwh.rewind();
+ for (; JavaThread *thr = jtiwh.next(); ) {
if (thr->in_critical()) {
log_error(gc, verify)(INTPTR_FORMAT " in_critical %d", p2i(thr), thr->in_critical());
}
diff --git a/src/hotspot/share/gc/shared/gcStats.cpp b/src/hotspot/share/gc/shared/gcStats.cpp
index 3a050f73066..14e63ea108f 100644
--- a/src/hotspot/share/gc/shared/gcStats.cpp
+++ b/src/hotspot/share/gc/shared/gcStats.cpp
@@ -24,8 +24,7 @@
#include "precompiled.hpp"
#include "gc/shared/gcStats.hpp"
-#include "gc/shared/gcUtil.hpp"
-#include "memory/allocation.inline.hpp"
+#include "gc/shared/gcUtil.inline.hpp"
GCStats::GCStats() {
_avg_promoted = new AdaptivePaddedNoZeroDevAverage(
diff --git a/src/hotspot/share/gc/shared/gcUtil.hpp b/src/hotspot/share/gc/shared/gcUtil.hpp
index 6b2f929bb5d..1fafbb51a00 100644
--- a/src/hotspot/share/gc/shared/gcUtil.hpp
+++ b/src/hotspot/share/gc/shared/gcUtil.hpp
@@ -146,7 +146,7 @@ class AdaptivePaddedAverage : public AdaptiveWeightedAverage {
// Placement support
void* operator new(size_t ignored, void* p) throw() { return p; }
// Allocator
- void* operator new(size_t size) throw() { return CHeapObj<mtGC>::operator new(size); }
+ void* operator new(size_t size) throw();
// Accessor
float padded_average() const { return _padded_avg; }
diff --git a/src/hotspot/share/gc/shared/gcUtil.inline.hpp b/src/hotspot/share/gc/shared/gcUtil.inline.hpp
new file mode 100644
index 00000000000..f03480becde
--- /dev/null
+++ b/src/hotspot/share/gc/shared/gcUtil.inline.hpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_GCUTIL_INLINE_HPP
+#define SHARE_VM_GC_SHARED_GCUTIL_INLINE_HPP
+
+#include "gc/shared/gcUtil.hpp"
+#include "memory/allocation.inline.hpp"
+
+inline void* AdaptivePaddedAverage::operator new(size_t size) throw() {
+ return CHeapObj<mtGC>::operator new(size);
+}
+
+#endif // SHARE_VM_GC_SHARED_GCUTIL_INLINE_HPP
diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
index 5b21eb6a294..ae67feca465 100644
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
@@ -143,6 +143,7 @@ char* GenCollectedHeap::allocate(size_t alignment,
}
void GenCollectedHeap::post_initialize() {
+ CollectedHeap::post_initialize();
ref_processing_init();
check_gen_kinds();
DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
@@ -270,7 +271,7 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz
FormatBuffer<> title("Collect gen: %s", gen->short_name());
GCTraceTime(Trace, gc, phases) t1(title);
TraceCollectorStats tcs(gen->counters());
- TraceMemoryManagerStats tmms(gen->kind(),gc_cause());
+ TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause());
gen->stat_record()->invocations++;
gen->stat_record()->accumulated_time.start();
diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.hpp b/src/hotspot/share/gc/shared/genCollectedHeap.hpp
index 5eb4f749d14..e0a91302b0e 100644
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp
@@ -112,6 +112,9 @@ protected:
// (gen-specific) roots processing.
SubTasksDone* _process_strong_tasks;
+ GCMemoryManager* _young_manager;
+ GCMemoryManager* _old_manager;
+
// Helper functions for allocation
HeapWord* attempt_allocation(size_t size,
bool is_tlab,
diff --git a/src/hotspot/share/gc/shared/genMemoryPools.cpp b/src/hotspot/share/gc/shared/genMemoryPools.cpp
new file mode 100644
index 00000000000..292f2197c32
--- /dev/null
+++ b/src/hotspot/share/gc/shared/genMemoryPools.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/serial/defNewGeneration.hpp"
+#include "gc/shared/generation.hpp"
+#include "gc/shared/genMemoryPools.hpp"
+#include "gc/shared/space.hpp"
+
+ContiguousSpacePool::ContiguousSpacePool(ContiguousSpace* space,
+ const char* name,
+ size_t max_size,
+ bool support_usage_threshold) :
+ CollectedMemoryPool(name, space->capacity(), max_size,
+ support_usage_threshold), _space(space) {
+}
+
+size_t ContiguousSpacePool::used_in_bytes() {
+ return space()->used();
+}
+
+MemoryUsage ContiguousSpacePool::get_memory_usage() {
+ size_t maxSize = (available_for_allocation() ? max_size() : 0);
+ size_t used = used_in_bytes();
+ size_t committed = _space->capacity();
+
+ return MemoryUsage(initial_size(), used, committed, maxSize);
+}
+
+SurvivorContiguousSpacePool::SurvivorContiguousSpacePool(DefNewGeneration* young_gen,
+ const char* name,
+ size_t max_size,
+ bool support_usage_threshold) :
+ CollectedMemoryPool(name, young_gen->from()->capacity(), max_size,
+ support_usage_threshold), _young_gen(young_gen) {
+}
+
+size_t SurvivorContiguousSpacePool::used_in_bytes() {
+ return _young_gen->from()->used();
+}
+
+size_t SurvivorContiguousSpacePool::committed_in_bytes() {
+ return _young_gen->from()->capacity();
+}
+
+MemoryUsage SurvivorContiguousSpacePool::get_memory_usage() {
+ size_t maxSize = (available_for_allocation() ? max_size() : 0);
+ size_t used = used_in_bytes();
+ size_t committed = committed_in_bytes();
+
+ return MemoryUsage(initial_size(), used, committed, maxSize);
+}
+
+GenerationPool::GenerationPool(Generation* gen,
+ const char* name,
+ bool support_usage_threshold) :
+ CollectedMemoryPool(name, gen->capacity(), gen->max_capacity(),
+ support_usage_threshold), _gen(gen) {
+}
+
+size_t GenerationPool::used_in_bytes() {
+ return _gen->used();
+}
+
+MemoryUsage GenerationPool::get_memory_usage() {
+ size_t used = used_in_bytes();
+ size_t committed = _gen->capacity();
+ size_t maxSize = (available_for_allocation() ? max_size() : 0);
+
+ return MemoryUsage(initial_size(), used, committed, maxSize);
+}
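As a rough illustration (simplified types, not the HotSpot classes), all three pools above build their snapshot the same way: the maximum size is only reported while the pool is available for allocation, otherwise it is reported as zero.

#include <cstddef>

// Minimal stand-in for the MemoryUsage value the pools above return.
struct MemoryUsageSketch {
  size_t init_size, used, committed, max_size;
};

// Mirrors the get_memory_usage() pattern above.
MemoryUsageSketch make_usage(size_t init_size, size_t used, size_t committed,
                             size_t max_size, bool available_for_allocation) {
  return { init_size, used, committed, available_for_allocation ? max_size : 0 };
}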
diff --git a/src/hotspot/share/gc/shared/genMemoryPools.hpp b/src/hotspot/share/gc/shared/genMemoryPools.hpp
new file mode 100644
index 00000000000..93ee91bf959
--- /dev/null
+++ b/src/hotspot/share/gc/shared/genMemoryPools.hpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_GENMEMORYPOOLS_HPP
+#define SHARE_VM_GC_SHARED_GENMEMORYPOOLS_HPP
+
+#include "services/memoryPool.hpp"
+
+class ContiguousSpace;
+class DefNewGeneration;
+class Generation;
+
+class ContiguousSpacePool : public CollectedMemoryPool {
+private:
+ ContiguousSpace* _space;
+
+public:
+ ContiguousSpacePool(ContiguousSpace* space,
+ const char* name,
+ size_t max_size,
+ bool support_usage_threshold);
+
+ ContiguousSpace* space() { return _space; }
+ MemoryUsage get_memory_usage();
+ size_t used_in_bytes();
+};
+
+class SurvivorContiguousSpacePool : public CollectedMemoryPool {
+private:
+ DefNewGeneration* _young_gen;
+
+public:
+ SurvivorContiguousSpacePool(DefNewGeneration* young_gen,
+ const char* name,
+ size_t max_size,
+ bool support_usage_threshold);
+
+ MemoryUsage get_memory_usage();
+
+ size_t used_in_bytes();
+ size_t committed_in_bytes();
+};
+
+class GenerationPool : public CollectedMemoryPool {
+private:
+ Generation* _gen;
+public:
+ GenerationPool(Generation* gen, const char* name, bool support_usage_threshold);
+
+ MemoryUsage get_memory_usage();
+ size_t used_in_bytes();
+};
+
+#endif // SHARE_VM_GC_SHARED_GENMEMORYPOOLS_HPP
diff --git a/src/hotspot/share/gc/shared/generation.cpp b/src/hotspot/share/gc/shared/generation.cpp
index 68c92230729..fbdb2f45b3f 100644
--- a/src/hotspot/share/gc/shared/generation.cpp
+++ b/src/hotspot/share/gc/shared/generation.cpp
@@ -44,7 +44,8 @@
#include "utilities/events.hpp"
Generation::Generation(ReservedSpace rs, size_t initial_size) :
- _ref_processor(NULL) {
+ _ref_processor(NULL),
+ _gc_manager(NULL) {
if (!_virtual_space.initialize(rs, initial_size)) {
vm_exit_during_initialization("Could not reserve enough space for "
"object heap");
diff --git a/src/hotspot/share/gc/shared/generation.hpp b/src/hotspot/share/gc/shared/generation.hpp
index 00d17a22a33..ea8f8d8b8ee 100644
--- a/src/hotspot/share/gc/shared/generation.hpp
+++ b/src/hotspot/share/gc/shared/generation.hpp
@@ -58,6 +58,7 @@
//
class DefNewGeneration;
+class GCMemoryManager;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
@@ -86,6 +87,8 @@ class Generation: public CHeapObj {
MemRegion _prev_used_region; // for collectors that want to "remember" a value for
// used region at some specific point during collection.
+ GCMemoryManager* _gc_manager;
+
protected:
// Minimum and maximum addresses for memory reserved (not necessarily
// committed) for generation.
@@ -554,6 +557,16 @@ public:
// Performance Counter support
virtual void update_counters() = 0;
virtual CollectorCounters* counters() { return _gc_counters; }
+
+ GCMemoryManager* gc_manager() const {
+ assert(_gc_manager != NULL, "not initialized yet");
+ return _gc_manager;
+ }
+
+ void set_gc_manager(GCMemoryManager* gc_manager) {
+ _gc_manager = gc_manager;
+ }
+
};
#endif // SHARE_VM_GC_SHARED_GENERATION_HPP
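A minimal sketch (hypothetical names, simplified types) of the contract the new accessor pair enforces: some owner has to call set_gc_manager() during initialization before gc_manager() is first queried, otherwise the assert fires.

#include <cassert>

struct GCMemoryManagerSketch {};   // stand-in for GCMemoryManager

class GenerationSketch {
  GCMemoryManagerSketch* _gc_manager;
 public:
  GenerationSketch() : _gc_manager(nullptr) {}
  void set_gc_manager(GCMemoryManagerSketch* m) { _gc_manager = m; }
  GCMemoryManagerSketch* gc_manager() const {
    assert(_gc_manager != nullptr && "not initialized yet");
    return _gc_manager;
  }
};

// Hypothetical wiring: the owning heap creates the manager while it initializes
// and hands it to the generation before any collection queries it.
void wire_up(GenerationSketch& young_gen, GCMemoryManagerSketch* young_manager) {
  young_gen.set_gc_manager(young_manager);
}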
diff --git a/src/hotspot/share/gc/shared/generationCounters.cpp b/src/hotspot/share/gc/shared/generationCounters.cpp
index 134d28765e0..1efa58c4790 100644
--- a/src/hotspot/share/gc/shared/generationCounters.cpp
+++ b/src/hotspot/share/gc/shared/generationCounters.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/shared/generationCounters.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
void GenerationCounters::initialize(const char* name, int ordinal, int spaces,
@@ -78,6 +79,12 @@ GenerationCounters::GenerationCounters(const char* name,
initialize(name, ordinal, spaces, min_capacity, max_capacity, curr_capacity);
}
+GenerationCounters::~GenerationCounters() {
+ if (_name_space != NULL) {
+ FREE_C_HEAP_ARRAY(char, _name_space);
+ }
+}
+
void GenerationCounters::update_all() {
assert(_virtual_space != NULL, "otherwise, override this method");
_current_size->set_value(_virtual_space->committed_size());
diff --git a/src/hotspot/share/gc/shared/generationCounters.hpp b/src/hotspot/share/gc/shared/generationCounters.hpp
index 2b53549d575..71c42bb318c 100644
--- a/src/hotspot/share/gc/shared/generationCounters.hpp
+++ b/src/hotspot/share/gc/shared/generationCounters.hpp
@@ -68,9 +68,7 @@ private:
GenerationCounters(const char* name, int ordinal, int spaces,
size_t min_capacity, size_t max_capacity, VirtualSpace* v);
- ~GenerationCounters() {
- if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
- }
+ ~GenerationCounters();
virtual void update_all();
diff --git a/src/hotspot/share/gc/g1/hSpaceCounters.cpp b/src/hotspot/share/gc/shared/hSpaceCounters.cpp
similarity index 69%
rename from src/hotspot/share/gc/g1/hSpaceCounters.cpp
rename to src/hotspot/share/gc/shared/hSpaceCounters.cpp
index 7a5afddfedf..351c5cd920f 100644
--- a/src/hotspot/share/gc/g1/hSpaceCounters.cpp
+++ b/src/hotspot/share/gc/shared/hSpaceCounters.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,22 +23,23 @@
*/
#include "precompiled.hpp"
-#include "gc/g1/hSpaceCounters.hpp"
-#include "gc/shared/generation.hpp"
+#include "gc/shared/hSpaceCounters.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
+#include "runtime/perfData.hpp"
-HSpaceCounters::HSpaceCounters(const char* name,
+HSpaceCounters::HSpaceCounters(const char* name_space,
+ const char* name,
int ordinal,
size_t max_size,
- size_t initial_capacity,
- GenerationCounters* gc) {
+ size_t initial_capacity) {
if (UsePerfData) {
EXCEPTION_MARK;
ResourceMark rm;
const char* cns =
- PerfDataManager::name_space(gc->name_space(), "space", ordinal);
+ PerfDataManager::name_space(name_space, "space", ordinal);
_name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC);
strcpy(_name_space, cns);
@@ -64,3 +65,33 @@ HSpaceCounters::HSpaceCounters(const char* name,
initial_capacity, CHECK);
}
}
+
+HSpaceCounters::~HSpaceCounters() {
+ if (_name_space != NULL) {
+ FREE_C_HEAP_ARRAY(char, _name_space);
+ }
+}
+
+void HSpaceCounters::update_capacity(size_t v) {
+ _capacity->set_value(v);
+}
+
+void HSpaceCounters::update_used(size_t v) {
+ _used->set_value(v);
+}
+
+void HSpaceCounters::update_all(size_t capacity, size_t used) {
+ update_capacity(capacity);
+ update_used(used);
+}
+
+debug_only(
+ // for security reasons, we do not allow arbitrary reads from
+ // the counters as they may live in shared memory.
+ jlong HSpaceCounters::used() {
+ return _used->get_value();
+ }
+ jlong HSpaceCounters::capacity() {
+    return _capacity->get_value();
+ }
+)
diff --git a/src/hotspot/share/gc/g1/hSpaceCounters.hpp b/src/hotspot/share/gc/shared/hSpaceCounters.hpp
similarity index 60%
rename from src/hotspot/share/gc/g1/hSpaceCounters.hpp
rename to src/hotspot/share/gc/shared/hSpaceCounters.hpp
index fd7ed263415..36873fd8f70 100644
--- a/src/hotspot/share/gc/g1/hSpaceCounters.hpp
+++ b/src/hotspot/share/gc/shared/hSpaceCounters.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,65 +22,47 @@
*
*/
-#ifndef SHARE_VM_GC_G1_HSPACECOUNTERS_HPP
-#define SHARE_VM_GC_G1_HSPACECOUNTERS_HPP
+#ifndef SHARE_VM_GC_SHARED_HSPACECOUNTERS_HPP
+#define SHARE_VM_GC_SHARED_HSPACECOUNTERS_HPP
-#include "gc/shared/generation.hpp"
-#include "gc/shared/generationCounters.hpp"
+#include "memory/allocation.hpp"
#include "runtime/perfData.hpp"
#include "utilities/macros.hpp"
// A HSpaceCounter is a holder class for performance counters
 // that track collections (logical spaces) in a heap;
-class HeapSpaceUsedHelper;
-class G1SpaceMonitoringSupport;
-
 class HSpaceCounters: public CHeapObj<mtGC> {
friend class VMStructs;
private:
- PerfVariable* _capacity;
- PerfVariable* _used;
+ PerfVariable* _capacity;
+ PerfVariable* _used;
// Constant PerfData types don't need to retain a reference.
// However, it's a good idea to document them here.
- char* _name_space;
+ char* _name_space;
public:
- HSpaceCounters(const char* name, int ordinal, size_t max_size,
- size_t initial_capacity, GenerationCounters* gc);
+ HSpaceCounters(const char* name_space, const char* name, int ordinal,
+ size_t max_size, size_t initial_capacity);
- ~HSpaceCounters() {
- if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
- }
+ ~HSpaceCounters();
- inline void update_capacity(size_t v) {
- _capacity->set_value(v);
- }
+ void update_capacity(size_t v);
+ void update_used(size_t v);
- inline void update_used(size_t v) {
- _used->set_value(v);
- }
+ void update_all(size_t capacity, size_t used);
debug_only(
// for security reasons, we do not allow arbitrary reads from
// the counters as they may live in shared memory.
- jlong used() {
- return _used->get_value();
- }
- jlong capacity() {
- return _used->get_value();
- }
+ jlong used();
+ jlong capacity();
)
- inline void update_all(size_t capacity, size_t used) {
- update_capacity(capacity);
- update_used(used);
- }
-
const char* name_space() const { return _name_space; }
};
-#endif // SHARE_VM_GC_G1_HSPACECOUNTERS_HPP
+#endif // SHARE_VM_GC_SHARED_HSPACECOUNTERS_HPP
diff --git a/src/hotspot/share/gc/shared/taskqueue.hpp b/src/hotspot/share/gc/shared/taskqueue.hpp
index ef1e9688bd4..ca0e1a02a58 100644
--- a/src/hotspot/share/gc/shared/taskqueue.hpp
+++ b/src/hotspot/share/gc/shared/taskqueue.hpp
@@ -26,6 +26,8 @@
#define SHARE_VM_GC_SHARED_TASKQUEUE_HPP
#include "memory/allocation.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "utilities/ostream.hpp"
#include "utilities/stack.hpp"
// Simple TaskQueue stats that are collected by default in debug builds.
@@ -425,7 +427,7 @@ class ParallelTaskTerminator: public StackObj {
private:
uint _n_threads;
TaskQueueSetSuper* _queue_set;
- uint _offered_termination;
+ volatile uint _offered_termination;
#ifdef TRACESPINNING
static uint _total_yields;
diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp
index e8828abbe40..6634a0cc906 100644
--- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp
+++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp
@@ -30,6 +30,7 @@
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "utilities/copy.hpp"
// Thread-Local Edens support
@@ -48,7 +49,7 @@ void ThreadLocalAllocBuffer::clear_before_allocation() {
void ThreadLocalAllocBuffer::accumulate_statistics_before_gc() {
global_stats()->initialize();
- for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
thread->tlab().accumulate_statistics();
thread->tlab().initialize_statistics();
}
@@ -130,7 +131,7 @@ void ThreadLocalAllocBuffer::make_parsable(bool retire, bool zap) {
void ThreadLocalAllocBuffer::resize_all_tlabs() {
if (ResizeTLAB) {
- for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
thread->tlab().resize();
}
}
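The new loops above iterate over a protected snapshot of the thread list instead of walking a raw Threads::first()/next() chain. A stand-alone sketch (hypothetical names, not the Thread-SMR implementation) of the handle-owning iterator idea: the iterator keeps the snapshot alive, so no thread in it can be freed while the loop runs.

#include <cstddef>
#include <memory>
#include <vector>

struct JavaThreadSketch { /* stand-in for JavaThread */ };

class ThreadIteratorWithHandleSketch {
  std::shared_ptr<const std::vector<JavaThreadSketch*>> _snapshot;
  size_t _index;
 public:
  explicit ThreadIteratorWithHandleSketch(
      std::shared_ptr<const std::vector<JavaThreadSketch*>> snapshot)
    : _snapshot(std::move(snapshot)), _index(0) {}
  JavaThreadSketch* next() {
    return _index < _snapshot->size() ? (*_snapshot)[_index++] : nullptr;
  }
};

// Usage mirrors the loops above:
//   for (ThreadIteratorWithHandleSketch it(snapshot); JavaThreadSketch* t = it.next(); ) { ... }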
diff --git a/src/hotspot/share/gc/shared/workgroup.cpp b/src/hotspot/share/gc/shared/workgroup.cpp
index 73b3a2a55ab..2538e701fc9 100644
--- a/src/hotspot/share/gc/shared/workgroup.cpp
+++ b/src/hotspot/share/gc/shared/workgroup.cpp
@@ -261,6 +261,10 @@ WorkGang::WorkGang(const char* name,
_dispatcher(create_dispatcher())
{ }
+WorkGang::~WorkGang() {
+ delete _dispatcher;
+}
+
AbstractGangWorker* WorkGang::allocate_worker(uint worker_id) {
return new GangWorker(this, worker_id);
}
diff --git a/src/hotspot/share/gc/shared/workgroup.hpp b/src/hotspot/share/gc/shared/workgroup.hpp
index 320652a993c..c1b50f3885d 100644
--- a/src/hotspot/share/gc/shared/workgroup.hpp
+++ b/src/hotspot/share/gc/shared/workgroup.hpp
@@ -122,6 +122,8 @@ class AbstractWorkGang : public CHeapObj {
// Printing support.
const char* _name;
+ ~AbstractWorkGang() {}
+
private:
// Initialize only instance data.
const bool _are_GC_task_threads;
@@ -206,9 +208,6 @@ class WorkGang: public AbstractWorkGang {
// To get access to the GangTaskDispatcher instance.
friend class GangWorker;
- // Never deleted.
- ~WorkGang();
-
GangTaskDispatcher* const _dispatcher;
GangTaskDispatcher* dispatcher() const {
return _dispatcher;
@@ -220,6 +219,8 @@ public:
bool are_GC_task_threads,
bool are_ConcurrentGC_threads);
+ ~WorkGang();
+
// Run a task using the current active number of workers, returns when the task is done.
virtual void run_task(AbstractGangTask* task);
// Run a task with the given number of workers, returns
diff --git a/src/hotspot/share/jvmci/jvmciRuntime.cpp b/src/hotspot/share/jvmci/jvmciRuntime.cpp
index 134ea685c83..ab5e3542fa9 100644
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp
@@ -42,6 +42,7 @@
#include "runtime/interfaceSupport.hpp"
#include "runtime/reflection.hpp"
#include "runtime/sharedRuntime.hpp"
+#include "runtime/threadSMR.hpp"
#include "utilities/debug.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/macros.hpp"
@@ -598,12 +599,13 @@ JRT_ENTRY(jint, JVMCIRuntime::identity_hash_code(JavaThread* thread, oopDesc* ob
JRT_END
JRT_ENTRY(jboolean, JVMCIRuntime::thread_is_interrupted(JavaThread* thread, oopDesc* receiver, jboolean clear_interrupted))
- // Ensure that the C++ Thread and OSThread structures aren't freed before we operate.
- // This locking requires thread_in_vm which is why this method cannot be JRT_LEAF.
Handle receiverHandle(thread, receiver);
- MutexLockerEx ml(thread->threadObj() == (void*)receiver ? NULL : Threads_lock);
+ // A nested ThreadsListHandle may require the Threads_lock which
+ // requires thread_in_vm which is why this method cannot be JRT_LEAF.
+ ThreadsListHandle tlh;
+
JavaThread* receiverThread = java_lang_Thread::thread(receiverHandle());
- if (receiverThread == NULL) {
+ if (receiverThread == NULL || (EnableThreadSMRExtraValidityChecks && !tlh.includes(receiverThread))) {
// The other thread may exit during this process, which is ok so return false.
return JNI_FALSE;
} else {
diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
index 66a9d859ed0..0f4ffa27113 100644
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
@@ -317,6 +317,7 @@
\
nonstatic_field(Thread, _tlab, ThreadLocalAllocBuffer) \
nonstatic_field(Thread, _allocated_bytes, jlong) \
+ nonstatic_field(Thread, _polling_page, address) \
\
nonstatic_field(ThreadLocalAllocBuffer, _start, HeapWord*) \
nonstatic_field(ThreadLocalAllocBuffer, _top, HeapWord*) \
diff --git a/src/hotspot/share/logging/logTag.hpp b/src/hotspot/share/logging/logTag.hpp
index e53e7cb4774..1956f27a078 100644
--- a/src/hotspot/share/logging/logTag.hpp
+++ b/src/hotspot/share/logging/logTag.hpp
@@ -121,6 +121,7 @@
LOG_TAG(safepoint) \
LOG_TAG(scavenge) \
LOG_TAG(scrub) \
+ LOG_TAG(smr) \
LOG_TAG(stacktrace) \
LOG_TAG(stackwalk) \
LOG_TAG(start) \
diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp
index 5671e12c2eb..18de6beaef9 100644
--- a/src/hotspot/share/memory/metaspace.cpp
+++ b/src/hotspot/share/memory/metaspace.cpp
@@ -785,7 +785,10 @@ class SpaceManager : public CHeapObj {
Mutex* const _lock;
// Type of metadata allocated.
- Metaspace::MetadataType _mdtype;
+ const Metaspace::MetadataType _mdtype;
+
+ // Type of metaspace
+ const Metaspace::MetaspaceType _space_type;
// List of chunks in use by this SpaceManager. Allocations
// are done from the current chunk. The list is used for deallocating
@@ -796,6 +799,10 @@ class SpaceManager : public CHeapObj {
// Maximum number of small chunks to allocate to a SpaceManager
static uint const _small_chunk_limit;
+  // Maximum number of specialized chunks to allocate for anonymous
+ // metadata space to a SpaceManager
+ static uint const _anon_metadata_specialize_chunk_limit;
+
// Sum of all space in allocated chunks
size_t _allocated_blocks_words;
@@ -846,6 +853,7 @@ class SpaceManager : public CHeapObj {
public:
SpaceManager(Metaspace::MetadataType mdtype,
+ Metaspace::MetaspaceType space_type,
Mutex* lock);
~SpaceManager();
@@ -963,6 +971,7 @@ class SpaceManager : public CHeapObj {
};
uint const SpaceManager::_small_chunk_limit = 4;
+uint const SpaceManager::_anon_metadata_specialize_chunk_limit = 4;
const char* SpaceManager::_expand_lock_name =
"SpaceManager chunk allocation lock";
@@ -2400,6 +2409,20 @@ size_t SpaceManager::calc_chunk_size(size_t word_size) {
// _small_chunk_limit small chunks can be allocated.
// After that a medium chunk is preferred.
size_t chunk_word_size;
+
+ // Special case for anonymous metadata space.
+  // Anonymous metadata space is usually small, with the majority within the 1K - 2K range and
+  // rarely around 4K (64-bit JVM).
+  // Instead of jumping to SmallChunk once the initial chunk is exhausted, keeping allocations
+  // in SpecializedChunks up to _anon_metadata_specialize_chunk_limit (4) reduces space waste
+ // from 60+% to around 30%.
+ if (_space_type == Metaspace::AnonymousMetaspaceType &&
+ _mdtype == Metaspace::NonClassType &&
+ sum_count_in_chunks_in_use(SpecializedIndex) < _anon_metadata_specialize_chunk_limit &&
+ word_size + Metachunk::overhead() <= SpecializedChunk) {
+ return SpecializedChunk;
+ }
+
if (chunks_in_use(MediumIndex) == NULL &&
sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
chunk_word_size = (size_t) small_chunk_size();
@@ -2504,8 +2527,10 @@ void SpaceManager::print_on(outputStream* st) const {
}
SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
+ Metaspace::MetaspaceType space_type,
Mutex* lock) :
_mdtype(mdtype),
+ _space_type(space_type),
_allocated_blocks_words(0),
_allocated_chunks_words(0),
_allocated_chunks_count(0),
@@ -3781,11 +3806,11 @@ void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
verify_global_initialization();
// Allocate SpaceManager for metadata objects.
- _vsm = new SpaceManager(NonClassType, lock);
+ _vsm = new SpaceManager(NonClassType, type, lock);
if (using_class_space()) {
// Allocate SpaceManager for classes.
- _class_vsm = new SpaceManager(ClassType, lock);
+ _class_vsm = new SpaceManager(ClassType, type, lock);
}
MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
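A stand-alone sketch (hypothetical names) of the sizing decision described by the anonymous-metaspace comment added to calc_chunk_size() above: anonymous-class metaspaces keep receiving small specialized chunks until a fixed count is reached, instead of moving to a larger chunk after the first one.

#include <cstddef>

// Returns the specialized chunk size while the special case applies,
// or 0 to fall through to the regular chunk-sizing policy.
size_t pick_chunk_words(bool anon_non_class_space,
                        size_t specialized_chunks_in_use,
                        size_t specialized_chunk_limit,
                        size_t request_words,
                        size_t chunk_overhead_words,
                        size_t specialized_chunk_words) {
  if (anon_non_class_space &&
      specialized_chunks_in_use < specialized_chunk_limit &&
      request_words + chunk_overhead_words <= specialized_chunk_words) {
    return specialized_chunk_words;   // stay on the small chunk size
  }
  return 0;
}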
diff --git a/src/hotspot/share/memory/resourceArea.cpp b/src/hotspot/share/memory/resourceArea.cpp
index 3995e6335db..aca27c0b56a 100644
--- a/src/hotspot/share/memory/resourceArea.cpp
+++ b/src/hotspot/share/memory/resourceArea.cpp
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/resourceArea.hpp"
+#include "memory/resourceArea.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"
#include "services/memTracker.hpp"
diff --git a/src/hotspot/share/memory/resourceArea.hpp b/src/hotspot/share/memory/resourceArea.hpp
index 5fc13ac9243..32d57fd6e61 100644
--- a/src/hotspot/share/memory/resourceArea.hpp
+++ b/src/hotspot/share/memory/resourceArea.hpp
@@ -57,18 +57,7 @@ public:
debug_only(_nesting = 0;);
}
- char* allocate_bytes(size_t size, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
-#ifdef ASSERT
- if (_nesting < 1 && !_warned++)
- fatal("memory leak: allocating without ResourceMark");
- if (UseMallocOnly) {
- // use malloc, but save pointer in res. area for later freeing
- char** save = (char**)internal_malloc_4(sizeof(char*));
- return (*save = (char*)os::malloc(size, mtThread, CURRENT_PC));
- }
-#endif
- return (char*)Amalloc(size, alloc_failmode);
- }
+ char* allocate_bytes(size_t size, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
// Bias this resource area to specific memory type
// (by default, ResourceArea is tagged as mtThread, per-thread general purpose storage)
diff --git a/src/hotspot/share/memory/resourceArea.inline.hpp b/src/hotspot/share/memory/resourceArea.inline.hpp
new file mode 100644
index 00000000000..895c0b71bbf
--- /dev/null
+++ b/src/hotspot/share/memory/resourceArea.inline.hpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_RESOURCEAREA_INLINE_HPP
+#define SHARE_VM_MEMORY_RESOURCEAREA_INLINE_HPP
+
+#include "memory/resourceArea.hpp"
+
+inline char* ResourceArea::allocate_bytes(size_t size, AllocFailType alloc_failmode) {
+#ifdef ASSERT
+ if (_nesting < 1 && !_warned++)
+ fatal("memory leak: allocating without ResourceMark");
+ if (UseMallocOnly) {
+ // use malloc, but save pointer in res. area for later freeing
+ char** save = (char**)internal_malloc_4(sizeof(char*));
+ return (*save = (char*)os::malloc(size, mtThread, CURRENT_PC));
+ }
+#endif
+ return (char*)Amalloc(size, alloc_failmode);
+}
+
+#endif // SHARE_VM_MEMORY_RESOURCEAREA_INLINE_HPP
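For orientation, a tiny stand-alone sketch (not the HotSpot classes) of the resource-arena idea behind allocate_bytes(): bump-pointer allocation plus a mark object that rolls the arena back when it goes out of scope, which is the role ResourceMark plays around this allocator.

#include <cstddef>

class ArenaSketch {
  char   _buf[4096];          // a real arena grows a chunk list instead
  size_t _top;
 public:
  ArenaSketch() : _top(0) {}
  char* allocate_bytes(size_t size) {
    if (_top + size > sizeof(_buf)) return nullptr;
    char* p = _buf + _top;
    _top += size;
    return p;
  }
  size_t top() const          { return _top; }
  void rollback_to(size_t m)  { _top = m; }
};

class MarkSketch {            // analogous to ResourceMark
  ArenaSketch& _arena;
  size_t       _saved_top;
 public:
  explicit MarkSketch(ArenaSketch& a) : _arena(a), _saved_top(a.top()) {}
  ~MarkSketch() { _arena.rollback_to(_saved_top); }
};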
diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp
index 32da59f6053..0dbad080700 100644
--- a/src/hotspot/share/memory/universe.cpp
+++ b/src/hotspot/share/memory/universe.cpp
@@ -687,6 +687,10 @@ jint universe_init() {
Metaspace::global_initialize();
+ // Initialize performance counters for metaspaces
+ MetaspaceCounters::initialize_performance_counters();
+ CompressedClassSpaceCounters::initialize_performance_counters();
+
AOTLoader::universe_init();
// Checks 'AfterMemoryInit' constraints.
@@ -764,6 +768,7 @@ jint Universe::initialize_heap() {
}
log_info(gc)("Using %s", _collectedHeap->name());
+ GCArguments::arguments()->post_heap_initialize();
ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
#ifdef _LP64
@@ -852,7 +857,7 @@ ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
|| use_large_pages, "Wrong alignment to use large pages");
// Now create the space.
- ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages);
+ ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, AllocateHeapAt);
if (total_rs.is_reserved()) {
assert((total_reserved == total_rs.size()) && ((uintptr_t)total_rs.base() % alignment == 0),
@@ -866,6 +871,9 @@ ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
Universe::set_narrow_oop_base((address)total_rs.compressed_oop_base());
}
+ if (AllocateHeapAt != NULL) {
+ log_info(gc,heap)("Successfully allocated Java heap at location %s", AllocateHeapAt);
+ }
return total_rs;
}
@@ -1085,10 +1093,6 @@ bool universe_post_init() {
// ("weak") refs processing infrastructure initialization
Universe::heap()->post_initialize();
- // Initialize performance counters for metaspaces
- MetaspaceCounters::initialize_performance_counters();
- CompressedClassSpaceCounters::initialize_performance_counters();
-
MemoryService::add_metaspace_memory_pools();
MemoryService::set_universe_heap(Universe::heap());
diff --git a/src/hotspot/share/memory/virtualspace.cpp b/src/hotspot/share/memory/virtualspace.cpp
index a3289d9b043..759210aace1 100644
--- a/src/hotspot/share/memory/virtualspace.cpp
+++ b/src/hotspot/share/memory/virtualspace.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,10 +35,10 @@
// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
- _alignment(0), _special(false), _executable(false) {
+ _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
}
-ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
+ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
bool has_preferred_page_size = preferred_page_size != 0;
// Want to use large pages where possible and pad with small pages.
size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
@@ -59,19 +59,30 @@ ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
bool large,
- char* requested_address) {
+ char* requested_address) : _fd_for_heap(-1) {
initialize(size, alignment, large, requested_address, false);
}
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
bool large,
- bool executable) {
+ bool executable) : _fd_for_heap(-1) {
initialize(size, alignment, large, NULL, executable);
}
+// Helper method
+static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
+ if (is_file_mapped) {
+ if (!os::unmap_memory(base, size)) {
+ fatal("os::unmap_memory failed");
+ }
+ } else if (!os::release_memory(base, size)) {
+ fatal("os::release_memory failed");
+ }
+}
+
// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
- const size_t size, bool special)
+ const size_t size, bool special, bool is_file_mapped = false)
{
if (base == requested_address || requested_address == NULL)
return false; // did not fail
@@ -87,9 +98,7 @@ static bool failed_to_reserve_as_requested(char* base, char* requested_address,
fatal("os::release_memory_special failed");
}
} else {
- if (!os::release_memory(base, size)) {
- fatal("os::release_memory failed");
- }
+ unmap_or_release_memory(base, size, is_file_mapped);
}
}
return true;
@@ -120,7 +129,18 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
// If OS doesn't support demand paging for large page memory, we need
// to use reserve_memory_special() to reserve and pin the entire region.
+ // If there is a backing file directory for this space then whether
+ // large pages are allocated is up to the filesystem of the backing file.
+ // So we ignore the UseLargePages flag in this case.
bool special = large && !os::can_commit_large_page_memory();
+ if (special && _fd_for_heap != -1) {
+ special = false;
+ if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
+ !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
+ log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
+ }
+ }
+
char* base = NULL;
if (special) {
@@ -157,13 +177,13 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
// important. If available space is not detected, return NULL.
if (requested_address != 0) {
- base = os::attempt_reserve_memory_at(size, requested_address);
- if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
+ base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
+ if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
// OS ignored requested address. Try different address.
base = NULL;
}
} else {
- base = os::reserve_memory(size, NULL, alignment);
+ base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
}
if (base == NULL) return;
@@ -171,13 +191,14 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
// Check alignment constraints
if ((((size_t)base) & (alignment - 1)) != 0) {
// Base not aligned, retry
- if (!os::release_memory(base, size)) fatal("os::release_memory failed");
+ unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
+
// Make sure that size is aligned
size = align_up(size, alignment);
- base = os::reserve_memory_aligned(size, alignment);
+ base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
if (requested_address != 0 &&
- failed_to_reserve_as_requested(base, requested_address, size, false)) {
+ failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
// As a result of the alignment constraints, the allocated base differs
// from the requested address. Return back to the caller who can
// take remedial action (like try again without a requested address).
@@ -190,6 +211,10 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
_base = base;
_size = size;
_alignment = alignment;
+  // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
+ if (_fd_for_heap != -1) {
+ _special = true;
+ }
}
@@ -252,7 +277,11 @@ void ReservedSpace::release() {
char *real_base = _base - _noaccess_prefix;
const size_t real_size = _size + _noaccess_prefix;
if (special()) {
- os::release_memory_special(real_base, real_size);
+ if (_fd_for_heap != -1) {
+ os::unmap_memory(real_base, real_size);
+ } else {
+ os::release_memory_special(real_base, real_size);
+ }
} else{
os::release_memory(real_base, real_size);
}
@@ -313,7 +342,17 @@ void ReservedHeapSpace::try_reserve_heap(size_t size,
// If OS doesn't support demand paging for large page memory, we need
// to use reserve_memory_special() to reserve and pin the entire region.
+ // If there is a backing file directory for this space then whether
+ // large pages are allocated is up to the filesystem of the backing file.
+ // So we ignore the UseLargePages flag in this case.
bool special = large && !os::can_commit_large_page_memory();
+ if (special && _fd_for_heap != -1) {
+ special = false;
+ if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
+ !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
+ log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
+ }
+ }
char* base = NULL;
log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
@@ -350,9 +389,9 @@ void ReservedHeapSpace::try_reserve_heap(size_t size,
// important. If available space is not detected, return NULL.
if (requested_address != 0) {
- base = os::attempt_reserve_memory_at(size, requested_address);
+ base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
} else {
- base = os::reserve_memory(size, NULL, alignment);
+ base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
}
}
if (base == NULL) { return; }
@@ -362,6 +401,11 @@ void ReservedHeapSpace::try_reserve_heap(size_t size,
_size = size;
_alignment = alignment;
+  // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
+ if (_fd_for_heap != -1) {
+ _special = true;
+ }
+
// Check alignment constraints
if ((((size_t)base) & (alignment - 1)) != 0) {
// Base not aligned, retry.
@@ -556,12 +600,20 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
}
}
-ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
+ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
if (size == 0) {
return;
}
+ if (heap_allocation_directory != NULL) {
+ _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
+ if (_fd_for_heap == -1) {
+ vm_exit_during_initialization(
+ err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
+ }
+ }
+
// Heap size should be aligned to alignment, too.
guarantee(is_aligned(size, alignment), "set by caller");
@@ -585,6 +637,10 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large)
if (base() != NULL) {
MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
}
+
+ if (_fd_for_heap != -1) {
+ os::close(_fd_for_heap);
+ }
}
// Reserve space for code segment. Same as Java heap only we mark this as
diff --git a/src/hotspot/share/memory/virtualspace.hpp b/src/hotspot/share/memory/virtualspace.hpp
index 2475f09d156..5041ce145ec 100644
--- a/src/hotspot/share/memory/virtualspace.hpp
+++ b/src/hotspot/share/memory/virtualspace.hpp
@@ -37,6 +37,7 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC {
size_t _noaccess_prefix;
size_t _alignment;
bool _special;
+ int _fd_for_heap;
private:
bool _executable;
@@ -115,7 +116,9 @@ class ReservedHeapSpace : public ReservedSpace {
void establish_noaccess_prefix();
public:
// Constructor. Tries to find a heap that is good for compressed oops.
- ReservedHeapSpace(size_t size, size_t forced_base_alignment, bool large);
+  // heap_allocation_directory is the path to the backing memory for the Java heap. When set, the Java heap
+  // will be allocated on the device managed by the file system where the directory resides.
+ ReservedHeapSpace(size_t size, size_t forced_base_alignment, bool large, const char* heap_allocation_directory = NULL);
// Returns the base to be used for compression, i.e. so that null can be
// encoded safely and implicit null checks can work.
char *compressed_oop_base() { return _base - _noaccess_prefix; }
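The constructor above creates the backing file via os::create_file_for_heap() and closes _fd_for_heap once reservation is done; the mapping, not the descriptor, keeps the file alive. A hedged POSIX-level sketch (illustrative only, not the HotSpot implementation) of that lifecycle for a file-backed reservation under a given directory:

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

void* reserve_file_backed(const char* dir, size_t size) {
  char path[4096];
  std::snprintf(path, sizeof(path), "%s/heap_XXXXXX", dir);
  int fd = mkstemp(path);                          // create a backing file
  if (fd == -1) return nullptr;
  unlink(path);                                    // the name is no longer needed
  if (ftruncate(fd, (off_t)size) != 0) { close(fd); return nullptr; }
  void* base = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  close(fd);                                       // the mapping keeps the file alive
  return (base == MAP_FAILED) ? nullptr : base;
}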
diff --git a/src/hotspot/share/oops/array.hpp b/src/hotspot/share/oops/array.hpp
index 225331e0521..f56da8e26e0 100644
--- a/src/hotspot/share/oops/array.hpp
+++ b/src/hotspot/share/oops/array.hpp
@@ -26,7 +26,6 @@
#define SHARE_VM_OOPS_ARRAY_HPP
#include "memory/allocation.hpp"
-#include "memory/allocation.inline.hpp"
#include "memory/metaspace.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"
diff --git a/src/hotspot/share/oops/constantPool.cpp b/src/hotspot/share/oops/constantPool.cpp
index 9e1eefd5251..4dd2e35e2cf 100644
--- a/src/hotspot/share/oops/constantPool.cpp
+++ b/src/hotspot/share/oops/constantPool.cpp
@@ -31,6 +31,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "interpreter/linkResolver.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
@@ -2300,3 +2301,11 @@ SymbolHashMapEntry* SymbolHashMap::find_entry(Symbol* sym) {
}
return NULL;
}
+
+void SymbolHashMap::initialize_table(int table_size) {
+ _table_size = table_size;
+ _buckets = NEW_C_HEAP_ARRAY(SymbolHashMapBucket, table_size, mtSymbol);
+ for (int index = 0; index < table_size; index++) {
+ _buckets[index].clear();
+ }
+}
diff --git a/src/hotspot/share/oops/constantPool.hpp b/src/hotspot/share/oops/constantPool.hpp
index 681935edae5..8dc75922419 100644
--- a/src/hotspot/share/oops/constantPool.hpp
+++ b/src/hotspot/share/oops/constantPool.hpp
@@ -982,13 +982,7 @@ class SymbolHashMap: public CHeapObj {
int _table_size;
SymbolHashMapBucket* _buckets;
- void initialize_table(int table_size) {
- _table_size = table_size;
- _buckets = NEW_C_HEAP_ARRAY(SymbolHashMapBucket, table_size, mtSymbol);
- for (int index = 0; index < table_size; index++) {
- _buckets[index].clear();
- }
- }
+ void initialize_table(int table_size);
public:
diff --git a/src/hotspot/share/oops/generateOopMap.cpp b/src/hotspot/share/oops/generateOopMap.cpp
index e8edb2c0030..50d4de01f4f 100644
--- a/src/hotspot/share/oops/generateOopMap.cpp
+++ b/src/hotspot/share/oops/generateOopMap.cpp
@@ -27,6 +27,7 @@
#include "interpreter/bytecodeStream.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
+#include "memory/allocation.inline.hpp"
#include "oops/generateOopMap.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
@@ -217,6 +218,12 @@ public:
int RetTable::_init_nof_entries = 10;
int RetTableEntry::_init_nof_jsrs = 5;
+RetTableEntry::RetTableEntry(int target, RetTableEntry *next) {
+ _target_bci = target;
+  _jsrs = new GrowableArray<intptr_t>(_init_nof_jsrs);
+ _next = next;
+}
+
void RetTableEntry::add_delta(int bci, int delta) {
if (_target_bci > bci) _target_bci += delta;
diff --git a/src/hotspot/share/oops/generateOopMap.hpp b/src/hotspot/share/oops/generateOopMap.hpp
index e401746240a..3fa44525146 100644
--- a/src/hotspot/share/oops/generateOopMap.hpp
+++ b/src/hotspot/share/oops/generateOopMap.hpp
@@ -26,7 +26,7 @@
#define SHARE_VM_OOPS_GENERATEOOPMAP_HPP
#include "interpreter/bytecodeStream.hpp"
-#include "memory/allocation.inline.hpp"
+#include "memory/allocation.hpp"
#include "memory/universe.inline.hpp"
#include "oops/method.hpp"
#include "oops/oopsHierarchy.hpp"
@@ -57,7 +57,7 @@ class RetTableEntry : public ResourceObj {
   GrowableArray<intptr_t> * _jsrs;                     // List of return addresses (bytecode index)
RetTableEntry *_next; // Link to next entry
public:
-  RetTableEntry(int target, RetTableEntry *next) { _target_bci=target; _jsrs = new GrowableArray<intptr_t>(_init_nof_jsrs); _next = next; }
+ RetTableEntry(int target, RetTableEntry *next);
// Query
int target_bci() const { return _target_bci; }
diff --git a/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp b/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp
index cb416918d9e..a00d5b14951 100644
--- a/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp
+++ b/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp
@@ -71,10 +71,15 @@ void InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
Devirtualizer::do_klass(closure, klass);
}
} else {
- // If klass is NULL then this a mirror for a primitive type.
- // We don't have to follow them, since they are handled as strong
- // roots in Universe::oops_do.
- assert(java_lang_Class::is_primitive(obj), "Sanity check");
+    // We would like to assert here (as below) that if klass is NULL, then
+    // this is a mirror for a primitive type that we do not need to follow,
+    // as such mirrors are always strong roots.
+    // However, we might come across a klass that has just changed during CMS
+    // concurrent marking if the allocation occurred in the old generation.
+    // This is benign here, as we keep alive all CLDs that were loaded during the
+    // CMS concurrent phase of class loading, i.e. they will be iterated over
+    // and kept alive during remark.
+ // assert(java_lang_Class::is_primitive(obj), "Sanity check");
}
}
diff --git a/src/hotspot/share/opto/c2_globals.hpp b/src/hotspot/share/opto/c2_globals.hpp
index e1c859c3af9..9f105299402 100644
--- a/src/hotspot/share/opto/c2_globals.hpp
+++ b/src/hotspot/share/opto/c2_globals.hpp
@@ -740,6 +740,14 @@
\
develop(bool, RenumberLiveNodes, true, \
"Renumber live nodes") \
+ \
+ product(uintx, LoopStripMiningIter, 0, \
+ "Number of iterations in strip mined loop") \
+ range(0, max_juint) \
+ \
+ product(uintx, LoopStripMiningIterShortLoop, 0, \
+ "Loop with fewer iterations are not strip mined") \
+ range(0, max_juint) \
C2_FLAGS(DECLARE_DEVELOPER_FLAG, \
DECLARE_PD_DEVELOPER_FLAG, \
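For background, loop strip mining splits a long counted loop into an outer loop that keeps the safepoint poll and an inner loop of at most LoopStripMiningIter iterations that runs poll-free. A plain C++ sketch of the shape of the transformation (illustrative only, assumes strip_iters > 0):

void strip_mined_loop(int n, int strip_iters, void (*body)(int)) {
  for (int i = 0; i < n; /* advanced by the inner loop */) {
    int limit = (n - i < strip_iters) ? n : i + strip_iters;
    for (; i < limit; i++) {    // inner loop, no safepoint poll
      body(i);
    }
    // safepoint_poll();        // hypothetical marker: one poll per outer iteration
  }
}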
diff --git a/src/hotspot/share/opto/callGenerator.cpp b/src/hotspot/share/opto/callGenerator.cpp
index a5aaa552fee..3a0dd830ac1 100644
--- a/src/hotspot/share/opto/callGenerator.cpp
+++ b/src/hotspot/share/opto/callGenerator.cpp
@@ -362,6 +362,20 @@ void LateInlineCallGenerator::do_late_inline() {
return;
}
+ // check for unreachable loop
+ CallProjections callprojs;
+ call->extract_projections(&callprojs, true);
+ if (callprojs.fallthrough_catchproj == call->in(0) ||
+ callprojs.catchall_catchproj == call->in(0) ||
+ callprojs.fallthrough_memproj == call->in(TypeFunc::Memory) ||
+ callprojs.catchall_memproj == call->in(TypeFunc::Memory) ||
+ callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O) ||
+ callprojs.catchall_ioproj == call->in(TypeFunc::I_O) ||
+ (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
+ (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
+ return;
+ }
+
Compile* C = Compile::current();
// Remove inlined methods from Compiler's lists.
if (call->is_macro()) {
diff --git a/src/hotspot/share/opto/cfgnode.cpp b/src/hotspot/share/opto/cfgnode.cpp
index 272c1340fbb..e2269104bf7 100644
--- a/src/hotspot/share/opto/cfgnode.cpp
+++ b/src/hotspot/share/opto/cfgnode.cpp
@@ -571,6 +571,18 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return NULL;
} else if (can_reshape) { // Optimization phase - remove the node
PhaseIterGVN *igvn = phase->is_IterGVN();
+ // Strip mined (inner) loop is going away, remove outer loop.
+ if (is_CountedLoop() &&
+ as_Loop()->is_strip_mined()) {
+ Node* outer_sfpt = as_CountedLoop()->outer_safepoint();
+ Node* outer_out = as_CountedLoop()->outer_loop_exit();
+ if (outer_sfpt != NULL && outer_out != NULL) {
+ Node* in = outer_sfpt->in(0);
+ igvn->replace_node(outer_out, in);
+ LoopNode* outer = as_CountedLoop()->outer_loop();
+ igvn->replace_input_of(outer, LoopNode::LoopBackControl, igvn->C->top());
+ }
+ }
Node *parent_ctrl;
if( cnt == 0 ) {
assert( req() == 1, "no inputs expected" );
diff --git a/src/hotspot/share/opto/classes.hpp b/src/hotspot/share/opto/classes.hpp
index 123ea3e0f28..235021f8660 100644
--- a/src/hotspot/share/opto/classes.hpp
+++ b/src/hotspot/share/opto/classes.hpp
@@ -133,6 +133,8 @@ macro(ConvL2F)
macro(ConvL2I)
macro(CountedLoop)
macro(CountedLoopEnd)
+macro(OuterStripMinedLoop)
+macro(OuterStripMinedLoopEnd)
macro(CountLeadingZerosI)
macro(CountLeadingZerosL)
macro(CountTrailingZerosI)
@@ -252,6 +254,7 @@ macro(SafePoint)
macro(SafePointScalarObject)
macro(SCMemProj)
macro(SqrtD)
+macro(SqrtF)
macro(Start)
macro(StartOSR)
macro(StoreB)
@@ -320,6 +323,7 @@ macro(AbsVD)
macro(NegVF)
macro(NegVD)
macro(SqrtVD)
+macro(SqrtVF)
macro(LShiftCntV)
macro(RShiftCntV)
macro(LShiftVB)
diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp
index 6fe374640b6..8e5da8a4028 100644
--- a/src/hotspot/share/opto/compile.cpp
+++ b/src/hotspot/share/opto/compile.cpp
@@ -2751,27 +2751,28 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
case Op_CallRuntime:
case Op_CallLeaf:
case Op_CallLeafNoFP: {
- assert( n->is_Call(), "" );
+ assert (n->is_Call(), "");
CallNode *call = n->as_Call();
// Count call sites where the FP mode bit would have to be flipped.
// Do not count uncommon runtime calls:
// uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
// _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
- if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) {
+ if (!call->is_CallStaticJava() || !call->as_CallStaticJava()->_name) {
frc.inc_call_count(); // Count the call site
} else { // See if uncommon argument is shared
Node *n = call->in(TypeFunc::Parms);
int nop = n->Opcode();
// Clone shared simple arguments to uncommon calls, item (1).
- if( n->outcnt() > 1 &&
+ if (n->outcnt() > 1 &&
!n->is_Proj() &&
nop != Op_CreateEx &&
nop != Op_CheckCastPP &&
nop != Op_DecodeN &&
nop != Op_DecodeNKlass &&
- !n->is_Mem() ) {
+ !n->is_Mem() &&
+ !n->is_Phi()) {
Node *x = n->clone();
- call->set_req( TypeFunc::Parms, x );
+ call->set_req(TypeFunc::Parms, x);
}
}
break;
@@ -3244,9 +3245,11 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
break;
case Op_Loop:
case Op_CountedLoop:
+ case Op_OuterStripMinedLoop:
if (n->as_Loop()->is_inner_loop()) {
frc.inc_inner_loop_count();
}
+ n->as_Loop()->verify_strip_mined(0);
break;
case Op_LShiftI:
case Op_RShiftI:
@@ -3525,6 +3528,14 @@ bool Compile::final_graph_reshaping() {
record_method_not_compilable("infinite loop");
return true; // Found unvisited kid; must be unreach
}
+
+    // Here so that the verification code in final_graph_reshaping_walk()
+    // always sees an OuterStripMinedLoopEnd.
+ if (n->is_OuterStripMinedLoopEnd()) {
+ IfNode* init_iff = n->as_If();
+ Node* iff = new IfNode(init_iff->in(0), init_iff->in(1), init_iff->_prob, init_iff->_fcnt);
+ n->subsume_by(iff, this);
+ }
}
// If original bytecodes contained a mixture of floats and doubles
diff --git a/src/hotspot/share/opto/convertnode.cpp b/src/hotspot/share/opto/convertnode.cpp
index 5d97a7330f2..aaf0cc2fc61 100644
--- a/src/hotspot/share/opto/convertnode.cpp
+++ b/src/hotspot/share/opto/convertnode.cpp
@@ -73,6 +73,21 @@ const Type* ConvD2FNode::Value(PhaseGVN* phase) const {
return TypeF::make( (float)td->getd() );
}
+//------------------------------Ideal------------------------------------------
+// If we see the pattern ConvF2D -> SomeDoubleOp -> ConvD2F, do the operation as float.
+Node *ConvD2FNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+ if ( in(1)->Opcode() == Op_SqrtD ) {
+ Node* sqrtd = in(1);
+ if ( sqrtd->in(1)->Opcode() == Op_ConvF2D ) {
+ if ( Matcher::match_rule_supported(Op_SqrtF) ) {
+ Node* convf2d = sqrtd->in(1);
+ return new SqrtFNode(phase->C, sqrtd->in(0), convf2d->in(1));
+ }
+ }
+ }
+ return NULL;
+}
+
//------------------------------Identity---------------------------------------
// Float's can be converted to doubles with no loss of bits. Hence
// converting a float to a double and back to a float is a NOP.
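For orientation, the matched shape typically comes from Java code like (float) Math.sqrt(f) for a float f; since square root is correctly rounded, computing it directly in single precision yields the same value, which is what the rule exploits when the platform supports SqrtF. A plain C++ analogy (illustrative only):

#include <cmath>

// Before: widen to double, take the double sqrt, narrow back to float
// (the ConvF2D -> SqrtD -> ConvD2F shape matched above).
float sqrt_via_double(float f) { return (float)std::sqrt((double)f); }

// After: a single single-precision square root (SqrtF).
float sqrt_direct(float f) { return std::sqrt(f); }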
diff --git a/src/hotspot/share/opto/convertnode.hpp b/src/hotspot/share/opto/convertnode.hpp
index 0a3e78b18dc..6ae7de5882f 100644
--- a/src/hotspot/share/opto/convertnode.hpp
+++ b/src/hotspot/share/opto/convertnode.hpp
@@ -51,6 +51,7 @@ class ConvD2FNode : public Node {
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node* Identity(PhaseGVN* phase);
+ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint ideal_reg() const { return Op_RegF; }
};
diff --git a/src/hotspot/share/opto/idealGraphPrinter.cpp b/src/hotspot/share/opto/idealGraphPrinter.cpp
index 0d5fbdda39c..cb78db147b2 100644
--- a/src/hotspot/share/opto/idealGraphPrinter.cpp
+++ b/src/hotspot/share/opto/idealGraphPrinter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "opto/machnode.hpp"
#include "opto/parse.hpp"
#include "runtime/threadCritical.hpp"
+#include "runtime/threadSMR.hpp"
#ifndef PRODUCT
@@ -91,8 +92,7 @@ IdealGraphPrinter *IdealGraphPrinter::printer() {
}
void IdealGraphPrinter::clean_up() {
- JavaThread *p;
- for (p = Threads::first(); p; p = p->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *p = jtiwh.next(); ) {
if (p->is_Compiler_thread()) {
CompilerThread *c = (CompilerThread *)p;
IdealGraphPrinter *printer = c->ideal_graph_printer();
diff --git a/src/hotspot/share/opto/ifnode.cpp b/src/hotspot/share/opto/ifnode.cpp
index 7865e3ca882..3437dfed372 100644
--- a/src/hotspot/share/opto/ifnode.cpp
+++ b/src/hotspot/share/opto/ifnode.cpp
@@ -117,6 +117,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
// No intervening control, like a simple Call
Node *r = iff->in(0);
if( !r->is_Region() ) return NULL;
+ if (r->is_Loop() && r->in(LoopNode::LoopBackControl)->is_top()) return NULL; // going away anyway
if( phi->region() != r ) return NULL;
// No other users of the cmp/bool
if (b->outcnt() != 1 || cmp->outcnt() != 1) {
diff --git a/src/hotspot/share/opto/loopPredicate.cpp b/src/hotspot/share/opto/loopPredicate.cpp
index 0de17405ccc..9de54bb372a 100644
--- a/src/hotspot/share/opto/loopPredicate.cpp
+++ b/src/hotspot/share/opto/loopPredicate.cpp
@@ -515,8 +515,8 @@ class Invariance : public StackObj {
_visited(area), _invariant(area), _stack(area, 10 /* guess */),
_clone_visited(area), _old_new(area)
{
- Node* head = _lpt->_head;
- Node* entry = head->in(LoopNode::EntryControl);
+ LoopNode* head = _lpt->_head->as_Loop();
+ Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
if (entry->outcnt() != 1) {
// If a node is pinned between the predicates and the loop
// entry, we won't be able to move any node in the loop that
@@ -801,6 +801,10 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
return false;
}
+ if (head->is_OuterStripMinedLoop()) {
+ return false;
+ }
+
CountedLoopNode *cl = NULL;
if (head->is_valid_counted_loop()) {
cl = head->as_CountedLoop();
@@ -812,7 +816,7 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
cl = NULL;
}
- Node* entry = head->in(LoopNode::EntryControl);
+ Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
ProjNode *predicate_proj = NULL;
// Loop limit check predicate should be near the loop.
predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
@@ -1007,6 +1011,8 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
}
#endif
+ head->verify_strip_mined(1);
+
return hoisted;
}
diff --git a/src/hotspot/share/opto/loopTransform.cpp b/src/hotspot/share/opto/loopTransform.cpp
index 2bad280129a..28a270ef4c4 100644
--- a/src/hotspot/share/opto/loopTransform.cpp
+++ b/src/hotspot/share/opto/loopTransform.cpp
@@ -67,6 +67,16 @@ void IdealLoopTree::record_for_igvn() {
Node *n = _body.at(i);
_phase->_igvn._worklist.push(n);
}
+ // put body of outer strip mined loop on igvn work list as well
+ if (_head->is_CountedLoop() && _head->as_Loop()->is_strip_mined()) {
+ CountedLoopNode* l = _head->as_CountedLoop();
+ _phase->_igvn._worklist.push(l->outer_loop());
+ _phase->_igvn._worklist.push(l->outer_loop_tail());
+ _phase->_igvn._worklist.push(l->outer_loop_end());
+ _phase->_igvn._worklist.push(l->outer_safepoint());
+ Node* cle_out = _head->as_CountedLoop()->loopexit()->proj_out(false);
+ _phase->_igvn._worklist.push(cle_out);
+ }
}
//------------------------------compute_exact_trip_count-----------------------
@@ -494,7 +504,7 @@ void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {
loop->dump_head();
}
#endif
- Node* head = loop->_head;
+ LoopNode* head = loop->_head->as_Loop();
bool counted_loop = head->is_CountedLoop();
if (counted_loop) {
CountedLoopNode *cl = head->as_CountedLoop();
@@ -514,7 +524,7 @@ void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {
// Step 1: Clone the loop body. The clone becomes the peeled iteration.
// The pre-loop illegally has 2 control users (old & new loops).
- clone_loop( loop, old_new, dom_depth(head) );
+ clone_loop(loop, old_new, dom_depth(head->skip_strip_mined()), ControlAroundStripMined);
// Step 2: Make the old-loop fall-in edges point to the peeled iteration.
// Do this by making the old-loop fall-in edges act as if they came
@@ -523,8 +533,8 @@ void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {
// the pre-loop with only 1 user (the new peeled iteration), but the
// peeled-loop backedge has 2 users.
Node* new_entry = old_new[head->in(LoopNode::LoopBackControl)->_idx];
- _igvn.hash_delete(head);
- head->set_req(LoopNode::EntryControl, new_entry);
+ _igvn.hash_delete(head->skip_strip_mined());
+ head->skip_strip_mined()->set_req(LoopNode::EntryControl, new_entry);
for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
Node* old = head->fast_out(j);
if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) {
@@ -1009,8 +1019,6 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
CountedLoopEndNode *main_end = main_head->loopexit();
guarantee(main_end != NULL, "no loop exit node");
assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
- uint dd_main_head = dom_depth(main_head);
- uint max = main_head->outcnt();
Node *pre_header= main_head->in(LoopNode::EntryControl);
Node *init = main_head->init_trip();
@@ -1043,7 +1051,16 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
// Step B1: Clone the loop body. The clone becomes the pre-loop. The main
// loop pre-header illegally has 2 control users (old & new loops).
- clone_loop( loop, old_new, dd_main_head );
+ LoopNode* outer_main_head = main_head;
+ IdealLoopTree* outer_loop = loop;
+ if (main_head->is_strip_mined()) {
+ main_head->verify_strip_mined(1);
+ outer_main_head = main_head->outer_loop();
+ outer_loop = loop->_parent;
+ assert(outer_loop->_head == outer_main_head, "broken loop tree");
+ }
+ uint dd_main_head = dom_depth(outer_main_head);
+ clone_loop(loop, old_new, dd_main_head, ControlAroundStripMined);
CountedLoopNode* pre_head = old_new[main_head->_idx]->as_CountedLoop();
CountedLoopEndNode* pre_end = old_new[main_end ->_idx]->as_CountedLoopEnd();
pre_head->set_pre_loop(main_head);
@@ -1058,7 +1075,7 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
IfFalseNode *new_pre_exit = new IfFalseNode(pre_end);
_igvn.register_new_node_with_optimizer( new_pre_exit );
set_idom(new_pre_exit, pre_end, dd_main_head);
- set_loop(new_pre_exit, loop->_parent);
+ set_loop(new_pre_exit, outer_loop->_parent);
// Step B2: Build a zero-trip guard for the main-loop. After leaving the
// pre-loop, the main-loop may not execute at all. Later in life this
@@ -1075,22 +1092,22 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
IfNode *min_iff = new IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
_igvn.register_new_node_with_optimizer( min_iff );
set_idom(min_iff, new_pre_exit, dd_main_head);
- set_loop(min_iff, loop->_parent);
+ set_loop(min_iff, outer_loop->_parent);
// Plug in the false-path, taken if we need to skip main-loop
_igvn.hash_delete( pre_exit );
pre_exit->set_req(0, min_iff);
set_idom(pre_exit, min_iff, dd_main_head);
- set_idom(pre_exit->unique_out(), min_iff, dd_main_head);
+ set_idom(pre_exit->unique_ctrl_out(), min_iff, dd_main_head);
// Make the true-path, must enter the main loop
Node *min_taken = new IfTrueNode( min_iff );
_igvn.register_new_node_with_optimizer( min_taken );
set_idom(min_taken, min_iff, dd_main_head);
- set_loop(min_taken, loop->_parent);
+ set_loop(min_taken, outer_loop->_parent);
// Plug in the true path
- _igvn.hash_delete( main_head );
- main_head->set_req(LoopNode::EntryControl, min_taken);
- set_idom(main_head, min_taken, dd_main_head);
+ _igvn.hash_delete(outer_main_head);
+ outer_main_head->set_req(LoopNode::EntryControl, min_taken);
+ set_idom(outer_main_head, min_taken, dd_main_head);
Arena *a = Thread::current()->resource_area();
VectorSet visited(a);
@@ -1102,7 +1119,7 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
Node *pre_phi = old_new[main_phi->_idx];
Node *fallpre = clone_up_backedge_goo(pre_head->back_control(),
- main_head->init_control(),
+ main_head->skip_strip_mined()->in(LoopNode::EntryControl),
pre_phi->in(LoopNode::LoopBackControl),
visited, clones);
_igvn.hash_delete(main_phi);
@@ -1305,16 +1322,24 @@ void PhaseIdealLoop::insert_scalar_rced_post_loop(IdealLoopTree *loop, Node_List
Node *PhaseIdealLoop::insert_post_loop(IdealLoopTree *loop, Node_List &old_new,
CountedLoopNode *main_head, CountedLoopEndNode *main_end,
Node *incr, Node *limit, CountedLoopNode *&post_head) {
+ IfNode* outer_main_end = main_end;
+ IdealLoopTree* outer_loop = loop;
+ if (main_head->is_strip_mined()) {
+ main_head->verify_strip_mined(1);
+ outer_main_end = main_head->outer_loop_end();
+ outer_loop = loop->_parent;
+ assert(outer_loop->_head == main_head->in(LoopNode::EntryControl), "broken loop tree");
+ }
//------------------------------
// Step A: Create a new post-Loop.
- Node* main_exit = main_end->proj_out(false);
+ Node* main_exit = outer_main_end->proj_out(false);
assert(main_exit->Opcode() == Op_IfFalse, "");
int dd_main_exit = dom_depth(main_exit);
// Step A1: Clone the loop body of main. The clone becomes the post-loop.
// The main loop pre-header illegally has 2 control users (old & new loops).
- clone_loop(loop, old_new, dd_main_exit);
+ clone_loop(loop, old_new, dd_main_exit, ControlAroundStripMined);
assert(old_new[main_end->_idx]->Opcode() == Op_CountedLoopEnd, "");
post_head = old_new[main_head->_idx]->as_CountedLoop();
post_head->set_normal_loop();
@@ -1325,10 +1350,10 @@ Node *PhaseIdealLoop::insert_post_loop(IdealLoopTree *loop, Node_List &old_new,
post_end->_prob = PROB_FAIR;
// Build the main-loop normal exit.
- IfFalseNode *new_main_exit = new IfFalseNode(main_end);
+ IfFalseNode *new_main_exit = new IfFalseNode(outer_main_end);
_igvn.register_new_node_with_optimizer(new_main_exit);
- set_idom(new_main_exit, main_end, dd_main_exit);
- set_loop(new_main_exit, loop->_parent);
+ set_idom(new_main_exit, outer_main_end, dd_main_exit);
+ set_loop(new_main_exit, outer_loop->_parent);
// Step A2: Build a zero-trip guard for the post-loop. After leaving the
// main-loop, the post-loop may not execute at all. We 'opaque' the incr
@@ -1346,7 +1371,7 @@ Node *PhaseIdealLoop::insert_post_loop(IdealLoopTree *loop, Node_List &old_new,
IfNode *zer_iff = new IfNode(new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN);
_igvn.register_new_node_with_optimizer(zer_iff);
set_idom(zer_iff, new_main_exit, dd_main_exit);
- set_loop(zer_iff, loop->_parent);
+ set_loop(zer_iff, outer_loop->_parent);
// Plug in the false-path, taken if we need to skip this post-loop
_igvn.replace_input_of(main_exit, 0, zer_iff);
@@ -1356,7 +1381,7 @@ Node *PhaseIdealLoop::insert_post_loop(IdealLoopTree *loop, Node_List &old_new,
Node *zer_taken = new IfTrueNode(zer_iff);
_igvn.register_new_node_with_optimizer(zer_taken);
set_idom(zer_taken, zer_iff, dd_main_exit);
- set_loop(zer_taken, loop->_parent);
+ set_loop(zer_taken, outer_loop->_parent);
// Plug in the true path
_igvn.hash_delete(post_head);
post_head->set_req(LoopNode::EntryControl, zer_taken);
@@ -1431,7 +1456,7 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad
// if rounds of unroll,optimize are making progress
loop_head->set_node_count_before_unroll(loop->_body.size());
- Node *ctrl = loop_head->in(LoopNode::EntryControl);
+ Node *ctrl = loop_head->skip_strip_mined()->in(LoopNode::EntryControl);
Node *limit = loop_head->limit();
Node *init = loop_head->init_trip();
Node *stride = loop_head->stride();
@@ -1610,7 +1635,7 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad
// represents the odd iterations; since the loop trips an even number of
// times its backedge is never taken. Kill the backedge.
uint dd = dom_depth(loop_head);
- clone_loop( loop, old_new, dd );
+ clone_loop(loop, old_new, dd, IgnoreStripMined);
// Make backedges of the clone equal to backedges of the original.
// Make the fall-in from the original come from the fall-out of the clone.
@@ -1653,6 +1678,7 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad
}
loop->record_for_igvn();
+ loop_head->clear_strip_mined();
#ifndef PRODUCT
if (C->do_vector_loop() && (PrintOpto && (VerifyLoopOptimizations || TraceLoopOpts))) {
@@ -2047,7 +2073,7 @@ int PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
}
// Need to find the main-loop zero-trip guard
- Node *ctrl = cl->in(LoopNode::EntryControl);
+ Node *ctrl = cl->skip_strip_mined()->in(LoopNode::EntryControl);
Node *iffm = ctrl->in(0);
Node *opqzm = iffm->in(1)->in(1)->in(2);
assert(opqzm->in(1) == main_limit, "do not understand situation");
@@ -2413,7 +2439,6 @@ bool PhaseIdealLoop::multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoop
_igvn.register_new_node_with_optimizer(cur_min);
Node *cmp_node = rce_loop_end->cmp_node();
_igvn.replace_input_of(cmp_node, 2, cur_min);
- set_idom(cmp_node, cur_min, dom_depth(ctrl));
set_ctrl(cur_min, ctrl);
set_loop(cur_min, rce_loop->_parent);
@@ -2519,7 +2544,7 @@ void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
#ifdef ASSERT
static CountedLoopNode* locate_pre_from_main(CountedLoopNode *cl) {
- Node *ctrl = cl->in(LoopNode::EntryControl);
+ Node *ctrl = cl->skip_strip_mined()->in(LoopNode::EntryControl);
assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
Node *iffm = ctrl->in(0);
assert(iffm->Opcode() == Op_If, "");
@@ -2558,7 +2583,7 @@ void IdealLoopTree::remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *
}
assert(locate_pre_from_main(main_head) == cl, "bad main loop");
- Node* main_iff = main_head->in(LoopNode::EntryControl)->in(0);
+ Node* main_iff = main_head->skip_strip_mined()->in(LoopNode::EntryControl)->in(0);
// Remove the Opaque1Node of the pre loop and make it execute all iterations
phase->_igvn.replace_input_of(pre_cmp, 2, pre_cmp->in(2)->in(2));
@@ -2619,7 +2644,7 @@ bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
}
if (needs_guard) {
// Check for an obvious zero trip guard.
- Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl));
+ Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->skip_strip_mined()->in(LoopNode::EntryControl));
if (inctrl->Opcode() == Op_IfTrue || inctrl->Opcode() == Op_IfFalse) {
bool maybe_swapped = (inctrl->Opcode() == Op_IfFalse);
// The test should look like just the backedge of a CountedLoop
@@ -3167,6 +3192,8 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
return false;
}
+ head->verify_strip_mined(1);
+
// Check that the body only contains a store of a loop invariant
// value that is indexed by the loop phi.
Node* store = NULL;
@@ -3288,6 +3315,16 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
}
*/
+ if (head->is_strip_mined()) {
+ // Inner strip mined loop goes away so get rid of outer strip
+ // mined loop
+ Node* outer_sfpt = head->outer_safepoint();
+ Node* in = outer_sfpt->in(0);
+ Node* outer_out = head->outer_loop_exit();
+ lazy_replace(outer_out, in);
+ _igvn.replace_input_of(outer_sfpt, 0, C->top());
+ }
+
// Redirect the old control and memory edges that are outside the loop.
// Sometimes the memory phi of the head is used as the outgoing
// state of the loop. It's safe in this case to replace it with the
diff --git a/src/hotspot/share/opto/loopUnswitch.cpp b/src/hotspot/share/opto/loopUnswitch.cpp
index 72201c2e282..9940b333fe0 100644
--- a/src/hotspot/share/opto/loopUnswitch.cpp
+++ b/src/hotspot/share/opto/loopUnswitch.cpp
@@ -132,11 +132,11 @@ void PhaseIdealLoop::do_unswitching (IdealLoopTree *loop, Node_List &old_new) {
head->as_CountedLoop()->set_normal_loop();
}
- ProjNode* proj_true = create_slow_version_of_loop(loop, old_new, unswitch_iff->Opcode());
+ ProjNode* proj_true = create_slow_version_of_loop(loop, old_new, unswitch_iff->Opcode(), CloneIncludesStripMined);
#ifdef ASSERT
Node* uniqc = proj_true->unique_ctrl_out();
- Node* entry = head->in(LoopNode::EntryControl);
+ Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
Node* predicate = find_predicate(entry);
if (predicate != NULL && UseLoopPredicate) {
// We may have two predicates, find first.
@@ -145,7 +145,8 @@ void PhaseIdealLoop::do_unswitching (IdealLoopTree *loop, Node_List &old_new) {
}
if (predicate != NULL) predicate = predicate->in(0);
assert(proj_true->is_IfTrue() &&
- (predicate == NULL && uniqc == head ||
+ (predicate == NULL && uniqc == head && !head->is_strip_mined() ||
+ predicate == NULL && uniqc == head->in(LoopNode::EntryControl) && head->is_strip_mined() ||
predicate != NULL && uniqc == predicate), "by construction");
#endif
// Increment unswitch count
@@ -223,13 +224,16 @@ void PhaseIdealLoop::do_unswitching (IdealLoopTree *loop, Node_List &old_new) {
// Return control projection of the entry to the fast version.
ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop,
Node_List &old_new,
- int opcode) {
+ int opcode,
+ CloneLoopMode mode) {
LoopNode* head = loop->_head->as_Loop();
bool counted_loop = head->is_CountedLoop();
- Node* entry = head->in(LoopNode::EntryControl);
+ Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
_igvn.rehash_node_delayed(entry);
IdealLoopTree* outer_loop = loop->_parent;
+ head->verify_strip_mined(1);
+
Node *cont = _igvn.intcon(1);
set_ctrl(cont, C->root());
Node* opq = new Opaque1Node(C, cont);
@@ -247,19 +251,21 @@ ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop,
// Clone the loop body. The clone becomes the fast loop. The
// original pre-header will (illegally) have 3 control users
// (old & new loops & new if).
- clone_loop(loop, old_new, dom_depth(head), iff);
+ clone_loop(loop, old_new, dom_depth(head->skip_strip_mined()), mode, iff);
assert(old_new[head->_idx]->is_Loop(), "" );
// Fast (true) control
Node* iffast_pred = clone_loop_predicates(entry, iffast, !counted_loop);
- _igvn.replace_input_of(head, LoopNode::EntryControl, iffast_pred);
- set_idom(head, iffast_pred, dom_depth(head));
// Slow (false) control
Node* ifslow_pred = clone_loop_predicates(entry, ifslow, !counted_loop);
- LoopNode* slow_head = old_new[head->_idx]->as_Loop();
- _igvn.replace_input_of(slow_head, LoopNode::EntryControl, ifslow_pred);
- set_idom(slow_head, ifslow_pred, dom_depth(slow_head));
+
+ Node* l = head->skip_strip_mined();
+ _igvn.replace_input_of(l, LoopNode::EntryControl, iffast_pred);
+ set_idom(l, iffast_pred, dom_depth(l));
+ LoopNode* slow_l = old_new[head->_idx]->as_Loop()->skip_strip_mined();
+ _igvn.replace_input_of(slow_l, LoopNode::EntryControl, ifslow_pred);
+ set_idom(slow_l, ifslow_pred, dom_depth(l));
recompute_dom_depth();
@@ -270,9 +276,9 @@ LoopNode* PhaseIdealLoop::create_reserve_version_of_loop(IdealLoopTree *loop, Co
Node_List old_new;
LoopNode* head = loop->_head->as_Loop();
bool counted_loop = head->is_CountedLoop();
- Node* entry = head->in(LoopNode::EntryControl);
+ Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
_igvn.rehash_node_delayed(entry);
- IdealLoopTree* outer_loop = loop->_parent;
+ IdealLoopTree* outer_loop = head->is_strip_mined() ? loop->_parent->_parent : loop->_parent;
ConINode* const_1 = _igvn.intcon(1);
set_ctrl(const_1, C->root());
@@ -286,7 +292,7 @@ LoopNode* PhaseIdealLoop::create_reserve_version_of_loop(IdealLoopTree *loop, Co
// Clone the loop body. The clone becomes the fast loop. The
// original pre-header will (illegally) have 3 control users
// (old & new loops & new if).
- clone_loop(loop, old_new, dom_depth(head), iff);
+ clone_loop(loop, old_new, dom_depth(head), CloneIncludesStripMined, iff);
assert(old_new[head->_idx]->is_Loop(), "" );
LoopNode* slow_head = old_new[head->_idx]->as_Loop();
@@ -303,9 +309,9 @@ LoopNode* PhaseIdealLoop::create_reserve_version_of_loop(IdealLoopTree *loop, Co
#endif
// Fast (true) control
- _igvn.replace_input_of(head, LoopNode::EntryControl, iffast);
+ _igvn.replace_input_of(head->skip_strip_mined(), LoopNode::EntryControl, iffast);
// Slow (false) control
- _igvn.replace_input_of(slow_head, LoopNode::EntryControl, ifslow);
+ _igvn.replace_input_of(slow_head->skip_strip_mined(), LoopNode::EntryControl, ifslow);
recompute_dom_depth();
@@ -394,7 +400,7 @@ bool CountedLoopReserveKit::create_reserve() {
return false;
}
- Node* ifslow_pred = _lp_reserved->as_CountedLoop()->in(LoopNode::EntryControl);
+ Node* ifslow_pred = _lp_reserved->skip_strip_mined()->in(LoopNode::EntryControl);
if (!ifslow_pred->is_IfFalse()) {
return false;
diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp
index 5f003d304ff..1e5af607642 100644
--- a/src/hotspot/share/opto/loopnode.cpp
+++ b/src/hotspot/share/opto/loopnode.cpp
@@ -261,8 +261,68 @@ void PhaseIdealLoop::set_subtree_ctrl( Node *n ) {
set_early_ctrl( n );
}
+// Create a skeleton strip mined outer loop: a Loop head before the
+// inner strip mined loop, a safepoint and an exit condition guarded
+// by an opaque node after the inner strip mined loop with a backedge
+// to the loop head. The inner strip mined loop is left as it is. Only
+// once loop optimizations are over, do we adjust the inner loop exit
+// condition to limit its number of iterations, set the outer loop
+// exit condition and add Phis to the outer loop head. Some loop
+// optimizations that operate on the inner strip mined loop need to be
+// aware of the outer strip mined loop: loop unswitching needs to
+// clone the outer loop as well as the inner, unrolling needs to only
+// clone the inner loop etc. No optimizations need to change the outer
+// strip mined loop as it is only a skeleton.
+IdealLoopTree* PhaseIdealLoop::create_outer_strip_mined_loop(BoolNode *test, Node *cmp, Node *init_control,
+ IdealLoopTree* loop, float cl_prob, float le_fcnt,
+ Node*& entry_control, Node*& iffalse) {
+ Node* outer_test = _igvn.intcon(0);
+ set_ctrl(outer_test, C->root());
+ Node *orig = iffalse;
+ iffalse = iffalse->clone();
+ _igvn.register_new_node_with_optimizer(iffalse);
+ set_idom(iffalse, idom(orig), dom_depth(orig));
+
+ IfNode *outer_le = new OuterStripMinedLoopEndNode(iffalse, outer_test, cl_prob, le_fcnt);
+ Node *outer_ift = new IfTrueNode (outer_le);
+ Node* outer_iff = orig;
+ _igvn.replace_input_of(outer_iff, 0, outer_le);
+
+ LoopNode *outer_l = new OuterStripMinedLoopNode(C, init_control, outer_ift);
+ entry_control = outer_l;
+
+ IdealLoopTree* outer_ilt = new IdealLoopTree(this, outer_l, outer_ift);
+ IdealLoopTree* parent = loop->_parent;
+ IdealLoopTree* sibling = parent->_child;
+ if (sibling == loop) {
+ parent->_child = outer_ilt;
+ } else {
+ while (sibling->_next != loop) {
+ sibling = sibling->_next;
+ }
+ sibling->_next = outer_ilt;
+ }
+ outer_ilt->_next = loop->_next;
+ outer_ilt->_parent = parent;
+ outer_ilt->_child = loop;
+ outer_ilt->_nest = loop->_nest;
+ loop->_parent = outer_ilt;
+ loop->_next = NULL;
+ loop->_nest++;
+
+ set_loop(iffalse, outer_ilt);
+ register_control(outer_le, outer_ilt, iffalse);
+ register_control(outer_ift, outer_ilt, outer_le);
+ set_idom(outer_iff, outer_le, dom_depth(outer_le));
+ _igvn.register_new_node_with_optimizer(outer_l);
+ set_loop(outer_l, outer_ilt);
+ set_idom(outer_l, init_control, dom_depth(init_control)+1);
+
+ return outer_ilt;
+}
+
//------------------------------is_counted_loop--------------------------------
-bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
+bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop) {
PhaseGVN *gvn = &_igvn;
// Counted loop head must be a good RegionNode with only 3 not NULL
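For orientation, once the inner exit limit has been clamped by adjust_strip_mined_loop() (later in this file), the skeleton built above gives a counted loop the source-level shape sketched below. This is a hand-written illustration, not generated code; strip_mined_sum and STRIP_ITERS are invented names, with STRIP_ITERS standing in for LoopStripMiningIter:

    #include <algorithm>

    // Illustration only: a summation loop after strip mining, with one
    // safepoint poll per outer iteration instead of one per element.
    long strip_mined_sum(const int* a, int n) {
      const int STRIP_ITERS = 1000;          // stand-in for LoopStripMiningIter
      long sum = 0;
      for (int i = 0; i < n; /* advanced by the inner loop */) {
        int next = std::min(n, i + STRIP_ITERS);
        for (; i < next; i++) {              // inner CountedLoop: no safepoint poll
          sum += a[i];
        }
        // outer OuterStripMinedLoop backedge: the safepoint poll sits here
      }
      return sum;
    }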
@@ -280,7 +340,7 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
// Allow funny placement of Safepoint
if (back_control->Opcode() == Op_SafePoint) {
- if (UseCountedLoopSafepoints) {
+ if (LoopStripMiningIter != 0) {
// Leaving the safepoint on the backedge and creating a
// CountedLoop will confuse optimizations. We can't move the
// safepoint around because its jvm state wouldn't match a new
@@ -600,7 +660,7 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
}
set_subtree_ctrl( limit );
- if (!UseCountedLoopSafepoints) {
+ if (LoopStripMiningIter == 0) {
// Check for SafePoint on backedge and remove
Node *sfpt = x->in(LoopNode::LoopBackControl);
if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) {
@@ -683,8 +743,20 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
assert(iff->outcnt() == 0, "should be dead now");
lazy_replace( iff, le ); // fix 'get_ctrl'
+ Node *sfpt2 = le->in(0);
+
+ Node* entry_control = init_control;
+ bool strip_mine_loop = LoopStripMiningIter > 1 && loop->_child == NULL &&
+ sfpt2->Opcode() == Op_SafePoint && !loop->_has_call;
+ IdealLoopTree* outer_ilt = NULL;
+ if (strip_mine_loop) {
+ outer_ilt = create_outer_strip_mined_loop(test, cmp, init_control, loop,
+ cl_prob, le->_fcnt, entry_control,
+ iffalse);
+ }
+
// Now setup a new CountedLoopNode to replace the existing LoopNode
- CountedLoopNode *l = new CountedLoopNode(init_control, back_control);
+ CountedLoopNode *l = new CountedLoopNode(entry_control, back_control);
l->set_unswitch_count(x->as_Loop()->unswitch_count()); // Preserve
// The following assert is approximately true, and defines the intention
// of can_be_counted_loop. It fails, however, because phase->type
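The strip_mine_loop test above is the gate for the whole nest: LoopStripMiningIter == 0 keeps the old remove-the-safepoint behaviour, a value of 1 simply keeps the safepoint inside the counted loop, and values above 1 build the outer skeleton when the loop is innermost, has a SafePoint feeding the exit test and contains no call. A restatement of that decision as a standalone helper (the enum and function are invented for this sketch):

    enum class SafepointPolicy { RemoveSafepoint, KeepSafepoint, StripMine };

    // Mirrors the strip_mine_loop condition in is_counted_loop() above.
    SafepointPolicy policy_for(long loop_strip_mining_iter,
                               bool innermost,                // loop->_child == NULL
                               bool safepoint_before_exit,    // le->in(0) is a SafePoint
                               bool has_call) {               // loop->_has_call
      if (loop_strip_mining_iter == 0) {
        return SafepointPolicy::RemoveSafepoint;   // old !UseCountedLoopSafepoints path
      }
      if (loop_strip_mining_iter > 1 && innermost &&
          safepoint_before_exit && !has_call) {
        return SafepointPolicy::StripMine;         // build the outer skeleton loop
      }
      return SafepointPolicy::KeepSafepoint;
    }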
@@ -696,12 +768,19 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
// Fix all data nodes placed at the old loop head.
// Uses the lazy-update mechanism of 'get_ctrl'.
lazy_replace( x, l );
- set_idom(l, init_control, dom_depth(x));
+ set_idom(l, entry_control, dom_depth(entry_control) + 1);
- if (!UseCountedLoopSafepoints) {
+ if (LoopStripMiningIter == 0 || strip_mine_loop) {
// Check for immediately preceding SafePoint and remove
- Node *sfpt2 = le->in(0);
- if (sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2)) {
+ if (sfpt2->Opcode() == Op_SafePoint && (LoopStripMiningIter != 0 || is_deleteable_safept(sfpt2))) {
+ if (strip_mine_loop) {
+ Node* outer_le = outer_ilt->_tail->in(0);
+ Node* sfpt = sfpt2->clone();
+ sfpt->set_req(0, iffalse);
+ outer_le->set_req(0, sfpt);
+ register_control(sfpt, outer_ilt, iffalse);
+ set_idom(outer_le, sfpt, dom_depth(sfpt));
+ }
lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control));
if (loop->_safepts != NULL) {
loop->_safepts->yank(sfpt2);
@@ -730,6 +809,13 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
// bounds
l->phi()->as_Phi()->set_type(l->phi()->Value(&_igvn));
+ if (strip_mine_loop) {
+ l->mark_strip_mined();
+ l->verify_strip_mined(1);
+ outer_ilt->_head->as_Loop()->verify_strip_mined(1);
+ loop = outer_ilt;
+ }
+
return true;
}
@@ -776,12 +862,93 @@ Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) {
// Return a node which is more "ideal" than the current node.
// Attempt to convert into a counted-loop.
Node *LoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
- if (!can_be_counted_loop(phase)) {
+ if (!can_be_counted_loop(phase) && !is_OuterStripMinedLoop()) {
phase->C->set_major_progress();
}
return RegionNode::Ideal(phase, can_reshape);
}
+void LoopNode::verify_strip_mined(int expect_skeleton) const {
+#ifdef ASSERT
+ const OuterStripMinedLoopNode* outer = NULL;
+ const CountedLoopNode* inner = NULL;
+ if (is_strip_mined()) {
+ assert(is_CountedLoop(), "no Loop should be marked strip mined");
+ inner = as_CountedLoop();
+ outer = inner->in(LoopNode::EntryControl)->as_OuterStripMinedLoop();
+ } else if (is_OuterStripMinedLoop()) {
+ outer = this->as_OuterStripMinedLoop();
+ inner = outer->unique_ctrl_out()->as_CountedLoop();
+ assert(!is_strip_mined(), "outer loop shouldn't be marked strip mined");
+ }
+ if (inner != NULL || outer != NULL) {
+ assert(inner != NULL && outer != NULL, "missing loop in strip mined nest");
+ Node* outer_tail = outer->in(LoopNode::LoopBackControl);
+ Node* outer_le = outer_tail->in(0);
+ assert(outer_le->Opcode() == Op_OuterStripMinedLoopEnd, "tail of outer loop should be an If");
+ Node* sfpt = outer_le->in(0);
+ assert(sfpt->Opcode() == Op_SafePoint, "where's the safepoint?");
+ Node* inner_out = sfpt->in(0);
+ if (inner_out->outcnt() != 1) {
+ ResourceMark rm;
+ Unique_Node_List wq;
+
+ for (DUIterator_Fast imax, i = inner_out->fast_outs(imax); i < imax; i++) {
+ Node* u = inner_out->fast_out(i);
+ if (u == sfpt) {
+ continue;
+ }
+ wq.clear();
+ wq.push(u);
+ bool found_sfpt = false;
+ for (uint next = 0; next < wq.size() && !found_sfpt; next++) {
+ Node *n = wq.at(next);
+ for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !found_sfpt; i++) {
+ Node* u = n->fast_out(i);
+ if (u == sfpt) {
+ found_sfpt = true;
+ }
+ if (!u->is_CFG()) {
+ wq.push(u);
+ }
+ }
+ }
+ assert(found_sfpt, "no node in loop that's not input to safepoint");
+ }
+ }
+ CountedLoopEndNode* cle = inner_out->in(0)->as_CountedLoopEnd();
+ assert(cle == inner->loopexit(), "mismatch");
+ bool has_skeleton = outer_le->in(1)->bottom_type()->singleton() && outer_le->in(1)->bottom_type()->is_int()->get_con() == 0;
+ if (has_skeleton) {
+ assert(expect_skeleton == 1 || expect_skeleton == -1, "unexpected skeleton node");
+ assert(outer->outcnt() == 2, "only phis");
+ } else {
+ assert(expect_skeleton == 0 || expect_skeleton == -1, "no skeleton node?");
+ uint phis = 0;
+ for (DUIterator_Fast imax, i = inner->fast_outs(imax); i < imax; i++) {
+ Node* u = inner->fast_out(i);
+ if (u->is_Phi()) {
+ phis++;
+ }
+ }
+ for (DUIterator_Fast imax, i = outer->fast_outs(imax); i < imax; i++) {
+ Node* u = outer->fast_out(i);
+ assert(u == outer || u == inner || u->is_Phi(), "nothing between inner and outer loop");
+ }
+ uint stores = 0;
+ for (DUIterator_Fast imax, i = inner_out->fast_outs(imax); i < imax; i++) {
+ Node* u = inner_out->fast_out(i);
+ if (u->is_Store()) {
+ stores++;
+ }
+ }
+ assert(outer->outcnt() >= phis + 2 && outer->outcnt() <= phis + 2 + stores + 1, "only phis");
+ }
+ assert(sfpt->outcnt() == 1, "no data node");
+ assert(outer_tail->outcnt() == 1 || !has_skeleton, "no data node");
+ }
+#endif
+}
//=============================================================================
//------------------------------Ideal------------------------------------------
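verify_strip_mined() above expects the fixed control chain that the accessors added in the next hunk (outer_loop_tail, outer_loop_end, outer_safepoint, outer_loop_exit) navigate: OuterStripMinedLoop backedge -> IfTrue -> OuterStripMinedLoopEnd -> SafePoint -> inner loop exit projection. A toy model of that navigation, with an invented type used only for this sketch:

    // Toy node with just the edges the accessors follow; not HotSpot code.
    struct ToyNode {
      ToyNode* backedge   = nullptr;  // in(LoopNode::LoopBackControl), an IfTrue
      ToyNode* control_in = nullptr;  // in(0)
    };

    // Mirrors CountedLoopNode::outer_safepoint(): outer head -> tail (IfTrue)
    // -> OuterStripMinedLoopEnd -> the SafePoint feeding its test.
    ToyNode* toy_outer_safepoint(ToyNode* outer_head) {
      ToyNode* tail = outer_head->backedge;  // outer_loop_tail()
      ToyNode* le   = tail->control_in;      // outer_loop_end()
      return le->control_in;                 // outer_safepoint()
    }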
@@ -802,6 +969,7 @@ void CountedLoopNode::dump_spec(outputStream *st) const {
if (is_pre_loop ()) st->print("pre of N%d" , _main_idx);
if (is_main_loop()) st->print("main of N%d", _idx);
if (is_post_loop()) st->print("post of N%d", _main_idx);
+ if (is_strip_mined()) st->print(" strip mined");
}
#endif
@@ -990,6 +1158,365 @@ Node* CountedLoopNode::match_incr_with_optional_truncation(
return NULL;
}
+LoopNode* CountedLoopNode::skip_strip_mined(int expect_opaq) {
+ if (is_strip_mined()) {
+ verify_strip_mined(expect_opaq);
+ return in(EntryControl)->as_Loop();
+ }
+ return this;
+}
+
+OuterStripMinedLoopNode* CountedLoopNode::outer_loop() const {
+ assert(is_strip_mined(), "not a strip mined loop");
+ Node* c = in(EntryControl);
+ if (c == NULL || c->is_top() || !c->is_OuterStripMinedLoop()) {
+ return NULL;
+ }
+ return c->as_OuterStripMinedLoop();
+}
+
+IfTrueNode* OuterStripMinedLoopNode::outer_loop_tail() const {
+ Node* c = in(LoopBackControl);
+ if (c == NULL || c->is_top()) {
+ return NULL;
+ }
+ return c->as_IfTrue();
+}
+
+IfTrueNode* CountedLoopNode::outer_loop_tail() const {
+ LoopNode* l = outer_loop();
+ if (l == NULL) {
+ return NULL;
+ }
+ return l->outer_loop_tail();
+}
+
+OuterStripMinedLoopEndNode* OuterStripMinedLoopNode::outer_loop_end() const {
+ IfTrueNode* proj = outer_loop_tail();
+ if (proj == NULL) {
+ return NULL;
+ }
+ Node* c = proj->in(0);
+ if (c == NULL || c->is_top() || c->outcnt() != 2) {
+ return NULL;
+ }
+ return c->as_OuterStripMinedLoopEnd();
+}
+
+OuterStripMinedLoopEndNode* CountedLoopNode::outer_loop_end() const {
+ LoopNode* l = outer_loop();
+ if (l == NULL) {
+ return NULL;
+ }
+ return l->outer_loop_end();
+}
+
+IfFalseNode* OuterStripMinedLoopNode::outer_loop_exit() const {
+ IfNode* le = outer_loop_end();
+ if (le == NULL) {
+ return NULL;
+ }
+ Node* c = le->proj_out(false);
+ if (c == NULL) {
+ return NULL;
+ }
+ return c->as_IfFalse();
+}
+
+IfFalseNode* CountedLoopNode::outer_loop_exit() const {
+ LoopNode* l = outer_loop();
+ if (l == NULL) {
+ return NULL;
+ }
+ return l->outer_loop_exit();
+}
+
+SafePointNode* OuterStripMinedLoopNode::outer_safepoint() const {
+ IfNode* le = outer_loop_end();
+ if (le == NULL) {
+ return NULL;
+ }
+ Node* c = le->in(0);
+ if (c == NULL || c->is_top()) {
+ return NULL;
+ }
+ assert(c->Opcode() == Op_SafePoint, "broken outer loop");
+ return c->as_SafePoint();
+}
+
+SafePointNode* CountedLoopNode::outer_safepoint() const {
+ LoopNode* l = outer_loop();
+ if (l == NULL) {
+ return NULL;
+ }
+ return l->outer_safepoint();
+}
+
+void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) {
+ // Look for the outer & inner strip mined loop, reduce number of
+ // iterations of the inner loop, set exit condition of outer loop,
+ // construct required phi nodes for outer loop.
+ CountedLoopNode* inner_cl = unique_ctrl_out()->as_CountedLoop();
+ assert(inner_cl->is_strip_mined(), "inner loop should be strip mined");
+ Node* inner_iv_phi = inner_cl->phi();
+ if (inner_iv_phi == NULL) {
+ return;
+ }
+ CountedLoopEndNode* inner_cle = inner_cl->loopexit();
+
+ int stride = inner_cl->stride_con();
+ jlong scaled_iters_long = ((jlong)LoopStripMiningIter) * ABS(stride);
+ int scaled_iters = (int)scaled_iters_long;
+ int short_scaled_iters = LoopStripMiningIterShortLoop* ABS(stride);
+ const TypeInt* inner_iv_t = igvn->type(inner_iv_phi)->is_int();
+ jlong iter_estimate = (jlong)inner_iv_t->_hi - (jlong)inner_iv_t->_lo;
+ assert(iter_estimate > 0, "broken");
+ if ((jlong)scaled_iters != scaled_iters_long || iter_estimate <= short_scaled_iters) {
+ // Remove outer loop and safepoint (too few iterations)
+ Node* outer_sfpt = outer_safepoint();
+ Node* outer_out = outer_loop_exit();
+ igvn->replace_node(outer_out, outer_sfpt->in(0));
+ igvn->replace_input_of(outer_sfpt, 0, igvn->C->top());
+ inner_cl->clear_strip_mined();
+ return;
+ }
+ if (iter_estimate <= scaled_iters_long) {
+ // We would only go through one iteration of
+ // the outer loop: drop the outer loop but
+ // keep the safepoint so we don't run for
+ // too long without a safepoint
+ IfNode* outer_le = outer_loop_end();
+ Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
+ igvn->replace_node(outer_le, iff);
+ inner_cl->clear_strip_mined();
+ return;
+ }
+
+ Node* cle_tail = inner_cle->proj_out(true);
+ ResourceMark rm;
+ Node_List old_new;
+ if (cle_tail->outcnt() > 1) {
+ // Look for nodes on backedge of inner loop and clone them
+ Unique_Node_List backedge_nodes;
+ for (DUIterator_Fast imax, i = cle_tail->fast_outs(imax); i < imax; i++) {
+ Node* u = cle_tail->fast_out(i);
+ if (u != inner_cl) {
+ assert(!u->is_CFG(), "control flow on the backedge?");
+ backedge_nodes.push(u);
+ }
+ }
+ uint last = igvn->C->unique();
+ for (uint next = 0; next < backedge_nodes.size(); next++) {
+ Node* n = backedge_nodes.at(next);
+ old_new.map(n->_idx, n->clone());
+ for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+ Node* u = n->fast_out(i);
+ assert(!u->is_CFG(), "broken");
+ if (u->_idx >= last) {
+ continue;
+ }
+ if (!u->is_Phi()) {
+ backedge_nodes.push(u);
+ } else {
+ assert(u->in(0) == inner_cl, "strange phi on the backedge");
+ }
+ }
+ }
+ // Put the clones on the outer loop backedge
+ Node* le_tail = outer_loop_tail();
+ for (uint next = 0; next < backedge_nodes.size(); next++) {
+ Node *n = old_new[backedge_nodes.at(next)->_idx];
+ for (uint i = 1; i < n->req(); i++) {
+ if (n->in(i) != NULL && old_new[n->in(i)->_idx] != NULL) {
+ n->set_req(i, old_new[n->in(i)->_idx]);
+ }
+ }
+ if (n->in(0) != NULL) {
+ assert(n->in(0) == cle_tail, "node not on backedge?");
+ n->set_req(0, le_tail);
+ }
+ igvn->register_new_node_with_optimizer(n);
+ }
+ }
+
+ Node* iv_phi = NULL;
+ // Make a clone of each phi in the inner loop
+ // for the outer loop
+ for (uint i = 0; i < inner_cl->outcnt(); i++) {
+ Node* u = inner_cl->raw_out(i);
+ if (u->is_Phi()) {
+ assert(u->in(0) == inner_cl, "inconsistent");
+ Node* phi = u->clone();
+ phi->set_req(0, this);
+ Node* be = old_new[phi->in(LoopNode::LoopBackControl)->_idx];
+ if (be != NULL) {
+ phi->set_req(LoopNode::LoopBackControl, be);
+ }
+ phi = igvn->transform(phi);
+ igvn->replace_input_of(u, LoopNode::EntryControl, phi);
+ if (u == inner_iv_phi) {
+ iv_phi = phi;
+ }
+ }
+ }
+ Node* cle_out = inner_cle->proj_out(false);
+ if (cle_out->outcnt() > 1) {
+ // Look for chains of stores that were sunk
+ // out of the inner loop and are in the outer loop
+ for (DUIterator_Fast imax, i = cle_out->fast_outs(imax); i < imax; i++) {
+ Node* u = cle_out->fast_out(i);
+ if (u->is_Store()) {
+ Node* first = u;
+ for(;;) {
+ Node* next = first->in(MemNode::Memory);
+ if (!next->is_Store() || next->in(0) != cle_out) {
+ break;
+ }
+ first = next;
+ }
+ Node* last = u;
+ for(;;) {
+ Node* next = NULL;
+ for (DUIterator_Fast jmax, j = last->fast_outs(jmax); j < jmax; j++) {
+ Node* uu = last->fast_out(j);
+ if (uu->is_Store() && uu->in(0) == cle_out) {
+ assert(next == NULL, "only one in the outer loop");
+ next = uu;
+ }
+ }
+ if (next == NULL) {
+ break;
+ }
+ last = next;
+ }
+ Node* phi = NULL;
+ for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
+ Node* uu = fast_out(j);
+ if (uu->is_Phi()) {
+ Node* be = uu->in(LoopNode::LoopBackControl);
+ while (be->is_Store() && old_new[be->_idx] != NULL) {
+ ShouldNotReachHere();
+ be = be->in(MemNode::Memory);
+ }
+ if (be == last || be == first->in(MemNode::Memory)) {
+ assert(phi == NULL, "only one phi");
+ phi = uu;
+ }
+ }
+ }
+#ifdef ASSERT
+ for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
+ Node* uu = fast_out(j);
+ if (uu->is_Phi() && uu->bottom_type() == Type::MEMORY) {
+ if (uu->adr_type() == igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type()))) {
+ assert(phi == uu, "what's that phi?");
+ } else if (uu->adr_type() == TypePtr::BOTTOM) {
+ Node* n = uu->in(LoopNode::LoopBackControl);
+ uint limit = igvn->C->live_nodes();
+ uint i = 0;
+ while (n != uu) {
+ i++;
+ assert(i < limit, "infinite loop");
+ if (n->is_Proj()) {
+ n = n->in(0);
+ } else if (n->is_SafePoint() || n->is_MemBar()) {
+ n = n->in(TypeFunc::Memory);
+ } else if (n->is_Phi()) {
+ n = n->in(1);
+ } else if (n->is_MergeMem()) {
+ n = n->as_MergeMem()->memory_at(igvn->C->get_alias_index(u->adr_type()));
+ } else if (n->is_Store() || n->is_LoadStore() || n->is_ClearArray()) {
+ n = n->in(MemNode::Memory);
+ } else {
+ n->dump();
+ ShouldNotReachHere();
+ }
+ }
+ }
+ }
+ }
+#endif
+ if (phi == NULL) {
+ // If an entire chain was sunk, the
+ // inner loop has no phi for that memory
+ // slice; create one for the outer loop.
+ phi = PhiNode::make(this, first->in(MemNode::Memory), Type::MEMORY,
+ igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type())));
+ phi->set_req(LoopNode::LoopBackControl, last);
+ phi = igvn->transform(phi);
+ igvn->replace_input_of(first, MemNode::Memory, phi);
+ } else {
+ // Or fix the outer loop phi to include
+ // that chain of stores.
+ Node* be = phi->in(LoopNode::LoopBackControl);
+ while (be->is_Store() && old_new[be->_idx] != NULL) {
+ ShouldNotReachHere();
+ be = be->in(MemNode::Memory);
+ }
+ if (be == first->in(MemNode::Memory)) {
+ if (be == phi->in(LoopNode::LoopBackControl)) {
+ igvn->replace_input_of(phi, LoopNode::LoopBackControl, last);
+ } else {
+ igvn->replace_input_of(be, MemNode::Memory, last);
+ }
+ } else {
+#ifdef ASSERT
+ if (be == phi->in(LoopNode::LoopBackControl)) {
+ assert(phi->in(LoopNode::LoopBackControl) == last, "");
+ } else {
+ assert(be->in(MemNode::Memory) == last, "");
+ }
+#endif
+ }
+ }
+ }
+ }
+ }
+
+ if (iv_phi != NULL) {
+ // Now adjust the inner loop's exit condition
+ Node* limit = inner_cl->limit();
+ Node* sub = NULL;
+ if (stride > 0) {
+ sub = igvn->transform(new SubINode(limit, iv_phi));
+ } else {
+ sub = igvn->transform(new SubINode(iv_phi, limit));
+ }
+ Node* min = igvn->transform(new MinINode(sub, igvn->intcon(scaled_iters)));
+ Node* new_limit = NULL;
+ if (stride > 0) {
+ new_limit = igvn->transform(new AddINode(min, iv_phi));
+ } else {
+ new_limit = igvn->transform(new SubINode(iv_phi, min));
+ }
+ igvn->replace_input_of(inner_cle->cmp_node(), 2, new_limit);
+ Node* cmp = inner_cle->cmp_node()->clone();
+ Node* bol = inner_cle->in(CountedLoopEndNode::TestValue)->clone();
+ cmp->set_req(2, limit);
+ bol->set_req(1, igvn->transform(cmp));
+ igvn->replace_input_of(outer_loop_end(), 1, igvn->transform(bol));
+ } else {
+ assert(false, "should be able to adjust outer loop");
+ IfNode* outer_le = outer_loop_end();
+ Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
+ igvn->replace_node(outer_le, iff);
+ inner_cl->clear_strip_mined();
+ }
+}
+
+const Type* OuterStripMinedLoopEndNode::Value(PhaseGVN* phase) const {
+ if (!in(0)) return Type::TOP;
+ if (phase->type(in(0)) == Type::TOP)
+ return Type::TOP;
+
+ return TypeTuple::IFBOTH;
+}
+
+Node *OuterStripMinedLoopEndNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+ if (remove_dead_region(phase, can_reshape)) return this;
+
+ return NULL;
+}
//------------------------------filtered_type--------------------------------
// Return a type based on condition control flow
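At the end of adjust_strip_mined_loop() above, the new inner-loop limit is built out of Sub/Min/Add nodes. For the positive-stride case, the scalar arithmetic those nodes encode is simply the following (a plain restatement, not HotSpot code; scaled_iters corresponds to LoopStripMiningIter * ABS(stride)):

    #include <algorithm>

    // Positive-stride case: how far the inner loop may run in one strip.
    int clamped_inner_limit(int iv_phi, int limit, int scaled_iters) {
      int remaining = limit - iv_phi;                    // SubINode(limit, iv_phi)
      int strip     = std::min(remaining, scaled_iters); // MinINode(sub, scaled_iters)
      return iv_phi + strip;                             // AddINode(min, iv_phi)
    }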
@@ -1778,10 +2305,11 @@ void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) {
if (_head->is_Loop()) _head->as_Loop()->set_inner_loop();
}
+ IdealLoopTree* loop = this;
if (_head->is_CountedLoop() ||
- phase->is_counted_loop(_head, this)) {
+ phase->is_counted_loop(_head, loop)) {
- if (!UseCountedLoopSafepoints) {
+ if (LoopStripMiningIter == 0 || (LoopStripMiningIter > 1 && _child == NULL)) {
// Indicate we do not need a safepoint here
_has_sfpt = 1;
}
@@ -1800,8 +2328,10 @@ void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) {
}
// Recursively
- if (_child) _child->counted_loop( phase );
- if (_next) _next ->counted_loop( phase );
+ assert(loop->_child != this || (loop->_head->as_Loop()->is_OuterStripMinedLoop() && _head->as_CountedLoop()->is_strip_mined()), "what kind of loop was added?");
+ assert(loop->_child != this || (loop->_child->_child == NULL && loop->_child->_next == NULL), "would miss some loops");
+ if (loop->_child && loop->_child != this) loop->_child->counted_loop(phase);
+ if (loop->_next) loop->_next ->counted_loop(phase);
}
#ifndef PRODUCT
@@ -1812,7 +2342,7 @@ void IdealLoopTree::dump_head( ) const {
tty->print(" ");
tty->print("Loop: N%d/N%d ",_head->_idx,_tail->_idx);
if (_irreducible) tty->print(" IRREDUCIBLE");
- Node* entry = _head->in(LoopNode::EntryControl);
+ Node* entry = _head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl);
Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
if (predicate != NULL ) {
tty->print(" limit_check");
@@ -1863,6 +2393,9 @@ void IdealLoopTree::dump_head( ) const {
if (Verbose) {
tty->print(" body={"); _body.dump_simple(); tty->print(" }");
}
+ if (_head->as_Loop()->is_strip_mined()) {
+ tty->print(" strip_mined");
+ }
tty->cr();
}
@@ -3232,7 +3765,7 @@ bool PhaseIdealLoop::is_canonical_loop_entry(CountedLoopNode* cl) {
if (!cl->is_main_loop() && !cl->is_post_loop()) {
return false;
}
- Node* ctrl = cl->in(LoopNode::EntryControl);
+ Node* ctrl = cl->skip_strip_mined()->in(LoopNode::EntryControl);
if (ctrl == NULL || (!ctrl->is_IfTrue() && !ctrl->is_IfFalse())) {
return false;
}
@@ -3292,7 +3825,7 @@ Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) {
}
while(worklist.size() != 0 && LCA != early) {
Node* s = worklist.pop();
- if (s->is_Load()) {
+ if (s->is_Load() || s->Opcode() == Op_SafePoint) {
continue;
} else if (s->is_MergeMem()) {
for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
@@ -3471,6 +4004,38 @@ void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, N
}
}
+// Verify that no data node is scheduled in the outer loop of a strip
+// mined loop.
+void PhaseIdealLoop::verify_strip_mined_scheduling(Node *n, Node* least) {
+#ifdef ASSERT
+ if (get_loop(least)->_nest == 0) {
+ return;
+ }
+ IdealLoopTree* loop = get_loop(least);
+ Node* head = loop->_head;
+ if (head->is_OuterStripMinedLoop()) {
+ Node* sfpt = head->as_Loop()->outer_safepoint();
+ ResourceMark rm;
+ Unique_Node_List wq;
+ wq.push(sfpt);
+ for (uint i = 0; i < wq.size(); i++) {
+ Node *m = wq.at(i);
+ for (uint i = 1; i < m->req(); i++) {
+ Node* nn = m->in(i);
+ if (nn == n) {
+ return;
+ }
+ if (nn != NULL && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) {
+ wq.push(nn);
+ }
+ }
+ }
+ ShouldNotReachHere();
+ }
+#endif
+}
+
+
//------------------------------build_loop_late_post---------------------------
// Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping.
// Second pass finds latest legal placement, and ideal loop placement.
@@ -3580,8 +4145,9 @@ void PhaseIdealLoop::build_loop_late_post( Node *n ) {
// which can inhibit range check elimination.
if (least != early) {
Node* ctrl_out = least->unique_ctrl_out();
- if (ctrl_out && ctrl_out->is_CountedLoop() &&
- least == ctrl_out->in(LoopNode::EntryControl)) {
+ if (ctrl_out && ctrl_out->is_Loop() &&
+ least == ctrl_out->in(LoopNode::EntryControl) &&
+ (ctrl_out->is_CountedLoop() || ctrl_out->is_OuterStripMinedLoop())) {
Node* least_dom = idom(least);
if (get_loop(least_dom)->is_member(get_loop(least))) {
least = least_dom;
@@ -3606,6 +4172,7 @@ void PhaseIdealLoop::build_loop_late_post( Node *n ) {
// Assign discovered "here or above" point
least = find_non_split_ctrl(least);
+ verify_strip_mined_scheduling(n, least);
set_ctrl(n, least);
// Collect inner loop bodies
diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp
index 70168186cca..7e615de7e29 100644
--- a/src/hotspot/share/opto/loopnode.hpp
+++ b/src/hotspot/share/opto/loopnode.hpp
@@ -37,6 +37,7 @@ class CountedLoopNode;
class IdealLoopTree;
class LoopNode;
class Node;
+class OuterStripMinedLoopEndNode;
class PhaseIdealLoop;
class CountedLoopReserveKit;
class VectorSet;
@@ -71,7 +72,8 @@ protected:
VectorizedLoop=2048,
HasAtomicPostLoop=4096,
HasRangeChecks=8192,
- IsMultiversioned=16384};
+ IsMultiversioned=16384,
+ StripMined=32768};
char _unswitch_count;
enum { _unswitch_max=3 };
char _postloop_flags;
@@ -90,6 +92,7 @@ public:
int is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; }
void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; }
int partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
+ int is_strip_mined() const { return _loop_flags & StripMined; }
void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
void mark_has_reductions() { _loop_flags |= HasReductions; }
@@ -100,6 +103,8 @@ public:
void mark_has_atomic_post_loop() { _loop_flags |= HasAtomicPostLoop; }
void mark_has_range_checks() { _loop_flags |= HasRangeChecks; }
void mark_is_multiversioned() { _loop_flags |= IsMultiversioned; }
+ void mark_strip_mined() { _loop_flags |= StripMined; }
+ void clear_strip_mined() { _loop_flags &= ~StripMined; }
int unswitch_max() { return _unswitch_max; }
int unswitch_count() { return _unswitch_count; }
@@ -131,6 +136,13 @@ public:
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
+
+ void verify_strip_mined(int expect_skeleton) const;
+ virtual LoopNode* skip_strip_mined(int expect_opaq = 1) { return this; }
+ virtual IfTrueNode* outer_loop_tail() const { ShouldNotReachHere(); return NULL; }
+ virtual OuterStripMinedLoopEndNode* outer_loop_end() const { ShouldNotReachHere(); return NULL; }
+ virtual IfFalseNode* outer_loop_exit() const { ShouldNotReachHere(); return NULL; }
+ virtual SafePointNode* outer_safepoint() const { ShouldNotReachHere(); return NULL; }
};
//------------------------------Counted Loops----------------------------------
@@ -278,6 +290,13 @@ public:
void set_slp_max_unroll(int unroll_factor) { _slp_maximum_unroll_factor = unroll_factor; }
int slp_max_unroll() const { return _slp_maximum_unroll_factor; }
+ virtual LoopNode* skip_strip_mined(int expect_opaq = 1);
+ OuterStripMinedLoopNode* outer_loop() const;
+ virtual IfTrueNode* outer_loop_tail() const;
+ virtual OuterStripMinedLoopEndNode* outer_loop_end() const;
+ virtual IfFalseNode* outer_loop_exit() const;
+ virtual SafePointNode* outer_safepoint() const;
+
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
@@ -374,6 +393,40 @@ class LoopLimitNode : public Node {
virtual Node* Identity(PhaseGVN* phase);
};
+// Support for strip mining
+class OuterStripMinedLoopNode : public LoopNode {
+private:
+ CountedLoopNode* inner_loop() const;
+public:
+ OuterStripMinedLoopNode(Compile* C, Node *entry, Node *backedge)
+ : LoopNode(entry, backedge) {
+ init_class_id(Class_OuterStripMinedLoop);
+ init_flags(Flag_is_macro);
+ C->add_macro_node(this);
+ }
+
+ virtual int Opcode() const;
+
+ virtual IfTrueNode* outer_loop_tail() const;
+ virtual OuterStripMinedLoopEndNode* outer_loop_end() const;
+ virtual IfFalseNode* outer_loop_exit() const;
+ virtual SafePointNode* outer_safepoint() const;
+ void adjust_strip_mined_loop(PhaseIterGVN* igvn);
+};
+
+class OuterStripMinedLoopEndNode : public IfNode {
+public:
+ OuterStripMinedLoopEndNode(Node *control, Node *test, float prob, float cnt)
+ : IfNode(control, test, prob, cnt) {
+ init_class_id(Class_OuterStripMinedLoopEnd);
+ }
+
+ virtual int Opcode() const;
+
+ virtual const Type* Value(PhaseGVN* phase) const;
+ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+};
+
// -----------------------------IdealLoopTree----------------------------------
class IdealLoopTree : public ResourceObj {
public:
@@ -780,6 +833,7 @@ private:
void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
void build_loop_late_post ( Node* n );
+ void verify_strip_mined_scheduling(Node *n, Node* least);
// Array of immediate dominance info for each CFG node indexed by node idx
private:
@@ -877,7 +931,10 @@ public:
// Per-Node transform
virtual Node *transform( Node *a_node ) { return 0; }
- bool is_counted_loop( Node *x, IdealLoopTree *loop );
+ bool is_counted_loop(Node* x, IdealLoopTree*& loop);
+ IdealLoopTree* create_outer_strip_mined_loop(BoolNode *test, Node *cmp, Node *init_control,
+ IdealLoopTree* loop, float cl_prob, float le_fcnt,
+ Node*& entry_control, Node*& iffalse);
Node* exact_limit( IdealLoopTree *loop );
@@ -908,8 +965,24 @@ public:
// When nonnull, the clone and original are side-by-side, both are
// dominated by the passed in side_by_side_idom node. Used in
// construction of unswitched loops.
+ enum CloneLoopMode {
+ IgnoreStripMined = 0, // Only clone inner strip mined loop
+ CloneIncludesStripMined = 1, // clone both inner and outer strip mined loops
+ ControlAroundStripMined = 2 // Only clone inner strip mined loop;
+ // the resulting control flow branches
+ // either to the inner clone or to the
+ // outer strip mined loop.
+ };
void clone_loop( IdealLoopTree *loop, Node_List &old_new, int dom_depth,
- Node* side_by_side_idom = NULL);
+ CloneLoopMode mode, Node* side_by_side_idom = NULL);
+ void clone_loop_handle_data_uses(Node* old, Node_List &old_new,
+ IdealLoopTree* loop, IdealLoopTree* companion_loop,
+ Node_List*& split_if_set, Node_List*& split_bool_set,
+ Node_List*& split_cex_set, Node_List& worklist,
+ uint new_counter, CloneLoopMode mode);
+ void clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
+ IdealLoopTree* outer_loop, int dd, Node_List &old_new,
+ Node_List& extra_data_nodes);
// If we got the effect of peeling, either by actually peeling or by
// making a pre-loop which must execute at least once, we can remove
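As a reading aid, the way the three modes are used by the call sites changed earlier in this patch can be summarised as a small, purely illustrative dispatch: unrolling clones only the inner loop, unswitching clones the whole nest, and peeling plus pre/post-loop insertion clone the inner loop while routing control around the strip mined nest.

    enum CloneLoopMode { IgnoreStripMined = 0, CloneIncludesStripMined = 1, ControlAroundStripMined = 2 };
    enum LoopOpt { Unroll, Unswitch, Peel, PrePostInsertion };

    // Illustrative mapping only, based on the clone_loop() call sites in
    // loopTransform.cpp and loopUnswitch.cpp above; not part of the patch.
    CloneLoopMode clone_mode_for(LoopOpt opt) {
      switch (opt) {
        case Unroll:           return IgnoreStripMined;        // duplicate the inner body only
        case Unswitch:         return CloneIncludesStripMined; // fast/slow copies get their own outer loops
        case Peel:
        case PrePostInsertion: return ControlAroundStripMined; // clone stays outside the strip mined nest
      }
      return IgnoreStripMined;
    }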
@@ -1020,7 +1093,8 @@ public:
// and inserting an if to select fast-slow versions.
ProjNode* create_slow_version_of_loop(IdealLoopTree *loop,
Node_List &old_new,
- int opcode);
+ int opcode,
+ CloneLoopMode mode);
// Clone a loop and return the clone head (clone_loop_head).
// Added nodes include int(1), int(0) - disconnected, If, IfTrue, IfFalse,
@@ -1098,7 +1172,7 @@ public:
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs
// through the Phi recursively, and return a Bool.
- BoolNode *clone_iff( PhiNode *phi, IdealLoopTree *loop );
+ Node *clone_iff( PhiNode *phi, IdealLoopTree *loop );
CmpNode *clone_bool( PhiNode *phi, IdealLoopTree *loop );
diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp
index 45acc2fcae2..d6ddc5723fe 100644
--- a/src/hotspot/share/opto/loopopts.cpp
+++ b/src/hotspot/share/opto/loopopts.cpp
@@ -26,6 +26,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
+#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/connode.hpp"
#include "opto/castnode.hpp"
@@ -306,7 +307,12 @@ Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
get_ctrl(m->in(2)) != n_ctrl &&
get_ctrl(m->in(3)) != n_ctrl) {
// Move the AddP up to dominating point
- set_ctrl_and_loop(m, find_non_split_ctrl(idom(n_ctrl)));
+ Node* c = find_non_split_ctrl(idom(n_ctrl));
+ if (c->is_OuterStripMinedLoop()) {
+ c->as_Loop()->verify_strip_mined(1);
+ c = c->in(LoopNode::EntryControl);
+ }
+ set_ctrl_and_loop(m, c);
continue;
}
return NULL;
@@ -750,14 +756,13 @@ Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
if (ctrl_ok) {
// move the Store
_igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
- _igvn.replace_input_of(n, 0, n_loop->_head->in(LoopNode::EntryControl));
+ _igvn.replace_input_of(n, 0, n_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl));
_igvn.replace_input_of(n, MemNode::Memory, mem->in(LoopNode::EntryControl));
// Disconnect the phi now. An empty phi can confuse other
// optimizations in this pass of loop opts.
_igvn.replace_node(mem, mem->in(LoopNode::EntryControl));
n_loop->_body.yank(mem);
- IdealLoopTree* new_loop = get_loop(n->in(0));
set_ctrl_and_loop(n, n->in(0));
return n;
@@ -840,6 +845,16 @@ void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
_igvn.replace_node(hook, n);
return;
}
+#ifdef ASSERT
+ if (n_loop->_head->is_Loop() && n_loop->_head->as_Loop()->is_strip_mined()) {
+ assert(n_loop->_head->Opcode() == Op_CountedLoop, "outer loop is a strip mined");
+ n_loop->_head->as_Loop()->verify_strip_mined(1);
+ Node* outer = n_loop->_head->as_CountedLoop()->outer_loop();
+ IdealLoopTree* outer_loop = get_loop(outer);
+ assert(n_loop->_parent == outer_loop, "broken loop tree");
+ assert(get_loop(lca) == outer_loop, "safepoint in outer loop consumes all memory state");
+ }
+#endif
// Move store out of the loop
_igvn.replace_node(hook, n->in(MemNode::Memory));
@@ -1016,7 +1031,7 @@ Node *PhaseIdealLoop::place_near_use( Node *useblock ) const {
IdealLoopTree *u_loop = get_loop( useblock );
return (u_loop->_irreducible || u_loop->_child)
? useblock
- : u_loop->_head->in(LoopNode::EntryControl);
+ : u_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
}
@@ -1407,47 +1422,56 @@ void PhaseIdealLoop::split_if_with_blocks( VectorSet &visited, Node_Stack &nstac
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs
// through the Phi recursively, and return a Bool.
-BoolNode *PhaseIdealLoop::clone_iff( PhiNode *phi, IdealLoopTree *loop ) {
+Node* PhaseIdealLoop::clone_iff(PhiNode *phi, IdealLoopTree *loop) {
// Convert this Phi into a Phi merging Bools
uint i;
- for( i = 1; i < phi->req(); i++ ) {
+ for (i = 1; i < phi->req(); i++) {
Node *b = phi->in(i);
- if( b->is_Phi() ) {
- _igvn.replace_input_of(phi, i, clone_iff( b->as_Phi(), loop ));
+ if (b->is_Phi()) {
+ _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi(), loop));
} else {
- assert( b->is_Bool(), "" );
+ assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
}
}
- Node *sample_bool = phi->in(1);
- Node *sample_cmp = sample_bool->in(1);
+ Node* n = phi->in(1);
+ Node* sample_opaque = NULL;
+ Node *sample_bool = NULL;
+ if (n->Opcode() == Op_Opaque4) {
+ sample_opaque = n;
+ sample_bool = n->in(1);
+ assert(sample_bool->is_Bool(), "wrong type");
+ } else {
+ sample_bool = n;
+ }
+ Node *sample_cmp = sample_bool->in(1);
// Make Phis to merge the Cmp's inputs.
- PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP );
- PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP );
- for( i = 1; i < phi->req(); i++ ) {
- Node *n1 = phi->in(i)->in(1)->in(1);
- Node *n2 = phi->in(i)->in(1)->in(2);
- phi1->set_req( i, n1 );
- phi2->set_req( i, n2 );
- phi1->set_type( phi1->type()->meet_speculative(n1->bottom_type()));
- phi2->set_type( phi2->type()->meet_speculative(n2->bottom_type()));
+ PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
+ PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
+ for (i = 1; i < phi->req(); i++) {
+ Node *n1 = sample_opaque == NULL ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
+ Node *n2 = sample_opaque == NULL ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
+ phi1->set_req(i, n1);
+ phi2->set_req(i, n2);
+ phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
+ phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
}
// See if these Phis have been made before.
// Register with optimizer
Node *hit1 = _igvn.hash_find_insert(phi1);
- if( hit1 ) { // Hit, toss just made Phi
+ if (hit1) { // Hit, toss just made Phi
_igvn.remove_dead_node(phi1); // Remove new phi
- assert( hit1->is_Phi(), "" );
+ assert(hit1->is_Phi(), "" );
phi1 = (PhiNode*)hit1; // Use existing phi
} else { // Miss
_igvn.register_new_node_with_optimizer(phi1);
}
Node *hit2 = _igvn.hash_find_insert(phi2);
- if( hit2 ) { // Hit, toss just made Phi
+ if (hit2) { // Hit, toss just made Phi
_igvn.remove_dead_node(phi2); // Remove new phi
- assert( hit2->is_Phi(), "" );
+ assert(hit2->is_Phi(), "" );
phi2 = (PhiNode*)hit2; // Use existing phi
} else { // Miss
_igvn.register_new_node_with_optimizer(phi2);
@@ -1457,8 +1481,8 @@ BoolNode *PhaseIdealLoop::clone_iff( PhiNode *phi, IdealLoopTree *loop ) {
set_ctrl(phi2, phi->in(0));
// Make a new Cmp
Node *cmp = sample_cmp->clone();
- cmp->set_req( 1, phi1 );
- cmp->set_req( 2, phi2 );
+ cmp->set_req(1, phi1);
+ cmp->set_req(2, phi2);
_igvn.register_new_node_with_optimizer(cmp);
set_ctrl(cmp, phi->in(0));
@@ -1468,8 +1492,16 @@ BoolNode *PhaseIdealLoop::clone_iff( PhiNode *phi, IdealLoopTree *loop ) {
_igvn.register_new_node_with_optimizer(b);
set_ctrl(b, phi->in(0));
- assert( b->is_Bool(), "" );
- return (BoolNode*)b;
+ if (sample_opaque != NULL) {
+ Node* opaque = sample_opaque->clone();
+ opaque->set_req(1, b);
+ _igvn.register_new_node_with_optimizer(opaque);
+ set_ctrl(opaque, phi->in(0));
+ return opaque;
+ }
+
+ assert(b->is_Bool(), "");
+ return b;
}
//------------------------------clone_bool-------------------------------------
@@ -1552,6 +1584,252 @@ void PhaseIdealLoop::sink_use( Node *use, Node *post_loop ) {
}
}
+void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
+ IdealLoopTree* loop, IdealLoopTree* outer_loop,
+ Node_List*& split_if_set, Node_List*& split_bool_set,
+ Node_List*& split_cex_set, Node_List& worklist,
+ uint new_counter, CloneLoopMode mode) {
+ Node* nnn = old_new[old->_idx];
+ // Copy uses to a worklist, so I can munge the def-use info
+ // with impunity.
+ for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
+ worklist.push(old->fast_out(j));
+
+ while( worklist.size() ) {
+ Node *use = worklist.pop();
+ if (!has_node(use)) continue; // Ignore dead nodes
+ if (use->in(0) == C->top()) continue;
+ IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
+ // Check for data-use outside of loop - at least one of OLD or USE
+ // must not be a CFG node.
+#ifdef ASSERT
+ if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == NULL) {
+ Node* sfpt = loop->_head->as_CountedLoop()->outer_safepoint();
+ assert(mode == ControlAroundStripMined && use == sfpt, "missed a node");
+ }
+#endif
+ if (!loop->is_member(use_loop) && !outer_loop->is_member(use_loop) && (!old->is_CFG() || !use->is_CFG())) {
+
+ // If the Data use is an IF, that means we have an IF outside of the
+ // loop that is switching on a condition that is set inside of the
+ // loop. Happens if people set a loop-exit flag; then test the flag
+ // in the loop to break the loop, then test it again outside of the
+ // loop to determine which way the loop exited.
+ // Loop predicate If node connects to Bool node through Opaque1 node.
+ if (use->is_If() || use->is_CMove() || C->is_predicate_opaq(use) || use->Opcode() == Op_Opaque4) {
+ // Since this code is highly unlikely, we lazily build the worklist
+ // of such Nodes to go split.
+ if (!split_if_set) {
+ ResourceArea *area = Thread::current()->resource_area();
+ split_if_set = new Node_List(area);
+ }
+ split_if_set->push(use);
+ }
+ if (use->is_Bool()) {
+ if (!split_bool_set) {
+ ResourceArea *area = Thread::current()->resource_area();
+ split_bool_set = new Node_List(area);
+ }
+ split_bool_set->push(use);
+ }
+ if (use->Opcode() == Op_CreateEx) {
+ if (!split_cex_set) {
+ ResourceArea *area = Thread::current()->resource_area();
+ split_cex_set = new Node_List(area);
+ }
+ split_cex_set->push(use);
+ }
+
+
+ // Get "block" use is in
+ uint idx = 0;
+ while( use->in(idx) != old ) idx++;
+ Node *prev = use->is_CFG() ? use : get_ctrl(use);
+ assert(!loop->is_member(get_loop(prev)) && !outer_loop->is_member(get_loop(prev)), "" );
+ Node *cfg = prev->_idx >= new_counter
+ ? prev->in(2)
+ : idom(prev);
+ if( use->is_Phi() ) // Phi use is in prior block
+ cfg = prev->in(idx); // NOT in block of Phi itself
+ if (cfg->is_top()) { // Use is dead?
+ _igvn.replace_input_of(use, idx, C->top());
+ continue;
+ }
+
+ while(!outer_loop->is_member(get_loop(cfg))) {
+ prev = cfg;
+ cfg = cfg->_idx >= new_counter ? cfg->in(2) : idom(cfg);
+ }
+ // If the use occurs after merging several exits from the loop, then
+ // old value must have dominated all those exits. Since the same old
+ // value was used on all those exits we did not need a Phi at this
+ // merge point. NOW we do need a Phi here. Each loop exit value
+ // is now merged with the peeled body exit; each exit gets its own
+ // private Phi and those Phis need to be merged here.
+ Node *phi;
+ if( prev->is_Region() ) {
+ if( idx == 0 ) { // Updating control edge?
+ phi = prev; // Just use existing control
+ } else { // Else need a new Phi
+ phi = PhiNode::make( prev, old );
+ // Now recursively fix up the new uses of old!
+ for( uint i = 1; i < prev->req(); i++ ) {
+ worklist.push(phi); // Onto worklist once for each 'old' input
+ }
+ }
+ } else {
+ // Get new RegionNode merging old and new loop exits
+ prev = old_new[prev->_idx];
+ assert( prev, "just made this in step 7" );
+ if( idx == 0) { // Updating control edge?
+ phi = prev; // Just use existing control
+ } else { // Else need a new Phi
+ // Make a new Phi merging data values properly
+ phi = PhiNode::make( prev, old );
+ phi->set_req( 1, nnn );
+ }
+ }
+ // If inserting a new Phi, check for prior hits
+ if( idx != 0 ) {
+ Node *hit = _igvn.hash_find_insert(phi);
+ if( hit == NULL ) {
+ _igvn.register_new_node_with_optimizer(phi); // Register new phi
+ } else { // or
+ // Remove the new phi from the graph and use the hit
+ _igvn.remove_dead_node(phi);
+ phi = hit; // Use existing phi
+ }
+ set_ctrl(phi, prev);
+ }
+ // Make 'use' use the Phi instead of the old loop body exit value
+ _igvn.replace_input_of(use, idx, phi);
+ if( use->_idx >= new_counter ) { // If updating new phis
+ // Not needed for correctness, but prevents a weak assert
+ // in AddPNode from tripping (when we end up with different
+ // base & derived Phis that will become the same after
+ // IGVN does CSE).
+ Node *hit = _igvn.hash_find_insert(use);
+ if( hit ) // Go ahead and re-hash for hits.
+ _igvn.replace_node( use, hit );
+ }
+
+ // If 'use' was in the loop-exit block, it now needs to be sunk
+ // below the post-loop merge point.
+ sink_use( use, prev );
+ }
+ }
+}
+
+void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
+ IdealLoopTree* outer_loop, int dd, Node_List &old_new,
+ Node_List& extra_data_nodes) {
+ if (head->is_strip_mined() && mode != IgnoreStripMined) {
+ CountedLoopNode* cl = head->as_CountedLoop();
+ Node* l = cl->outer_loop();
+ Node* tail = cl->outer_loop_tail();
+ IfNode* le = cl->outer_loop_end();
+ Node* sfpt = cl->outer_safepoint();
+ CountedLoopEndNode* cle = cl->loopexit();
+ CountedLoopNode* new_cl = old_new[cl->_idx]->as_CountedLoop();
+ CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit();
+ Node* cle_out = cle->proj_out(false);
+
+ Node* new_sfpt = NULL;
+ Node* new_cle_out = cle_out->clone();
+ old_new.map(cle_out->_idx, new_cle_out);
+ if (mode == CloneIncludesStripMined) {
+ // clone outer loop body
+ Node* new_l = l->clone();
+ Node* new_tail = tail->clone();
+ IfNode* new_le = le->clone()->as_If();
+ new_sfpt = sfpt->clone();
+
+ set_loop(new_l, outer_loop->_parent);
+ set_idom(new_l, new_l->in(LoopNode::EntryControl), dd);
+ set_loop(new_cle_out, outer_loop->_parent);
+ set_idom(new_cle_out, new_cle, dd);
+ set_loop(new_sfpt, outer_loop->_parent);
+ set_idom(new_sfpt, new_cle_out, dd);
+ set_loop(new_le, outer_loop->_parent);
+ set_idom(new_le, new_sfpt, dd);
+ set_loop(new_tail, outer_loop->_parent);
+ set_idom(new_tail, new_le, dd);
+ set_idom(new_cl, new_l, dd);
+
+ old_new.map(l->_idx, new_l);
+ old_new.map(tail->_idx, new_tail);
+ old_new.map(le->_idx, new_le);
+ old_new.map(sfpt->_idx, new_sfpt);
+
+ new_l->set_req(LoopNode::LoopBackControl, new_tail);
+ new_l->set_req(0, new_l);
+ new_tail->set_req(0, new_le);
+ new_le->set_req(0, new_sfpt);
+ new_sfpt->set_req(0, new_cle_out);
+ new_cle_out->set_req(0, new_cle);
+ new_cl->set_req(LoopNode::EntryControl, new_l);
+
+ _igvn.register_new_node_with_optimizer(new_l);
+ _igvn.register_new_node_with_optimizer(new_tail);
+ _igvn.register_new_node_with_optimizer(new_le);
+ } else {
+ Node *newhead = old_new[loop->_head->_idx];
+ newhead->as_Loop()->clear_strip_mined();
+ _igvn.replace_input_of(newhead, LoopNode::EntryControl, newhead->in(LoopNode::EntryControl)->in(LoopNode::EntryControl));
+ set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
+ }
+    // Look at data nodes that were assigned a control in the outer
+ // loop: they are kept in the outer loop by the safepoint so start
+ // from the safepoint node's inputs.
+ IdealLoopTree* outer_loop = get_loop(l);
+ Node_Stack stack(2);
+ stack.push(sfpt, 1);
+ uint new_counter = C->unique();
+ while (stack.size() > 0) {
+ Node* n = stack.node();
+ uint i = stack.index();
+ while (i < n->req() &&
+ (n->in(i) == NULL ||
+ !has_ctrl(n->in(i)) ||
+ get_loop(get_ctrl(n->in(i))) != outer_loop ||
+ (old_new[n->in(i)->_idx] != NULL && old_new[n->in(i)->_idx]->_idx >= new_counter))) {
+ i++;
+ }
+ if (i < n->req()) {
+ stack.set_index(i+1);
+ stack.push(n->in(i), 0);
+ } else {
+ assert(old_new[n->_idx] == NULL || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet");
+ Node* m = n == sfpt ? new_sfpt : n->clone();
+ if (m != NULL) {
+ for (uint i = 0; i < n->req(); i++) {
+ if (m->in(i) != NULL && old_new[m->in(i)->_idx] != NULL) {
+ m->set_req(i, old_new[m->in(i)->_idx]);
+ }
+ }
+ } else {
+ assert(n == sfpt && mode != CloneIncludesStripMined, "where's the safepoint clone?");
+ }
+ if (n != sfpt) {
+ extra_data_nodes.push(n);
+ _igvn.register_new_node_with_optimizer(m);
+ assert(get_ctrl(n) == cle_out, "what other control?");
+ set_ctrl(m, new_cle_out);
+ old_new.map(n->_idx, m);
+ }
+ stack.pop();
+ }
+ }
+ if (mode == CloneIncludesStripMined) {
+ _igvn.register_new_node_with_optimizer(new_sfpt);
+ _igvn.register_new_node_with_optimizer(new_cle_out);
+ }
+ } else {
+ Node *newhead = old_new[loop->_head->_idx];
+ set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
+ }
+}
+
//------------------------------clone_loop-------------------------------------
//
// C L O N E A L O O P B O D Y
@@ -1580,7 +1858,10 @@ void PhaseIdealLoop::sink_use( Node *use, Node *post_loop ) {
// dominated by the side_by_side_idom node. Used in construction of
// unswitched loops.
void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
- Node* side_by_side_idom) {
+ CloneLoopMode mode, Node* side_by_side_idom) {
+
+ LoopNode* head = loop->_head->as_Loop();
+ head->verify_strip_mined(1);
if (C->do_vector_loop() && PrintOpto) {
const char* mname = C->method()->name()->as_quoted_ascii();
@@ -1613,6 +1894,7 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
_igvn.register_new_node_with_optimizer(nnn);
}
+ IdealLoopTree* outer_loop = (head->is_strip_mined() && mode != IgnoreStripMined) ? get_loop(head->as_CountedLoop()->outer_loop()) : loop;
// Step 2: Fix the edges in the new body. If the old input is outside the
// loop use it. If the old input is INside the loop, use the corresponding
@@ -1624,7 +1906,7 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
if (has_ctrl(old)) {
set_ctrl(nnn, old_new[get_ctrl(old)->_idx]);
} else {
- set_loop(nnn, loop->_parent);
+ set_loop(nnn, outer_loop->_parent);
if (old->outcnt() > 0) {
set_idom( nnn, old_new[idom(old)->_idx], dd );
}
@@ -1640,22 +1922,21 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
}
_igvn.hash_find_insert(nnn);
}
- Node *newhead = old_new[loop->_head->_idx];
- set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
+ ResourceArea *area = Thread::current()->resource_area();
+ Node_List extra_data_nodes(area);
+ clone_outer_loop(head, mode, loop, outer_loop, dd, old_new, extra_data_nodes);
// Step 3: Now fix control uses. Loop varying control uses have already
// been fixed up (as part of all input edges in Step 2). Loop invariant
// control uses must be either an IfFalse or an IfTrue. Make a merge
// point to merge the old and new IfFalse/IfTrue nodes; make the use
// refer to this.
- ResourceArea *area = Thread::current()->resource_area();
Node_List worklist(area);
uint new_counter = C->unique();
for( i = 0; i < loop->_body.size(); i++ ) {
Node* old = loop->_body.at(i);
if( !old->is_CFG() ) continue;
- Node* nnn = old_new[old->_idx];
// Copy uses to a worklist, so I can munge the def-use info
// with impunity.
@@ -1669,9 +1950,29 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
if( !loop->is_member( use_loop ) && use->is_CFG() ) {
// Both OLD and USE are CFG nodes here.
assert( use->is_Proj(), "" );
+ Node* nnn = old_new[old->_idx];
+
+ Node* newuse = NULL;
+ if (head->is_strip_mined() && mode != IgnoreStripMined) {
+ CountedLoopNode* cl = head->as_CountedLoop();
+ CountedLoopEndNode* cle = cl->loopexit();
+ Node* cle_out = cle->proj_out(false);
+ if (use == cle_out) {
+ IfNode* le = cl->outer_loop_end();
+ use = le->proj_out(false);
+ use_loop = get_loop(use);
+ if (mode == CloneIncludesStripMined) {
+ nnn = old_new[le->_idx];
+ } else {
+ newuse = old_new[cle_out->_idx];
+ }
+ }
+ }
+ if (newuse == NULL) {
+ newuse = use->clone();
+ }
// Clone the loop exit control projection
- Node *newuse = use->clone();
if (C->do_vector_loop()) {
cm.verify_insert_and_clone(use, newuse, cm.clone_idx());
}
@@ -1705,6 +2006,10 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
if( useuse->in(k) == use ) {
useuse->set_req(k, r);
uses_found++;
+ if (useuse->is_Loop() && k == LoopNode::EntryControl) {
+ assert(dom_depth(useuse) > dd_r , "");
+ set_idom(useuse, r, dom_depth(useuse));
+ }
}
}
l -= uses_found; // we deleted 1 or more copies of this edge
@@ -1728,123 +2033,16 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
Node_List *split_cex_set = NULL;
for( i = 0; i < loop->_body.size(); i++ ) {
Node* old = loop->_body.at(i);
- Node* nnn = old_new[old->_idx];
- // Copy uses to a worklist, so I can munge the def-use info
- // with impunity.
- for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
- worklist.push(old->fast_out(j));
+ clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
+ split_bool_set, split_cex_set, worklist, new_counter,
+ mode);
+ }
- while( worklist.size() ) {
- Node *use = worklist.pop();
- if (!has_node(use)) continue; // Ignore dead nodes
- if (use->in(0) == C->top()) continue;
- IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
- // Check for data-use outside of loop - at least one of OLD or USE
- // must not be a CFG node.
- if( !loop->is_member( use_loop ) && (!old->is_CFG() || !use->is_CFG())) {
-
- // If the Data use is an IF, that means we have an IF outside of the
- // loop that is switching on a condition that is set inside of the
- // loop. Happens if people set a loop-exit flag; then test the flag
- // in the loop to break the loop, then test is again outside of the
- // loop to determine which way the loop exited.
- // Loop predicate If node connects to Bool node through Opaque1 node.
- if (use->is_If() || use->is_CMove() || C->is_predicate_opaq(use)) {
- // Since this code is highly unlikely, we lazily build the worklist
- // of such Nodes to go split.
- if( !split_if_set )
- split_if_set = new Node_List(area);
- split_if_set->push(use);
- }
- if( use->is_Bool() ) {
- if( !split_bool_set )
- split_bool_set = new Node_List(area);
- split_bool_set->push(use);
- }
- if( use->Opcode() == Op_CreateEx ) {
- if( !split_cex_set )
- split_cex_set = new Node_List(area);
- split_cex_set->push(use);
- }
-
-
- // Get "block" use is in
- uint idx = 0;
- while( use->in(idx) != old ) idx++;
- Node *prev = use->is_CFG() ? use : get_ctrl(use);
- assert( !loop->is_member( get_loop( prev ) ), "" );
- Node *cfg = prev->_idx >= new_counter
- ? prev->in(2)
- : idom(prev);
- if( use->is_Phi() ) // Phi use is in prior block
- cfg = prev->in(idx); // NOT in block of Phi itself
- if (cfg->is_top()) { // Use is dead?
- _igvn.replace_input_of(use, idx, C->top());
- continue;
- }
-
- while( !loop->is_member( get_loop( cfg ) ) ) {
- prev = cfg;
- cfg = cfg->_idx >= new_counter ? cfg->in(2) : idom(cfg);
- }
- // If the use occurs after merging several exits from the loop, then
- // old value must have dominated all those exits. Since the same old
- // value was used on all those exits we did not need a Phi at this
- // merge point. NOW we do need a Phi here. Each loop exit value
- // is now merged with the peeled body exit; each exit gets its own
- // private Phi and those Phis need to be merged here.
- Node *phi;
- if( prev->is_Region() ) {
- if( idx == 0 ) { // Updating control edge?
- phi = prev; // Just use existing control
- } else { // Else need a new Phi
- phi = PhiNode::make( prev, old );
- // Now recursively fix up the new uses of old!
- for( uint i = 1; i < prev->req(); i++ ) {
- worklist.push(phi); // Onto worklist once for each 'old' input
- }
- }
- } else {
- // Get new RegionNode merging old and new loop exits
- prev = old_new[prev->_idx];
- assert( prev, "just made this in step 7" );
- if( idx == 0 ) { // Updating control edge?
- phi = prev; // Just use existing control
- } else { // Else need a new Phi
- // Make a new Phi merging data values properly
- phi = PhiNode::make( prev, old );
- phi->set_req( 1, nnn );
- }
- }
- // If inserting a new Phi, check for prior hits
- if( idx != 0 ) {
- Node *hit = _igvn.hash_find_insert(phi);
- if( hit == NULL ) {
- _igvn.register_new_node_with_optimizer(phi); // Register new phi
- } else { // or
- // Remove the new phi from the graph and use the hit
- _igvn.remove_dead_node(phi);
- phi = hit; // Use existing phi
- }
- set_ctrl(phi, prev);
- }
- // Make 'use' use the Phi instead of the old loop body exit value
- _igvn.replace_input_of(use, idx, phi);
- if( use->_idx >= new_counter ) { // If updating new phis
- // Not needed for correctness, but prevents a weak assert
- // in AddPNode from tripping (when we end up with different
- // base & derived Phis that will become the same after
- // IGVN does CSE).
- Node *hit = _igvn.hash_find_insert(use);
- if( hit ) // Go ahead and re-hash for hits.
- _igvn.replace_node( use, hit );
- }
-
- // If 'use' was in the loop-exit block, it now needs to be sunk
- // below the post-loop merge point.
- sink_use( use, prev );
- }
- }
+ for (i = 0; i < extra_data_nodes.size(); i++) {
+ Node* old = extra_data_nodes.at(i);
+ clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
+ split_bool_set, split_cex_set, worklist, new_counter,
+ mode);
}
// Check for IFs that need splitting/cloning. Happens if an IF outside of
@@ -1852,31 +2050,31 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
// takes control from one or more OLD Regions (which in turn get from NEW
// Regions). In any case, there will be a set of Phis for each merge point
// from the IF up to where the original BOOL def exits the loop.
- if( split_if_set ) {
- while( split_if_set->size() ) {
+ if (split_if_set) {
+ while (split_if_set->size()) {
Node *iff = split_if_set->pop();
- if( iff->in(1)->is_Phi() ) {
- BoolNode *b = clone_iff( iff->in(1)->as_Phi(), loop );
+ if (iff->in(1)->is_Phi()) {
+ Node *b = clone_iff(iff->in(1)->as_Phi(), loop);
_igvn.replace_input_of(iff, 1, b);
}
}
}
- if( split_bool_set ) {
- while( split_bool_set->size() ) {
+ if (split_bool_set) {
+ while (split_bool_set->size()) {
Node *b = split_bool_set->pop();
Node *phi = b->in(1);
- assert( phi->is_Phi(), "" );
- CmpNode *cmp = clone_bool( (PhiNode*)phi, loop );
+ assert(phi->is_Phi(), "");
+ CmpNode *cmp = clone_bool((PhiNode*)phi, loop);
_igvn.replace_input_of(b, 1, cmp);
}
}
- if( split_cex_set ) {
- while( split_cex_set->size() ) {
+ if (split_cex_set) {
+ while (split_cex_set->size()) {
Node *b = split_cex_set->pop();
- assert( b->in(0)->is_Region(), "" );
- assert( b->in(1)->is_Phi(), "" );
- assert( b->in(0)->in(0) == b->in(1)->in(0), "" );
- split_up( b, b->in(0), NULL );
+ assert(b->in(0)->is_Region(), "");
+ assert(b->in(1)->is_Phi(), "");
+ assert(b->in(0)->in(0) == b->in(1)->in(0), "");
+ split_up(b, b->in(0), NULL);
}
}
@@ -2936,7 +3134,7 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
- clone_loop( loop, old_new, dd );
+ clone_loop(loop, old_new, dd, IgnoreStripMined);
const uint clone_exit_idx = 1;
const uint orig_exit_idx = 2;
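A note on the new CloneLoopMode parameter used in the hunks above: the sketch below shows how a PhaseIdealLoop transformation selects a mode; the surrounding setup (loop, old_new, dominator depth) is assumed caller context, not part of this patch.

    // Sketch only, inside a PhaseIdealLoop member:
    Node_List old_new;
    int dd = dom_depth(loop->_head);                    // assumed setup
    clone_loop(loop, old_new, dd, IgnoreStripMined);    // e.g. partial peel: leave the outer strip-mined loop alone
    // CloneIncludesStripMined would clone the OuterStripMinedLoop nest as well;
    // ControlAroundStripMined is the third mode introduced by this change.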
diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp
index a6ec2ff4267..8e8737f65c6 100644
--- a/src/hotspot/share/opto/macro.cpp
+++ b/src/hotspot/share/opto/macro.cpp
@@ -282,7 +282,8 @@ void PhaseMacroExpand::eliminate_card_mark(Node* p2x) {
if (!this_region->in(ind)->is_IfFalse()) {
ind = 2;
}
- if (this_region->in(ind)->is_IfFalse()) {
+ if (this_region->in(ind)->is_IfFalse() &&
+ this_region->in(ind)->in(0)->Opcode() == Op_If) {
Node* bol = this_region->in(ind)->in(0)->in(1);
assert(bol->is_Bool(), "");
cmpx = bol->in(1);
@@ -2660,6 +2661,8 @@ void PhaseMacroExpand::eliminate_macro_nodes() {
break;
case Node::Class_ArrayCopy:
break;
+ case Node::Class_OuterStripMinedLoop:
+ break;
default:
assert(n->Opcode() == Op_LoopLimit ||
n->Opcode() == Op_Opaque1 ||
@@ -2733,6 +2736,10 @@ bool PhaseMacroExpand::expand_macro_nodes() {
} else if (n->Opcode() == Op_Opaque4) {
_igvn.replace_node(n, n->in(2));
success = true;
+ } else if (n->Opcode() == Op_OuterStripMinedLoop) {
+ n->as_OuterStripMinedLoop()->adjust_strip_mined_loop(&_igvn);
+ C->remove_macro_node(n);
+ success = true;
}
assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
progress = progress || success;
diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp
index 658b54fd358..17942bac08b 100644
--- a/src/hotspot/share/opto/node.hpp
+++ b/src/hotspot/share/opto/node.hpp
@@ -111,6 +111,8 @@ class MulNode;
class MultiNode;
class MultiBranchNode;
class NeverBranchNode;
+class OuterStripMinedLoopNode;
+class OuterStripMinedLoopEndNode;
class Node;
class Node_Array;
class Node_List;
@@ -623,8 +625,9 @@ public:
DEFINE_CLASS_ID(Catch, PCTable, 0)
DEFINE_CLASS_ID(Jump, PCTable, 1)
DEFINE_CLASS_ID(If, MultiBranch, 1)
- DEFINE_CLASS_ID(CountedLoopEnd, If, 0)
- DEFINE_CLASS_ID(RangeCheck, If, 1)
+ DEFINE_CLASS_ID(CountedLoopEnd, If, 0)
+ DEFINE_CLASS_ID(RangeCheck, If, 1)
+ DEFINE_CLASS_ID(OuterStripMinedLoopEnd, If, 2)
DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
DEFINE_CLASS_ID(Start, Multi, 2)
DEFINE_CLASS_ID(MemBar, Multi, 3)
@@ -684,8 +687,9 @@ public:
DEFINE_CLASS_ID(Region, Node, 5)
DEFINE_CLASS_ID(Loop, Region, 0)
- DEFINE_CLASS_ID(Root, Loop, 0)
- DEFINE_CLASS_ID(CountedLoop, Loop, 1)
+ DEFINE_CLASS_ID(Root, Loop, 0)
+ DEFINE_CLASS_ID(CountedLoop, Loop, 1)
+ DEFINE_CLASS_ID(OuterStripMinedLoop, Loop, 2)
DEFINE_CLASS_ID(Sub, Node, 6)
DEFINE_CLASS_ID(Cmp, Sub, 0)
@@ -841,6 +845,8 @@ public:
DEFINE_CLASS_QUERY(Mul)
DEFINE_CLASS_QUERY(Multi)
DEFINE_CLASS_QUERY(MultiBranch)
+ DEFINE_CLASS_QUERY(OuterStripMinedLoop)
+ DEFINE_CLASS_QUERY(OuterStripMinedLoopEnd)
DEFINE_CLASS_QUERY(Parm)
DEFINE_CLASS_QUERY(PCTable)
DEFINE_CLASS_QUERY(Phi)
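The new class IDs above come with the usual is_/as_ query helpers; a hedged sketch of how a pass recognizes the node, mirroring the macro.cpp hunk earlier in this patch (the surrounding PhaseMacroExpand context is assumed):

    // Sketch only:
    if (n->is_OuterStripMinedLoop()) {
      // Expand the outer strip-mined loop late, as expand_macro_nodes() now does.
      n->as_OuterStripMinedLoop()->adjust_strip_mined_loop(&_igvn);
    }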
diff --git a/src/hotspot/share/opto/reg_split.cpp b/src/hotspot/share/opto/reg_split.cpp
index d0c72c3a5b1..05f42d5a343 100644
--- a/src/hotspot/share/opto/reg_split.cpp
+++ b/src/hotspot/share/opto/reg_split.cpp
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/resourceArea.hpp"
+#include "memory/resourceArea.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
diff --git a/src/hotspot/share/opto/split_if.cpp b/src/hotspot/share/opto/split_if.cpp
index 0634b5efb5e..c3a9965bdf3 100644
--- a/src/hotspot/share/opto/split_if.cpp
+++ b/src/hotspot/share/opto/split_if.cpp
@@ -169,7 +169,7 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
assert(u->in(1) == bol, "");
// Get control block of either the CMove or the If input
Node *u_ctrl = u->is_If() ? u->in(0) : get_ctrl(u);
- assert(u_ctrl != blk1 && u_ctrl != blk2, "won't converge");
+ assert((u_ctrl != blk1 && u_ctrl != blk2) || u->is_CMove(), "won't converge");
Node *x = bol->clone();
register_new_node(x, u_ctrl);
_igvn.replace_input_of(u, 1, x);
diff --git a/src/hotspot/share/opto/subnode.cpp b/src/hotspot/share/opto/subnode.cpp
index cc1d881f10d..f53c9eb14f8 100644
--- a/src/hotspot/share/opto/subnode.cpp
+++ b/src/hotspot/share/opto/subnode.cpp
@@ -1595,3 +1595,12 @@ const Type* SqrtDNode::Value(PhaseGVN* phase) const {
if( d < 0.0 ) return Type::DOUBLE;
return TypeD::make( sqrt( d ) );
}
+
+const Type* SqrtFNode::Value(PhaseGVN* phase) const {
+ const Type *t1 = phase->type( in(1) );
+ if( t1 == Type::TOP ) return Type::TOP;
+ if( t1->base() != Type::FloatCon ) return Type::FLOAT;
+ float f = t1->getf();
+ if( f < 0.0f ) return Type::FLOAT;
+ return TypeF::make( (float)sqrt( (double)f ) );
+}
diff --git a/src/hotspot/share/opto/subnode.hpp b/src/hotspot/share/opto/subnode.hpp
index a4adbcf5a47..68aa1a3c0b8 100644
--- a/src/hotspot/share/opto/subnode.hpp
+++ b/src/hotspot/share/opto/subnode.hpp
@@ -442,6 +442,20 @@ public:
virtual const Type* Value(PhaseGVN* phase) const;
};
+//------------------------------SqrtFNode--------------------------------------
+// square root a float
+class SqrtFNode : public Node {
+public:
+ SqrtFNode(Compile* C, Node *c, Node *in1) : Node(c, in1) {
+ init_flags(Flag_is_expensive);
+ C->add_expensive_node(this);
+ }
+ virtual int Opcode() const;
+ const Type *bottom_type() const { return Type::FLOAT; }
+ virtual uint ideal_reg() const { return Op_RegF; }
+ virtual const Type* Value(PhaseGVN* phase) const;
+};
+
//-------------------------------ReverseBytesINode--------------------------------
// reverse bytes of an integer
class ReverseBytesINode : public Node {
diff --git a/src/hotspot/share/opto/superword.cpp b/src/hotspot/share/opto/superword.cpp
index b0afe8e5c74..39a525a1955 100644
--- a/src/hotspot/share/opto/superword.cpp
+++ b/src/hotspot/share/opto/superword.cpp
@@ -1102,7 +1102,7 @@ bool SuperWord::stmts_can_pack(Node* s1, Node* s2, int align) {
}
if (isomorphic(s1, s2)) {
- if (independent(s1, s2) || reduction(s1, s2)) {
+ if ((independent(s1, s2) && have_similar_inputs(s1, s2)) || reduction(s1, s2)) {
if (!exists_at(s1, 0) && !exists_at(s2, 1)) {
if (!s1->is_Mem() || are_adjacent_refs(s1, s2)) {
int s1_align = alignment(s1);
@@ -1180,6 +1180,20 @@ bool SuperWord::independent(Node* s1, Node* s2) {
return independent_path(shallow, deep);
}
+//--------------------------have_similar_inputs-----------------------
+// For a node pair (s1, s2) which is isomorphic and independent,
+// do s1 and s2 have similar input edges?
+bool SuperWord::have_similar_inputs(Node* s1, Node* s2) {
+ // assert(isomorphic(s1, s2) == true, "check isomorphic");
+ // assert(independent(s1, s2) == true, "check independent");
+ if (s1->req() > 1 && !s1->is_Store() && !s1->is_Load()) {
+ for (uint i = 1; i < s1->req(); i++) {
+ if (s1->in(i)->Opcode() != s2->in(i)->Opcode()) return false;
+ }
+ }
+ return true;
+}
+
//------------------------------reduction---------------------------
// Is there a data path between s1 and s2 and the nodes reductions?
bool SuperWord::reduction(Node* s1, Node* s2) {
@@ -1339,6 +1353,7 @@ bool SuperWord::follow_def_uses(Node_List* p) {
for (DUIterator_Fast jmax, j = s2->fast_outs(jmax); j < jmax; j++) {
Node* t2 = s2->fast_out(j);
if (!in_bb(t2)) continue;
+ if (t2->Opcode() == Op_AddI && t2 == _lp->as_CountedLoop()->incr()) continue; // don't mess with the iv
if (!opnd_positions_match(s1, t1, s2, t2))
continue;
if (stmts_can_pack(t1, t2, align)) {
@@ -2307,7 +2322,7 @@ void SuperWord::output() {
vn = VectorNode::make(opc, in1, in2, vlen, velt_basic_type(n));
vlen_in_bytes = vn->as_Vector()->length_in_bytes();
}
- } else if (opc == Op_SqrtD || opc == Op_AbsF || opc == Op_AbsD || opc == Op_NegF || opc == Op_NegD) {
+ } else if (opc == Op_SqrtF || opc == Op_SqrtD || opc == Op_AbsF || opc == Op_AbsD || opc == Op_NegF || opc == Op_NegD) {
// Promote operand to vector (Sqrt/Abs/Neg are 2 address instructions)
Node* in = vector_opd(p, 1);
vn = VectorNode::make(opc, in, NULL, vlen, velt_basic_type(n));
@@ -3299,7 +3314,7 @@ CountedLoopEndNode* SuperWord::get_pre_loop_end(CountedLoopNode* cl) {
return NULL;
}
- Node* p_f = cl->in(LoopNode::EntryControl)->in(0)->in(0);
+ Node* p_f = cl->skip_strip_mined()->in(LoopNode::EntryControl)->in(0)->in(0);
if (!p_f->is_IfFalse()) return NULL;
if (!p_f->in(0)->is_CountedLoopEnd()) return NULL;
CountedLoopEndNode* pre_end = p_f->in(0)->as_CountedLoopEnd();
diff --git a/src/hotspot/share/opto/superword.hpp b/src/hotspot/share/opto/superword.hpp
index 28a4f3c28de..8a4241afdf4 100644
--- a/src/hotspot/share/opto/superword.hpp
+++ b/src/hotspot/share/opto/superword.hpp
@@ -442,6 +442,9 @@ class SuperWord : public ResourceObj {
bool isomorphic(Node* s1, Node* s2);
// Is there no data path from s1 to s2 or s2 to s1?
bool independent(Node* s1, Node* s2);
+ // For a node pair (s1, s2) which is isomorphic and independent,
+ // do s1 and s2 have similar input edges?
+ bool have_similar_inputs(Node* s1, Node* s2);
// Is there a data path between s1 and s2 and both are reductions?
bool reduction(Node* s1, Node* s2);
// Helper for independent
diff --git a/src/hotspot/share/opto/vectornode.cpp b/src/hotspot/share/opto/vectornode.cpp
index ecf8247825e..57b0ecf0e7b 100644
--- a/src/hotspot/share/opto/vectornode.cpp
+++ b/src/hotspot/share/opto/vectornode.cpp
@@ -113,6 +113,9 @@ int VectorNode::opcode(int sopc, BasicType bt) {
case Op_NegD:
assert(bt == T_DOUBLE, "must be");
return Op_NegVD;
+ case Op_SqrtF:
+ assert(bt == T_FLOAT, "must be");
+ return Op_SqrtVF;
case Op_SqrtD:
assert(bt == T_DOUBLE, "must be");
return Op_SqrtVD;
@@ -316,7 +319,7 @@ VectorNode* VectorNode::make(int opc, Node* n1, Node* n2, uint vlen, BasicType b
case Op_NegVF: return new NegVFNode(n1, vt);
case Op_NegVD: return new NegVDNode(n1, vt);
- // Currently only supports double precision sqrt
+ case Op_SqrtVF: return new SqrtVFNode(n1, vt);
case Op_SqrtVD: return new SqrtVDNode(n1, vt);
case Op_LShiftVB: return new LShiftVBNode(n1, n2, vt);
diff --git a/src/hotspot/share/opto/vectornode.hpp b/src/hotspot/share/opto/vectornode.hpp
index 7b65aa9a91d..93da7bc4b7c 100644
--- a/src/hotspot/share/opto/vectornode.hpp
+++ b/src/hotspot/share/opto/vectornode.hpp
@@ -373,6 +373,14 @@ class NegVDNode : public VectorNode {
virtual int Opcode() const;
};
+//------------------------------SqrtVFNode--------------------------------------
+// Vector Sqrt float
+class SqrtVFNode : public VectorNode {
+ public:
+ SqrtVFNode(Node* in, const TypeVect* vt) : VectorNode(in,vt) {}
+ virtual int Opcode() const;
+};
+
//------------------------------SqrtVDNode--------------------------------------
// Vector Sqrt double
class SqrtVDNode : public VectorNode {
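Putting the superword.cpp and vectornode.cpp/hpp hunks together, float sqrt vectorization flows roughly as sketched below; the names come from this patch, while the SuperWord state (n, p, vlen) is assumed:

    // Sketch only, in SuperWord::output():
    int opc = n->Opcode();                                  // Op_SqrtF for a float sqrt in the loop body
    Node* in = vector_opd(p, 1);                            // promote the single operand to a vector
    Node* vn = VectorNode::make(opc, in, NULL, vlen, T_FLOAT);
    // VectorNode::opcode(Op_SqrtF, T_FLOAT) now returns Op_SqrtVF,
    // so make() builds a SqrtVFNode of the requested length.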
diff --git a/src/hotspot/share/precompiled/precompiled.hpp b/src/hotspot/share/precompiled/precompiled.hpp
index dd76a0b9c93..728a75535c0 100644
--- a/src/hotspot/share/precompiled/precompiled.hpp
+++ b/src/hotspot/share/precompiled/precompiled.hpp
@@ -131,7 +131,6 @@
# include "jvmtifiles/jvmti.h"
# include "logging/log.hpp"
# include "memory/allocation.hpp"
-# include "memory/allocation.inline.hpp"
# include "memory/arena.hpp"
# include "memory/heap.hpp"
# include "memory/iterator.hpp"
diff --git a/src/hotspot/share/prims/cdsoffsets.cpp b/src/hotspot/share/prims/cdsoffsets.cpp
new file mode 100644
index 00000000000..d38b7efbfff
--- /dev/null
+++ b/src/hotspot/share/prims/cdsoffsets.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_CDS
+#include "runtime/os.hpp"
+#include "memory/filemap.hpp"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
+#include "prims/cdsoffsets.hpp"
+
+CDSOffsets* CDSOffsets::_all = NULL;
+#define ADD_NEXT(list, name, value) \
+ list->add_end(new CDSOffsets(name, value, NULL))
+
+#define CREATE_OFFSET_MAPS \
+ _all = new CDSOffsets("size_t_size", sizeof(size_t), NULL); \
+ ADD_NEXT(_all, "FileMapHeader::_magic", offset_of(FileMapInfo::FileMapHeader, _magic)); \
+ ADD_NEXT(_all, "FileMapHeader::_crc", offset_of(FileMapInfo::FileMapHeader, _crc)); \
+ ADD_NEXT(_all, "FileMapHeader::_version", offset_of(FileMapInfo::FileMapHeader, _version)); \
+ ADD_NEXT(_all, "FileMapHeader::_space[0]", offset_of(FileMapInfo::FileMapHeader, _space)); \
+ ADD_NEXT(_all, "space_info::_crc", offset_of(FileMapInfo::FileMapHeader::space_info, _crc)); \
+ ADD_NEXT(_all, "space_info::_used", offset_of(FileMapInfo::FileMapHeader::space_info, _used)); \
+ ADD_NEXT(_all, "FileMapHeader::_paths_misc_info_size", offset_of(FileMapInfo::FileMapHeader, _paths_misc_info_size)); \
+ ADD_NEXT(_all, "file_header_size", sizeof(FileMapInfo::FileMapHeader)); \
+ ADD_NEXT(_all, "space_info_size", sizeof(FileMapInfo::FileMapHeader::space_info));
+
+int CDSOffsets::find_offset(const char* name) {
+ if (_all == NULL) {
+ CREATE_OFFSET_MAPS
+ }
+ CDSOffsets* it = _all;
+ while(it) {
+ if (!strcmp(name, it->get_name())) {
+ return it->get_offset();
+ }
+ it = it->next();
+ }
+ return -1; // not found
+}
+
+void CDSOffsets::add_end(CDSOffsets* n) {
+ CDSOffsets* p = this;
+ while(p && p->_next) { p = p->_next; }
+ p->_next = n;
+}
+#endif // INCLUDE_CDS
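A hedged usage sketch for the new lookup API; the particular field name queried here is only illustrative:

    // Sketch only: querying a registered CDS offset by name.
    int crc_offset = CDSOffsets::find_offset("FileMapHeader::_crc");
    if (crc_offset == -1) {
      // The name was not registered in CREATE_OFFSET_MAPS.
    }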
diff --git a/src/hotspot/share/prims/cdsoffsets.hpp b/src/hotspot/share/prims/cdsoffsets.hpp
new file mode 100644
index 00000000000..aa147cc70a0
--- /dev/null
+++ b/src/hotspot/share/prims/cdsoffsets.hpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_PRIMS_CDSOFFSETS_HPP
+#define SHARE_PRIMS_CDSOFFSETS_HPP
+class CDSOffsets: public CHeapObj {
+ private:
+ char* _name;
+ int _offset;
+ CDSOffsets* _next;
+ static CDSOffsets* _all; // sole list for cds
+ public:
+ CDSOffsets(const char* name, int offset, CDSOffsets* next) {
+ _name = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtInternal);
+ strcpy(_name, name);
+ _offset = offset;
+ _next = next;
+ }
+
+ char* get_name() const { return _name; }
+ int get_offset() const { return _offset; }
+ CDSOffsets* next() const { return _next; }
+ void add_end(CDSOffsets* n);
+
+ static int find_offset(const char* name);
+};
+#endif // SHARE_PRIMS_CDSOFFSETS_HPP
diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp
index 26aedaaebe5..06d0f91a6b1 100644
--- a/src/hotspot/share/prims/jni.cpp
+++ b/src/hotspot/share/prims/jni.cpp
@@ -4119,7 +4119,7 @@ static jint attach_current_thread(JavaVM *vm, void **penv, void *_args, bool dae
thread->initialize_thread_current();
if (!os::create_attached_thread(thread)) {
- delete thread;
+ thread->smr_delete();
return JNI_ERR;
}
// Enable stack overflow checks
@@ -4250,7 +4250,7 @@ jint JNICALL jni_DetachCurrentThread(JavaVM *vm) {
// (platform-dependent) methods where we do alternate stack
// maintenance work?)
thread->exit(false, JavaThread::jni_detach);
- delete thread;
+ thread->smr_delete();
HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(JNI_OK);
return JNI_OK;
diff --git a/src/hotspot/share/prims/jvm.cpp b/src/hotspot/share/prims/jvm.cpp
index 368696e5d08..ad3a1cedb46 100644
--- a/src/hotspot/share/prims/jvm.cpp
+++ b/src/hotspot/share/prims/jvm.cpp
@@ -66,6 +66,7 @@
#include "runtime/perfData.hpp"
#include "runtime/reflection.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vm_operations.hpp"
#include "runtime/vm_version.hpp"
@@ -2737,16 +2738,12 @@ void jio_print(const char* s) {
// java.lang.Thread //////////////////////////////////////////////////////////////////////////////
-// In most of the JVM Thread support functions we need to be sure to lock the Threads_lock
-// to prevent the target thread from exiting after we have a pointer to the C++ Thread or
-// OSThread objects. The exception to this rule is when the target object is the thread
-// doing the operation, in which case we know that the thread won't exit until the
-// operation is done (all exits being voluntary). There are a few cases where it is
-// rather silly to do operations on yourself, like resuming yourself or asking whether
-// you are alive. While these can still happen, they are not subject to deadlocks if
-// the lock is held while the operation occurs (this is not the case for suspend, for
-// instance), and are very unlikely. Because IsAlive needs to be fast and its
-// implementation is local to this file, we always lock Threads_lock for that one.
+// In most of the JVM thread support functions we need to access the
+// thread through a ThreadsListHandle to prevent it from exiting and
+// being reclaimed while we try to operate on it. The exceptions to this
+// rule are when operating on the current thread, or if the monitor of
+// the target java.lang.Thread is locked at the Java level - in both
+// cases the target cannot exit.
static void thread_entry(JavaThread* thread, TRAPS) {
HandleMark hm(THREAD);
@@ -2821,7 +2818,7 @@ JVM_ENTRY(void, JVM_StartThread(JNIEnv* env, jobject jthread))
if (native_thread->osthread() == NULL) {
// No one should hold a reference to the 'native_thread'.
- delete native_thread;
+ native_thread->smr_delete();
if (JvmtiExport::should_post_resource_exhausted()) {
JvmtiExport::post_resource_exhausted(
JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_THREADS,
@@ -2835,41 +2832,45 @@ JVM_ENTRY(void, JVM_StartThread(JNIEnv* env, jobject jthread))
JVM_END
+
// JVM_Stop is implemented using a VM_Operation, so threads are forced to safepoints
// before the quasi-asynchronous exception is delivered. This is a little obtrusive,
// but is thought to be reliable and simple. In the case where the receiver is the
-// same thread as the sender, no safepoint is needed.
+// same thread as the sender, no VM_Operation is needed.
JVM_ENTRY(void, JVM_StopThread(JNIEnv* env, jobject jthread, jobject throwable))
JVMWrapper("JVM_StopThread");
+ // A nested ThreadsListHandle will grab the Threads_lock so create
+ // tlh before we resolve throwable.
+ ThreadsListHandle tlh(thread);
oop java_throwable = JNIHandles::resolve(throwable);
if (java_throwable == NULL) {
THROW(vmSymbols::java_lang_NullPointerException());
}
- oop java_thread = JNIHandles::resolve_non_null(jthread);
- JavaThread* receiver = java_lang_Thread::thread(java_thread);
- Events::log_exception(JavaThread::current(),
+ oop java_thread = NULL;
+ JavaThread* receiver = NULL;
+ bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &receiver, &java_thread);
+ Events::log_exception(thread,
"JVM_StopThread thread JavaThread " INTPTR_FORMAT " as oop " INTPTR_FORMAT " [exception " INTPTR_FORMAT "]",
p2i(receiver), p2i((address)java_thread), p2i(throwable));
- // First check if thread is alive
- if (receiver != NULL) {
- // Check if exception is getting thrown at self (use oop equality, since the
- // target object might exit)
- if (java_thread == thread->threadObj()) {
+
+ if (is_alive) {
+ // jthread refers to a live JavaThread.
+ if (thread == receiver) {
+ // Exception is getting thrown at self so no VM_Operation needed.
THROW_OOP(java_throwable);
} else {
- // Enques a VM_Operation to stop all threads and then deliver the exception...
- Thread::send_async_exception(java_thread, JNIHandles::resolve(throwable));
+ // Use a VM_Operation to throw the exception.
+ Thread::send_async_exception(java_thread, java_throwable);
}
- }
- else {
+ } else {
// Either:
// - target thread has not been started before being stopped, or
// - target thread already terminated
// We could read the threadStatus to determine which case it is
// but that is overkill as it doesn't matter. We must set the
// stillborn flag for the first case, and if the thread has already
- // exited setting this flag has no affect
+ // exited setting this flag has no effect.
java_lang_Thread::set_stillborn(java_thread);
}
JVM_END
@@ -2885,12 +2886,12 @@ JVM_END
JVM_ENTRY(void, JVM_SuspendThread(JNIEnv* env, jobject jthread))
JVMWrapper("JVM_SuspendThread");
- oop java_thread = JNIHandles::resolve_non_null(jthread);
- JavaThread* receiver = java_lang_Thread::thread(java_thread);
-
- if (receiver != NULL) {
- // thread has run and has not exited (still on threads list)
+ ThreadsListHandle tlh(thread);
+ JavaThread* receiver = NULL;
+ bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &receiver, NULL);
+ if (is_alive) {
+ // jthread refers to a live JavaThread.
{
MutexLockerEx ml(receiver->SR_lock(), Mutex::_no_safepoint_check_flag);
if (receiver->is_external_suspend()) {
@@ -2922,30 +2923,49 @@ JVM_END
JVM_ENTRY(void, JVM_ResumeThread(JNIEnv* env, jobject jthread))
JVMWrapper("JVM_ResumeThread");
- // Ensure that the C++ Thread and OSThread structures aren't freed before we operate.
- // We need to *always* get the threads lock here, since this operation cannot be allowed during
- // a safepoint. The safepoint code relies on suspending a thread to examine its state. If other
- // threads randomly resumes threads, then a thread might not be suspended when the safepoint code
- // looks at it.
- MutexLocker ml(Threads_lock);
- JavaThread* thr = java_lang_Thread::thread(JNIHandles::resolve_non_null(jthread));
- if (thr != NULL) {
- // the thread has run and is not in the process of exiting
- thr->java_resume();
+
+ ThreadsListHandle tlh(thread);
+ JavaThread* receiver = NULL;
+ bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &receiver, NULL);
+ if (is_alive) {
+ // jthread refers to a live JavaThread.
+
+ // This is the original comment for this Threads_lock grab:
+ // We need to *always* get the threads lock here, since this operation cannot be allowed during
+ // a safepoint. The safepoint code relies on suspending a thread to examine its state. If other
+ // threads randomly resumes threads, then a thread might not be suspended when the safepoint code
+ // looks at it.
+ //
+ // The above comment dates back to when we had both internal and
+ // external suspend APIs that shared a common underlying mechanism.
+ // External suspend is now entirely cooperative and doesn't share
+ // anything with internal suspend. That said, there are some
+ // assumptions in the VM that an external resume grabs the
+ // Threads_lock. We can't drop the Threads_lock grab here until we
+ // resolve the assumptions that exist elsewhere.
+ //
+ MutexLocker ml(Threads_lock);
+ receiver->java_resume();
}
JVM_END
JVM_ENTRY(void, JVM_SetThreadPriority(JNIEnv* env, jobject jthread, jint prio))
JVMWrapper("JVM_SetThreadPriority");
- // Ensure that the C++ Thread and OSThread structures aren't freed before we operate
- MutexLocker ml(Threads_lock);
- oop java_thread = JNIHandles::resolve_non_null(jthread);
+
+ ThreadsListHandle tlh(thread);
+ oop java_thread = NULL;
+ JavaThread* receiver = NULL;
+ bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &receiver, &java_thread);
java_lang_Thread::set_priority(java_thread, (ThreadPriority)prio);
- JavaThread* thr = java_lang_Thread::thread(java_thread);
- if (thr != NULL) { // Thread not yet started; priority pushed down when it is
- Thread::set_priority(thr, (ThreadPriority)prio);
+
+ if (is_alive) {
+ // jthread refers to a live JavaThread.
+ Thread::set_priority(receiver, (ThreadPriority)prio);
}
+ // Implied else: If the JavaThread hasn't started yet, then the
+ // priority set in the java.lang.Thread object above will be pushed
+ // down when it does start.
JVM_END
@@ -3016,67 +3036,39 @@ JVM_END
JVM_ENTRY(jint, JVM_CountStackFrames(JNIEnv* env, jobject jthread))
JVMWrapper("JVM_CountStackFrames");
- // Ensure that the C++ Thread and OSThread structures aren't freed before we operate
- oop java_thread = JNIHandles::resolve_non_null(jthread);
- bool throw_illegal_thread_state = false;
+ uint32_t debug_bits = 0;
+ ThreadsListHandle tlh(thread);
+ JavaThread* receiver = NULL;
+ bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &receiver, NULL);
int count = 0;
-
- {
- MutexLockerEx ml(thread->threadObj() == java_thread ? NULL : Threads_lock);
- // We need to re-resolve the java_thread, since a GC might have happened during the
- // acquire of the lock
- JavaThread* thr = java_lang_Thread::thread(JNIHandles::resolve_non_null(jthread));
-
- if (thr == NULL) {
- // do nothing
- } else if(! thr->is_external_suspend() || ! thr->frame_anchor()->walkable()) {
- // Check whether this java thread has been suspended already. If not, throws
- // IllegalThreadStateException. We defer to throw that exception until
- // Threads_lock is released since loading exception class has to leave VM.
- // The correct way to test a thread is actually suspended is
- // wait_for_ext_suspend_completion(), but we can't call that while holding
- // the Threads_lock. The above tests are sufficient for our purposes
- // provided the walkability of the stack is stable - which it isn't
- // 100% but close enough for most practical purposes.
- throw_illegal_thread_state = true;
- } else {
- // Count all java activation, i.e., number of vframes
- for(vframeStream vfst(thr); !vfst.at_end(); vfst.next()) {
- // Native frames are not counted
+ if (is_alive) {
+ // jthread refers to a live JavaThread.
+ if (receiver->is_thread_fully_suspended(true /* wait for suspend completion */, &debug_bits)) {
+      // Count all java activations, i.e., number of vframes.
+ for (vframeStream vfst(receiver); !vfst.at_end(); vfst.next()) {
+ // Native frames are not counted.
if (!vfst.method()->is_native()) count++;
- }
+ }
+ } else {
+ THROW_MSG_0(vmSymbols::java_lang_IllegalThreadStateException(),
+ "this thread is not suspended");
}
}
+ // Implied else: if JavaThread is not alive simply return a count of 0.
- if (throw_illegal_thread_state) {
- THROW_MSG_0(vmSymbols::java_lang_IllegalThreadStateException(),
- "this thread is not suspended");
- }
return count;
JVM_END
-// Consider: A better way to implement JVM_Interrupt() is to acquire
-// Threads_lock to resolve the jthread into a Thread pointer, fetch
-// Thread->platformevent, Thread->native_thr, Thread->parker, etc.,
-// drop Threads_lock, and the perform the unpark() and thr_kill() operations
-// outside the critical section. Threads_lock is hot so we want to minimize
-// the hold-time. A cleaner interface would be to decompose interrupt into
-// two steps. The 1st phase, performed under Threads_lock, would return
-// a closure that'd be invoked after Threads_lock was dropped.
-// This tactic is safe as PlatformEvent and Parkers are type-stable (TSM) and
-// admit spurious wakeups.
JVM_ENTRY(void, JVM_Interrupt(JNIEnv* env, jobject jthread))
JVMWrapper("JVM_Interrupt");
- // Ensure that the C++ Thread and OSThread structures aren't freed before we operate
- oop java_thread = JNIHandles::resolve_non_null(jthread);
- MutexLockerEx ml(thread->threadObj() == java_thread ? NULL : Threads_lock);
- // We need to re-resolve the java_thread, since a GC might have happened during the
- // acquire of the lock
- JavaThread* thr = java_lang_Thread::thread(JNIHandles::resolve_non_null(jthread));
- if (thr != NULL) {
- Thread::interrupt(thr);
+ ThreadsListHandle tlh(thread);
+ JavaThread* receiver = NULL;
+ bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &receiver, NULL);
+ if (is_alive) {
+ // jthread refers to a live JavaThread.
+ Thread::interrupt(receiver);
}
JVM_END
@@ -3084,16 +3076,14 @@ JVM_END
JVM_QUICK_ENTRY(jboolean, JVM_IsInterrupted(JNIEnv* env, jobject jthread, jboolean clear_interrupted))
JVMWrapper("JVM_IsInterrupted");
- // Ensure that the C++ Thread and OSThread structures aren't freed before we operate
- oop java_thread = JNIHandles::resolve_non_null(jthread);
- MutexLockerEx ml(thread->threadObj() == java_thread ? NULL : Threads_lock);
- // We need to re-resolve the java_thread, since a GC might have happened during the
- // acquire of the lock
- JavaThread* thr = java_lang_Thread::thread(JNIHandles::resolve_non_null(jthread));
- if (thr == NULL) {
- return JNI_FALSE;
+ ThreadsListHandle tlh(thread);
+ JavaThread* receiver = NULL;
+ bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &receiver, NULL);
+ if (is_alive) {
+ // jthread refers to a live JavaThread.
+ return (jboolean) Thread::is_interrupted(receiver, clear_interrupted != 0);
} else {
- return (jboolean) Thread::is_interrupted(thr, clear_interrupted != 0);
+ return JNI_FALSE;
}
JVM_END
@@ -3122,14 +3112,16 @@ JVM_END
JVM_ENTRY(void, JVM_SetNativeThreadName(JNIEnv* env, jobject jthread, jstring name))
JVMWrapper("JVM_SetNativeThreadName");
- ResourceMark rm(THREAD);
+
+ // We don't use a ThreadsListHandle here because the current thread
+ // must be alive.
oop java_thread = JNIHandles::resolve_non_null(jthread);
JavaThread* thr = java_lang_Thread::thread(java_thread);
- // Thread naming only supported for the current thread, doesn't work for
- // target threads.
- if (Thread::current() == thr && !thr->has_attached_via_jni()) {
+ if (thread == thr && !thr->has_attached_via_jni()) {
+ // Thread naming is only supported for the current thread and
// we don't set the name of an attached thread to avoid stepping
- // on other programs
+ // on other programs.
+ ResourceMark rm(thread);
const char *thread_name = java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(name));
os::set_native_thread_name(thread_name);
}
@@ -3561,6 +3553,8 @@ JVM_ENTRY(jobjectArray, JVM_DumpThreads(JNIEnv *env, jclass threadClass, jobject
thread_handle_array->append(h);
}
+ // The JavaThread references in thread_handle_array are validated
+ // in VM_ThreadDump::doit().
Handle stacktraces = ThreadService::dump_stack_traces(thread_handle_array, num_threads, CHECK_NULL);
return (jobjectArray)JNIHandles::make_local(env, stacktraces());
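The jvm.cpp conversions above all follow the same thread-SMR access pattern; as a hedged summary (the JVM_ENTRY context providing thread and jthread is assumed):

    // Sketch only: the ThreadsListHandle pattern used by the converted entry points.
    ThreadsListHandle tlh(thread);                  // pins the current ThreadsList
    JavaThread* receiver = NULL;
    oop java_thread = NULL;
    bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &receiver, &java_thread);
    if (is_alive) {
      // receiver is a live JavaThread and cannot exit or be freed while tlh is in scope.
    } else {
      // The target has not started yet or has already terminated.
    }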
diff --git a/src/hotspot/share/prims/jvmtiEnter.xsl b/src/hotspot/share/prims/jvmtiEnter.xsl
index fbb8724c7ab..406ddc642c0 100644
--- a/src/hotspot/share/prims/jvmtiEnter.xsl
+++ b/src/hotspot/share/prims/jvmtiEnter.xsl
@@ -45,6 +45,7 @@
# include "prims/jvmtiEnter.hpp"
# include "prims/jvmtiRawMonitor.hpp"
# include "prims/jvmtiUtil.hpp"
+# include "runtime/threadSMR.hpp"
@@ -769,47 +770,27 @@ static jvmtiError JNICALL
- oop thread_oop = JNIHandles::resolve_external_guard(
+ err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(),
- );
- if (thread_oop == NULL) {
+ , &java_thread, NULL);
+ if (err != JVMTI_ERROR_NONE) {
- JVMTI_ERROR_INVALID_THREAD
- - jthread resolved to NULL - jthread = " PTR_FORMAT "
+ err
+ - jthread did not convert to a JavaThread - jthread = " PTR_FORMAT "
, p2i( )
}
- if (!thread_oop->is_a(SystemDictionary::Thread_klass())) {
-
- JVMTI_ERROR_INVALID_THREAD
- - oop is not a thread - jthread = " PTR_FORMAT "
- , p2i( )
-
-
- }
- java_thread = java_lang_Thread::thread(thread_oop);
- if (java_thread == NULL) {
-
-
-
- JVMTI_ERROR_THREAD_NOT_ALIVE
-
- - not a Java thread - jthread = " PTR_FORMAT "
- , p2i( )
-
-
- }
-
- JavaThread* java_thread;
+ JavaThread* java_thread = NULL;
+ ThreadsListHandle tlh(this_thread);
diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp
index b67e23f45ad..bff8274a3ea 100644
--- a/src/hotspot/share/prims/jvmtiEnv.cpp
+++ b/src/hotspot/share/prims/jvmtiEnv.cpp
@@ -62,6 +62,7 @@
#include "runtime/reflectionUtils.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
@@ -162,7 +163,6 @@ JvmtiEnv::GetThreadLocalStorage(jthread thread, void** data_ptr) {
*data_ptr = (state == NULL) ? NULL :
state->env_thread_state(this)->get_agent_thread_local_storage_data();
} else {
-
// jvmti_GetThreadLocalStorage is "in native" and doesn't transition
// the thread to _thread_in_vm. However, when the TLS for a thread
// other than the current thread is required we need to transition
@@ -172,17 +172,13 @@ JvmtiEnv::GetThreadLocalStorage(jthread thread, void** data_ptr) {
VM_ENTRY_BASE(jvmtiError, JvmtiEnv::GetThreadLocalStorage , current_thread)
debug_only(VMNativeEntryWrapper __vew;)
- oop thread_oop = JNIHandles::resolve_external_guard(thread);
- if (thread_oop == NULL) {
- return JVMTI_ERROR_INVALID_THREAD;
- }
- if (!thread_oop->is_a(SystemDictionary::Thread_klass())) {
- return JVMTI_ERROR_INVALID_THREAD;
- }
- JavaThread* java_thread = java_lang_Thread::thread(thread_oop);
- if (java_thread == NULL) {
- return JVMTI_ERROR_THREAD_NOT_ALIVE;
+ JavaThread* java_thread = NULL;
+ ThreadsListHandle tlh(current_thread);
+ jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), thread, &java_thread, NULL);
+ if (err != JVMTI_ERROR_NONE) {
+ return err;
}
+
JvmtiThreadState* state = java_thread->jvmti_thread_state();
*data_ptr = (state == NULL) ? NULL :
state->env_thread_state(this)->get_agent_thread_local_storage_data();
@@ -518,42 +514,60 @@ JvmtiEnv::SetEventCallbacks(const jvmtiEventCallbacks* callbacks, jint size_of_c
// event_thread - NULL is a valid value, must be checked
jvmtiError
JvmtiEnv::SetEventNotificationMode(jvmtiEventMode mode, jvmtiEvent event_type, jthread event_thread, ...) {
- JavaThread* java_thread = NULL;
- if (event_thread != NULL) {
- oop thread_oop = JNIHandles::resolve_external_guard(event_thread);
- if (thread_oop == NULL) {
- return JVMTI_ERROR_INVALID_THREAD;
+ if (event_thread == NULL) {
+ // Can be called at Agent_OnLoad() time with event_thread == NULL
+ // when Thread::current() does not work yet so we cannot create a
+ // ThreadsListHandle that is common to both thread-specific and
+ // global code paths.
+
+ // event_type must be valid
+ if (!JvmtiEventController::is_valid_event_type(event_type)) {
+ return JVMTI_ERROR_INVALID_EVENT_TYPE;
}
- if (!thread_oop->is_a(SystemDictionary::Thread_klass())) {
- return JVMTI_ERROR_INVALID_THREAD;
+
+ bool enabled = (mode == JVMTI_ENABLE);
+
+ // assure that needed capabilities are present
+ if (enabled && !JvmtiUtil::has_event_capability(event_type, get_capabilities())) {
+ return JVMTI_ERROR_MUST_POSSESS_CAPABILITY;
}
- java_thread = java_lang_Thread::thread(thread_oop);
- if (java_thread == NULL) {
- return JVMTI_ERROR_THREAD_NOT_ALIVE;
+
+ if (event_type == JVMTI_EVENT_CLASS_FILE_LOAD_HOOK && enabled) {
+ record_class_file_load_hook_enabled();
}
- }
+ JvmtiEventController::set_user_enabled(this, (JavaThread*) NULL, event_type, enabled);
+ } else {
+ // We have a specified event_thread.
- // event_type must be valid
- if (!JvmtiEventController::is_valid_event_type(event_type)) {
- return JVMTI_ERROR_INVALID_EVENT_TYPE;
- }
+ JavaThread* java_thread = NULL;
+ ThreadsListHandle tlh;
+ jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), event_thread, &java_thread, NULL);
+ if (err != JVMTI_ERROR_NONE) {
+ return err;
+ }
- // global events cannot be controlled at thread level.
- if (java_thread != NULL && JvmtiEventController::is_global_event(event_type)) {
- return JVMTI_ERROR_ILLEGAL_ARGUMENT;
- }
+ // event_type must be valid
+ if (!JvmtiEventController::is_valid_event_type(event_type)) {
+ return JVMTI_ERROR_INVALID_EVENT_TYPE;
+ }
- bool enabled = (mode == JVMTI_ENABLE);
+ // global events cannot be controlled at thread level.
+ if (JvmtiEventController::is_global_event(event_type)) {
+ return JVMTI_ERROR_ILLEGAL_ARGUMENT;
+ }
- // assure that needed capabilities are present
- if (enabled && !JvmtiUtil::has_event_capability(event_type, get_capabilities())) {
- return JVMTI_ERROR_MUST_POSSESS_CAPABILITY;
- }
+ bool enabled = (mode == JVMTI_ENABLE);
- if (event_type == JVMTI_EVENT_CLASS_FILE_LOAD_HOOK && enabled) {
- record_class_file_load_hook_enabled();
+ // assure that needed capabilities are present
+ if (enabled && !JvmtiUtil::has_event_capability(event_type, get_capabilities())) {
+ return JVMTI_ERROR_MUST_POSSESS_CAPABILITY;
+ }
+
+ if (event_type == JVMTI_EVENT_CLASS_FILE_LOAD_HOOK && enabled) {
+ record_class_file_load_hook_enabled();
+ }
+ JvmtiEventController::set_user_enabled(this, java_thread, event_type, enabled);
}
- JvmtiEventController::set_user_enabled(this, java_thread, event_type, enabled);
return JVMTI_ERROR_NONE;
} /* end SetEventNotificationMode */
@@ -817,35 +831,45 @@ JvmtiEnv::GetJLocationFormat(jvmtiJlocationFormat* format_ptr) {
// thread_state_ptr - pre-checked for NULL
jvmtiError
JvmtiEnv::GetThreadState(jthread thread, jint* thread_state_ptr) {
- jint state;
- oop thread_oop;
- JavaThread* thr;
+ JavaThread* current_thread = JavaThread::current();
+ JavaThread* java_thread = NULL;
+ oop thread_oop = NULL;
+ ThreadsListHandle tlh(current_thread);
if (thread == NULL) {
- thread_oop = JavaThread::current()->threadObj();
- } else {
- thread_oop = JNIHandles::resolve_external_guard(thread);
- }
+ java_thread = current_thread;
+ thread_oop = java_thread->threadObj();
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
- return JVMTI_ERROR_INVALID_THREAD;
+ if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
+ return JVMTI_ERROR_INVALID_THREAD;
+ }
+ } else {
+ jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), thread, &java_thread, &thread_oop);
+ if (err != JVMTI_ERROR_NONE) {
+ // We got an error code so we don't have a JavaThread *, but
+ // only return an error from here if we didn't get a valid
+ // thread_oop.
+ if (thread_oop == NULL) {
+ return err;
+ }
+ // We have a valid thread_oop so we can return some thread state.
+ }
}
// get most state bits
- state = (jint)java_lang_Thread::get_thread_status(thread_oop);
+ jint state = (jint)java_lang_Thread::get_thread_status(thread_oop);
- // add more state bits
- thr = java_lang_Thread::thread(thread_oop);
- if (thr != NULL) {
- JavaThreadState jts = thr->thread_state();
+ if (java_thread != NULL) {
+ // We have a JavaThread* so add more state bits.
+ JavaThreadState jts = java_thread->thread_state();
- if (thr->is_being_ext_suspended()) {
+ if (java_thread->is_being_ext_suspended()) {
state |= JVMTI_THREAD_STATE_SUSPENDED;
}
if (jts == _thread_in_native) {
state |= JVMTI_THREAD_STATE_IN_NATIVE;
}
- OSThread* osThread = thr->osthread();
+ OSThread* osThread = java_thread->osthread();
if (osThread != NULL && osThread->interrupted()) {
state |= JVMTI_THREAD_STATE_INTERRUPTED;
}
@@ -891,7 +915,6 @@ JvmtiEnv::GetAllThreads(jint* threads_count_ptr, jthread** threads_ptr) {
thread_objs[i] = Handle(tle.get_threadObj(i));
}
- // have to make global handles outside of Threads_lock
jthread *jthreads = new_jthreadArray(nthreads, thread_objs);
NULL_CHECK(jthreads, JVMTI_ERROR_OUT_OF_MEMORY);
@@ -935,19 +958,12 @@ JvmtiEnv::SuspendThread(JavaThread* java_thread) {
jvmtiError
JvmtiEnv::SuspendThreadList(jint request_count, const jthread* request_list, jvmtiError* results) {
int needSafepoint = 0; // > 0 if we need a safepoint
+ ThreadsListHandle tlh;
for (int i = 0; i < request_count; i++) {
- JavaThread *java_thread = get_JavaThread(request_list[i]);
- if (java_thread == NULL) {
- results[i] = JVMTI_ERROR_INVALID_THREAD;
- continue;
- }
- // the thread has not yet run or has exited (not on threads list)
- if (java_thread->threadObj() == NULL) {
- results[i] = JVMTI_ERROR_THREAD_NOT_ALIVE;
- continue;
- }
- if (java_lang_Thread::thread(java_thread->threadObj()) == NULL) {
- results[i] = JVMTI_ERROR_THREAD_NOT_ALIVE;
+ JavaThread *java_thread = NULL;
+ jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), request_list[i], &java_thread, NULL);
+ if (err != JVMTI_ERROR_NONE) {
+ results[i] = err;
continue;
}
// don't allow hidden thread suspend request.
@@ -1018,10 +1034,12 @@ JvmtiEnv::ResumeThread(JavaThread* java_thread) {
// results - pre-checked for NULL
jvmtiError
JvmtiEnv::ResumeThreadList(jint request_count, const jthread* request_list, jvmtiError* results) {
+ ThreadsListHandle tlh;
for (int i = 0; i < request_count; i++) {
- JavaThread *java_thread = get_JavaThread(request_list[i]);
- if (java_thread == NULL) {
- results[i] = JVMTI_ERROR_INVALID_THREAD;
+ JavaThread* java_thread = NULL;
+ jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), request_list[i], &java_thread, NULL);
+ if (err != JVMTI_ERROR_NONE) {
+ results[i] = err;
continue;
}
// don't allow hidden thread resume request.
@@ -1039,7 +1057,7 @@ JvmtiEnv::ResumeThreadList(jint request_count, const jthread* request_list, jvmt
continue;
}
- results[i] = JVMTI_ERROR_NONE; // indicate successful suspend
+ results[i] = JVMTI_ERROR_NONE; // indicate successful resume
}
// per-thread resume results returned via results parameter
return JVMTI_ERROR_NONE;
@@ -1064,20 +1082,14 @@ JvmtiEnv::StopThread(JavaThread* java_thread, jobject exception) {
// thread - NOT pre-checked
jvmtiError
JvmtiEnv::InterruptThread(jthread thread) {
- oop thread_oop = JNIHandles::resolve_external_guard(thread);
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass()))
- return JVMTI_ERROR_INVALID_THREAD;
-
+ // TODO: this is very similar to JVM_Interrupt(); share code in future
JavaThread* current_thread = JavaThread::current();
-
- // Todo: this is a duplicate of JVM_Interrupt; share code in future
- // Ensure that the C++ Thread and OSThread structures aren't freed before we operate
- MutexLockerEx ml(current_thread->threadObj() == thread_oop ? NULL : Threads_lock);
- // We need to re-resolve the java_thread, since a GC might have happened during the
- // acquire of the lock
-
- JavaThread* java_thread = java_lang_Thread::thread(JNIHandles::resolve_external_guard(thread));
- NULL_CHECK(java_thread, JVMTI_ERROR_THREAD_NOT_ALIVE);
+ JavaThread* java_thread = NULL;
+ ThreadsListHandle tlh(current_thread);
+ jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), thread, &java_thread, NULL);
+ if (err != JVMTI_ERROR_NONE) {
+ return err;
+ }
Thread::interrupt(java_thread);
@@ -1094,16 +1106,28 @@ JvmtiEnv::GetThreadInfo(jthread thread, jvmtiThreadInfo* info_ptr) {
HandleMark hm;
JavaThread* current_thread = JavaThread::current();
+ ThreadsListHandle tlh(current_thread);
// if thread is NULL the current thread is used
- oop thread_oop;
+ oop thread_oop = NULL;
if (thread == NULL) {
thread_oop = current_thread->threadObj();
+ if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
+ return JVMTI_ERROR_INVALID_THREAD;
+ }
} else {
- thread_oop = JNIHandles::resolve_external_guard(thread);
+ JavaThread* java_thread = NULL;
+ jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), thread, &java_thread, &thread_oop);
+ if (err != JVMTI_ERROR_NONE) {
+ // We got an error code so we don't have a JavaThread *, but
+ // only return an error from here if we didn't get a valid
+ // thread_oop.
+ if (thread_oop == NULL) {
+ return err;
+ }
+ // We have a valid thread_oop so we can return some thread info.
+ }
}
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass()))
- return JVMTI_ERROR_INVALID_THREAD;
Handle thread_obj(current_thread, thread_oop);
Handle name;
@@ -1272,17 +1296,31 @@ JvmtiEnv::GetCurrentContendedMonitor(JavaThread* java_thread, jobject* monitor_p
// arg - NULL is a valid value, must be checked
jvmtiError
JvmtiEnv::RunAgentThread(jthread thread, jvmtiStartFunction proc, const void* arg, jint priority) {
- oop thread_oop = JNIHandles::resolve_external_guard(thread);
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
+ JavaThread* current_thread = JavaThread::current();
+
+ JavaThread* java_thread = NULL;
+ oop thread_oop = NULL;
+ ThreadsListHandle tlh(current_thread);
+ jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), thread, &java_thread, &thread_oop);
+ if (err != JVMTI_ERROR_NONE) {
+ // We got an error code so we don't have a JavaThread *, but
+ // only return an error from here if we didn't get a valid
+ // thread_oop.
+ if (thread_oop == NULL) {
+ return err;
+ }
+ // We have a valid thread_oop.
+ }
+
+ if (java_thread != NULL) {
+ // 'thread' refers to an existing JavaThread.
return JVMTI_ERROR_INVALID_THREAD;
}
+
if (priority < JVMTI_THREAD_MIN_PRIORITY || priority > JVMTI_THREAD_MAX_PRIORITY) {
return JVMTI_ERROR_INVALID_PRIORITY;
}
- //Thread-self
- JavaThread* current_thread = JavaThread::current();
-
Handle thread_hndl(current_thread, thread_oop);
{
MutexLocker mu(Threads_lock); // grab Threads_lock
@@ -1292,7 +1330,9 @@ JvmtiEnv::RunAgentThread(jthread thread, jvmtiStartFunction proc, const void* ar
// At this point it may be possible that no osthread was created for the
// JavaThread due to lack of memory.
if (new_thread == NULL || new_thread->osthread() == NULL) {
- if (new_thread) delete new_thread;
+ if (new_thread != NULL) {
+ new_thread->smr_delete();
+ }
return JVMTI_ERROR_OUT_OF_MEMORY;
}
@@ -1394,36 +1434,53 @@ JvmtiEnv::GetThreadGroupChildren(jthreadGroup group, jint* thread_count_ptr, jth
int ngroups = 0;
int hidden_threads = 0;
- ResourceMark rm;
- HandleMark hm;
+ ResourceMark rm(current_thread);
+ HandleMark hm(current_thread);
Handle group_hdl(current_thread, group_obj);
- { MutexLocker mu(Threads_lock);
+ { // Cannot allow thread or group counts to change.
+ MutexLocker mu(Threads_lock);
nthreads = java_lang_ThreadGroup::nthreads(group_hdl());
ngroups = java_lang_ThreadGroup::ngroups(group_hdl());
if (nthreads > 0) {
+ ThreadsListHandle tlh(current_thread);
objArrayOop threads = java_lang_ThreadGroup::threads(group_hdl());
assert(nthreads <= threads->length(), "too many threads");
thread_objs = NEW_RESOURCE_ARRAY(Handle,nthreads);
for (int i=0, j=0; i<nthreads; i++) {
oop thread_obj = threads->obj_at(i);
assert(thread_obj != NULL, "thread_obj is NULL");
- JavaThread *javathread = java_lang_Thread::thread(thread_obj);
- // Filter out hidden java threads.
- if (javathread != NULL && javathread->is_hidden_from_external_view()) {
- hidden_threads++;
- continue;
+ JavaThread *java_thread = NULL;
+ jvmtiError err = JvmtiExport::cv_oop_to_JavaThread(tlh.list(), thread_obj, &java_thread);
+ if (err == JVMTI_ERROR_NONE) {
+ // Have a valid JavaThread*.
+ if (java_thread->is_hidden_from_external_view()) {
+ // Filter out hidden java threads.
+ hidden_threads++;
+ continue;
+ }
+ } else {
+ // We couldn't convert thread_obj into a JavaThread*.
+ if (err == JVMTI_ERROR_INVALID_THREAD) {
+ // The thread_obj does not refer to a java.lang.Thread object
+ // so skip it.
+ hidden_threads++;
+ continue;
+ }
+ // We have a valid thread_obj, but no JavaThread*; the caller
+ // can still have limited use for the thread_obj.
}
thread_objs[j++] = Handle(current_thread, thread_obj);
}
nthreads -= hidden_threads;
- }
+ } // ThreadsListHandle is destroyed here.
+
if (ngroups > 0) {
objArrayOop groups = java_lang_ThreadGroup::groups(group_hdl());
- assert(ngroups <= groups->length(), "too many threads");
+ assert(ngroups <= groups->length(), "too many groups");
group_objs = NEW_RESOURCE_ARRAY(Handle,ngroups);
for (int i=0; i<ngroups; i++) {
oop group_obj = groups->obj_at(i);
@@ -1556,7 +1613,7 @@ JvmtiEnv::PopFrame(JavaThread* java_thread) {
}
// Check if java_thread is fully suspended
- if (!is_thread_fully_suspended(java_thread, true /* wait for suspend completion */, &debug_bits)) {
+ if (!java_thread->is_thread_fully_suspended(true /* wait for suspend completion */, &debug_bits)) {
return JVMTI_ERROR_THREAD_NOT_SUSPENDED;
}
// Check to see if a PopFrame was already in progress
@@ -1686,8 +1743,8 @@ JvmtiEnv::NotifyFramePop(JavaThread* java_thread, jint depth) {
return JVMTI_ERROR_THREAD_NOT_ALIVE;
}
- if (!JvmtiEnv::is_thread_fully_suspended(java_thread, true, &debug_bits)) {
- return JVMTI_ERROR_THREAD_NOT_SUSPENDED;
+ if (!java_thread->is_thread_fully_suspended(true, &debug_bits)) {
+ return JVMTI_ERROR_THREAD_NOT_SUSPENDED;
}
if (TraceJVMTICalls) {
diff --git a/src/hotspot/share/prims/jvmtiEnvBase.cpp b/src/hotspot/share/prims/jvmtiEnvBase.cpp
index 2e8918c57e1..18d59fbc2e8 100644
--- a/src/hotspot/share/prims/jvmtiEnvBase.cpp
+++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp
@@ -44,6 +44,7 @@
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vmThread.hpp"
@@ -487,37 +488,6 @@ JvmtiEnvBase::set_event_callbacks(const jvmtiEventCallbacks* callbacks,
}
}
-// Called from JVMTI entry points which perform stack walking. If the
-// associated JavaThread is the current thread, then wait_for_suspend
-// is not used. Otherwise, it determines if we should wait for the
-// "other" thread to complete external suspension. (NOTE: in future
-// releases the suspension mechanism should be reimplemented so this
-// is not necessary.)
-//
-bool
-JvmtiEnvBase::is_thread_fully_suspended(JavaThread* thr, bool wait_for_suspend, uint32_t *bits) {
- // "other" threads require special handling
- if (thr != JavaThread::current()) {
- if (wait_for_suspend) {
- // We are allowed to wait for the external suspend to complete
- // so give the other thread a chance to get suspended.
- if (!thr->wait_for_ext_suspend_completion(SuspendRetryCount,
- SuspendRetryDelay, bits)) {
- // didn't make it so let the caller know
- return false;
- }
- }
- // We aren't allowed to wait for the external suspend to complete
- // so if the other thread isn't externally suspended we need to
- // let the caller know.
- else if (!thr->is_ext_suspend_completed_with_lock(bits)) {
- return false;
- }
- }
-
- return true;
-}
-
// In the fullness of time, all users of the method should instead
// directly use allocate, besides being cleaner and faster, this will
@@ -560,19 +530,6 @@ JvmtiEnvBase::new_jthreadGroupArray(int length, Handle *handles) {
return (jthreadGroup *) new_jobjectArray(length,handles);
}
-
-JavaThread *
-JvmtiEnvBase::get_JavaThread(jthread jni_thread) {
- oop t = JNIHandles::resolve_external_guard(jni_thread);
- if (t == NULL || !t->is_a(SystemDictionary::Thread_klass())) {
- return NULL;
- }
- // The following returns NULL if the thread has not yet run or is in
- // process of exiting
- return java_lang_Thread::thread(t);
-}
-
-
// return the vframe on the specified thread and depth, NULL if no such frame
vframe*
JvmtiEnvBase::vframeFor(JavaThread* java_thread, jint depth) {
@@ -670,7 +627,7 @@ JvmtiEnvBase::get_current_contended_monitor(JavaThread *calling_thread, JavaThre
uint32_t debug_bits = 0;
#endif
assert((SafepointSynchronize::is_at_safepoint() ||
- is_thread_fully_suspended(java_thread, false, &debug_bits)),
+ java_thread->is_thread_fully_suspended(false, &debug_bits)),
"at safepoint or target thread is suspended");
oop obj = NULL;
ObjectMonitor *mon = java_thread->current_waiting_monitor();
@@ -709,7 +666,7 @@ JvmtiEnvBase::get_owned_monitors(JavaThread *calling_thread, JavaThread* java_th
uint32_t debug_bits = 0;
#endif
assert((SafepointSynchronize::is_at_safepoint() ||
- is_thread_fully_suspended(java_thread, false, &debug_bits)),
+ java_thread->is_thread_fully_suspended(false, &debug_bits)),
"at safepoint or target thread is suspended");
if (java_thread->has_last_Java_frame()) {
@@ -831,7 +788,7 @@ JvmtiEnvBase::get_stack_trace(JavaThread *java_thread,
uint32_t debug_bits = 0;
#endif
assert((SafepointSynchronize::is_at_safepoint() ||
- is_thread_fully_suspended(java_thread, false, &debug_bits)),
+ java_thread->is_thread_fully_suspended(false, &debug_bits)),
"at safepoint or target thread is suspended");
int count = 0;
if (java_thread->has_last_Java_frame()) {
@@ -914,7 +871,7 @@ JvmtiEnvBase::get_frame_location(JavaThread *java_thread, jint depth,
uint32_t debug_bits = 0;
#endif
assert((SafepointSynchronize::is_at_safepoint() ||
- is_thread_fully_suspended(java_thread, false, &debug_bits)),
+ java_thread->is_thread_fully_suspended(false, &debug_bits)),
"at safepoint or target thread is suspended");
Thread* current_thread = Thread::current();
ResourceMark rm(current_thread);
@@ -976,7 +933,7 @@ JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject objec
// first derive the object's owner and entry_count (if any)
{
// Revoke any biases before querying the mark word
- if (SafepointSynchronize::is_at_safepoint()) {
+ if (at_safepoint) {
BiasedLocking::revoke_at_safepoint(hobj);
} else {
BiasedLocking::revoke_and_rebias(hobj, false, calling_thread);
@@ -1008,11 +965,11 @@ JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject objec
}
if (owner != NULL) {
+ // Use current thread since function can be called from a
+ // JavaThread or the VMThread.
+ ThreadsListHandle tlh;
// This monitor is owned so we have to find the owning JavaThread.
- // Since owning_thread_from_monitor_owner() grabs a lock, GC can
- // move our object at this point. However, our owner value is safe
- // since it is either the Lock word on a stack or a JavaThread *.
- owning_thread = Threads::owning_thread_from_monitor_owner(owner, !at_safepoint);
+ owning_thread = Threads::owning_thread_from_monitor_owner(tlh.list(), owner);
// Cannot assume (owning_thread != NULL) here because this function
// may not have been called at a safepoint and the owning_thread
// might not be suspended.
@@ -1021,7 +978,7 @@ JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject objec
// or it has to be suspended. Any of these conditions will prevent both
// contending and waiting threads from modifying the state of
// the monitor.
- if (!at_safepoint && !JvmtiEnv::is_thread_fully_suspended(owning_thread, true, &debug_bits)) {
+ if (!at_safepoint && !owning_thread->is_thread_fully_suspended(true, &debug_bits)) {
// Don't worry! This return of JVMTI_ERROR_THREAD_NOT_SUSPENDED
// will not make it back to the JVM/TI agent. The error code will
// get intercepted in JvmtiEnv::GetObjectMonitorUsage() which
@@ -1033,7 +990,7 @@ JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject objec
ret.owner = (jthread)jni_reference(calling_thread, th);
}
// implied else: no owner
- }
+ } // ThreadsListHandle is destroyed here.
if (owning_thread != NULL) { // monitor is owned
// The recursions field of a monitor does not reflect recursions
@@ -1084,13 +1041,15 @@ JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject objec
if (ret.waiter_count > 0) {
// we have contending and/or waiting threads
HandleMark hm;
+ // Use current thread since function can be called from a
+ // JavaThread or the VMThread.
+ ThreadsListHandle tlh;
if (nWant > 0) {
// we have contending threads
ResourceMark rm;
// get_pending_threads returns only java thread so we do not need to
- // check for non java threads.
- GrowableArray<JavaThread*>* wantList = Threads::get_pending_threads(
- nWant, (address)mon, !at_safepoint);
+ // check for non java threads.
+ GrowableArray<JavaThread*>* wantList = Threads::get_pending_threads(tlh.list(), nWant, (address)mon);
if (wantList->length() < nWant) {
// robustness: the pending list has gotten smaller
nWant = wantList->length();
@@ -1101,7 +1060,7 @@ JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject objec
// thread could potentially change the state of the monitor by
// entering it. The JVM/TI spec doesn't allow this.
if (owning_thread == NULL && !at_safepoint &&
- !JvmtiEnv::is_thread_fully_suspended(pending_thread, true, &debug_bits)) {
+ !pending_thread->is_thread_fully_suspended(true, &debug_bits)) {
if (ret.owner != NULL) {
destroy_jni_reference(calling_thread, ret.owner);
}
@@ -1139,7 +1098,7 @@ JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject objec
waiter = mon->next_waiter(waiter);
}
}
- }
+ } // ThreadsListHandle is destroyed here.
// Adjust count. nWant and nWait count values may be less than original.
ret.waiter_count = nWant + nWait;
@@ -1291,14 +1250,23 @@ VM_GetThreadListStackTraces::doit() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
ResourceMark rm;
+ ThreadsListHandle tlh;
for (int i = 0; i < _thread_count; ++i) {
jthread jt = _thread_list[i];
- oop thread_oop = JNIHandles::resolve_external_guard(jt);
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
- set_result(JVMTI_ERROR_INVALID_THREAD);
- return;
+ JavaThread* java_thread = NULL;
+ oop thread_oop = NULL;
+ jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), jt, &java_thread, &thread_oop);
+ if (err != JVMTI_ERROR_NONE) {
+ // We got an error code so we don't have a JavaThread *, but
+ // only return an error from here if we didn't get a valid
+ // thread_oop.
+ if (thread_oop == NULL) {
+ set_result(err);
+ return;
+ }
+ // We have a valid thread_oop.
}
- fill_frames(jt, java_lang_Thread::thread(thread_oop), thread_oop);
+ fill_frames(jt, java_thread, thread_oop);
}
allocate_and_fill_stacks(_thread_count);
}
@@ -1309,7 +1277,7 @@ VM_GetAllStackTraces::doit() {
ResourceMark rm;
_final_thread_count = 0;
- for (JavaThread *jt = Threads::first(); jt != NULL; jt = jt->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
oop thread_oop = jt->threadObj();
if (thread_oop != NULL &&
!jt->is_exiting() &&
@@ -1404,9 +1372,7 @@ JvmtiEnvBase::force_early_return(JavaThread* java_thread, jvalue value, TosState
}
// Check if java_thread is fully suspended
- if (!is_thread_fully_suspended(java_thread,
- true /* wait for suspend completion */,
- &debug_bits)) {
+ if (!java_thread->is_thread_fully_suspended(true /* wait for suspend completion */, &debug_bits)) {
return JVMTI_ERROR_THREAD_NOT_SUSPENDED;
}
@@ -1521,3 +1487,79 @@ JvmtiModuleClosure::get_all_modules(JvmtiEnv* env, jint* module_count_ptr, jobje
return JVMTI_ERROR_NONE;
}
+void
+VM_UpdateForPopTopFrame::doit() {
+ JavaThread* jt = _state->get_thread();
+ ThreadsListHandle tlh;
+ if (jt != NULL && tlh.includes(jt) && !jt->is_exiting() && jt->threadObj() != NULL) {
+ _state->update_for_pop_top_frame();
+ } else {
+ _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+ }
+}
+
+void
+VM_SetFramePop::doit() {
+ JavaThread* jt = _state->get_thread();
+ ThreadsListHandle tlh;
+ if (jt != NULL && tlh.includes(jt) && !jt->is_exiting() && jt->threadObj() != NULL) {
+ int frame_number = _state->count_frames() - _depth;
+ _state->env_thread_state((JvmtiEnvBase*)_env)->set_frame_pop(frame_number);
+ } else {
+ _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+ }
+}
+
+void
+VM_GetOwnedMonitorInfo::doit() {
+ _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+ ThreadsListHandle tlh;
+ if (_java_thread != NULL && tlh.includes(_java_thread)
+ && !_java_thread->is_exiting() && _java_thread->threadObj() != NULL) {
+ _result = ((JvmtiEnvBase *)_env)->get_owned_monitors(_calling_thread, _java_thread,
+ _owned_monitors_list);
+ }
+}
+
+void
+VM_GetCurrentContendedMonitor::doit() {
+ _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+ ThreadsListHandle tlh;
+ if (_java_thread != NULL && tlh.includes(_java_thread)
+ && !_java_thread->is_exiting() && _java_thread->threadObj() != NULL) {
+ _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,_java_thread,_owned_monitor_ptr);
+ }
+}
+
+void
+VM_GetStackTrace::doit() {
+ _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+ ThreadsListHandle tlh;
+ if (_java_thread != NULL && tlh.includes(_java_thread)
+ && !_java_thread->is_exiting() && _java_thread->threadObj() != NULL) {
+ _result = ((JvmtiEnvBase *)_env)->get_stack_trace(_java_thread,
+ _start_depth, _max_count,
+ _frame_buffer, _count_ptr);
+ }
+}
+
+void
+VM_GetFrameCount::doit() {
+ _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+ JavaThread* jt = _state->get_thread();
+ ThreadsListHandle tlh;
+ if (jt != NULL && tlh.includes(jt) && !jt->is_exiting() && jt->threadObj() != NULL) {
+ _result = ((JvmtiEnvBase*)_env)->get_frame_count(_state, _count_ptr);
+ }
+}
+
+void
+VM_GetFrameLocation::doit() {
+ _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+ ThreadsListHandle tlh;
+ if (_java_thread != NULL && tlh.includes(_java_thread)
+ && !_java_thread->is_exiting() && _java_thread->threadObj() != NULL) {
+ _result = ((JvmtiEnvBase*)_env)->get_frame_location(_java_thread, _depth,
+ _method_ptr, _location_ptr);
+ }
+}
diff --git a/src/hotspot/share/prims/jvmtiEnvBase.hpp b/src/hotspot/share/prims/jvmtiEnvBase.hpp
index c5786aca4af..84e3f54ae6d 100644
--- a/src/hotspot/share/prims/jvmtiEnvBase.hpp
+++ b/src/hotspot/share/prims/jvmtiEnvBase.hpp
@@ -280,9 +280,6 @@ class JvmtiEnvBase : public CHeapObj<mtInternal> {
jthread * new_jthreadArray(int length, Handle *handles);
jthreadGroup * new_jthreadGroupArray(int length, Handle *handles);
- // convert from JNIHandle to JavaThread *
- JavaThread * get_JavaThread(jthread jni_thread);
-
// convert to a jni jclass from a non-null Klass*
jclass get_jni_class_non_null(Klass* k);
@@ -297,11 +294,6 @@ class JvmtiEnvBase : public CHeapObj {
public:
// get a field descriptor for the specified class and field
static bool get_field_descriptor(Klass* k, jfieldID field, fieldDescriptor* fd);
- // test for suspend - most (all?) of these should go away
- static bool is_thread_fully_suspended(JavaThread *thread,
- bool wait_for_suspend,
- uint32_t *bits);
-
// JVMTI API helper functions which are called at safepoint or thread is suspended.
jvmtiError get_frame_count(JvmtiThreadState *state, jint *count_ptr);
@@ -360,14 +352,7 @@ public:
}
VMOp_Type type() const { return VMOp_UpdateForPopTopFrame; }
jvmtiError result() { return _result; }
- void doit() {
- JavaThread* jt = _state->get_thread();
- if (Threads::includes(jt) && !jt->is_exiting() && jt->threadObj() != NULL) {
- _state->update_for_pop_top_frame();
- } else {
- _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
- }
- }
+ void doit();
};
// VM operation to set frame pop.
@@ -390,15 +375,7 @@ public:
bool allow_nested_vm_operations() const { return true; }
VMOp_Type type() const { return VMOp_SetFramePop; }
jvmtiError result() { return _result; }
- void doit() {
- JavaThread* jt = _state->get_thread();
- if (Threads::includes(jt) && !jt->is_exiting() && jt->threadObj() != NULL) {
- int frame_number = _state->count_frames() - _depth;
- _state->env_thread_state((JvmtiEnvBase*)_env)->set_frame_pop(frame_number);
- } else {
- _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
- }
- }
+ void doit();
};
@@ -422,14 +399,7 @@ public:
_result = JVMTI_ERROR_NONE;
}
VMOp_Type type() const { return VMOp_GetOwnedMonitorInfo; }
- void doit() {
- _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
- if (Threads::includes(_java_thread) && !_java_thread->is_exiting()
- && _java_thread->threadObj() != NULL) {
- _result = ((JvmtiEnvBase *)_env)->get_owned_monitors(_calling_thread, _java_thread,
- _owned_monitors_list);
- }
- }
+ void doit();
jvmtiError result() { return _result; }
};
@@ -476,13 +446,7 @@ public:
}
VMOp_Type type() const { return VMOp_GetCurrentContendedMonitor; }
jvmtiError result() { return _result; }
- void doit() {
- _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
- if (Threads::includes(_java_thread) && !_java_thread->is_exiting() &&
- _java_thread->threadObj() != NULL) {
- _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,_java_thread,_owned_monitor_ptr);
- }
- }
+ void doit();
};
// VM operation to get stack trace at safepoint.
@@ -509,15 +473,7 @@ public:
}
jvmtiError result() { return _result; }
VMOp_Type type() const { return VMOp_GetStackTrace; }
- void doit() {
- _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
- if (Threads::includes(_java_thread) && !_java_thread->is_exiting()
- && _java_thread->threadObj() != NULL) {
- _result = ((JvmtiEnvBase *)_env)->get_stack_trace(_java_thread,
- _start_depth, _max_count,
- _frame_buffer, _count_ptr);
- }
- }
+ void doit();
};
// forward declaration
@@ -607,13 +563,7 @@ public:
}
VMOp_Type type() const { return VMOp_GetFrameCount; }
jvmtiError result() { return _result; }
- void doit() {
- _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
- JavaThread* jt = _state->get_thread();
- if (Threads::includes(jt) && !jt->is_exiting() && jt->threadObj() != NULL) {
- _result = ((JvmtiEnvBase*)_env)->get_frame_count(_state, _count_ptr);
- }
- }
+ void doit();
};
// VM operation to frame location at safepoint.
@@ -637,14 +587,7 @@ public:
}
VMOp_Type type() const { return VMOp_GetFrameLocation; }
jvmtiError result() { return _result; }
- void doit() {
- _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
- if (Threads::includes(_java_thread) && !_java_thread->is_exiting() &&
- _java_thread->threadObj() != NULL) {
- _result = ((JvmtiEnvBase*)_env)->get_frame_location(_java_thread, _depth,
- _method_ptr, _location_ptr);
- }
- }
+ void doit();
};
diff --git a/src/hotspot/share/prims/jvmtiEnvThreadState.cpp b/src/hotspot/share/prims/jvmtiEnvThreadState.cpp
index d7ffd105596..643c4409274 100644
--- a/src/hotspot/share/prims/jvmtiEnvThreadState.cpp
+++ b/src/hotspot/share/prims/jvmtiEnvThreadState.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,7 @@
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/signature.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vm_operations.hpp"
diff --git a/src/hotspot/share/prims/jvmtiEnvThreadState.hpp b/src/hotspot/share/prims/jvmtiEnvThreadState.hpp
index c25e5e34547..9b947f93e94 100644
--- a/src/hotspot/share/prims/jvmtiEnvThreadState.hpp
+++ b/src/hotspot/share/prims/jvmtiEnvThreadState.hpp
@@ -27,7 +27,6 @@
#include "jvmtifiles/jvmti.h"
#include "memory/allocation.hpp"
-#include "memory/allocation.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "prims/jvmtiEventController.hpp"
#include "utilities/globalDefinitions.hpp"
diff --git a/src/hotspot/share/prims/jvmtiEventController.cpp b/src/hotspot/share/prims/jvmtiEventController.cpp
index cdad3264c82..3c919c61106 100644
--- a/src/hotspot/share/prims/jvmtiEventController.cpp
+++ b/src/hotspot/share/prims/jvmtiEventController.cpp
@@ -33,7 +33,8 @@
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiThreadState.inline.hpp"
#include "runtime/frame.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vmThread.hpp"
@@ -580,13 +581,10 @@ JvmtiEventControllerPrivate::recompute_enabled() {
// filtered events and there weren't last time
if ( (any_env_thread_enabled & THREAD_FILTERED_EVENT_BITS) != 0 &&
(was_any_env_thread_enabled & THREAD_FILTERED_EVENT_BITS) == 0) {
- {
- MutexLocker mu(Threads_lock); //hold the Threads_lock for the iteration
- for (JavaThread *tp = Threads::first(); tp != NULL; tp = tp->next()) {
- // state_for_while_locked() makes tp->is_exiting() check
- JvmtiThreadState::state_for_while_locked(tp); // create the thread state if missing
- }
- }// release Threads_lock
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *tp = jtiwh.next(); ) {
+ // state_for_while_locked() makes tp->is_exiting() check
+ JvmtiThreadState::state_for_while_locked(tp); // create the thread state if missing
+ }
}
// compute and set thread-filtered events
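The same iteration idiom recurs throughout this patch; below is a minimal sketch (not part of the patch) of what replaces the old Threads_lock plus Threads::first()/next() loops, with do_per_thread_work() as a placeholder name:

// Sketch only: JavaThreadIteratorWithHandle embeds a ThreadsListHandle, so
// the loop walks a stable snapshot of the thread list and every returned
// JavaThread * stays protected without holding Threads_lock.
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
  do_per_thread_work(jt);  // placeholder for the per-thread body
}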
diff --git a/src/hotspot/share/prims/jvmtiEventController.hpp b/src/hotspot/share/prims/jvmtiEventController.hpp
index 90b000ccfb3..c250c2f4772 100644
--- a/src/hotspot/share/prims/jvmtiEventController.hpp
+++ b/src/hotspot/share/prims/jvmtiEventController.hpp
@@ -27,7 +27,6 @@
#include "jvmtifiles/jvmti.h"
#include "memory/allocation.hpp"
-#include "memory/allocation.inline.hpp"
#include "utilities/globalDefinitions.hpp"
// forward declaration
diff --git a/src/hotspot/share/prims/jvmtiExport.cpp b/src/hotspot/share/prims/jvmtiExport.cpp
index 7d8de627808..631ddcdabaa 100644
--- a/src/hotspot/share/prims/jvmtiExport.cpp
+++ b/src/hotspot/share/prims/jvmtiExport.cpp
@@ -53,6 +53,7 @@
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "services/serviceUtil.hpp"
#include "utilities/macros.hpp"
@@ -721,6 +722,108 @@ JvmtiExport::get_all_native_method_prefixes(int* count_ptr) {
}
}
+// Convert an external thread reference to a JavaThread found on the
+// specified ThreadsList. The ThreadsListHandle in the caller "protects"
+// the returned JavaThread *.
+//
+// If thread_oop_p is not NULL, then the caller wants to use the oop
+// after this call so the oop is returned. On success, *jt_pp is set
+// to the converted JavaThread * and JVMTI_ERROR_NONE is returned.
+// On error, returns various JVMTI_ERROR_* values.
+//
+jvmtiError
+JvmtiExport::cv_external_thread_to_JavaThread(ThreadsList * t_list,
+ jthread thread,
+ JavaThread ** jt_pp,
+ oop * thread_oop_p) {
+ assert(t_list != NULL, "must have a ThreadsList");
+ assert(jt_pp != NULL, "must have a return JavaThread pointer");
+ // thread_oop_p is optional so no assert()
+
+ oop thread_oop = JNIHandles::resolve_external_guard(thread);
+ if (thread_oop == NULL) {
+ // NULL jthread, GC'ed jthread or a bad JNI handle.
+ return JVMTI_ERROR_INVALID_THREAD;
+ }
+ // Looks like an oop at this point.
+
+ if (!thread_oop->is_a(SystemDictionary::Thread_klass())) {
+ // The oop is not a java.lang.Thread.
+ return JVMTI_ERROR_INVALID_THREAD;
+ }
+ // Looks like a java.lang.Thread oop at this point.
+
+ if (thread_oop_p != NULL) {
+ // Return the oop to the caller; the caller may still want
+ // the oop even if this function returns an error.
+ *thread_oop_p = thread_oop;
+ }
+
+ JavaThread * java_thread = java_lang_Thread::thread(thread_oop);
+ if (java_thread == NULL) {
+ // The java.lang.Thread does not contain a JavaThread * so it has
+ // not yet run or it has died.
+ return JVMTI_ERROR_THREAD_NOT_ALIVE;
+ }
+ // Looks like a live JavaThread at this point.
+
+ // We do not check the EnableThreadSMRExtraValidityChecks option
+ // for this includes() call because JVM/TI's spec is tighter.
+ if (!t_list->includes(java_thread)) {
+ // Not on the JavaThreads list so it is not alive.
+ return JVMTI_ERROR_THREAD_NOT_ALIVE;
+ }
+
+ // Return a live JavaThread that is "protected" by the
+ // ThreadsListHandle in the caller.
+ *jt_pp = java_thread;
+
+ return JVMTI_ERROR_NONE;
+}
+
+// Convert an oop to a JavaThread found on the specified ThreadsList.
+// The ThreadsListHandle in the caller "protects" the returned
+// JavaThread *.
+//
+// On success, *jt_pp is set to the converted JavaThread * and
+// JVMTI_ERROR_NONE is returned. On error, returns various
+// JVMTI_ERROR_* values.
+//
+jvmtiError
+JvmtiExport::cv_oop_to_JavaThread(ThreadsList * t_list, oop thread_oop,
+ JavaThread ** jt_pp) {
+ assert(t_list != NULL, "must have a ThreadsList");
+ assert(thread_oop != NULL, "must have an oop");
+ assert(jt_pp != NULL, "must have a return JavaThread pointer");
+
+ if (!thread_oop->is_a(SystemDictionary::Thread_klass())) {
+ // The oop is not a java.lang.Thread.
+ return JVMTI_ERROR_INVALID_THREAD;
+ }
+ // Looks like a java.lang.Thread oop at this point.
+
+ JavaThread * java_thread = java_lang_Thread::thread(thread_oop);
+ if (java_thread == NULL) {
+ // The java.lang.Thread does not contain a JavaThread * so it has
+ // not yet run or it has died.
+ return JVMTI_ERROR_THREAD_NOT_ALIVE;
+ }
+ // Looks like a live JavaThread at this point.
+
+ // We do not check the EnableThreadSMRExtraValidityChecks option
+ // for this includes() call because JVM/TI's spec is tighter.
+ if (!t_list->includes(java_thread)) {
+ // Not on the JavaThreads list so it is not alive.
+ return JVMTI_ERROR_THREAD_NOT_ALIVE;
+ }
+
+ // Return a live JavaThread that is "protected" by the
+ // ThreadsListHandle in the caller.
+ *jt_pp = java_thread;
+
+ return JVMTI_ERROR_NONE;
+}
+
class JvmtiClassFileLoadHookPoster : public StackObj {
private:
Symbol* _h_name;
@@ -2475,7 +2578,7 @@ extern "C" {
jint JvmtiExport::load_agent_library(const char *agent, const char *absParam,
const char *options, outputStream* st) {
- char ebuf[1024];
+ char ebuf[1024] = {0};
char buffer[JVM_MAXPATHLEN];
void* library = NULL;
jint result = JNI_ERR;
@@ -2525,6 +2628,8 @@ jint JvmtiExport::load_agent_library(const char *agent, const char *absParam,
if (!agent_lib->is_static_lib()) {
os::dll_unload(library);
}
+ st->print_cr("%s is not available in %s",
+ on_attach_symbols[0], agent_lib->name());
delete agent_lib;
} else {
// Invoke the Agent_OnAttach function
@@ -2551,9 +2656,14 @@ jint JvmtiExport::load_agent_library(const char *agent, const char *absParam,
}
// Agent_OnAttach executed so completion status is JNI_OK
- st->print_cr("%d", result);
+ st->print_cr("return code: %d", result);
result = JNI_OK;
}
+ } else {
+ st->print_cr("%s was not loaded.", agent);
+ if (*ebuf != '\0') {
+ st->print_cr("%s", ebuf);
+ }
}
return result;
}
@@ -2685,8 +2795,7 @@ void JvmtiVMObjectAllocEventCollector::oops_do_for_all_threads(OopClosure* f) {
return;
}
- // Runs at safepoint. So no need to acquire Threads_lock.
- for (JavaThread *jthr = Threads::first(); jthr != NULL; jthr = jthr->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jthr = jtiwh.next(); ) {
JvmtiThreadState *state = jthr->jvmti_thread_state();
if (state != NULL) {
JvmtiVMObjectAllocEventCollector *collector;
diff --git a/src/hotspot/share/prims/jvmtiExport.hpp b/src/hotspot/share/prims/jvmtiExport.hpp
index 7bae70b626e..3421150cab2 100644
--- a/src/hotspot/share/prims/jvmtiExport.hpp
+++ b/src/hotspot/share/prims/jvmtiExport.hpp
@@ -399,6 +399,14 @@ class JvmtiExport : public AllStatic {
// SetNativeMethodPrefix support
static char** get_all_native_method_prefixes(int* count_ptr) NOT_JVMTI_RETURN_(NULL);
+
+ // JavaThread lifecycle support:
+ static jvmtiError cv_external_thread_to_JavaThread(ThreadsList * t_list,
+ jthread thread,
+ JavaThread ** jt_pp,
+ oop * thread_oop_p);
+ static jvmtiError cv_oop_to_JavaThread(ThreadsList * t_list, oop thread_oop,
+ JavaThread ** jt_pp);
};
// Support class used by JvmtiDynamicCodeEventCollector and others. It
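For reference, a minimal sketch (not part of the patch) of the calling pattern the new conversion helpers are designed for, following the GetThreadState and InterruptThread changes earlier in this patch; the function name example_operate_on_thread is illustrative only:

// Sketch only: typical caller of the new JvmtiExport conversion helpers.
jvmtiError example_operate_on_thread(jthread thread) {
  JavaThread* current_thread = JavaThread::current();
  JavaThread* java_thread = NULL;
  oop thread_oop = NULL;
  // The ThreadsListHandle keeps its ThreadsList (and therefore the returned
  // JavaThread *) protected for as long as it stays in scope.
  ThreadsListHandle tlh(current_thread);
  jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), thread, &java_thread, &thread_oop);
  if (err != JVMTI_ERROR_NONE) {
    // Either the jthread was invalid or the thread is not alive; callers
    // that can still make use of the oop check thread_oop before bailing out.
    return err;
  }
  // java_thread is live and protected here; operate on it.
  return JVMTI_ERROR_NONE;
} // ThreadsListHandle is destroyed here.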
diff --git a/src/hotspot/share/prims/jvmtiImpl.cpp b/src/hotspot/share/prims/jvmtiImpl.cpp
index 4869a653144..82cdabdbd9b 100644
--- a/src/hotspot/share/prims/jvmtiImpl.cpp
+++ b/src/hotspot/share/prims/jvmtiImpl.cpp
@@ -46,6 +46,7 @@
#include "runtime/serviceThread.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vm_operations.hpp"
@@ -878,10 +879,9 @@ bool JvmtiSuspendControl::resume(JavaThread *java_thread) {
void JvmtiSuspendControl::print() {
#ifndef PRODUCT
- MutexLocker mu(Threads_lock);
LogStreamHandle(Trace, jvmti) log_stream;
log_stream.print("Suspended Threads: [");
- for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
#ifdef JVMTI_TRACE
const char *name = JvmtiTrace::safe_get_thread_name(thread);
#else
diff --git a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp
index c73842e0500..abca485f97b 100644
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp
@@ -43,6 +43,7 @@
#include "oops/oop.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
+#include "prims/jvmtiThreadState.inline.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "prims/methodComparator.hpp"
#include "runtime/deoptimization.hpp"
diff --git a/src/hotspot/share/prims/jvmtiTagMap.cpp b/src/hotspot/share/prims/jvmtiTagMap.cpp
index 3a45b2c8eff..6ea720a1e44 100644
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp
@@ -45,6 +45,8 @@
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/reflectionUtils.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
@@ -3174,7 +3176,7 @@ inline bool VM_HeapWalkOperation::collect_stack_roots(JavaThread* java_thread,
// stack to find all references and local JNI refs.
inline bool VM_HeapWalkOperation::collect_stack_roots() {
JNILocalRootsClosure blk;
- for (JavaThread* thread = Threads::first(); thread != NULL ; thread = thread->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
oop threadObj = thread->threadObj();
if (threadObj != NULL && !thread->is_exiting() && !thread->is_hidden_from_external_view()) {
// Collect the simple root for this thread before we
diff --git a/src/hotspot/share/prims/jvmtiThreadState.hpp b/src/hotspot/share/prims/jvmtiThreadState.hpp
index 9dfb9b85309..16a0a3455c7 100644
--- a/src/hotspot/share/prims/jvmtiThreadState.hpp
+++ b/src/hotspot/share/prims/jvmtiThreadState.hpp
@@ -27,7 +27,6 @@
#include "jvmtifiles/jvmti.h"
#include "memory/allocation.hpp"
-#include "memory/allocation.inline.hpp"
#include "prims/jvmtiEventController.hpp"
#include "runtime/thread.hpp"
#include "utilities/growableArray.hpp"
@@ -336,34 +335,10 @@ class JvmtiThreadState : public CHeapObj<mtInternal> {
// already holding JvmtiThreadState_lock - retrieve or create JvmtiThreadState
// Can return NULL if JavaThread is exiting.
- inline static JvmtiThreadState *state_for_while_locked(JavaThread *thread) {
- assert(JvmtiThreadState_lock->is_locked(), "sanity check");
-
- JvmtiThreadState *state = thread->jvmti_thread_state();
- if (state == NULL) {
- if (thread->is_exiting()) {
- // don't add a JvmtiThreadState to a thread that is exiting
- return NULL;
- }
-
- state = new JvmtiThreadState(thread);
- }
- return state;
- }
-
+ static JvmtiThreadState *state_for_while_locked(JavaThread *thread);
// retrieve or create JvmtiThreadState
// Can return NULL if JavaThread is exiting.
- inline static JvmtiThreadState *state_for(JavaThread *thread) {
- JvmtiThreadState *state = thread->jvmti_thread_state();
- if (state == NULL) {
- MutexLocker mu(JvmtiThreadState_lock);
- // check again with the lock held
- state = state_for_while_locked(thread);
- } else {
- CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
- }
- return state;
- }
+ static JvmtiThreadState *state_for(JavaThread *thread);
// JVMTI ForceEarlyReturn support
diff --git a/src/hotspot/share/prims/jvmtiThreadState.inline.hpp b/src/hotspot/share/prims/jvmtiThreadState.inline.hpp
index 1b9926fb1da..e3859a3334d 100644
--- a/src/hotspot/share/prims/jvmtiThreadState.inline.hpp
+++ b/src/hotspot/share/prims/jvmtiThreadState.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -68,4 +68,31 @@ void JvmtiThreadState::set_head_env_thread_state(JvmtiEnvThreadState* ets) {
_head_env_thread_state = ets;
}
+inline JvmtiThreadState* JvmtiThreadState::state_for_while_locked(JavaThread *thread) {
+ assert(JvmtiThreadState_lock->is_locked(), "sanity check");
+
+ JvmtiThreadState *state = thread->jvmti_thread_state();
+ if (state == NULL) {
+ if (thread->is_exiting()) {
+ // don't add a JvmtiThreadState to a thread that is exiting
+ return NULL;
+ }
+
+ state = new JvmtiThreadState(thread);
+ }
+ return state;
+}
+
+inline JvmtiThreadState* JvmtiThreadState::state_for(JavaThread *thread) {
+ JvmtiThreadState *state = thread->jvmti_thread_state();
+ if (state == NULL) {
+ MutexLocker mu(JvmtiThreadState_lock);
+ // check again with the lock held
+ state = state_for_while_locked(thread);
+ } else {
+ CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+ }
+ return state;
+}
+
#endif // SHARE_VM_PRIMS_JVMTITHREADSTATE_INLINE_HPP
diff --git a/src/hotspot/share/prims/methodHandles.cpp b/src/hotspot/share/prims/methodHandles.cpp
index 8374755490e..2da189889fb 100644
--- a/src/hotspot/share/prims/methodHandles.cpp
+++ b/src/hotspot/share/prims/methodHandles.cpp
@@ -1029,6 +1029,26 @@ void MethodHandles::flush_dependent_nmethods(Handle call_site, Handle target) {
}
}
+void MethodHandles::trace_method_handle_interpreter_entry(MacroAssembler* _masm, vmIntrinsics::ID iid) {
+ if (TraceMethodHandles) {
+ const char* name = vmIntrinsics::name_at(iid);
+ if (*name == '_') name += 1;
+ const size_t len = strlen(name) + 50;
+ char* qname = NEW_C_HEAP_ARRAY(char, len, mtInternal);
+ const char* suffix = "";
+ if (is_signature_polymorphic(iid)) {
+ if (is_signature_polymorphic_static(iid))
+ suffix = "/static";
+ else
+ suffix = "/private";
+ }
+ jio_snprintf(qname, len, "MethodHandle::interpreter_entry::%s%s", name, suffix);
+ trace_method_handle(_masm, qname);
+ // Note: Don't free the allocated char array because it's used
+ // during runtime.
+ }
+}
+
//
// Here are the native methods in java.lang.invoke.MethodHandleNatives
// They are the private interface between this JVM and the HotSpot-specific
diff --git a/src/hotspot/share/prims/methodHandles.hpp b/src/hotspot/share/prims/methodHandles.hpp
index df83f23daf3..2be115636af 100644
--- a/src/hotspot/share/prims/methodHandles.hpp
+++ b/src/hotspot/share/prims/methodHandles.hpp
@@ -195,25 +195,7 @@ public:
// Tracing
static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
- static void trace_method_handle_interpreter_entry(MacroAssembler* _masm, vmIntrinsics::ID iid) {
- if (TraceMethodHandles) {
- const char* name = vmIntrinsics::name_at(iid);
- if (*name == '_') name += 1;
- const size_t len = strlen(name) + 50;
- char* qname = NEW_C_HEAP_ARRAY(char, len, mtInternal);
- const char* suffix = "";
- if (is_signature_polymorphic(iid)) {
- if (is_signature_polymorphic_static(iid))
- suffix = "/static";
- else
- suffix = "/private";
- }
- jio_snprintf(qname, len, "MethodHandle::interpreter_entry::%s%s", name, suffix);
- trace_method_handle(_masm, qname);
- // Note: Don't free the allocated char array because it's used
- // during runtime.
- }
- }
+ static void trace_method_handle_interpreter_entry(MacroAssembler* _masm, vmIntrinsics::ID iid);
};
//------------------------------------------------------------------------------
diff --git a/src/hotspot/share/prims/perf.cpp b/src/hotspot/share/prims/perf.cpp
index ba019e04cb2..cf93aaeb99b 100644
--- a/src/hotspot/share/prims/perf.cpp
+++ b/src/hotspot/share/prims/perf.cpp
@@ -30,7 +30,7 @@
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/interfaceSupport.hpp"
-#include "runtime/perfData.hpp"
+#include "runtime/perfData.inline.hpp"
#include "runtime/perfMemory.hpp"
/*
diff --git a/src/hotspot/share/prims/unsafe.cpp b/src/hotspot/share/prims/unsafe.cpp
index c6796d53a3a..a8d13138507 100644
--- a/src/hotspot/share/prims/unsafe.cpp
+++ b/src/hotspot/share/prims/unsafe.cpp
@@ -39,6 +39,8 @@
#include "runtime/interfaceSupport.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/reflection.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vm_version.hpp"
#include "services/threadService.hpp"
#include "trace/tracing.hpp"
@@ -144,18 +146,25 @@ jlong Unsafe_field_offset_from_byte_offset(jlong byte_offset) {
* Normalizes values and wraps accesses in
* JavaThread::doing_unsafe_access() if needed.
*/
+template <typename T>
class MemoryAccess : StackObj {
JavaThread* _thread;
oop _obj;
ptrdiff_t _offset;
- // Resolves and returns the address of the memory access
- void* addr() {
- return index_oop_from_field_offset_long(_obj, _offset);
+ // Resolves and returns the address of the memory access.
+ // This raw memory access may fault, so we make sure it happens within the
+ // guarded scope by making the access volatile at least. Since the store
+ // of Thread::set_doing_unsafe_access() is also volatile, these accesses
+ // can not be reordered by the compiler. Therefore, if the access triggers
+ // a fault, we will know that Thread::doing_unsafe_access() returns true.
+ volatile T* addr() {
+ void* addr = index_oop_from_field_offset_long(_obj, _offset);
+ return static_cast<volatile T*>(addr);
}
- template <typename T>
- T normalize_for_write(T x) {
+ template <typename U>
+ U normalize_for_write(U x) {
return x;
}
@@ -163,8 +172,8 @@ class MemoryAccess : StackObj {
return x & 1;
}
- template <typename T>
- T normalize_for_read(T x) {
+ template <typename U>
+ U normalize_for_read(U x) {
return x;
}
@@ -197,11 +206,10 @@ public:
assert_field_offset_sane(_obj, offset);
}
- template <typename T>
T get() {
if (oopDesc::is_null(_obj)) {
GuardUnsafeAccess guard(_thread);
- T ret = RawAccess<>::load((T*)addr());
+ T ret = RawAccess<>::load(addr());
return normalize_for_read(ret);
} else {
T ret = HeapAccess<>::load_at(_obj, _offset);
@@ -209,22 +217,20 @@ public:
}
}
- template <typename T>
void put(T x) {
if (oopDesc::is_null(_obj)) {
GuardUnsafeAccess guard(_thread);
- RawAccess<>::store((T*)addr(), normalize_for_write(x));
+ RawAccess<>::store(addr(), normalize_for_write(x));
} else {
HeapAccess<>::store_at(_obj, _offset, normalize_for_write(x));
}
}
- template <typename T>
T get_volatile() {
if (oopDesc::is_null(_obj)) {
GuardUnsafeAccess guard(_thread);
- volatile T ret = RawAccess<MO_SEQ_CST>::load((volatile T*)addr());
+ volatile T ret = RawAccess<MO_SEQ_CST>::load(addr());
return normalize_for_read(ret);
} else {
T ret = HeapAccess<MO_SEQ_CST>::load_at(_obj, _offset);
@@ -232,11 +238,10 @@ public:
}
}
- template <typename T>
void put_volatile(T x) {
if (oopDesc::is_null(_obj)) {
GuardUnsafeAccess guard(_thread);
- RawAccess<MO_SEQ_CST>::store((volatile T*)addr(), normalize_for_write(x));
+ RawAccess<MO_SEQ_CST>::store(addr(), normalize_for_write(x));
} else {
HeapAccess<MO_SEQ_CST>::store_at(_obj, _offset, normalize_for_write(x));
}
@@ -294,11 +299,11 @@ UNSAFE_LEAF(jint, Unsafe_unalignedAccess0(JNIEnv *env, jobject unsafe)) {
#define DEFINE_GETSETOOP(java_type, Type) \
\
UNSAFE_ENTRY(java_type, Unsafe_Get##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
- return MemoryAccess(thread, obj, offset).get<java_type>(); \
+ return MemoryAccess<java_type>(thread, obj, offset).get(); \
} UNSAFE_END \
\
UNSAFE_ENTRY(void, Unsafe_Put##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
- MemoryAccess(thread, obj, offset).put<java_type>(x); \
+ MemoryAccess<java_type>(thread, obj, offset).put(x); \
} UNSAFE_END \
\
// END DEFINE_GETSETOOP.
@@ -317,11 +322,11 @@ DEFINE_GETSETOOP(jdouble, Double);
#define DEFINE_GETSETOOP_VOLATILE(java_type, Type) \
\
UNSAFE_ENTRY(java_type, Unsafe_Get##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
- return MemoryAccess(thread, obj, offset).get_volatile<java_type>(); \
+ return MemoryAccess<java_type>(thread, obj, offset).get_volatile(); \
} UNSAFE_END \
\
UNSAFE_ENTRY(void, Unsafe_Put##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
- MemoryAccess(thread, obj, offset).put_volatile<java_type>(x); \
+ MemoryAccess<java_type>(thread, obj, offset).put_volatile(x); \
} UNSAFE_END \
\
// END DEFINE_GETSETOOP_VOLATILE.
@@ -937,8 +942,12 @@ UNSAFE_ENTRY(void, Unsafe_Unpark(JNIEnv *env, jobject unsafe, jobject jthread))
Parker* p = NULL;
if (jthread != NULL) {
- oop java_thread = JNIHandles::resolve_non_null(jthread);
+ ThreadsListHandle tlh;
+ JavaThread* thr = NULL;
+ oop java_thread = NULL;
+ (void) tlh.cv_internal_thread_to_JavaThread(jthread, &thr, &java_thread);
if (java_thread != NULL) {
+ // This is a valid oop.
jlong lp = java_lang_Thread::park_event(java_thread);
if (lp != 0) {
// This cast is OK even though the jlong might have been read
@@ -946,22 +955,19 @@ UNSAFE_ENTRY(void, Unsafe_Unpark(JNIEnv *env, jobject unsafe, jobject jthread))
// always be zero anyway and the value set is always the same
p = (Parker*)addr_from_java(lp);
} else {
- // Grab lock if apparently null or using older version of library
- MutexLocker mu(Threads_lock);
- java_thread = JNIHandles::resolve_non_null(jthread);
-
- if (java_thread != NULL) {
- JavaThread* thr = java_lang_Thread::thread(java_thread);
- if (thr != NULL) {
- p = thr->parker();
- if (p != NULL) { // Bind to Java thread for next time.
- java_lang_Thread::set_park_event(java_thread, addr_to_java(p));
- }
+ // Not cached in the java.lang.Thread oop yet (could be an
+ // older version of library).
+ if (thr != NULL) {
+ // The JavaThread is alive.
+ p = thr->parker();
+ if (p != NULL) {
+ // Cache the Parker in the java.lang.Thread oop for next time.
+ java_lang_Thread::set_park_event(java_thread, addr_to_java(p));
}
}
}
}
- }
+ } // ThreadsListHandle is destroyed here.
if (p != NULL) {
HOTSPOT_THREAD_UNPARK((uintptr_t) p);
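To see the effect of templatizing MemoryAccess, here is a rough sketch (not part of the patch) of what DEFINE_GETSETOOP(jint, Int) expands to after this change; UNSAFE_ENTRY supplies the implicit thread parameter:

// Sketch only: the element type is now a class template argument, so
// get()/put() no longer take their own template parameter.
UNSAFE_ENTRY(jint, Unsafe_GetInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
  return MemoryAccess<jint>(thread, obj, offset).get();
} UNSAFE_END

UNSAFE_ENTRY(void, Unsafe_PutInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint x)) {
  MemoryAccess<jint>(thread, obj, offset).put(x);
} UNSAFE_END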
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
index 84510d0da19..28d8851ff08 100644
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -55,11 +55,15 @@
#include "runtime/os.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/thread.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
+#if INCLUDE_CDS
+#include "prims/cdsoffsets.hpp"
+#endif // INCLUDE_CDS
#if INCLUDE_ALL_GCS
#include "gc/g1/concurrentMarkThread.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
@@ -665,7 +669,7 @@ class VM_WhiteBoxDeoptimizeFrames : public VM_WhiteBoxOperation {
int result() const { return _result; }
void doit() {
- for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
if (t->has_last_Java_frame()) {
for (StackFrameStream fst(t, UseBiasedLocking); !fst.is_done(); fst.next()) {
frame* f = fst.current();
@@ -1729,6 +1733,18 @@ WB_ENTRY(jboolean, WB_IsCDSIncludedInVmBuild(JNIEnv* env))
#endif
WB_END
+
+#if INCLUDE_CDS
+
+WB_ENTRY(jint, WB_GetOffsetForName(JNIEnv* env, jobject o, jstring name))
+ ResourceMark rm;
+ char* c_name = java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(name));
+ int result = CDSOffsets::find_offset(c_name);
+ return (jint)result;
+WB_END
+
+#endif // INCLUDE_CDS
+
WB_ENTRY(jint, WB_HandshakeWalkStack(JNIEnv* env, jobject wb, jobject thread_handle, jboolean all_threads))
class TraceSelfClosure : public ThreadClosure {
jint _num_threads_completed;
@@ -1917,6 +1933,9 @@ static JNINativeMethod methods[] = {
{CC"runMemoryUnitTests", CC"()V", (void*)&WB_RunMemoryUnitTests},
{CC"readFromNoaccessArea",CC"()V", (void*)&WB_ReadFromNoaccessArea},
{CC"stressVirtualSpaceResize",CC"(JJJ)I", (void*)&WB_StressVirtualSpaceResize},
+#if INCLUDE_CDS
+ {CC"getOffsetForName0", CC"(Ljava/lang/String;)I", (void*)&WB_GetOffsetForName},
+#endif
#if INCLUDE_ALL_GCS
{CC"g1InConcurrentMark", CC"()Z", (void*)&WB_G1InConcurrentMark},
{CC"g1IsHumongous0", CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous },
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index de4adedf8f2..2540390b5c9 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -114,6 +114,108 @@ bool Arguments::_has_jimage = false;
char* Arguments::_ext_dirs = NULL;
+bool PathString::set_value(const char *value) {
+ if (_value != NULL) {
+ FreeHeap(_value);
+ }
+ _value = AllocateHeap(strlen(value)+1, mtArguments);
+ assert(_value != NULL, "Unable to allocate space for new path value");
+ if (_value != NULL) {
+ strcpy(_value, value);
+ } else {
+ // not able to allocate
+ return false;
+ }
+ return true;
+}
+
+void PathString::append_value(const char *value) {
+ char *sp;
+ size_t len = 0;
+ if (value != NULL) {
+ len = strlen(value);
+ if (_value != NULL) {
+ len += strlen(_value);
+ }
+ sp = AllocateHeap(len+2, mtArguments);
+ assert(sp != NULL, "Unable to allocate space for new append path value");
+ if (sp != NULL) {
+ if (_value != NULL) {
+ strcpy(sp, _value);
+ strcat(sp, os::path_separator());
+ strcat(sp, value);
+ FreeHeap(_value);
+ } else {
+ strcpy(sp, value);
+ }
+ _value = sp;
+ }
+ }
+}
+
+PathString::PathString(const char* value) {
+ if (value == NULL) {
+ _value = NULL;
+ } else {
+ _value = AllocateHeap(strlen(value)+1, mtArguments);
+ strcpy(_value, value);
+ }
+}
+
+PathString::~PathString() {
+ if (_value != NULL) {
+ FreeHeap(_value);
+ _value = NULL;
+ }
+}
+
+ModulePatchPath::ModulePatchPath(const char* module_name, const char* path) {
+ assert(module_name != NULL && path != NULL, "Invalid module name or path value");
+ size_t len = strlen(module_name) + 1;
+ _module_name = AllocateHeap(len, mtInternal);
+ strncpy(_module_name, module_name, len); // copy the trailing null
+ _path = new PathString(path);
+}
+
+ModulePatchPath::~ModulePatchPath() {
+ if (_module_name != NULL) {
+ FreeHeap(_module_name);
+ _module_name = NULL;
+ }
+ if (_path != NULL) {
+ delete _path;
+ _path = NULL;
+ }
+}
+
+SystemProperty::SystemProperty(const char* key, const char* value, bool writeable, bool internal) : PathString(value) {
+ if (key == NULL) {
+ _key = NULL;
+ } else {
+ _key = AllocateHeap(strlen(key)+1, mtArguments);
+ strcpy(_key, key);
+ }
+ _next = NULL;
+ _internal = internal;
+ _writeable = writeable;
+}
+
+AgentLibrary::AgentLibrary(const char* name, const char* options, bool is_absolute_path, void* os_lib) {
+ _name = AllocateHeap(strlen(name)+1, mtArguments);
+ strcpy(_name, name);
+ if (options == NULL) {
+ _options = NULL;
+ } else {
+ _options = AllocateHeap(strlen(options)+1, mtArguments);
+ strcpy(_options, options);
+ }
+ _is_absolute_path = is_absolute_path;
+ _os_lib = os_lib;
+ _next = NULL;
+ _state = agent_invalid;
+ _is_static_lib = false;
+}
+
// Check if head of 'option' matches 'name', and sets 'tail' to the remaining
// part of the option string.
static bool match_option(const JavaVMOption *option, const char* name,
@@ -180,6 +282,23 @@ bool needs_module_property_warning = false;
#define UPGRADE_PATH "upgrade.path"
#define UPGRADE_PATH_LEN 12
+void Arguments::add_init_library(const char* name, char* options) {
+ _libraryList.add(new AgentLibrary(name, options, false, NULL));
+}
+
+void Arguments::add_init_agent(const char* name, char* options, bool absolute_path) {
+ _agentList.add(new AgentLibrary(name, options, absolute_path, NULL));
+}
+
+// Late-binding agents not started via arguments
+void Arguments::add_loaded_agent(AgentLibrary *agentLib) {
+ _agentList.add(agentLib);
+}
+
+void Arguments::add_loaded_agent(const char* name, char* options, bool absolute_path, void* os_lib) {
+ _agentList.add(new AgentLibrary(name, options, absolute_path, os_lib));
+}
+
// Return TRUE if option matches 'property', or 'property=', or 'property.'.
static bool matches_property_suffix(const char* option, const char* property, size_t len) {
return ((strncmp(option, property, len) == 0) &&
@@ -2152,12 +2271,7 @@ bool Arguments::check_vm_args_consistency() {
// Check lower bounds of the code cache
// Template Interpreter code is approximately 3X larger in debug builds.
uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
- if (InitialCodeCacheSize < (uintx)os::vm_page_size()) {
- jio_fprintf(defaultStream::error_stream(),
- "Invalid InitialCodeCacheSize=%dK. Must be at least %dK.\n", InitialCodeCacheSize/K,
- os::vm_page_size()/K);
- status = false;
- } else if (ReservedCodeCacheSize < InitialCodeCacheSize) {
+ if (ReservedCodeCacheSize < InitialCodeCacheSize) {
jio_fprintf(defaultStream::error_stream(),
"Invalid ReservedCodeCacheSize: %dK. Must be at least InitialCodeCacheSize=%dK.\n",
ReservedCodeCacheSize/K, InitialCodeCacheSize/K);
@@ -2212,7 +2326,27 @@ bool Arguments::check_vm_args_consistency() {
}
FLAG_SET_CMDLINE(bool, PostLoopMultiversioning, false);
}
+ if (UseCountedLoopSafepoints && LoopStripMiningIter == 0) {
+ if (!FLAG_IS_DEFAULT(UseCountedLoopSafepoints) || !FLAG_IS_DEFAULT(LoopStripMiningIter)) {
+ warning("When counted loop safepoints are enabled, LoopStripMiningIter must be at least 1 (a safepoint every 1 iteration): setting it to 1");
+ }
+ LoopStripMiningIter = 1;
+ } else if (!UseCountedLoopSafepoints && LoopStripMiningIter > 0) {
+ if (!FLAG_IS_DEFAULT(UseCountedLoopSafepoints) || !FLAG_IS_DEFAULT(LoopStripMiningIter)) {
+ warning("Disabling counted safepoints implies no loop strip mining: setting LoopStripMiningIter to 0");
+ }
+ LoopStripMiningIter = 0;
+ }
+ if (FLAG_IS_DEFAULT(LoopStripMiningIterShortLoop)) {
+ // blind guess
+ LoopStripMiningIterShortLoop = LoopStripMiningIter / 10;
+ }
#endif
+ if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
+ if ((UseNUMAInterleaving && !FLAG_IS_DEFAULT(UseNUMAInterleaving)) || (UseNUMA && !FLAG_IS_DEFAULT(UseNUMA))) {
+ log_warning(arguments) ("NUMA support for Heap depends on the file system when AllocateHeapAt option is used.\n");
+ }
+ }
return status;
}
@@ -2770,18 +2904,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
if (FLAG_SET_CMDLINE(intx, ThreadStackSize, value) != Flag::SUCCESS) {
return JNI_EINVAL;
}
- } else if (match_option(option, "-XX:CodeCacheExpansionSize=", &tail)) {
- julong long_CodeCacheExpansionSize = 0;
- ArgsRange errcode = parse_memory_size(tail, &long_CodeCacheExpansionSize, os::vm_page_size());
- if (errcode != arg_in_range) {
- jio_fprintf(defaultStream::error_stream(),
- "Invalid argument: %s. Must be at least %luK.\n", option->optionString,
- os::vm_page_size()/K);
- return JNI_EINVAL;
- }
- if (FLAG_SET_CMDLINE(uintx, CodeCacheExpansionSize, (uintx)long_CodeCacheExpansionSize) != Flag::SUCCESS) {
- return JNI_EINVAL;
- }
} else if (match_option(option, "-Xmaxjitcodesize", &tail) ||
match_option(option, "-XX:ReservedCodeCacheSize=", &tail)) {
julong long_ReservedCodeCacheSize = 0;
@@ -2795,45 +2917,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
if (FLAG_SET_CMDLINE(uintx, ReservedCodeCacheSize, (uintx)long_ReservedCodeCacheSize) != Flag::SUCCESS) {
return JNI_EINVAL;
}
- // -XX:NonNMethodCodeHeapSize=
- } else if (match_option(option, "-XX:NonNMethodCodeHeapSize=", &tail)) {
- julong long_NonNMethodCodeHeapSize = 0;
-
- ArgsRange errcode = parse_memory_size(tail, &long_NonNMethodCodeHeapSize, 1);
- if (errcode != arg_in_range) {
- jio_fprintf(defaultStream::error_stream(),
- "Invalid maximum non-nmethod code heap size: %s.\n", option->optionString);
- return JNI_EINVAL;
- }
- if (FLAG_SET_CMDLINE(uintx, NonNMethodCodeHeapSize, (uintx)long_NonNMethodCodeHeapSize) != Flag::SUCCESS) {
- return JNI_EINVAL;
- }
- // -XX:ProfiledCodeHeapSize=
- } else if (match_option(option, "-XX:ProfiledCodeHeapSize=", &tail)) {
- julong long_ProfiledCodeHeapSize = 0;
-
- ArgsRange errcode = parse_memory_size(tail, &long_ProfiledCodeHeapSize, 1);
- if (errcode != arg_in_range) {
- jio_fprintf(defaultStream::error_stream(),
- "Invalid maximum profiled code heap size: %s.\n", option->optionString);
- return JNI_EINVAL;
- }
- if (FLAG_SET_CMDLINE(uintx, ProfiledCodeHeapSize, (uintx)long_ProfiledCodeHeapSize) != Flag::SUCCESS) {
- return JNI_EINVAL;
- }
- // -XX:NonProfiledCodeHeapSizee=
- } else if (match_option(option, "-XX:NonProfiledCodeHeapSize=", &tail)) {
- julong long_NonProfiledCodeHeapSize = 0;
-
- ArgsRange errcode = parse_memory_size(tail, &long_NonProfiledCodeHeapSize, 1);
- if (errcode != arg_in_range) {
- jio_fprintf(defaultStream::error_stream(),
- "Invalid maximum non-profiled code heap size: %s.\n", option->optionString);
- return JNI_EINVAL;
- }
- if (FLAG_SET_CMDLINE(uintx, NonProfiledCodeHeapSize, (uintx)long_NonProfiledCodeHeapSize) != Flag::SUCCESS) {
- return JNI_EINVAL;
- }
// -green
} else if (match_option(option, "-green")) {
jio_fprintf(defaultStream::error_stream(),
@@ -3936,6 +4019,14 @@ jint Arguments::match_special_option_and_act(const JavaVMInitArgs* args,
vm_exit(0);
}
#endif
+
+ if (match_option(option, "-XX:+UseAppCDS")) {
+ Flag* flag = Flag::find_flag("SharedArchiveFile", 17, true, true);
+ if (flag->is_diagnostic()) {
+ flag->clear_diagnostic();
+ }
+ continue;
+ }
}
return JNI_OK;
}
@@ -4306,26 +4397,7 @@ jint Arguments::apply_ergo() {
}
#endif
- bool aot_enabled = UseAOT && AOTLibrary != NULL;
- bool jvmci_enabled = NOT_JVMCI(false) JVMCI_ONLY(EnableJVMCI || UseJVMCICompiler);
- bool handshakes_supported = SafepointMechanism::supports_thread_local_poll() && !aot_enabled && !jvmci_enabled && ThreadLocalHandshakes;
// ThreadLocalHandshakesConstraintFunc handles the constraints.
- // Here we try to figure out if a mutual exclusive option have been set that conflict with a default.
- if (handshakes_supported) {
- FLAG_SET_DEFAULT(UseAOT, false); // Clear the AOT flag to make sure it doesn't try to initialize.
- } else {
- if (FLAG_IS_DEFAULT(ThreadLocalHandshakes) && ThreadLocalHandshakes) {
- if (aot_enabled) {
- // If user enabled AOT but ThreadLocalHandshakes is at default set it to false.
- log_debug(ergo)("Disabling ThreadLocalHandshakes for UseAOT.");
- FLAG_SET_DEFAULT(ThreadLocalHandshakes, false);
- } else if (jvmci_enabled){
- // If user enabled JVMCI but ThreadLocalHandshakes is at default set it to false.
- log_debug(ergo)("Disabling ThreadLocalHandshakes for EnableJVMCI/UseJVMCICompiler.");
- FLAG_SET_DEFAULT(ThreadLocalHandshakes, false);
- }
- }
- }
if (FLAG_IS_DEFAULT(ThreadLocalHandshakes) || !SafepointMechanism::supports_thread_local_poll()) {
log_debug(ergo)("ThreadLocalHandshakes %s", ThreadLocalHandshakes ? "enabled." : "disabled.");
} else {
@@ -4337,7 +4409,9 @@ jint Arguments::apply_ergo() {
jint Arguments::adjust_after_os() {
if (UseNUMA) {
- if (UseParallelGC || UseParallelOldGC) {
+ if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
+ FLAG_SET_ERGO(bool, UseNUMA, false);
+ } else if (UseParallelGC || UseParallelOldGC) {
if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
}
diff --git a/src/hotspot/share/runtime/arguments.hpp b/src/hotspot/share/runtime/arguments.hpp
index 43c0ff37d33..222f4b25ad6 100644
--- a/src/hotspot/share/runtime/arguments.hpp
+++ b/src/hotspot/share/runtime/arguments.hpp
@@ -27,6 +27,7 @@
#include "logging/logLevel.hpp"
#include "logging/logTag.hpp"
+#include "memory/allocation.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/perfData.hpp"
@@ -59,60 +60,11 @@ class PathString : public CHeapObj<mtArguments> {
public:
char* value() const { return _value; }
- bool set_value(const char *value) {
- if (_value != NULL) {
- FreeHeap(_value);
- }
- _value = AllocateHeap(strlen(value)+1, mtArguments);
- assert(_value != NULL, "Unable to allocate space for new path value");
- if (_value != NULL) {
- strcpy(_value, value);
- } else {
- // not able to allocate
- return false;
- }
- return true;
- }
+ bool set_value(const char *value);
+ void append_value(const char *value);
- void append_value(const char *value) {
- char *sp;
- size_t len = 0;
- if (value != NULL) {
- len = strlen(value);
- if (_value != NULL) {
- len += strlen(_value);
- }
- sp = AllocateHeap(len+2, mtArguments);
- assert(sp != NULL, "Unable to allocate space for new append path value");
- if (sp != NULL) {
- if (_value != NULL) {
- strcpy(sp, _value);
- strcat(sp, os::path_separator());
- strcat(sp, value);
- FreeHeap(_value);
- } else {
- strcpy(sp, value);
- }
- _value = sp;
- }
- }
- }
-
- PathString(const char* value) {
- if (value == NULL) {
- _value = NULL;
- } else {
- _value = AllocateHeap(strlen(value)+1, mtArguments);
- strcpy(_value, value);
- }
- }
-
- ~PathString() {
- if (_value != NULL) {
- FreeHeap(_value);
- _value = NULL;
- }
- }
+ PathString(const char* value);
+ ~PathString();
};
// ModulePatchPath records the module/path pair as specified to --patch-module.
@@ -121,24 +73,8 @@ private:
char* _module_name;
PathString* _path;
public:
- ModulePatchPath(const char* module_name, const char* path) {
- assert(module_name != NULL && path != NULL, "Invalid module name or path value");
- size_t len = strlen(module_name) + 1;
- _module_name = AllocateHeap(len, mtInternal);
- strncpy(_module_name, module_name, len); // copy the trailing null
- _path = new PathString(path);
- }
-
- ~ModulePatchPath() {
- if (_module_name != NULL) {
- FreeHeap(_module_name);
- _module_name = NULL;
- }
- if (_path != NULL) {
- delete _path;
- _path = NULL;
- }
- }
+ ModulePatchPath(const char* module_name, const char* path);
+ ~ModulePatchPath();
inline void set_path(const char* path) { _path->set_value(path); }
inline const char* module_name() const { return _module_name; }
@@ -185,17 +121,7 @@ class SystemProperty : public PathString {
}
// Constructor
- SystemProperty(const char* key, const char* value, bool writeable, bool internal = false) : PathString(value) {
- if (key == NULL) {
- _key = NULL;
- } else {
- _key = AllocateHeap(strlen(key)+1, mtArguments);
- strcpy(_key, key);
- }
- _next = NULL;
- _internal = internal;
- _writeable = writeable;
- }
+ SystemProperty(const char* key, const char* value, bool writeable, bool internal = false);
};
@@ -234,21 +160,7 @@ public:
void set_invalid() { _state = agent_invalid; }
// Constructor
- AgentLibrary(const char* name, const char* options, bool is_absolute_path, void* os_lib) {
- _name = AllocateHeap(strlen(name)+1, mtArguments);
- strcpy(_name, name);
- if (options == NULL) {
- _options = NULL;
- } else {
- _options = AllocateHeap(strlen(options)+1, mtArguments);
- strcpy(_options, options);
- }
- _is_absolute_path = is_absolute_path;
- _os_lib = os_lib;
- _next = NULL;
- _state = agent_invalid;
- _is_static_lib = false;
- }
+ AgentLibrary(const char* name, const char* options, bool is_absolute_path, void* os_lib);
};
// maintain an order of entry list of AgentLibrary
@@ -420,19 +332,15 @@ class Arguments : AllStatic {
// -Xrun arguments
static AgentLibraryList _libraryList;
- static void add_init_library(const char* name, char* options)
- { _libraryList.add(new AgentLibrary(name, options, false, NULL)); }
+ static void add_init_library(const char* name, char* options);
// -agentlib and -agentpath arguments
static AgentLibraryList _agentList;
- static void add_init_agent(const char* name, char* options, bool absolute_path)
- { _agentList.add(new AgentLibrary(name, options, absolute_path, NULL)); }
+ static void add_init_agent(const char* name, char* options, bool absolute_path);
// Late-binding agents not started via arguments
- static void add_loaded_agent(AgentLibrary *agentLib)
- { _agentList.add(agentLib); }
- static void add_loaded_agent(const char* name, char* options, bool absolute_path, void* os_lib)
- { _agentList.add(new AgentLibrary(name, options, absolute_path, os_lib)); }
+ static void add_loaded_agent(AgentLibrary *agentLib);
+ static void add_loaded_agent(const char* name, char* options, bool absolute_path, void* os_lib);
// Operation modi
static Mode _mode;
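
The declarations above now rely only on memory/allocation.hpp; the method bodies for PathString, ModulePatchPath, SystemProperty, AgentLibrary and the Arguments::add_* helpers move out of the header, matching the out-of-line definitions added to arguments.cpp earlier in this patch. For reference, a sketch of what the out-of-line PathString::set_value presumably looks like in arguments.cpp, assuming it simply mirrors the inline body removed above:

bool PathString::set_value(const char *value) {
  if (_value != NULL) {
    FreeHeap(_value);
  }
  _value = AllocateHeap(strlen(value)+1, mtArguments);
  assert(_value != NULL, "Unable to allocate space for new path value");
  if (_value != NULL) {
    strcpy(_value, value);
  } else {
    // not able to allocate
    return false;
  }
  return true;
}
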
diff --git a/src/hotspot/share/runtime/arguments_ext.hpp b/src/hotspot/share/runtime/arguments_ext.hpp
index d1c9f183e8e..3ae21e1267f 100644
--- a/src/hotspot/share/runtime/arguments_ext.hpp
+++ b/src/hotspot/share/runtime/arguments_ext.hpp
@@ -36,7 +36,6 @@ public:
// Otherwise returns false.
static inline bool process_options(const JavaVMOption *option) { return false; }
static inline void report_unsupported_options() { }
- static inline bool using_AppCDS() { return false; }
};
void ArgumentsExt::set_gc_specific_flags() {
diff --git a/src/hotspot/share/runtime/biasedLocking.cpp b/src/hotspot/share/runtime/biasedLocking.cpp
index de93a4b370a..a1e70a97478 100644
--- a/src/hotspot/share/runtime/biasedLocking.cpp
+++ b/src/hotspot/share/runtime/biasedLocking.cpp
@@ -32,6 +32,7 @@
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/task.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
@@ -214,12 +215,8 @@ static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_
if (requesting_thread == biased_thread) {
thread_is_alive = true;
} else {
- for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
- if (cur_thread == biased_thread) {
- thread_is_alive = true;
- break;
- }
- }
+ ThreadsListHandle tlh;
+ thread_is_alive = tlh.includes(biased_thread);
}
if (!thread_is_alive) {
if (allow_rebias) {
@@ -390,72 +387,76 @@ static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
Klass* k_o = o->klass();
Klass* klass = k_o;
- if (bulk_rebias) {
- // Use the epoch in the klass of the object to implicitly revoke
- // all biases of objects of this data type and force them to be
- // reacquired. However, we also need to walk the stacks of all
- // threads and update the headers of lightweight locked objects
- // with biases to have the current epoch.
+ {
+ JavaThreadIteratorWithHandle jtiwh;
- // If the prototype header doesn't have the bias pattern, don't
- // try to update the epoch -- assume another VM operation came in
- // and reset the header to the unbiased state, which will
- // implicitly cause all existing biases to be revoked
- if (klass->prototype_header()->has_bias_pattern()) {
- int prev_epoch = klass->prototype_header()->bias_epoch();
- klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
- int cur_epoch = klass->prototype_header()->bias_epoch();
+ if (bulk_rebias) {
+ // Use the epoch in the klass of the object to implicitly revoke
+ // all biases of objects of this data type and force them to be
+ // reacquired. However, we also need to walk the stacks of all
+ // threads and update the headers of lightweight locked objects
+ // with biases to have the current epoch.
- // Now walk all threads' stacks and adjust epochs of any biased
- // and locked objects of this data type we encounter
- for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
+ // If the prototype header doesn't have the bias pattern, don't
+ // try to update the epoch -- assume another VM operation came in
+ // and reset the header to the unbiased state, which will
+ // implicitly cause all existing biases to be revoked
+ if (klass->prototype_header()->has_bias_pattern()) {
+ int prev_epoch = klass->prototype_header()->bias_epoch();
+ klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
+ int cur_epoch = klass->prototype_header()->bias_epoch();
+
+ // Now walk all threads' stacks and adjust epochs of any biased
+ // and locked objects of this data type we encounter
+ for (; JavaThread *thr = jtiwh.next(); ) {
+ GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
+ for (int i = 0; i < cached_monitor_info->length(); i++) {
+ MonitorInfo* mon_info = cached_monitor_info->at(i);
+ oop owner = mon_info->owner();
+ markOop mark = owner->mark();
+ if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
+ // We might have encountered this object already in the case of recursive locking
+ assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
+ owner->set_mark(mark->set_bias_epoch(cur_epoch));
+ }
+ }
+ }
+ }
+
+ // At this point we're done. All we have to do is potentially
+ // adjust the header of the given object to revoke its bias.
+ revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
+ } else {
+ if (log_is_enabled(Info, biasedlocking)) {
+ ResourceMark rm;
+ log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
+ }
+
+ // Disable biased locking for this data type. Not only will this
+ // cause future instances to not be biased, but existing biased
+ // instances will notice that this implicitly caused their biases
+ // to be revoked.
+ klass->set_prototype_header(markOopDesc::prototype());
+
+ // Now walk all threads' stacks and forcibly revoke the biases of
+ // any locked and biased objects of this data type we encounter.
+ for (; JavaThread *thr = jtiwh.next(); ) {
 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
for (int i = 0; i < cached_monitor_info->length(); i++) {
MonitorInfo* mon_info = cached_monitor_info->at(i);
oop owner = mon_info->owner();
markOop mark = owner->mark();
if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
- // We might have encountered this object already in the case of recursive locking
- assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
- owner->set_mark(mark->set_bias_epoch(cur_epoch));
+ revoke_bias(owner, false, true, requesting_thread, NULL);
}
}
}
+
+ // Must force the bias of the passed object to be forcibly revoked
+ // as well to ensure guarantees to callers
+ revoke_bias(o, false, true, requesting_thread, NULL);
}
-
- // At this point we're done. All we have to do is potentially
- // adjust the header of the given object to revoke its bias.
- revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
- } else {
- if (log_is_enabled(Info, biasedlocking)) {
- ResourceMark rm;
- log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
- }
-
- // Disable biased locking for this data type. Not only will this
- // cause future instances to not be biased, but existing biased
- // instances will notice that this implicitly caused their biases
- // to be revoked.
- klass->set_prototype_header(markOopDesc::prototype());
-
- // Now walk all threads' stacks and forcibly revoke the biases of
- // any locked and biased objects of this data type we encounter.
- for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
- GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
- for (int i = 0; i < cached_monitor_info->length(); i++) {
- MonitorInfo* mon_info = cached_monitor_info->at(i);
- oop owner = mon_info->owner();
- markOop mark = owner->mark();
- if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
- revoke_bias(owner, false, true, requesting_thread, NULL);
- }
- }
- }
-
- // Must force the bias of the passed object to be forcibly revoked
- // as well to ensure guarantees to callers
- revoke_bias(o, false, true, requesting_thread, NULL);
- }
+ } // ThreadsListHandle is destroyed here.
log_info(biasedlocking)("* Ending bulk revocation");
@@ -481,7 +482,7 @@ static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
static void clean_up_cached_monitor_info() {
// Walk the thread list clearing out the cached monitors
- for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
thr->set_cached_monitor_info(NULL);
}
}
@@ -768,7 +769,7 @@ void BiasedLocking::preserve_marks() {
ResourceMark rm;
Thread* cur = Thread::current();
- for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
if (thread->has_last_Java_frame()) {
RegisterMap rm(thread);
for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
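
The biasedLocking.cpp changes above show the pattern this patch applies throughout the runtime: walks of Threads::first()/next(), which needed Threads_lock or a safepoint, become JavaThreadIteratorWithHandle loops whose embedded Thread-SMR ThreadsListHandle keeps every listed JavaThread from being freed during the iteration. A minimal in-tree sketch of the idiom, where the function name is illustrative and the includes are the ones this patch adds to the converted files:

#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"

// Count the Java threads that currently have a last Java frame.
// The ThreadsList captured by jtiwh stays valid for the whole loop and is
// released when the iterator goes out of scope.
static int count_threads_with_last_java_frame() {
  int count = 0;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* thr = jtiwh.next(); ) {
    if (thr->has_last_Java_frame()) {
      count++;
    }
  }
  return count;
}
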
diff --git a/src/hotspot/share/runtime/commandLineFlagConstraintsRuntime.cpp b/src/hotspot/share/runtime/commandLineFlagConstraintsRuntime.cpp
index 9df5d2bbec8..55483983094 100644
--- a/src/hotspot/share/runtime/commandLineFlagConstraintsRuntime.cpp
+++ b/src/hotspot/share/runtime/commandLineFlagConstraintsRuntime.cpp
@@ -138,10 +138,6 @@ Flag::Error ThreadLocalHandshakesConstraintFunc(bool value, bool verbose) {
CommandLineError::print(verbose, "ThreadLocalHandshakes not yet supported on this platform\n");
return Flag::VIOLATES_CONSTRAINT;
}
- if (UseAOT JVMCI_ONLY(|| EnableJVMCI || UseJVMCICompiler)) {
- CommandLineError::print(verbose, "ThreadLocalHandshakes not yet supported in combination with AOT or JVMCI\n");
- return Flag::VIOLATES_CONSTRAINT;
- }
}
return Flag::SUCCESS;
}
diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp
index fb93fbdd712..c61f41d2933 100644
--- a/src/hotspot/share/runtime/deoptimization.cpp
+++ b/src/hotspot/share/runtime/deoptimization.cpp
@@ -50,6 +50,7 @@
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
@@ -1297,7 +1298,7 @@ void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) {
assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint");
 GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
- for (JavaThread* jt = Threads::first(); jt != NULL ; jt = jt->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
if (jt->has_last_Java_frame()) {
StackFrameStream sfs(jt, true);
while (!sfs.is_done()) {
diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp
index bdf69c29da6..ad4b27a3b0b 100644
--- a/src/hotspot/share/runtime/globals.hpp
+++ b/src/hotspot/share/runtime/globals.hpp
@@ -917,9 +917,6 @@ public:
notproduct(bool, ZapVMHandleArea, trueInDebug, \
"Zap freed VM handle space with 0xBCBCBCBC") \
\
- develop(bool, ZapJNIHandleArea, trueInDebug, \
- "Zap freed JNI handle space with 0xFEFEFEFE") \
- \
notproduct(bool, ZapStackSegments, trueInDebug, \
"Zap allocated/freed stack segments with 0xFADFADED") \
\
@@ -2271,6 +2268,10 @@ public:
diagnostic(bool, VerifyDuringGC, false, \
"Verify memory system during GC (between phases)") \
\
+ diagnostic(ccstrlist, VerifyGCType, "", \
+ "GC type(s) to verify when Verify*GC is enabled." \
+ "Available types are collector specific.") \
+ \
diagnostic(ccstrlist, VerifySubSet, "", \
"Memory sub-systems to verify when Verify*GC flag(s) " \
"are enabled. One or more sub-systems can be specified " \
@@ -2484,6 +2485,12 @@ public:
LP64_ONLY(range(-1, max_intx/MICROUNITS)) \
NOT_LP64(range(-1, max_intx)) \
\
+ diagnostic(bool, EnableThreadSMRExtraValidityChecks, true, \
+ "Enable Thread SMR extra validity checks") \
+ \
+ diagnostic(bool, EnableThreadSMRStatistics, trueInDebug, \
+ "Enable Thread SMR Statistics") \
+ \
product(bool, Inline, true, \
"Enable inlining") \
\
@@ -3359,7 +3366,7 @@ public:
\
product_pd(uintx, InitialCodeCacheSize, \
"Initial code cache size (in bytes)") \
- range(0, max_uintx) \
+ range(os::vm_page_size(), max_uintx) \
\
develop_pd(uintx, CodeCacheMinimumUseSpace, \
"Minimum code cache size (in bytes) required to start VM.") \
@@ -3370,7 +3377,7 @@ public:
\
product_pd(uintx, ReservedCodeCacheSize, \
"Reserved code cache size (in bytes) - maximum code cache size") \
- range(0, max_uintx) \
+ range(os::vm_page_size(), max_uintx) \
\
product_pd(uintx, NonProfiledCodeHeapSize, \
"Size of code heap with non-profiled methods (in bytes)") \
@@ -3382,11 +3389,11 @@ public:
\
product_pd(uintx, NonNMethodCodeHeapSize, \
"Size of code heap with non-nmethods (in bytes)") \
- range(0, max_uintx) \
+ range(os::vm_page_size(), max_uintx) \
\
product_pd(uintx, CodeCacheExpansionSize, \
"Code cache expansion size (in bytes)") \
- range(0, max_uintx) \
+ range(32*K, max_uintx) \
\
diagnostic_pd(uintx, CodeCacheMinBlockLength, \
"Minimum number of segments in a code cache block") \
@@ -3926,6 +3933,13 @@ public:
"Address to allocate shared memory region for class data") \
range(0, SIZE_MAX) \
\
+ product(bool, UseAppCDS, false, \
+ "Enable Application Class Data Sharing when using shared spaces") \
+ writeable(CommandLineOnly) \
+ \
+ product(ccstr, SharedArchiveConfigFile, NULL, \
+ "Data to add to the CDS archive file") \
+ \
product(uintx, SharedSymbolTableBucketSize, 4, \
"Average number of symbols per bucket in shared table") \
range(2, 246) \
@@ -4073,7 +4087,11 @@ public:
diagnostic(bool, CompilerDirectivesPrint, false, \
"Print compiler directives on installation.") \
diagnostic(int, CompilerDirectivesLimit, 50, \
- "Limit on number of compiler directives.")
+ "Limit on number of compiler directives.") \
+ \
+ product(ccstr, AllocateHeapAt, NULL, \
+ "Path to the directoy where a temporary file will be created " \
+ "to use as the backing store for Java Heap.")
/*
diff --git a/src/hotspot/share/runtime/handshake.cpp b/src/hotspot/share/runtime/handshake.cpp
index f81c235c13d..0b816b25ed3 100644
--- a/src/hotspot/share/runtime/handshake.cpp
+++ b/src/hotspot/share/runtime/handshake.cpp
@@ -37,8 +37,6 @@
#include "utilities/formatBuffer.hpp"
#include "utilities/preserveException.hpp"
-#define ALL_JAVA_THREADS(X) for (JavaThread* X = Threads::first(); X; X = X->next())
-
class HandshakeOperation: public StackObj {
public:
virtual void do_handshake(JavaThread* thread) = 0;
@@ -94,8 +92,7 @@ bool VM_Handshake::handshake_has_timed_out(jlong start_time) {
void VM_Handshake::handle_timeout() {
LogStreamHandle(Warning, handshake) log_stream;
- MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
- ALL_JAVA_THREADS(thr) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
if (thr->has_handshake()) {
log_stream.print("Thread " PTR_FORMAT " has not cleared its handshake op", p2i(thr));
thr->print_thread_state_on(&log_stream);
@@ -117,8 +114,8 @@ class VM_HandshakeOneThread: public VM_Handshake {
TraceTime timer("Performing single-target operation (vmoperation doit)", TRACETIME_LOG(Info, handshake));
{
- MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
- if (Threads::includes(_target)) {
+ ThreadsListHandle tlh;
+ if (tlh.includes(_target)) {
set_handshake(_target);
_thread_alive = true;
}
@@ -139,9 +136,24 @@ class VM_HandshakeOneThread: public VM_Handshake {
handle_timeout();
}
+ // We need to re-think this with SMR ThreadsList.
+ // There is an assumption in the code that the Threads_lock should be
+ // locked during certain phases.
MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
- _target->handshake_process_by_vmthread();
-
+ ThreadsListHandle tlh;
+ if (tlh.includes(_target)) {
+ // Warning: _target's address might be re-used.
+ // handshake_process_by_vmthread will check the semaphore for us again.
+ // Since we can't have more than one handshake in flight, a reuse of
+ // _target's address should be okay since the new thread will not have
+ // an operation.
+ _target->handshake_process_by_vmthread();
+ } else {
+ // We can't warn here since the thread does cancel_handshake after
+ // it has been removed from the ThreadsList. So we should just keep
+ // looping here until while below returns false. If we have a bug,
+ // then we hang here, which is good for debugging.
+ }
} while (!poll_for_completed_thread());
}
@@ -157,15 +169,15 @@ class VM_HandshakeAllThreads: public VM_Handshake {
void doit() {
TraceTime timer("Performing operation (vmoperation doit)", TRACETIME_LOG(Info, handshake));
- int number_of_threads_issued = -1;
- int number_of_threads_completed = 0;
- {
- MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
- number_of_threads_issued = Threads::number_of_threads();
+ int number_of_threads_issued = 0;
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
+ set_handshake(thr);
+ number_of_threads_issued++;
+ }
- ALL_JAVA_THREADS(thr) {
- set_handshake(thr);
- }
+ if (number_of_threads_issued < 1) {
+ log_debug(handshake)("No threads to handshake.");
+ return;
}
if (!UseMembar) {
@@ -174,6 +186,7 @@ class VM_HandshakeAllThreads: public VM_Handshake {
log_debug(handshake)("Threads signaled, begin processing blocked threads by VMThtread");
const jlong start_time = os::elapsed_counter();
+ int number_of_threads_completed = 0;
do {
// Check if handshake operation has timed out
if (handshake_has_timed_out(start_time)) {
@@ -184,13 +197,19 @@ class VM_HandshakeAllThreads: public VM_Handshake {
// Observing a blocked state may of course be transient but the processing is guarded
// by semaphores and we optimistically begin by working on the blocked threads
{
+ // We need to re-think this with SMR ThreadsList.
+ // There is an assumption in the code that the Threads_lock should
+ // be locked during certain phases.
MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
- ALL_JAVA_THREADS(thr) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
+ // A new thread on the ThreadsList will not have an operation,
+ // hence it is skipped in handshake_process_by_vmthread.
thr->handshake_process_by_vmthread();
}
}
while (poll_for_completed_thread()) {
+ // Includes canceled operations by exiting threads.
number_of_threads_completed++;
}
@@ -212,7 +231,7 @@ public:
_thread_cl(cl), _target_thread(target), _all_threads(false), _thread_alive(false) {}
void doit() {
- ALL_JAVA_THREADS(t) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
if (_all_threads || t == _target_thread) {
if (t == _target_thread) {
_thread_alive = true;
@@ -298,8 +317,8 @@ void HandshakeState::cancel_inner(JavaThread* thread) {
assert(thread->thread_state() == _thread_in_vm, "must be in vm state");
#ifdef DEBUG
{
- MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
- assert(!Threads::includes(thread), "java thread must not be on threads list");
+ ThreadsListHandle tlh;
+ assert(!tlh.includes(_target), "java thread must not be on threads list");
}
#endif
HandshakeOperation* op = _operation;
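
Liveness checks in handshake.cpp follow the same direction: instead of taking Threads_lock and calling Threads::includes(), the code takes a ThreadsListHandle and asks it. A short sketch of the check, assuming only the threadSMR.hpp API used above; the wrapper name is illustrative, and the answer is only meaningful while the handle is live, since the target can exit once the handle is released:

#include "runtime/threadSMR.hpp"

// Returns true if 'target' is still on the system thread list. While 'tlh'
// is in scope the target cannot be deallocated, so the pointer is safe to
// use inside this function; the result may be stale afterwards.
static bool is_java_thread_alive(JavaThread* target) {
  ThreadsListHandle tlh;
  return tlh.includes(target);
}
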
diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp
index 11d84f00ee8..9c19095f142 100644
--- a/src/hotspot/share/runtime/java.cpp
+++ b/src/hotspot/share/runtime/java.cpp
@@ -356,6 +356,8 @@ void print_statistics() {
if (PrintNMTStatistics) {
MemTracker::final_report(tty);
}
+
+ Threads::log_smr_statistics();
}
#else // PRODUCT MODE STATISTICS
@@ -396,6 +398,8 @@ void print_statistics() {
if (LogTouchedMethods && PrintTouchedMethodsAtExit) {
Method::print_touched_methods(tty);
}
+
+ Threads::log_smr_statistics();
}
#endif
diff --git a/src/hotspot/share/runtime/jniHandles.cpp b/src/hotspot/share/runtime/jniHandles.cpp
index 8819f3c2cfc..483f9a5cb6e 100644
--- a/src/hotspot/share/runtime/jniHandles.cpp
+++ b/src/hotspot/share/runtime/jniHandles.cpp
@@ -279,13 +279,15 @@ JNIHandleBlock* JNIHandleBlock::_block_list = NULL;
#endif
+#ifdef ASSERT
void JNIHandleBlock::zap() {
// Zap block values
_top = 0;
for (int index = 0; index < block_size_in_oops; index++) {
- _handles[index] = badJNIHandle;
+ _handles[index] = NULL;
}
}
+#endif // ASSERT
JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread) {
assert(thread == NULL || thread == Thread::current(), "sanity check");
@@ -307,7 +309,7 @@ JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread) {
// Allocate new block
block = new JNIHandleBlock();
_blocks_allocated++;
- if (ZapJNIHandleArea) block->zap();
+ block->zap();
#ifndef PRODUCT
// Link new block to list of all allocated blocks
block->_block_list_link = _block_list;
@@ -339,7 +341,7 @@ void JNIHandleBlock::release_block(JNIHandleBlock* block, Thread* thread) {
// we _don't_ want the block to be kept on the free_handle_block.
// See for instance JavaThread::exit().
if (thread != NULL ) {
- if (ZapJNIHandleArea) block->zap();
+ block->zap();
JNIHandleBlock* freelist = thread->free_handle_block();
block->_pop_frame_link = NULL;
thread->set_free_handle_block(block);
@@ -360,7 +362,7 @@ void JNIHandleBlock::release_block(JNIHandleBlock* block, Thread* thread) {
MutexLockerEx ml(JNIHandleBlockFreeList_lock,
Mutex::_no_safepoint_check_flag);
while (block != NULL) {
- if (ZapJNIHandleArea) block->zap();
+ block->zap();
JNIHandleBlock* next = block->_next;
block->_next = _block_free_list;
_block_free_list = block;
@@ -453,13 +455,13 @@ jobject JNIHandleBlock::allocate_handle(oop obj) {
break;
}
current->_top = 0;
- if (ZapJNIHandleArea) current->zap();
+ current->zap();
}
// Clear initial block
_free_list = NULL;
_allocate_before_rebuild = 0;
_last = this;
- if (ZapJNIHandleArea) zap();
+ zap();
}
// Try last block
diff --git a/src/hotspot/share/runtime/jniHandles.hpp b/src/hotspot/share/runtime/jniHandles.hpp
index b7cd7668549..8bba3e3125b 100644
--- a/src/hotspot/share/runtime/jniHandles.hpp
+++ b/src/hotspot/share/runtime/jniHandles.hpp
@@ -148,7 +148,7 @@ class JNIHandleBlock : public CHeapObj<mtInternal> {
static int _blocks_allocated; // For debugging/printing
// Fill block with bad_handle values
- void zap();
+ void zap() NOT_DEBUG_RETURN;
// Free list computation
void rebuild_free_list();
@@ -219,9 +219,8 @@ inline oop& JNIHandles::jweak_ref(jobject handle) {
template <bool external_guard>
inline oop JNIHandles::guard_value(oop value) {
if (!external_guard) {
- assert(value != badJNIHandle, "Pointing to zapped jni handle area");
assert(value != deleted_handle(), "Used a deleted global handle");
- } else if ((value == badJNIHandle) || (value == deleted_handle())) {
+ } else if (value == deleted_handle()) {
value = NULL;
}
return value;
diff --git a/src/hotspot/share/runtime/memprofiler.cpp b/src/hotspot/share/runtime/memprofiler.cpp
index 77a1f183daa..396285ac8c8 100644
--- a/src/hotspot/share/runtime/memprofiler.cpp
+++ b/src/hotspot/share/runtime/memprofiler.cpp
@@ -36,6 +36,7 @@
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#ifndef PRODUCT
@@ -51,8 +52,6 @@ class MemProfilerTask : public PeriodicTask {
void MemProfilerTask::task() {
- // Get thread lock to provide mutual exclusion, and so we can iterate safely over the thread list.
- MutexLocker mu(Threads_lock);
MemProfiler::do_trace();
}
@@ -109,20 +108,21 @@ void MemProfiler::do_trace() {
// Calculate thread local sizes
size_t handles_memory_usage = VMThread::vm_thread()->handle_area()->size_in_bytes();
size_t resource_memory_usage = VMThread::vm_thread()->resource_area()->size_in_bytes();
- JavaThread *cur = Threads::first();
- while (cur != NULL) {
- handles_memory_usage += cur->handle_area()->size_in_bytes();
- resource_memory_usage += cur->resource_area()->size_in_bytes();
- cur = cur->next();
- }
+ {
+ JavaThreadIteratorWithHandle jtiwh;
+ for (; JavaThread *cur = jtiwh.next(); ) {
+ handles_memory_usage += cur->handle_area()->size_in_bytes();
+ resource_memory_usage += cur->resource_area()->size_in_bytes();
+ }
- // Print trace line in log
- fprintf(_log_fp, "%6.1f,%5d,%5d," UINTX_FORMAT_W(6) "," UINTX_FORMAT_W(6) ",",
- os::elapsedTime(),
- Threads::number_of_threads(),
- InstanceKlass::number_of_instance_classes(),
- Universe::heap()->used() / K,
- Universe::heap()->capacity() / K);
+ // Print trace line in log
+ fprintf(_log_fp, "%6.1f,%5d,%5d," UINTX_FORMAT_W(6) "," UINTX_FORMAT_W(6) ",",
+ os::elapsedTime(),
+ jtiwh.length(),
+ InstanceKlass::number_of_instance_classes(),
+ Universe::heap()->used() / K,
+ Universe::heap()->capacity() / K);
+ }
fprintf(_log_fp, UINTX_FORMAT_W(6) ",", CodeCache::capacity() / K);
diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp
index 40037a496a5..7f659bfecb1 100644
--- a/src/hotspot/share/runtime/objectMonitor.cpp
+++ b/src/hotspot/share/runtime/objectMonitor.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
@@ -242,6 +243,19 @@ static volatile int InitDone = 0;
// * See also http://blogs.sun.com/dave
+void* ObjectMonitor::operator new (size_t size) throw() {
+ return AllocateHeap(size, mtInternal);
+}
+void* ObjectMonitor::operator new[] (size_t size) throw() {
+ return operator new (size);
+}
+void ObjectMonitor::operator delete(void* p) {
+ FreeHeap(p);
+}
+void ObjectMonitor::operator delete[] (void *p) {
+ operator delete(p);
+}
+
// -----------------------------------------------------------------------------
// Enter support
@@ -2138,6 +2152,7 @@ ObjectWaiter::ObjectWaiter(Thread* thread) {
_next = NULL;
_prev = NULL;
_notified = 0;
+ _notifier_tid = 0;
TState = TS_RUN;
_thread = thread;
_event = thread->_ParkEvent;
diff --git a/src/hotspot/share/runtime/objectMonitor.hpp b/src/hotspot/share/runtime/objectMonitor.hpp
index 484b99aa2da..ee610b1a4db 100644
--- a/src/hotspot/share/runtime/objectMonitor.hpp
+++ b/src/hotspot/share/runtime/objectMonitor.hpp
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_RUNTIME_OBJECTMONITOR_HPP
#define SHARE_VM_RUNTIME_OBJECTMONITOR_HPP
+#include "memory/allocation.hpp"
#include "memory/padded.hpp"
#include "runtime/os.hpp"
#include "runtime/park.hpp"
@@ -212,18 +213,10 @@ class ObjectMonitor {
static int Knob_VerifyMatch;
static int Knob_SpinLimit;
- void* operator new (size_t size) throw() {
- return AllocateHeap(size, mtInternal);
- }
- void* operator new[] (size_t size) throw() {
- return operator new (size);
- }
- void operator delete(void* p) {
- FreeHeap(p);
- }
- void operator delete[] (void *p) {
- operator delete(p);
- }
+ void* operator new (size_t size) throw();
+ void* operator new[] (size_t size) throw();
+ void operator delete(void* p);
+ void operator delete[] (void *p);
// TODO-FIXME: the "offset" routines should return a type of off_t instead of int ...
// ByteSize would also be an appropriate type.
diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp
index 0306faa5515..dce176a8a52 100644
--- a/src/hotspot/share/runtime/os.cpp
+++ b/src/hotspot/share/runtime/os.cpp
@@ -54,6 +54,7 @@
#include "runtime/os.inline.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/mallocTracker.hpp"
@@ -197,15 +198,7 @@ char* os::iso8601_time(char* buffer, size_t buffer_length, bool utc) {
}
OSReturn os::set_priority(Thread* thread, ThreadPriority p) {
-#ifdef ASSERT
- if (!(!thread->is_Java_thread() ||
- Thread::current() == thread ||
- Threads_lock->owned_by_self()
- || thread->is_Compiler_thread()
- )) {
- assert(false, "possibility of dangling Thread pointer");
- }
-#endif
+ debug_only(Thread::check_for_dangling_thread_pointer(thread);)
if (p >= MinPriority && p <= MaxPriority) {
int priority = java_to_os_priority[p];
@@ -1100,7 +1093,7 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {
}
#endif
- for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
// Check for privilege stack
if (thread->privileged_stack_top() != NULL &&
thread->privileged_stack_top()->contains(addr)) {
@@ -1126,7 +1119,6 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {
if (verbose) thread->print_on(st);
return;
}
-
}
// Check if in metaspace and print types that have vptrs (only method now)
@@ -1665,7 +1657,6 @@ void os::initialize_initial_active_processor_count() {
}
void os::SuspendedThreadTask::run() {
- assert(Threads_lock->owned_by_self() || (_thread == VMThread::vm_thread()), "must have threads lock to call this");
internal_do_task();
_done = true;
}
@@ -1674,10 +1665,21 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
return os::pd_create_stack_guard_pages(addr, bytes);
}
-char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
- char* result = pd_reserve_memory(bytes, addr, alignment_hint);
- if (result != NULL) {
- MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
+char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint, int file_desc) {
+ char* result = NULL;
+
+ if (file_desc != -1) {
+ // Could have called pd_reserve_memory() followed by replace_existing_mapping_with_file_mapping(),
+ // but AIX may use SHM in which case it's more trouble to detach the segment and remap memory to the file.
+ result = os::map_memory_to_file(addr, bytes, file_desc);
+ if (result != NULL) {
+ MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
+ }
+ } else {
+ result = pd_reserve_memory(bytes, addr, alignment_hint);
+ if (result != NULL) {
+ MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
+ }
}
return result;
@@ -1694,10 +1696,18 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
return result;
}
-char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
- char* result = pd_attempt_reserve_memory_at(bytes, addr);
- if (result != NULL) {
- MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
+char* os::attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc) {
+ char* result = NULL;
+ if (file_desc != -1) {
+ result = pd_attempt_reserve_memory_at(bytes, addr, file_desc);
+ if (result != NULL) {
+ MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
+ }
+ } else {
+ result = pd_attempt_reserve_memory_at(bytes, addr);
+ if (result != NULL) {
+ MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
+ }
}
return result;
}
diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp
index 61999125339..83b24f574ed 100644
--- a/src/hotspot/share/runtime/os.hpp
+++ b/src/hotspot/share/runtime/os.hpp
@@ -108,8 +108,9 @@ class os: AllStatic {
}
static char* pd_reserve_memory(size_t bytes, char* addr = 0,
- size_t alignment_hint = 0);
+ size_t alignment_hint = 0);
static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr);
+ static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc);
static void pd_split_reserved_memory(char *base, size_t size,
size_t split, bool realloc);
static bool pd_commit_memory(char* addr, size_t bytes, bool executable);
@@ -310,11 +311,11 @@ class os: AllStatic {
static int vm_allocation_granularity();
static char* reserve_memory(size_t bytes, char* addr = 0,
- size_t alignment_hint = 0);
+ size_t alignment_hint = 0, int file_desc = -1);
static char* reserve_memory(size_t bytes, char* addr,
size_t alignment_hint, MEMFLAGS flags);
- static char* reserve_memory_aligned(size_t size, size_t alignment);
- static char* attempt_reserve_memory_at(size_t bytes, char* addr);
+ static char* reserve_memory_aligned(size_t size, size_t alignment, int file_desc = -1);
+ static char* attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc = -1);
static void split_reserved_memory(char *base, size_t size,
size_t split, bool realloc);
static bool commit_memory(char* addr, size_t bytes, bool executable);
@@ -345,6 +346,14 @@ class os: AllStatic {
static bool create_stack_guard_pages(char* addr, size_t bytes);
static bool pd_create_stack_guard_pages(char* addr, size_t bytes);
static bool remove_stack_guard_pages(char* addr, size_t bytes);
+ // Helper function to create a new file with template jvmheap.XXXXXX.
+ // Returns a valid fd on success or else returns -1
+ static int create_file_for_heap(const char* dir);
+ // Map memory to the file referred to by fd. This function is slightly different from map_memory()
+ // and is added for the implementation of -XX:AllocateHeapAt.
+ static char* map_memory_to_file(char* base, size_t size, int fd);
+ // Replace existing reserved memory with file mapping
+ static char* replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd);
static char* map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only = false,
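
The new file_desc parameters and the create_file_for_heap / map_memory_to_file / replace_existing_mapping_with_file_mapping helpers are what -XX:AllocateHeapAt builds on: the heap can be reserved as a file-backed mapping in a caller-chosen directory. A hedged sketch of how a caller might combine them; the wrapper name and the reduced error handling are illustrative, and the real heap-reservation call sites are elsewhere in the patch:

#include "runtime/os.hpp"

// Reserve 'bytes' backed by a temporary file created under 'dir', roughly as
// -XX:AllocateHeapAt=<dir> arranges for the Java heap.
static char* reserve_file_backed_memory(const char* dir, size_t bytes) {
  int fd = os::create_file_for_heap(dir);  // creates <dir>/jvmheap.XXXXXX, or returns -1
  if (fd == -1) {
    return NULL;
  }
  // With a valid descriptor, reserve_memory() maps the file and records the
  // range as reserved and committed with NMT.
  return os::reserve_memory(bytes, NULL, 0, fd);
}
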
diff --git a/src/hotspot/share/runtime/park.cpp b/src/hotspot/share/runtime/park.cpp
index 6f278dff23a..6314b54bd64 100644
--- a/src/hotspot/share/runtime/park.cpp
+++ b/src/hotspot/share/runtime/park.cpp
@@ -23,10 +23,9 @@
*/
#include "precompiled.hpp"
+#include "memory/allocation.inline.hpp"
#include "runtime/thread.hpp"
-
-
// Lifecycle management for TSM ParkEvents.
// ParkEvents are type-stable (TSM).
// In our particular implementation they happen to be immortal.
diff --git a/src/hotspot/share/runtime/perfData.cpp b/src/hotspot/share/runtime/perfData.cpp
index d79bbc66b48..e6a9045825b 100644
--- a/src/hotspot/share/runtime/perfData.cpp
+++ b/src/hotspot/share/runtime/perfData.cpp
@@ -32,7 +32,7 @@
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
-#include "runtime/perfData.hpp"
+#include "runtime/perfData.inline.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -611,3 +611,10 @@ PerfDataList* PerfDataList::clone() {
return copy;
}
+
+PerfTraceTime::~PerfTraceTime() {
+ if (!UsePerfData || (_recursion_counter != NULL &&
+ --(*_recursion_counter) > 0)) return;
+ _t.stop();
+ _timerp->inc(_t.ticks());
+}
diff --git a/src/hotspot/share/runtime/perfData.hpp b/src/hotspot/share/runtime/perfData.hpp
index 8fa9f14d598..f667ecc9d03 100644
--- a/src/hotspot/share/runtime/perfData.hpp
+++ b/src/hotspot/share/runtime/perfData.hpp
@@ -25,10 +25,11 @@
#ifndef SHARE_VM_RUNTIME_PERFDATA_HPP
#define SHARE_VM_RUNTIME_PERFDATA_HPP
-#include "memory/allocation.inline.hpp"
+#include "memory/allocation.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/timer.hpp"
-#include "utilities/growableArray.hpp"
+
+template <typename T> class GrowableArray;
/* jvmstat global and subsystem counter name space - enumeration value
* serve as an index into the PerfDataManager::_name_space[] array
@@ -244,6 +245,7 @@ class PerfData : public CHeapObj<mtInternal> {
friend class StatSampler; // for access to protected void sample()
friend class PerfDataManager; // for access to protected destructor
+ friend class VMStructs;
public:
@@ -629,10 +631,10 @@ class PerfDataList : public CHeapObj<mtInternal> {
bool contains(const char* name) { return find_by_name(name) != NULL; }
// return the number of PerfData items in this list
- int length() { return _set->length(); }
+ inline int length();
// add a PerfData item to this list
- void append(PerfData *p) { _set->append(p); }
+ inline void append(PerfData *p);
// remove the given PerfData item from this list. When called
// while iterating over the list, this method will result in a
@@ -640,7 +642,7 @@ class PerfDataList : public CHeapObj {
// method is also impacted by this method as elements with an
// index greater than the index of the element removed by this
// method will be shifted down by one.
- void remove(PerfData *p) { _set->remove(p); }
+ inline void remove(PerfData *p);
// create a new PerfDataList from this list. The new list is
// a shallow copy of the original list and care should be taken
@@ -651,7 +653,7 @@ class PerfDataList : public CHeapObj {
// for backward compatibility with GrowableArray - need to implement
// some form of iterator to provide a cleaner abstraction for
// iteration over the container.
- PerfData* at(int index) { return _set->at(index); }
+ inline PerfData* at(int index);
};
@@ -677,23 +679,23 @@ class PerfDataManager : AllStatic {
protected:
// return the list of all known PerfData items
static PerfDataList* all();
- static int count() { return _all->length(); }
+ static inline int count();
// return the list of all known PerfData items that are to be
// sampled by the StatSampler.
static PerfDataList* sampled();
- static int sampled_count() { return _sampled->length(); }
+ static inline int sampled_count();
// return the list of all known PerfData items that have a
// variability classification of type Constant
static PerfDataList* constants();
- static int constants_count() { return _constants->length(); }
+ static inline int constants_count();
public:
// method to check for the existence of a PerfData item with
// the given name.
- static bool exists(const char* name) { return _all->contains(name); }
+ static inline bool exists(const char* name);
// method to search for a instrumentation object by name
static PerfData* find_by_name(const char* name);
@@ -929,12 +931,7 @@ class PerfTraceTime : public StackObj {
inline void suspend() { if (!UsePerfData) return; _t.stop(); }
inline void resume() { if (!UsePerfData) return; _t.start(); }
- inline ~PerfTraceTime() {
- if (!UsePerfData || (_recursion_counter != NULL &&
- --(*_recursion_counter) > 0)) return;
- _t.stop();
- _timerp->inc(_t.ticks());
- }
+ ~PerfTraceTime();
};
/* The PerfTraceTimedEvent class is responsible for counting the
diff --git a/src/hotspot/share/runtime/perfData.inline.hpp b/src/hotspot/share/runtime/perfData.inline.hpp
new file mode 100644
index 00000000000..b867bbdbb4e
--- /dev/null
+++ b/src/hotspot/share/runtime/perfData.inline.hpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_PERFDATA_INLINE_HPP
+#define SHARE_VM_RUNTIME_PERFDATA_INLINE_HPP
+
+#include "runtime/perfData.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/growableArray.hpp"
+
+inline int PerfDataList::length() {
+ return _set->length();
+}
+
+inline void PerfDataList::append(PerfData *p) {
+ _set->append(p);
+}
+
+inline void PerfDataList::remove(PerfData *p) {
+ _set->remove(p);
+}
+
+inline PerfData* PerfDataList::at(int index) {
+ return _set->at(index);
+}
+
+inline int PerfDataManager::count() {
+ return _all->length();
+}
+
+inline int PerfDataManager::sampled_count() {
+ return _sampled->length();
+}
+
+inline int PerfDataManager::constants_count() {
+ return _constants->length();
+}
+
+inline bool PerfDataManager::exists(const char* name) {
+ return _all->contains(name);
+}
+
+#endif // SHARE_VM_RUNTIME_PERFDATA_INLINE_HPP
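
The new perfData.inline.hpp carries the GrowableArray-dependent inline bodies, so perfData.hpp itself only needs a forward declaration of GrowableArray and plain allocation.hpp. Clients that call the list accessors include the inline header, as perfData.cpp now does. A small sketch of such a client, with an illustrative function name:

#include "runtime/perfData.inline.hpp"

// Number of entries in a PerfDataList; length() is the inline body that now
// lives in perfData.inline.hpp rather than in the class declaration.
static int perf_data_count(PerfDataList* list) {
  return (list == NULL) ? 0 : list->length();
}
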
diff --git a/src/hotspot/share/runtime/safepoint.cpp b/src/hotspot/share/runtime/safepoint.cpp
index 291ee50b170..23d56c8e0d5 100644
--- a/src/hotspot/share/runtime/safepoint.cpp
+++ b/src/hotspot/share/runtime/safepoint.cpp
@@ -59,6 +59,7 @@
#include "runtime/sweeper.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/timerTrace.hpp"
#include "services/runtimeService.hpp"
#include "trace/tracing.hpp"
@@ -174,7 +175,7 @@ void SafepointSynchronize::begin() {
if (SafepointMechanism::uses_thread_local_poll()) {
// Arming the per thread poll while having _state != _not_synchronized means safepointing
log_trace(safepoint)("Setting thread local yield flag for threads");
- for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur = jtiwh.next(); ) {
// Make sure the threads start polling, it is time to yield.
SafepointMechanism::arm_local_poll(cur); // release store, global state -> local state
}
@@ -200,133 +201,137 @@ void SafepointSynchronize::begin() {
// Consider using active_processor_count() ... but that call is expensive.
int ncpus = os::processor_count() ;
+ unsigned int iterations = 0;
+ {
+ JavaThreadIteratorWithHandle jtiwh;
#ifdef ASSERT
- for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
- assert(cur->safepoint_state()->is_running(), "Illegal initial state");
- // Clear the visited flag to ensure that the critical counts are collected properly.
- cur->set_visited_for_critical_count(false);
- }
+ for (; JavaThread *cur = jtiwh.next(); ) {
+ assert(cur->safepoint_state()->is_running(), "Illegal initial state");
+ // Clear the visited flag to ensure that the critical counts are collected properly.
+ cur->set_visited_for_critical_count(false);
+ }
#endif // ASSERT
- if (SafepointTimeout)
- safepoint_limit_time = os::javaTimeNanos() + (jlong)SafepointTimeoutDelay * MICROUNITS;
+ if (SafepointTimeout)
+ safepoint_limit_time = os::javaTimeNanos() + (jlong)SafepointTimeoutDelay * MICROUNITS;
- // Iterate through all threads until it have been determined how to stop them all at a safepoint
- unsigned int iterations = 0;
- int steps = 0 ;
- while(still_running > 0) {
- for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
- assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectly being suspended");
- ThreadSafepointState *cur_state = cur->safepoint_state();
- if (cur_state->is_running()) {
- cur_state->examine_state_of_thread();
- if (!cur_state->is_running()) {
- still_running--;
- // consider adjusting steps downward:
- // steps = 0
- // steps -= NNN
- // steps >>= 1
- // steps = MIN(steps, 2000-100)
- // if (iterations != 0) steps -= NNN
- }
- LogTarget(Trace, safepoint) lt;
- if (lt.is_enabled()) {
- ResourceMark rm;
- LogStream ls(lt);
- cur_state->print_on(&ls);
+ // Iterate through all threads until it has been determined how to stop them all at a safepoint
+ int steps = 0 ;
+ while(still_running > 0) {
+ jtiwh.rewind();
+ for (; JavaThread *cur = jtiwh.next(); ) {
+ assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectedly being suspended");
+ ThreadSafepointState *cur_state = cur->safepoint_state();
+ if (cur_state->is_running()) {
+ cur_state->examine_state_of_thread();
+ if (!cur_state->is_running()) {
+ still_running--;
+ // consider adjusting steps downward:
+ // steps = 0
+ // steps -= NNN
+ // steps >>= 1
+ // steps = MIN(steps, 2000-100)
+ // if (iterations != 0) steps -= NNN
+ }
+ LogTarget(Trace, safepoint) lt;
+ if (lt.is_enabled()) {
+ ResourceMark rm;
+ LogStream ls(lt);
+ cur_state->print_on(&ls);
+ }
}
}
- }
- if (iterations == 0) {
- initial_running = still_running;
- if (PrintSafepointStatistics) {
- begin_statistics(nof_threads, still_running);
- }
- }
-
- if (still_running > 0) {
- // Check for if it takes to long
- if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
- print_safepoint_timeout(_spinning_timeout);
+ if (iterations == 0) {
+ initial_running = still_running;
+ if (PrintSafepointStatistics) {
+ begin_statistics(nof_threads, still_running);
+ }
}
- // Spin to avoid context switching.
- // There's a tension between allowing the mutators to run (and rendezvous)
- // vs spinning. As the VM thread spins, wasting cycles, it consumes CPU that
- // a mutator might otherwise use profitably to reach a safepoint. Excessive
- // spinning by the VM thread on a saturated system can increase rendezvous latency.
- // Blocking or yielding incur their own penalties in the form of context switching
- // and the resultant loss of $ residency.
- //
- // Further complicating matters is that yield() does not work as naively expected
- // on many platforms -- yield() does not guarantee that any other ready threads
- // will run. As such we revert to naked_short_sleep() after some number of iterations.
- // nakes_short_sleep() is implemented as a short unconditional sleep.
- // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping
- // can actually increase the time it takes the VM thread to detect that a system-wide
- // stop-the-world safepoint has been reached. In a pathological scenario such as that
- // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe.
- // In that case the mutators will be stalled waiting for the safepoint to complete and the
- // the VMthread will be sleeping, waiting for the mutators to rendezvous. The VMthread
- // will eventually wake up and detect that all mutators are safe, at which point
- // we'll again make progress.
- //
- // Beware too that that the VMThread typically runs at elevated priority.
- // Its default priority is higher than the default mutator priority.
- // Obviously, this complicates spinning.
- //
- // Note too that on Windows XP SwitchThreadTo() has quite different behavior than Sleep(0).
- // Sleep(0) will _not yield to lower priority threads, while SwitchThreadTo() will.
- //
- // See the comments in synchronizer.cpp for additional remarks on spinning.
- //
- // In the future we might:
- // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
- // This is tricky as the path used by a thread exiting the JVM (say on
- // on JNI call-out) simply stores into its state field. The burden
- // is placed on the VM thread, which must poll (spin).
- // 2. Find something useful to do while spinning. If the safepoint is GC-related
- // we might aggressively scan the stacks of threads that are already safe.
- // 3. Use Solaris schedctl to examine the state of the still-running mutators.
- // If all the mutators are ONPROC there's no reason to sleep or yield.
- // 4. YieldTo() any still-running mutators that are ready but OFFPROC.
- // 5. Check system saturation. If the system is not fully saturated then
- // simply spin and avoid sleep/yield.
- // 6. As still-running mutators rendezvous they could unpark the sleeping
- // VMthread. This works well for still-running mutators that become
- // safe. The VMthread must still poll for mutators that call-out.
- // 7. Drive the policy on time-since-begin instead of iterations.
- // 8. Consider making the spin duration a function of the # of CPUs:
- // Spin = (((ncpus-1) * M) + K) + F(still_running)
- // Alternately, instead of counting iterations of the outer loop
- // we could count the # of threads visited in the inner loop, above.
- // 9. On windows consider using the return value from SwitchThreadTo()
- // to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.
-
- if (SafepointMechanism::uses_global_page_poll() && int(iterations) == DeferPollingPageLoopCount) {
- guarantee (PageArmed == 0, "invariant") ;
- PageArmed = 1 ;
- os::make_polling_page_unreadable();
- }
-
- // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or
- // ((still_running + _waiting_to_block - TryingToBlock)) < ncpus)
- ++steps ;
- if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
- SpinPause() ; // MP-Polite spin
- } else
- if (steps < DeferThrSuspendLoopCount) {
- os::naked_yield() ;
- } else {
- os::naked_short_sleep(1);
+ if (still_running > 0) {
+      // Check if it takes too long
+ if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
+ print_safepoint_timeout(_spinning_timeout);
}
- iterations ++ ;
+ // Spin to avoid context switching.
+ // There's a tension between allowing the mutators to run (and rendezvous)
+ // vs spinning. As the VM thread spins, wasting cycles, it consumes CPU that
+ // a mutator might otherwise use profitably to reach a safepoint. Excessive
+ // spinning by the VM thread on a saturated system can increase rendezvous latency.
+ // Blocking or yielding incur their own penalties in the form of context switching
+ // and the resultant loss of $ residency.
+ //
+ // Further complicating matters is that yield() does not work as naively expected
+ // on many platforms -- yield() does not guarantee that any other ready threads
+ // will run. As such we revert to naked_short_sleep() after some number of iterations.
+      // naked_short_sleep() is implemented as a short unconditional sleep.
+ // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping
+ // can actually increase the time it takes the VM thread to detect that a system-wide
+ // stop-the-world safepoint has been reached. In a pathological scenario such as that
+ // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe.
+      // In that case the mutators will be stalled waiting for the safepoint to complete and
+      // the VMthread will be sleeping, waiting for the mutators to rendezvous. The VMthread
+ // will eventually wake up and detect that all mutators are safe, at which point
+ // we'll again make progress.
+ //
+      // Beware too that the VMThread typically runs at elevated priority.
+ // Its default priority is higher than the default mutator priority.
+ // Obviously, this complicates spinning.
+ //
+ // Note too that on Windows XP SwitchThreadTo() has quite different behavior than Sleep(0).
+ // Sleep(0) will _not yield to lower priority threads, while SwitchThreadTo() will.
+ //
+ // See the comments in synchronizer.cpp for additional remarks on spinning.
+ //
+ // In the future we might:
+ // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
+      //    This is tricky as the path used by a thread exiting the JVM (say
+      //    on JNI call-out) simply stores into its state field. The burden
+ // is placed on the VM thread, which must poll (spin).
+ // 2. Find something useful to do while spinning. If the safepoint is GC-related
+ // we might aggressively scan the stacks of threads that are already safe.
+ // 3. Use Solaris schedctl to examine the state of the still-running mutators.
+ // If all the mutators are ONPROC there's no reason to sleep or yield.
+ // 4. YieldTo() any still-running mutators that are ready but OFFPROC.
+ // 5. Check system saturation. If the system is not fully saturated then
+ // simply spin and avoid sleep/yield.
+ // 6. As still-running mutators rendezvous they could unpark the sleeping
+ // VMthread. This works well for still-running mutators that become
+ // safe. The VMthread must still poll for mutators that call-out.
+ // 7. Drive the policy on time-since-begin instead of iterations.
+ // 8. Consider making the spin duration a function of the # of CPUs:
+ // Spin = (((ncpus-1) * M) + K) + F(still_running)
+ // Alternately, instead of counting iterations of the outer loop
+ // we could count the # of threads visited in the inner loop, above.
+ // 9. On windows consider using the return value from SwitchThreadTo()
+ // to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.
+
+ if (SafepointMechanism::uses_global_page_poll() && int(iterations) == DeferPollingPageLoopCount) {
+ guarantee (PageArmed == 0, "invariant") ;
+ PageArmed = 1 ;
+ os::make_polling_page_unreadable();
+ }
+
+ // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or
+ // ((still_running + _waiting_to_block - TryingToBlock)) < ncpus)
+ ++steps ;
+ if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
+ SpinPause() ; // MP-Polite spin
+ } else
+ if (steps < DeferThrSuspendLoopCount) {
+ os::naked_yield() ;
+ } else {
+ os::naked_short_sleep(1);
+ }
+
+ iterations ++ ;
+ }
+ assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
}
- assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
- }
+ } // ThreadsListHandle destroyed here.
assert(still_running == 0, "sanity check");
if (PrintSafepointStatistics) {
@@ -341,7 +346,7 @@ void SafepointSynchronize::begin() {
sync_event.set_iterations(iterations);
sync_event.commit();
}
- } //EventSafepointStateSync
+ } // EventSafepointStateSynchronization destroyed here.
// wait until all threads are stopped
{
@@ -393,8 +398,8 @@ void SafepointSynchronize::begin() {
} // EventSafepointWaitBlocked
#ifdef ASSERT
- for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
- // make sure all the threads were visited
+ // Make sure all the threads were visited.
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur = jtiwh.next(); ) {
assert(cur->was_visited_for_critical_count(), "missed a thread");
}
#endif // ASSERT
@@ -452,81 +457,86 @@ void SafepointSynchronize::end() {
end_statistics(os::javaTimeNanos());
}
+ {
+ JavaThreadIteratorWithHandle jtiwh;
#ifdef ASSERT
- // A pending_exception cannot be installed during a safepoint. The threads
- // may install an async exception after they come back from a safepoint into
- // pending_exception after they unblock. But that should happen later.
- for (JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
- assert (!(cur->has_pending_exception() &&
- cur->safepoint_state()->is_at_poll_safepoint()),
- "safepoint installed a pending exception");
- }
+ // A pending_exception cannot be installed during a safepoint. The threads
+ // may install an async exception after they come back from a safepoint into
+ // pending_exception after they unblock. But that should happen later.
+ for (; JavaThread *cur = jtiwh.next(); ) {
+ assert (!(cur->has_pending_exception() &&
+ cur->safepoint_state()->is_at_poll_safepoint()),
+ "safepoint installed a pending exception");
+ }
#endif // ASSERT
- if (PageArmed) {
- assert(SafepointMechanism::uses_global_page_poll(), "sanity");
- // Make polling safepoint aware
- os::make_polling_page_readable();
- PageArmed = 0 ;
- }
-
- if (SafepointMechanism::uses_global_page_poll()) {
- // Remove safepoint check from interpreter
- Interpreter::ignore_safepoints();
- }
-
- {
- MutexLocker mu(Safepoint_lock);
-
- assert(_state == _synchronized, "must be synchronized before ending safepoint synchronization");
-
- if (SafepointMechanism::uses_thread_local_poll()) {
- _state = _not_synchronized;
- OrderAccess::storestore(); // global state -> local state
- for (JavaThread *current = Threads::first(); current; current = current->next()) {
- ThreadSafepointState* cur_state = current->safepoint_state();
- cur_state->restart(); // TSS _running
- SafepointMechanism::disarm_local_poll(current); // release store, local state -> polling page
- }
- log_debug(safepoint)("Leaving safepoint region");
- } else {
- // Set to not synchronized, so the threads will not go into the signal_thread_blocked method
- // when they get restarted.
- _state = _not_synchronized;
- OrderAccess::fence();
-
- log_debug(safepoint)("Leaving safepoint region");
-
- // Start suspended threads
- for (JavaThread *current = Threads::first(); current; current = current->next()) {
- // A problem occurring on Solaris is when attempting to restart threads
- // the first #cpus - 1 go well, but then the VMThread is preempted when we get
- // to the next one (since it has been running the longest). We then have
- // to wait for a cpu to become available before we can continue restarting
- // threads.
- // FIXME: This causes the performance of the VM to degrade when active and with
- // large numbers of threads. Apparently this is due to the synchronous nature
- // of suspending threads.
- //
- // TODO-FIXME: the comments above are vestigial and no longer apply.
- // Furthermore, using solaris' schedctl in this particular context confers no benefit
- if (VMThreadHintNoPreempt) {
- os::hint_no_preempt();
- }
- ThreadSafepointState* cur_state = current->safepoint_state();
- assert(cur_state->type() != ThreadSafepointState::_running, "Thread not suspended at safepoint");
- cur_state->restart();
- assert(cur_state->is_running(), "safepoint state has not been reset");
- }
+ if (PageArmed) {
+ assert(SafepointMechanism::uses_global_page_poll(), "sanity");
+ // Make polling safepoint aware
+ os::make_polling_page_readable();
+ PageArmed = 0 ;
}
- RuntimeService::record_safepoint_end();
+ if (SafepointMechanism::uses_global_page_poll()) {
+ // Remove safepoint check from interpreter
+ Interpreter::ignore_safepoints();
+ }
- // Release threads lock, so threads can be created/destroyed again. It will also starts all threads
- // blocked in signal_thread_blocked
- Threads_lock->unlock();
+ {
+ MutexLocker mu(Safepoint_lock);
+
+ assert(_state == _synchronized, "must be synchronized before ending safepoint synchronization");
+
+ if (SafepointMechanism::uses_thread_local_poll()) {
+ _state = _not_synchronized;
+ OrderAccess::storestore(); // global state -> local state
+ jtiwh.rewind();
+ for (; JavaThread *current = jtiwh.next(); ) {
+ ThreadSafepointState* cur_state = current->safepoint_state();
+ cur_state->restart(); // TSS _running
+ SafepointMechanism::disarm_local_poll(current); // release store, local state -> polling page
+ }
+ log_debug(safepoint)("Leaving safepoint region");
+ } else {
+ // Set to not synchronized, so the threads will not go into the signal_thread_blocked method
+ // when they get restarted.
+ _state = _not_synchronized;
+ OrderAccess::fence();
+
+ log_debug(safepoint)("Leaving safepoint region");
+
+ // Start suspended threads
+ jtiwh.rewind();
+ for (; JavaThread *current = jtiwh.next(); ) {
+ // A problem occurring on Solaris is when attempting to restart threads
+ // the first #cpus - 1 go well, but then the VMThread is preempted when we get
+ // to the next one (since it has been running the longest). We then have
+ // to wait for a cpu to become available before we can continue restarting
+ // threads.
+ // FIXME: This causes the performance of the VM to degrade when active and with
+ // large numbers of threads. Apparently this is due to the synchronous nature
+ // of suspending threads.
+ //
+ // TODO-FIXME: the comments above are vestigial and no longer apply.
+ // Furthermore, using solaris' schedctl in this particular context confers no benefit
+ if (VMThreadHintNoPreempt) {
+ os::hint_no_preempt();
+ }
+ ThreadSafepointState* cur_state = current->safepoint_state();
+ assert(cur_state->type() != ThreadSafepointState::_running, "Thread not suspended at safepoint");
+ cur_state->restart();
+ assert(cur_state->is_running(), "safepoint state has not been reset");
+ }
+ }
+
+ RuntimeService::record_safepoint_end();
+
+ // Release threads lock, so threads can be created/destroyed again.
+ // It will also release all threads blocked in signal_thread_blocked.
+ Threads_lock->unlock();
+ }
+ } // ThreadsListHandle destroyed here.
- }
Universe::heap()->safepoint_synchronize_end();
// record this time so VMThread can keep track how much time has elapsed
// since last safepoint.
@@ -915,12 +925,11 @@ void SafepointSynchronize::print_safepoint_timeout(SafepointTimeoutReason reason
tty->print_cr("# SafepointSynchronize::begin: Threads which did not reach the safepoint:");
ThreadSafepointState *cur_state;
ResourceMark rm;
- for (JavaThread *cur_thread = Threads::first(); cur_thread;
- cur_thread = cur_thread->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur_thread = jtiwh.next(); ) {
cur_state = cur_thread->safepoint_state();
if (cur_thread->thread_state() != _thread_blocked &&
- ((reason == _spinning_timeout && cur_state->is_running()) ||
+ ((reason == _spinning_timeout && cur_state->is_running()) ||
(reason == _blocking_timeout && !cur_state->has_called_back()))) {
tty->print("# ");
cur_thread->print();
@@ -1427,7 +1436,7 @@ void SafepointSynchronize::print_state() {
tty->print_cr("State: %s", (_state == _synchronizing) ? "synchronizing" :
"synchronized");
- for (JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur = jtiwh.next(); ) {
cur->safepoint_state()->print();
}
}
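
The rendezvous loop in SafepointSynchronize::begin() above uses a three-stage back-off: an MP-polite spin up to SafepointSpinBeforeYield iterations, then naked_yield(), then a 1 ms unconditional sleep once DeferThrSuspendLoopCount is passed. A minimal, self-contained sketch of that policy in portable C++ follows; wait_for_rendezvous, SPIN_LIMIT and YIELD_LIMIT are illustrative names, not HotSpot identifiers.

```cpp
// Illustrative back-off while waiting for workers to reach a rendezvous;
// mirrors the spin -> yield -> short-sleep policy above. Not HotSpot code.
#include <atomic>
#include <chrono>
#include <thread>

void wait_for_rendezvous(std::atomic<int>& still_running, unsigned ncpus) {
  const unsigned SPIN_LIMIT  = 4096;   // plays the role of SafepointSpinBeforeYield
  const unsigned YIELD_LIMIT = 5000;   // plays the role of DeferThrSuspendLoopCount
  unsigned steps = 0;
  while (still_running.load(std::memory_order_acquire) > 0) {
    ++steps;
    if (ncpus > 1 && steps < SPIN_LIMIT) {
      // MP-polite spin: stay on-CPU so we notice the last worker quickly.
    } else if (steps < YIELD_LIMIT) {
      std::this_thread::yield();       // may or may not run another ready thread
    } else {
      // Typically rounded up by the OS, so this is a last resort.
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
  }
}
```
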
diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp
index 8e4c90f9199..abb070dffd7 100644
--- a/src/hotspot/share/runtime/sharedRuntime.cpp
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp
@@ -970,7 +970,7 @@ JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...))
{
// We return a bad value here to make sure that the exception is
// forwarded before we look at the return value.
- THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badJNIHandle);
+ THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badAddress);
}
JNI_END
diff --git a/src/hotspot/share/runtime/statSampler.cpp b/src/hotspot/share/runtime/statSampler.cpp
index 6cdd0743ad1..3f995380468 100644
--- a/src/hotspot/share/runtime/statSampler.cpp
+++ b/src/hotspot/share/runtime/statSampler.cpp
@@ -32,6 +32,7 @@
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
+#include "runtime/perfData.inline.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/vm_version.hpp"
diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp
index d54b0d8b30a..6b8898959c8 100644
--- a/src/hotspot/share/runtime/synchronizer.cpp
+++ b/src/hotspot/share/runtime/synchronizer.cpp
@@ -894,7 +894,7 @@ ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
}
// FIXME: jvmti should call this
-JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
+JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
if (UseBiasedLocking) {
if (SafepointSynchronize::is_at_safepoint()) {
BiasedLocking::revoke_at_safepoint(h_obj);
@@ -923,7 +923,7 @@ JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
if (owner != NULL) {
// owning_thread_from_monitor_owner() may also return NULL here
- return Threads::owning_thread_from_monitor_owner(owner, doLock);
+ return Threads::owning_thread_from_monitor_owner(t_list, owner);
}
// Unlocked case, header in place
diff --git a/src/hotspot/share/runtime/synchronizer.hpp b/src/hotspot/share/runtime/synchronizer.hpp
index ba3ac60a267..415c222ab6f 100644
--- a/src/hotspot/share/runtime/synchronizer.hpp
+++ b/src/hotspot/share/runtime/synchronizer.hpp
@@ -32,6 +32,7 @@
#include "runtime/perfData.hpp"
class ObjectMonitor;
+class ThreadsList;
struct DeflateMonitorCounters {
int nInuse; // currently associated with objects
@@ -125,7 +126,7 @@ class ObjectSynchronizer : AllStatic {
static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
static LockOwnership query_lock_ownership(JavaThread * self, Handle h_obj);
- static JavaThread* get_lock_owner(Handle h_obj, bool doLock);
+ static JavaThread* get_lock_owner(ThreadsList * t_list, Handle h_obj);
// JNI detach support
static void release_monitors_owned_by_thread(TRAPS);
diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp
index 48c757fbd83..d4191b0d89f 100644
--- a/src/hotspot/share/runtime/thread.cpp
+++ b/src/hotspot/share/runtime/thread.cpp
@@ -71,12 +71,12 @@
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniPeriodicChecker.hpp"
-#include "runtime/timerTrace.hpp"
#include "runtime/memprofiler.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
+#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -86,6 +86,9 @@
#include "runtime/task.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
+#include "runtime/threadSMR.inline.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/timerTrace.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
@@ -104,6 +107,7 @@
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
+#include "utilities/resourceHash.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/concurrentMarkSweepThread.hpp"
@@ -195,13 +199,19 @@ void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) {
void Thread::operator delete(void* p) {
if (UseBiasedLocking) {
- void* real_malloc_addr = ((Thread*) p)->_real_malloc_address;
- FreeHeap(real_malloc_addr);
+ FreeHeap(((Thread*) p)->_real_malloc_address);
} else {
FreeHeap(p);
}
}
+void JavaThread::smr_delete() {
+ if (_on_thread_list) {
+ Threads::smr_delete(this);
+ } else {
+ delete this;
+ }
+}
// Base class for all threads: VMThread, WatcherThread, ConcurrentMarkSweepThread,
// JavaThread
@@ -227,6 +237,9 @@ Thread::Thread() {
// This initial value ==> never claimed.
_oops_do_parity = 0;
+ _threads_hazard_ptr = NULL;
+ _nested_threads_hazard_ptr = NULL;
+ _nested_threads_hazard_ptr_cnt = 0;
// the handle mark links itself to last_handle_mark
new HandleMark(this);
@@ -398,9 +411,15 @@ void Thread::run() {
}
#ifdef ASSERT
-// Private method to check for dangling thread pointer
-void check_for_dangling_thread_pointer(Thread *thread) {
- assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
+// A JavaThread is considered "dangling" if it is not the current
+// thread, has been added to the Threads list, the system is not at a
+// safepoint and the Thread is not "protected".
+//
+void Thread::check_for_dangling_thread_pointer(Thread *thread) {
+ assert(!thread->is_Java_thread() || Thread::current() == thread ||
+ !((JavaThread *) thread)->on_thread_list() ||
+ SafepointSynchronize::is_at_safepoint() ||
+ Threads::is_a_protected_JavaThread_with_lock((JavaThread *) thread),
"possibility of dangling Thread pointer");
}
#endif
@@ -732,6 +751,37 @@ bool JavaThread::wait_for_ext_suspend_completion(int retries, int delay,
return false;
}
+// Called from API entry points which perform stack walking. If the
+// associated JavaThread is the current thread, then wait_for_suspend
+// is not used. Otherwise, it determines if we should wait for the
+// "other" thread to complete external suspension. (NOTE: in future
+// releases the suspension mechanism should be reimplemented so this
+// is not necessary.)
+//
+bool
+JavaThread::is_thread_fully_suspended(bool wait_for_suspend, uint32_t *bits) {
+ if (this != JavaThread::current()) {
+ // "other" threads require special handling.
+ if (wait_for_suspend) {
+ // We are allowed to wait for the external suspend to complete
+ // so give the other thread a chance to get suspended.
+ if (!wait_for_ext_suspend_completion(SuspendRetryCount,
+ SuspendRetryDelay, bits)) {
+ // Didn't make it so let the caller know.
+ return false;
+ }
+ }
+ // We aren't allowed to wait for the external suspend to complete
+ // so if the other thread isn't externally suspended we need to
+ // let the caller know.
+ else if (!is_ext_suspend_completed_with_lock(bits)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
#ifndef PRODUCT
void JavaThread::record_jump(address target, address instr, const char* file,
int line) {
@@ -810,9 +860,33 @@ void Thread::print_on(outputStream* st) const {
ext().print_on(st);
osthread()->print_on(st);
}
+ if (_threads_hazard_ptr != NULL) {
+ st->print("_threads_hazard_ptr=" INTPTR_FORMAT, p2i(_threads_hazard_ptr));
+ }
+ if (_nested_threads_hazard_ptr != NULL) {
+ print_nested_threads_hazard_ptrs_on(st);
+ }
+ st->print(" ");
debug_only(if (WizardMode) print_owned_locks_on(st);)
}
+void Thread::print_nested_threads_hazard_ptrs_on(outputStream* st) const {
+ assert(_nested_threads_hazard_ptr != NULL, "must be set to print");
+
+ if (EnableThreadSMRStatistics) {
+ st->print(", _nested_threads_hazard_ptr_cnt=%u", _nested_threads_hazard_ptr_cnt);
+ }
+ st->print(", _nested_threads_hazard_ptrs=");
+ for (NestedThreadsList* node = _nested_threads_hazard_ptr; node != NULL;
+ node = node->next()) {
+ if (node != _nested_threads_hazard_ptr) {
+ // First node does not need a comma-space separator.
+ st->print(", ");
+ }
+ st->print(INTPTR_FORMAT, p2i(node->t_list()));
+ }
+}
+
// Thread::print_on_error() is called by fatal error handler. Don't use
// any lock or allocate memory.
void Thread::print_on_error(outputStream* st, char* buf, int buflen) const {
@@ -834,6 +908,13 @@ void Thread::print_on_error(outputStream* st, char* buf, int buflen) const {
if (osthread()) {
st->print(" [id=%d]", osthread()->thread_id());
}
+
+ if (_threads_hazard_ptr != NULL) {
+ st->print(" _threads_hazard_ptr=" INTPTR_FORMAT, p2i(_threads_hazard_ptr));
+ }
+ if (_nested_threads_hazard_ptr != NULL) {
+ print_nested_threads_hazard_ptrs_on(st);
+ }
}
void Thread::print_value_on(outputStream* st) const {
@@ -871,8 +952,8 @@ bool Thread::owns_locks_but_compiled_lock() const {
#ifndef PRODUCT
-// The flag: potential_vm_operation notifies if this particular safepoint state could potential
-// invoke the vm-thread (i.e., and oop allocation). In that case, we also have to make sure that
+// The flag: potential_vm_operation notifies if this particular safepoint state could potentially
+// invoke the vm-thread (e.g., an oop allocation). In that case, we also have to make sure that
// no threads which allow_vm_block's are held
void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
// Check if current thread is allowed to block at a safepoint
@@ -1399,10 +1480,11 @@ bool jvmci_counters_include(JavaThread* thread) {
void JavaThread::collect_counters(typeArrayOop array) {
if (JVMCICounterSize > 0) {
MutexLocker tl(Threads_lock);
+ JavaThreadIteratorWithHandle jtiwh;
for (int i = 0; i < array->length(); i++) {
array->long_at_put(i, _jvmci_old_thread_counters[i]);
}
- for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
+ for (; JavaThread *tp = jtiwh.next(); ) {
if (jvmci_counters_include(tp)) {
for (int i = 0; i < array->length(); i++) {
array->long_at_put(i, array->long_at(i) + tp->_jvmci_counters[i]);
@@ -1435,6 +1517,7 @@ void JavaThread::initialize() {
clear_must_deopt_id();
set_monitor_chunks(NULL);
set_next(NULL);
+ _on_thread_list = false;
set_thread_state(_thread_new);
_terminated = _not_terminated;
_privileged_stack_top = NULL;
@@ -1715,12 +1798,12 @@ void JavaThread::thread_main_inner() {
DTRACE_THREAD_PROBE(stop, this);
this->exit(false);
- delete this;
+ this->smr_delete();
}
static void ensure_join(JavaThread* thread) {
- // We do not need to grap the Threads_lock, since we are operating on ourself.
+ // We do not need to grab the Threads_lock, since we are operating on ourself.
Handle threadObj(thread, thread->threadObj());
assert(threadObj.not_null(), "java thread object must exist");
ObjectLocker lock(threadObj, thread);
@@ -1742,6 +1825,15 @@ static void ensure_join(JavaThread* thread) {
void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
assert(this == JavaThread::current(), "thread consistency check");
+ elapsedTimer _timer_exit_phase1;
+ elapsedTimer _timer_exit_phase2;
+ elapsedTimer _timer_exit_phase3;
+ elapsedTimer _timer_exit_phase4;
+
+ if (log_is_enabled(Debug, os, thread, timer)) {
+ _timer_exit_phase1.start();
+ }
+
HandleMark hm(this);
Handle uncaught_exception(this, this->pending_exception());
this->clear_pending_exception();
@@ -1841,12 +1933,20 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
// before_exit() has already posted JVMTI THREAD_END events
}
+ if (log_is_enabled(Debug, os, thread, timer)) {
+ _timer_exit_phase1.stop();
+ _timer_exit_phase2.start();
+ }
// Notify waiters on thread object. This has to be done after exit() is called
// on the thread (if the thread is the last thread in a daemon ThreadGroup the
// group should have the destroyed bit set before waiters are notified).
ensure_join(this);
assert(!this->has_pending_exception(), "ensure_join should have cleared");
+ if (log_is_enabled(Debug, os, thread, timer)) {
+ _timer_exit_phase2.stop();
+ _timer_exit_phase3.start();
+ }
// 6282335 JNI DetachCurrentThread spec states that all Java monitors
// held by this thread must be released. The spec does not distinguish
// between JNI-acquired and regular Java monitors. We can only see
@@ -1914,12 +2014,26 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
exit_type == JavaThread::normal_exit ? "exiting" : "detaching",
os::current_thread_id());
+ if (log_is_enabled(Debug, os, thread, timer)) {
+ _timer_exit_phase3.stop();
+ _timer_exit_phase4.start();
+ }
// Remove from list of active threads list, and notify VM thread if we are the last non-daemon thread
Threads::remove(this);
- // If someone set a handshake on us just as we entered exit path, we simple cancel it.
- if (ThreadLocalHandshakes) {
- cancel_handshake();
+ if (log_is_enabled(Debug, os, thread, timer)) {
+ _timer_exit_phase4.stop();
+ ResourceMark rm(this);
+ log_debug(os, thread, timer)("name='%s'"
+ ", exit-phase1=" JLONG_FORMAT
+ ", exit-phase2=" JLONG_FORMAT
+ ", exit-phase3=" JLONG_FORMAT
+ ", exit-phase4=" JLONG_FORMAT,
+ get_thread_name(),
+ _timer_exit_phase1.milliseconds(),
+ _timer_exit_phase2.milliseconds(),
+ _timer_exit_phase3.milliseconds(),
+ _timer_exit_phase4.milliseconds());
}
}
@@ -1980,7 +2094,7 @@ void JavaThread::cleanup_failed_attach_current_thread() {
#endif // INCLUDE_ALL_GCS
Threads::remove(this);
- delete this;
+ this->smr_delete();
}
@@ -2235,10 +2349,9 @@ void JavaThread::send_thread_stop(oop java_throwable) {
// + Target thread will not enter any new monitors
//
void JavaThread::java_suspend() {
- { MutexLocker mu(Threads_lock);
- if (!Threads::includes(this) || is_exiting() || this->threadObj() == NULL) {
- return;
- }
+ ThreadsListHandle tlh;
+ if (!tlh.includes(this) || threadObj() == NULL || is_exiting()) {
+ return;
}
{ MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
@@ -2327,14 +2440,8 @@ int JavaThread::java_suspend_self() {
// verify the JavaThread has not yet been published in the Threads::list, and
// hence doesn't need protection from concurrent access at this stage
void JavaThread::verify_not_published() {
- if (!Threads_lock->owned_by_self()) {
- MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
- assert(!Threads::includes(this),
- "java thread shouldn't have been published yet!");
- } else {
- assert(!Threads::includes(this),
- "java thread shouldn't have been published yet!");
- }
+ ThreadsListHandle tlh;
+ assert(!tlh.includes(this), "JavaThread shouldn't have been published yet!");
}
#endif
@@ -2451,7 +2558,8 @@ void JavaThread::java_resume() {
// Sanity check: thread is gone, has started exiting or the thread
// was not externally suspended.
- if (!Threads::includes(this) || is_exiting() || !is_external_suspend()) {
+ ThreadsListHandle tlh;
+ if (!tlh.includes(this) || is_exiting() || !is_external_suspend()) {
return;
}
@@ -2925,6 +3033,13 @@ void JavaThread::print_on_error(outputStream* st, char *buf, int buflen) const {
st->print(", stack(" PTR_FORMAT "," PTR_FORMAT ")",
p2i(stack_end()), p2i(stack_base()));
st->print("]");
+
+ if (_threads_hazard_ptr != NULL) {
+ st->print(" _threads_hazard_ptr=" INTPTR_FORMAT, p2i(_threads_hazard_ptr));
+ }
+ if (_nested_threads_hazard_ptr != NULL) {
+ print_nested_threads_hazard_ptrs_on(st);
+ }
return;
}
@@ -3318,23 +3433,140 @@ void CodeCacheSweeperThread::nmethods_do(CodeBlobClosure* cf) {
// ======= Threads ========
// The Threads class links together all active threads, and provides
-// operations over all threads. It is protected by its own Mutex
-// lock, which is also used in other contexts to protect thread
-// operations from having the thread being operated on from exiting
-// and going away unexpectedly (e.g., safepoint synchronization)
+// operations over all threads. It is protected by the Threads_lock,
+// which is also used in other global contexts like safepointing.
+// ThreadsListHandles are used to safely perform operations on one
+// or more threads without the risk of the thread exiting during the
+// operation.
+//
+// Note: The Threads_lock is currently more widely used than we
+// would like. We are actively migrating Threads_lock uses to other
+// mechanisms in order to reduce Threads_lock contention.
+
+JavaThread* Threads::_thread_list = NULL;
+int Threads::_number_of_threads = 0;
+int Threads::_number_of_non_daemon_threads = 0;
+int Threads::_return_code = 0;
+int Threads::_thread_claim_parity = 0;
+size_t JavaThread::_stack_size_at_create = 0;
+// Safe Memory Reclamation (SMR) support:
+Monitor* Threads::_smr_delete_lock =
+ new Monitor(Monitor::special, "smr_delete_lock",
+ false /* allow_vm_block */,
+ Monitor::_safepoint_check_never);
+// The '_cnt', '_max' and '_times' fields are enabled via
+// -XX:+EnableThreadSMRStatistics:
+
+// # of parallel threads in _smr_delete_lock->wait().
+// Impl note: Hard to imagine > 64K waiting threads so this could be 16-bit,
+// but there is no nice 16-bit _FORMAT support.
+uint Threads::_smr_delete_lock_wait_cnt = 0;
+
+// Max # of parallel threads in _smr_delete_lock->wait().
+// Impl note: See _smr_delete_lock_wait_cnt note.
+uint Threads::_smr_delete_lock_wait_max = 0;
+
+// Flag to indicate when an _smr_delete_lock->notify() is needed.
+// Impl note: See _smr_delete_lock_wait_cnt note.
+volatile uint Threads::_smr_delete_notify = 0;
+
+// # of threads deleted over VM lifetime.
+// Impl note: Atomically incremented over VM lifetime so use unsigned for more
+// range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
+// isn't available everywhere (or is it?).
+volatile uint Threads::_smr_deleted_thread_cnt = 0;
+
+// Max time in millis to delete a thread.
+// Impl note: 16-bit might be too small on an overloaded machine. Use
+// unsigned since this is a time value. Set via Atomic::cmpxchg() in a
+// loop for correctness.
+volatile uint Threads::_smr_deleted_thread_time_max = 0;
+
+// Cumulative time in millis to delete threads.
+// Impl note: Atomically added to over VM lifetime so use unsigned for more
+// range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
+// isn't available everywhere (or is it?).
+volatile uint Threads::_smr_deleted_thread_times = 0;
+
+ThreadsList* volatile Threads::_smr_java_thread_list = new ThreadsList(0);
+
+// # of ThreadsLists allocated over VM lifetime.
+// Impl note: We allocate a new ThreadsList for every thread create and
+// every thread delete so we need a bigger type than the
+// _smr_deleted_thread_cnt field.
+uint64_t Threads::_smr_java_thread_list_alloc_cnt = 1;
+
+// # of ThreadsLists freed over VM lifetime.
+// Impl note: See _smr_java_thread_list_alloc_cnt note.
+uint64_t Threads::_smr_java_thread_list_free_cnt = 0;
+
+// Max size ThreadsList allocated.
+// Impl note: Max # of threads alive at one time should fit in unsigned 32-bit.
+uint Threads::_smr_java_thread_list_max = 0;
+
+// Max # of nested ThreadsLists for a thread.
+// Impl note: Hard to imagine > 64K nested ThreadsLists so this could be
+// 16-bit, but there is no nice 16-bit _FORMAT support.
+uint Threads::_smr_nested_thread_list_max = 0;
+
+// # of ThreadsListHandles deleted over VM lifetime.
+// Impl note: Atomically incremented over VM lifetime so use unsigned for
+// more range. There will be fewer ThreadsListHandles than threads so
+// unsigned 32-bit should be fine.
+volatile uint Threads::_smr_tlh_cnt = 0;
+
+// Max time in millis to delete a ThreadsListHandle.
+// Impl note: 16-bit might be too small on an overloaded machine. Use
+// unsigned since this is a time value. Set via Atomic::cmpxchg() in a
+// loop for correctness.
+volatile uint Threads::_smr_tlh_time_max = 0;
+
+// Cumulative time in millis to delete ThreadsListHandles.
+// Impl note: Atomically added to over VM lifetime so use unsigned for more
+// range. Unsigned 64-bit would be more future proof, but 64-bit atomic inc
+// isn't available everywhere (or is it?).
+volatile uint Threads::_smr_tlh_times = 0;
+
+ThreadsList* Threads::_smr_to_delete_list = NULL;
+
+// # of parallel ThreadsLists on the to-delete list.
+// Impl note: Hard to imagine > 64K ThreadsLists needing to be deleted so
+// this could be 16-bit, but there is no nice 16-bit _FORMAT support.
+uint Threads::_smr_to_delete_list_cnt = 0;
+
+// Max # of parallel ThreadsLists on the to-delete list.
+// Impl note: See _smr_to_delete_list_cnt note.
+uint Threads::_smr_to_delete_list_max = 0;
-JavaThread* Threads::_thread_list = NULL;
-int Threads::_number_of_threads = 0;
-int Threads::_number_of_non_daemon_threads = 0;
-int Threads::_return_code = 0;
-int Threads::_thread_claim_parity = 0;
-size_t JavaThread::_stack_size_at_create = 0;
#ifdef ASSERT
-bool Threads::_vm_complete = false;
+bool Threads::_vm_complete = false;
#endif
+static inline void *prefetch_and_load_ptr(void **addr, intx prefetch_interval) {
+ Prefetch::read((void*)addr, prefetch_interval);
+ return *addr;
+}
+
+// Possibly the ugliest for loop the world has seen. C++ does not allow
+// multiple types in the declaration section of the for loop. In this case
+// we are only dealing with pointers and hence can cast them. It looks ugly
+// but macros are ugly and therefore it's fine to make things absurdly ugly.
+#define DO_JAVA_THREADS(LIST, X) \
+ for (JavaThread *MACRO_scan_interval = (JavaThread*)(uintptr_t)PrefetchScanIntervalInBytes, \
+ *MACRO_list = (JavaThread*)(LIST), \
+ **MACRO_end = ((JavaThread**)((ThreadsList*)MACRO_list)->threads()) + ((ThreadsList*)MACRO_list)->length(), \
+ **MACRO_current_p = (JavaThread**)((ThreadsList*)MACRO_list)->threads(), \
+ *X = (JavaThread*)prefetch_and_load_ptr((void**)MACRO_current_p, (intx)MACRO_scan_interval); \
+ MACRO_current_p != MACRO_end; \
+ MACRO_current_p++, \
+ X = (JavaThread*)prefetch_and_load_ptr((void**)MACRO_current_p, (intx)MACRO_scan_interval))
+
+inline ThreadsList* Threads::get_smr_java_thread_list() {
+ return (ThreadsList*)OrderAccess::load_acquire(&_smr_java_thread_list);
+}
+
// All JavaThreads
-#define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())
+#define ALL_JAVA_THREADS(X) DO_JAVA_THREADS(get_smr_java_thread_list(), X)
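
Stripped of the prefetch plumbing and the single-declaration contortions the macro comment apologizes for, DO_JAVA_THREADS is simply a walk over the snapshot's pointer array. A sketch of that shape is below; Snapshot and Worker are illustrative types standing in for ThreadsList and JavaThread.

```cpp
// Plain-loop equivalent of DO_JAVA_THREADS over a snapshot's pointer array.
// Snapshot/Worker are illustrative types; the prefetch hint is elided.
struct Worker;
struct Snapshot {
  Worker** threads;   // immutable array captured when the snapshot was built
  int      length;
};

template <typename F>
void for_each_worker(const Snapshot* list, F f) {
  Worker** end = list->threads + list->length;
  for (Worker** p = list->threads; p != end; ++p) {
    // HotSpot additionally prefetches PrefetchScanIntervalInBytes ahead of p.
    f(*p);
  }
}
```
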
// All JavaThreads + all non-JavaThreads (i.e., every thread in the system)
void Threads::threads_do(ThreadClosure* tc) {
@@ -3435,6 +3667,240 @@ static void call_initPhase3(TRAPS) {
vmSymbols::void_method_signature(), CHECK);
}
+// Safe Memory Reclamation (SMR) support:
+//
+
+// Acquire a stable ThreadsList.
+//
+ThreadsList *Threads::acquire_stable_list(Thread *self, bool is_ThreadsListSetter) {
+ assert(self != NULL, "sanity check");
+ // acquire_stable_list_nested_path() will grab the Threads_lock
+ // so let's make sure the ThreadsListHandle is in a safe place.
+ // ThreadsListSetter cannot make this check on this code path.
+ debug_only(if (!is_ThreadsListSetter && StrictSafepointChecks) self->check_for_valid_safepoint_state(/* potential_vm_operation */ false);)
+
+ if (self->get_threads_hazard_ptr() == NULL) {
+ // The typical case is first.
+ return acquire_stable_list_fast_path(self);
+ }
+
+ // The nested case is rare.
+ return acquire_stable_list_nested_path(self);
+}
+
+// Fast path (and lock free) way to acquire a stable ThreadsList.
+//
+ThreadsList *Threads::acquire_stable_list_fast_path(Thread *self) {
+ assert(self != NULL, "sanity check");
+ assert(self->get_threads_hazard_ptr() == NULL, "sanity check");
+ assert(self->get_nested_threads_hazard_ptr() == NULL,
+ "cannot have a nested hazard ptr with a NULL regular hazard ptr");
+
+ ThreadsList* threads;
+
+ // Stable recording of a hazard ptr for SMR. This code does not use
+ // locks so its use of the _smr_java_thread_list & _threads_hazard_ptr
+ // fields is racy relative to code that uses those fields with locks.
+ // OrderAccess and Atomic functions are used to deal with those races.
+ //
+ while (true) {
+ threads = get_smr_java_thread_list();
+
+ // Publish a tagged hazard ptr to denote that the hazard ptr is not
+ // yet verified as being stable. Due to the fence after the hazard
+ // ptr write, it will be sequentially consistent w.r.t. the
+ // sequentially consistent writes of the ThreadsList, even on
+ // non-multiple copy atomic machines where stores can be observed
+ // in different order from different observer threads.
+ ThreadsList* unverified_threads = Thread::tag_hazard_ptr(threads);
+ self->set_threads_hazard_ptr(unverified_threads);
+
+ // If _smr_java_thread_list has changed, we have lost a race with
+ // Threads::add() or Threads::remove() and have to try again.
+ if (get_smr_java_thread_list() != threads) {
+ continue;
+ }
+
+ // We try to remove the tag which will verify the hazard ptr as
+ // being stable. This exchange can race with a scanning thread
+ // which might invalidate the tagged hazard ptr to keep it from
+ // being followed to access JavaThread ptrs. If we lose the race,
+ // we simply retry. If we win the race, then the stable hazard
+ // ptr is officially published.
+ if (self->cmpxchg_threads_hazard_ptr(threads, unverified_threads) == unverified_threads) {
+ break;
+ }
+ }
+
+ // A stable hazard ptr has been published letting other threads know
+ // that the ThreadsList and the JavaThreads reachable from this list
+ // are protected and hence they should not be deleted until everyone
+ // agrees it is safe to do so.
+
+ return threads;
+}
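
The fast path above is the classic hazard-pointer publish-then-validate loop, with the low bit of the pointer used as a "not yet verified" tag that a scanning thread can invalidate. A self-contained sketch of the same dance follows, assuming pointer alignment leaves bit 0 free; Snapshot, g_current and t_hazard are illustrative names, not HotSpot's.

```cpp
// Publish-then-validate acquisition of a hazard pointer, with bit 0 of the
// pointer as the "unverified" tag. Illustrative only; assumes Snapshot*
// values are at least 2-byte aligned so bit 0 is free.
#include <atomic>
#include <cstdint>

struct Snapshot;

std::atomic<Snapshot*> g_current;              // role of _smr_java_thread_list
thread_local std::atomic<Snapshot*> t_hazard;  // role of _threads_hazard_ptr

static Snapshot* tag(Snapshot* p) { return (Snapshot*)((uintptr_t)p | 1); }

Snapshot* acquire_stable_snapshot() {
  while (true) {
    Snapshot* s = g_current.load(std::memory_order_acquire);
    // 1. Publish a tagged (not yet verified) hazard pointer.
    t_hazard.store(tag(s), std::memory_order_seq_cst);
    // 2. If the global snapshot changed underneath us, retry.
    if (g_current.load(std::memory_order_acquire) != s) continue;
    // 3. Strip the tag to verify; a concurrent scanner may have invalidated
    //    the tagged value (CASed it to NULL), in which case we retry.
    Snapshot* expected = tag(s);
    if (t_hazard.compare_exchange_strong(expected, s)) {
      return s;  // s stays protected until t_hazard is cleared again.
    }
  }
}
```
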
+
+// Acquire a nested stable ThreadsList; this is rare so it uses
+// Threads_lock.
+//
+ThreadsList *Threads::acquire_stable_list_nested_path(Thread *self) {
+ assert(self != NULL, "sanity check");
+ assert(self->get_threads_hazard_ptr() != NULL,
+ "cannot have a NULL regular hazard ptr when acquiring a nested hazard ptr");
+
+ // The thread already has a hazard ptr (ThreadsList ref) so we need
+ // to create a nested ThreadsListHandle with the current ThreadsList
+ // since it might be different than our current hazard ptr. The need
+ // for a nested ThreadsListHandle is rare so we do this while holding
+ // the Threads_lock so we don't race with the scanning code; the code
+ // is so much simpler this way.
+
+ NestedThreadsList* node;
+ {
+ // Only grab the Threads_lock if we don't already own it.
+ MutexLockerEx ml(Threads_lock->owned_by_self() ? NULL : Threads_lock);
+ node = new NestedThreadsList(get_smr_java_thread_list());
+ // We insert at the front of the list to match up with the delete
+ // in release_stable_list().
+ node->set_next(self->get_nested_threads_hazard_ptr());
+ self->set_nested_threads_hazard_ptr(node);
+ if (EnableThreadSMRStatistics) {
+ self->inc_nested_threads_hazard_ptr_cnt();
+ if (self->nested_threads_hazard_ptr_cnt() > _smr_nested_thread_list_max) {
+ _smr_nested_thread_list_max = self->nested_threads_hazard_ptr_cnt();
+ }
+ }
+ }
+ log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::acquire_stable_list: add NestedThreadsList node containing ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(node->t_list()));
+
+ return node->t_list();
+}
+
+inline void Threads::add_smr_deleted_thread_times(uint add_value) {
+ Atomic::add(add_value, &_smr_deleted_thread_times);
+}
+
+inline void Threads::inc_smr_deleted_thread_cnt() {
+ Atomic::inc(&_smr_deleted_thread_cnt);
+}
+
+// Release a stable ThreadsList.
+//
+void Threads::release_stable_list(Thread *self) {
+ assert(self != NULL, "sanity check");
+ // release_stable_list_nested_path() will grab the Threads_lock
+ // so let's make sure the ThreadsListHandle is in a safe place.
+ debug_only(if (StrictSafepointChecks) self->check_for_valid_safepoint_state(/* potential_vm_operation */ false);)
+
+ if (self->get_nested_threads_hazard_ptr() == NULL) {
+ // The typical case is first.
+ release_stable_list_fast_path(self);
+ return;
+ }
+
+ // The nested case is rare.
+ release_stable_list_nested_path(self);
+}
+
+// Fast path way to release a stable ThreadsList. The release portion
+// is lock-free, but the wake up portion is not.
+//
+void Threads::release_stable_list_fast_path(Thread *self) {
+ assert(self != NULL, "sanity check");
+ assert(self->get_threads_hazard_ptr() != NULL, "sanity check");
+ assert(self->get_nested_threads_hazard_ptr() == NULL,
+ "cannot have a nested hazard ptr when releasing a regular hazard ptr");
+
+ // After releasing the hazard ptr, other threads may go ahead and
+ // free up some memory temporarily used by a ThreadsList snapshot.
+ self->set_threads_hazard_ptr(NULL);
+
+ // We use double-check locking to reduce traffic on the system
+ // wide smr_delete_lock.
+ if (Threads::smr_delete_notify()) {
+ // An exiting thread might be waiting in smr_delete(); we need to
+ // check with smr_delete_lock to be sure.
+ release_stable_list_wake_up((char *) "regular hazard ptr");
+ }
+}
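
The release fast path pairs a lock-free store of NULL into the hazard ptr with double-checked locking around the notify, so the common case never touches smr_delete_lock. A sketch of that shape using standard primitives follows; delete_notify, delete_lock and delete_cv are illustrative stand-ins for _smr_delete_notify, smr_delete_lock and the exiting thread's wait.

```cpp
// Reader-side release with double-checked locking around the wake-up.
// Illustrative only; not HotSpot code.
#include <atomic>
#include <condition_variable>
#include <mutex>

std::atomic<unsigned>   delete_notify{0};
std::mutex              delete_lock;
std::condition_variable delete_cv;

void release_reader_side(std::atomic<void*>& my_hazard) {
  my_hazard.store(nullptr, std::memory_order_release);  // cheap, common path
  if (delete_notify.load(std::memory_order_acquire) != 0) {
    // Slow path: an exiting thread may be blocked waiting for hazard
    // pointers to clear, so take the lock and re-check before notifying.
    std::lock_guard<std::mutex> g(delete_lock);
    if (delete_notify.load(std::memory_order_acquire) != 0) {
      delete_cv.notify_all();
    }
  }
}
```
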
+
+// Release a nested stable ThreadsList; this is rare so it uses
+// Threads_lock.
+//
+void Threads::release_stable_list_nested_path(Thread *self) {
+ assert(self != NULL, "sanity check");
+ assert(self->get_nested_threads_hazard_ptr() != NULL, "sanity check");
+ assert(self->get_threads_hazard_ptr() != NULL,
+ "must have a regular hazard ptr to have nested hazard ptrs");
+
+ // We have a nested ThreadsListHandle so we have to release it first.
+ // The need for a nested ThreadsListHandle is rare so we do this while
+ // holding the Threads_lock so we don't race with the scanning code;
+ // the code is so much simpler this way.
+
+ NestedThreadsList *node;
+ {
+ // Only grab the Threads_lock if we don't already own it.
+ MutexLockerEx ml(Threads_lock->owned_by_self() ? NULL : Threads_lock);
+ // We remove from the front of the list to match up with the insert
+ // in acquire_stable_list().
+ node = self->get_nested_threads_hazard_ptr();
+ self->set_nested_threads_hazard_ptr(node->next());
+ if (EnableThreadSMRStatistics) {
+ self->dec_nested_threads_hazard_ptr_cnt();
+ }
+ }
+
+ // An exiting thread might be waiting in smr_delete(); we need to
+ // check with smr_delete_lock to be sure.
+ release_stable_list_wake_up((char *) "nested hazard ptr");
+
+ log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::release_stable_list: delete NestedThreadsList node containing ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(node->t_list()));
+
+ delete node;
+}
+
+// Wake up portion of the release stable ThreadsList protocol;
+// uses the smr_delete_lock().
+//
+void Threads::release_stable_list_wake_up(char *log_str) {
+ assert(log_str != NULL, "sanity check");
+
+ // Note: smr_delete_lock is held in smr_delete() for the entire
+ // hazard ptr search so that we do not lose this notify() if
+ // the exiting thread has to wait. That code path also holds
+ // Threads_lock (which was grabbed before smr_delete_lock) so that
+ // threads_do() can be called. This means the system can't start a
+ // safepoint which means this thread can't take too long to get to
+ // a safepoint because of being blocked on smr_delete_lock.
+ //
+ MonitorLockerEx ml(Threads::smr_delete_lock(), Monitor::_no_safepoint_check_flag);
+ if (Threads::smr_delete_notify()) {
+ // Notify any exiting JavaThreads that are waiting in smr_delete()
+ // that we've released a ThreadsList.
+ ml.notify_all();
+ log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::release_stable_list notified %s", os::current_thread_id(), log_str);
+ }
+}
+
+inline void Threads::update_smr_deleted_thread_time_max(uint new_value) {
+ while (true) {
+ uint cur_value = _smr_deleted_thread_time_max;
+ if (new_value <= cur_value) {
+ // No need to update max value so we're done.
+ break;
+ }
+ if (Atomic::cmpxchg(new_value, &_smr_deleted_thread_time_max, cur_value) == cur_value) {
+ // Updated max value so we're done. Otherwise try it all again.
+ break;
+ }
+ }
+}
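
update_smr_deleted_thread_time_max() is a lock-free "fetch-max": retry the cmpxchg until either the store wins or the observed value is already at least as large. The same idiom with std::atomic, for comparison (illustrative, not HotSpot code):

```cpp
// Lock-free "fetch-max" with std::atomic: retry the CAS until the store wins
// or the current value is already >= the candidate.
#include <atomic>

void update_max(std::atomic<unsigned>& max_so_far, unsigned candidate) {
  unsigned cur = max_so_far.load(std::memory_order_relaxed);
  while (candidate > cur &&
         !max_so_far.compare_exchange_weak(cur, candidate)) {
    // A failed CAS refreshed 'cur'; loop until we win or no update is needed.
  }
}
```
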
+
+inline ThreadsList* Threads::xchg_smr_java_thread_list(ThreadsList* new_list) {
+ return (ThreadsList*)Atomic::xchg(new_list, &_smr_java_thread_list);
+}
+
void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) {
TraceTime timer("Initialize java.lang classes", TRACETIME_LOG(Info, startuptime));
@@ -3616,7 +4082,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
if (!main_thread->set_as_starting_thread()) {
vm_shutdown_during_initialization(
"Failed necessary internal allocation. Out of swap space");
- delete main_thread;
+ main_thread->smr_delete();
*canTryAgain = false; // don't let caller call JNI_CreateJavaVM again
return JNI_ENOMEM;
}
@@ -3631,7 +4097,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// Initialize global modules
jint status = init_globals();
if (status != JNI_OK) {
- delete main_thread;
+ main_thread->smr_delete();
*canTryAgain = false; // don't let caller call JNI_CreateJavaVM again
return status;
}
@@ -4037,23 +4503,6 @@ void Threads::create_vm_init_libraries() {
}
}
-JavaThread* Threads::find_java_thread_from_java_tid(jlong java_tid) {
- assert(Threads_lock->owned_by_self(), "Must hold Threads_lock");
-
- JavaThread* java_thread = NULL;
- // Sequential search for now. Need to do better optimization later.
- for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
- oop tobj = thread->threadObj();
- if (!thread->is_exiting() &&
- tobj != NULL &&
- java_tid == java_lang_Thread::thread_id(tobj)) {
- java_thread = thread;
- break;
- }
- }
- return java_thread;
-}
-
// Last thread running calls java.lang.Shutdown.shutdown()
void JavaThread::invoke_shutdown_hooks() {
@@ -4179,6 +4628,11 @@ bool Threads::destroy_vm() {
notify_vm_shutdown();
+ // We are after VM_Exit::set_vm_exited() so we can't call
+ // thread->smr_delete() or we will block on the Threads_lock.
+ // Deleting the shutdown thread here is safe because another
+ // JavaThread cannot have an active ThreadsListHandle for
+ // this JavaThread.
delete thread;
#if INCLUDE_JVMCI
@@ -4212,6 +4666,501 @@ jboolean Threads::is_supported_jni_version(jint version) {
return JNI_FALSE;
}
+// Hash table of pointers found by a scan. Used for collecting hazard
+// pointers (ThreadsList references). Also used for collecting JavaThreads
+// that are indirectly referenced by hazard ptrs. An instance of this
+// class only contains one type of pointer.
+//
+class ThreadScanHashtable : public CHeapObj<mtThread> {
+ private:
+ static bool ptr_equals(void * const& s1, void * const& s2) {
+ return s1 == s2;
+ }
+
+ static unsigned int ptr_hash(void * const& s1) {
+ // 2654435761 = 2^32 * Phi (golden ratio)
+ return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
+ }
+
+ int _table_size;
+ // ResourceHashtable SIZE is specified at compile time so our
+ // dynamic _table_size is unused for now; 1031 is the first prime
+ // after 1024.
+  typedef ResourceHashtable<void *, int, &ThreadScanHashtable::ptr_hash,
+                            &ThreadScanHashtable::ptr_equals, 1031,
+                            ResourceObj::C_HEAP, mtThread> PtrTable;
+ PtrTable * _ptrs;
+
+ public:
+ // ResourceHashtable is passed to various functions and populated in
+ // different places so we allocate it using C_HEAP to make it immune
+ // from any ResourceMarks that happen to be in the code paths.
+ ThreadScanHashtable(int table_size) : _table_size(table_size), _ptrs(new (ResourceObj::C_HEAP, mtThread) PtrTable()) {}
+
+ ~ThreadScanHashtable() { delete _ptrs; }
+
+ bool has_entry(void *pointer) {
+ int *val_ptr = _ptrs->get(pointer);
+ return val_ptr != NULL && *val_ptr == 1;
+ }
+
+ void add_entry(void *pointer) {
+ _ptrs->put(pointer, 1);
+ }
+};
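
ThreadScanHashtable is effectively a C-heap-allocated set of raw pointers keyed by a multiplicative hash. A rough standard-library equivalent, using the same 2654435761 multiplier as ptr_hash() above, is sketched below; PtrSet and PtrHash are illustrative names.

```cpp
// Rough standard-library equivalent of ThreadScanHashtable: a set of raw
// pointers hashed with the same multiplicative constant. Illustrative only.
#include <cstdint>
#include <unordered_set>

struct PtrHash {
  size_t operator()(const void* p) const {
    // 2654435761 is the 32-bit multiplicative-hash constant used by ptr_hash().
    return (size_t)((uint32_t)(uintptr_t)p * 2654435761u);
  }
};

using PtrSet = std::unordered_set<const void*, PtrHash>;

bool demo() {
  PtrSet seen;
  int a = 0, b = 0;
  seen.insert(&a);                                    // add_entry(&a)
  return seen.count(&a) == 1 && seen.count(&b) == 0;  // has_entry()
}
```
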
+
+// Closure to gather JavaThreads indirectly referenced by hazard ptrs
+// (ThreadsList references) into a hash table. This closure handles part 2
+// of the dance - adding all the JavaThreads referenced by the hazard
+// pointer (ThreadsList reference) to the hash table.
+//
+class AddThreadHazardPointerThreadClosure : public ThreadClosure {
+ private:
+ ThreadScanHashtable *_table;
+
+ public:
+ AddThreadHazardPointerThreadClosure(ThreadScanHashtable *table) : _table(table) {}
+
+ virtual void do_thread(Thread *thread) {
+ if (!_table->has_entry((void*)thread)) {
+ // The same JavaThread might be on more than one ThreadsList or
+ // more than one thread might be using the same ThreadsList. In
+ // either case, we only need a single entry for a JavaThread.
+ _table->add_entry((void*)thread);
+ }
+ }
+};
+
+// Closure to gather JavaThreads indirectly referenced by hazard ptrs
+// (ThreadsList references) into a hash table. This closure handles part 1
+// of the dance - hazard ptr chain walking and dispatch to another
+// closure.
+//
+class ScanHazardPtrGatherProtectedThreadsClosure : public ThreadClosure {
+ private:
+ ThreadScanHashtable *_table;
+ public:
+ ScanHazardPtrGatherProtectedThreadsClosure(ThreadScanHashtable *table) : _table(table) {}
+
+ virtual void do_thread(Thread *thread) {
+ assert_locked_or_safepoint(Threads_lock);
+
+ if (thread == NULL) return;
+
+ // This code races with Threads::acquire_stable_list() which is
+ // lock-free so we have to handle some special situations.
+ //
+ ThreadsList *current_list = NULL;
+ while (true) {
+ current_list = thread->get_threads_hazard_ptr();
+ // No hazard ptr so nothing more to do.
+ if (current_list == NULL) {
+ assert(thread->get_nested_threads_hazard_ptr() == NULL,
+ "cannot have a nested hazard ptr with a NULL regular hazard ptr");
+ return;
+ }
+
+ // If the hazard ptr is verified as stable (since it is not tagged),
+ // then it is safe to use.
+ if (!Thread::is_hazard_ptr_tagged(current_list)) break;
+
+ // The hazard ptr is tagged as not yet verified as being stable
+ // so we are racing with acquire_stable_list(). This exchange
+ // attempts to invalidate the hazard ptr. If we win the race,
+ // then we can ignore this unstable hazard ptr and the other
+ // thread will retry the attempt to publish a stable hazard ptr.
+ // If we lose the race, then we retry our attempt to look at the
+ // hazard ptr.
+ if (thread->cmpxchg_threads_hazard_ptr(NULL, current_list) == current_list) return;
+ }
+
+ // The current JavaThread has a hazard ptr (ThreadsList reference)
+ // which might be _smr_java_thread_list or it might be an older
+ // ThreadsList that has been removed but not freed. In either case,
+ // the hazard ptr is protecting all the JavaThreads on that
+ // ThreadsList.
+ AddThreadHazardPointerThreadClosure add_cl(_table);
+ current_list->threads_do(&add_cl);
+
+ // Any NestedThreadsLists are also protecting JavaThreads so
+ // gather those also; the ThreadsLists may be different.
+ for (NestedThreadsList* node = thread->get_nested_threads_hazard_ptr();
+ node != NULL; node = node->next()) {
+ node->t_list()->threads_do(&add_cl);
+ }
+ }
+};
+
+// Closure to print JavaThreads that have a hazard ptr (ThreadsList
+// reference) that contains an indirect reference to a specific JavaThread.
+//
+class ScanHazardPtrPrintMatchingThreadsClosure : public ThreadClosure {
+ private:
+ JavaThread *_thread;
+ public:
+ ScanHazardPtrPrintMatchingThreadsClosure(JavaThread *thread) : _thread(thread) {}
+
+ virtual void do_thread(Thread *thread) {
+ assert_locked_or_safepoint(Threads_lock);
+
+ if (thread == NULL) return;
+ ThreadsList *current_list = thread->get_threads_hazard_ptr();
+ if (current_list == NULL) {
+ assert(thread->get_nested_threads_hazard_ptr() == NULL,
+ "cannot have a nested hazard ptr with a NULL regular hazard ptr");
+ return;
+ }
+ // If the hazard ptr is unverified, then ignore it.
+ if (Thread::is_hazard_ptr_tagged(current_list)) return;
+
+ // The current JavaThread has a hazard ptr (ThreadsList reference)
+ // which might be _smr_java_thread_list or it might be an older
+ // ThreadsList that has been removed but not freed. In either case,
+ // the hazard ptr is protecting all the JavaThreads on that
+ // ThreadsList, but we only care about matching a specific JavaThread.
+ DO_JAVA_THREADS(current_list, p) {
+ if (p == _thread) {
+ log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::smr_delete: thread1=" INTPTR_FORMAT " has a hazard pointer for thread2=" INTPTR_FORMAT, os::current_thread_id(), p2i(thread), p2i(_thread));
+ break;
+ }
+ }
+
+ // Any NestedThreadsLists are also protecting JavaThreads so
+ // check those also; the ThreadsLists may be different.
+ for (NestedThreadsList* node = thread->get_nested_threads_hazard_ptr();
+ node != NULL; node = node->next()) {
+ DO_JAVA_THREADS(node->t_list(), p) {
+ if (p == _thread) {
+ log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::smr_delete: thread1=" INTPTR_FORMAT " has a nested hazard pointer for thread2=" INTPTR_FORMAT, os::current_thread_id(), p2i(thread), p2i(_thread));
+ return;
+ }
+ }
+ }
+ }
+};
+
+// Return true if the specified JavaThread is protected by a hazard
+// pointer (ThreadsList reference). Otherwise, returns false.
+//
+bool Threads::is_a_protected_JavaThread(JavaThread *thread) {
+ assert_locked_or_safepoint(Threads_lock);
+
+ // Hash table size should be first power of two higher than twice
+ // the length of the Threads list.
+ int hash_table_size = MIN2(_number_of_threads, 32) << 1;
+ hash_table_size--;
+ hash_table_size |= hash_table_size >> 1;
+ hash_table_size |= hash_table_size >> 2;
+ hash_table_size |= hash_table_size >> 4;
+ hash_table_size |= hash_table_size >> 8;
+ hash_table_size |= hash_table_size >> 16;
+ hash_table_size++;
+
+ // Gather a hash table of the JavaThreads indirectly referenced by
+ // hazard ptrs.
+ ThreadScanHashtable *scan_table = new ThreadScanHashtable(hash_table_size);
+ ScanHazardPtrGatherProtectedThreadsClosure scan_cl(scan_table);
+ Threads::threads_do(&scan_cl);
+
+ bool thread_is_protected = false;
+ if (scan_table->has_entry((void*)thread)) {
+ thread_is_protected = true;
+ }
+ delete scan_table;
+ return thread_is_protected;
+}
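
The sizing arithmetic in is_a_protected_JavaThread() (repeated in smr_free_list() below) rounds 2 * MIN2(n, 32) up to the next power of two by smearing the top set bit into all lower positions. A standalone version with a couple of checks follows; next_pow2_of_twice is an illustrative name, not HotSpot code.

```cpp
// Round 2 * MIN2(n, 32) up to the next power of two by smearing the top bit.
#include <cassert>

unsigned next_pow2_of_twice(unsigned n) {
  unsigned x = (n < 32 ? n : 32) << 1;  // MIN2(n, 32) << 1
  x--;
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return x + 1;
}

int main() {
  assert(next_pow2_of_twice(5)  == 16);  // 2 * 5 = 10 rounds up to 16
  assert(next_pow2_of_twice(32) == 64);  // capped at 32, so 64
  return 0;
}
```
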
+
+// Safely delete a JavaThread when it is no longer in use by a
+// ThreadsListHandle.
+//
+void Threads::smr_delete(JavaThread *thread) {
+ assert(!Threads_lock->owned_by_self(), "sanity");
+
+ bool has_logged_once = false;
+ elapsedTimer timer;
+ if (EnableThreadSMRStatistics) {
+ timer.start();
+ }
+
+ while (true) {
+ {
+ // No safepoint check because this JavaThread is not on the
+ // Threads list.
+ MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
+ // Cannot use a MonitorLockerEx helper here because we have
+ // to drop the Threads_lock first if we wait.
+ Threads::smr_delete_lock()->lock_without_safepoint_check();
+ // Set the smr_delete_notify flag after we grab smr_delete_lock
+ // and before we scan hazard ptrs because we're doing
+ // double-check locking in release_stable_list().
+ Threads::set_smr_delete_notify();
+
+ if (!is_a_protected_JavaThread(thread)) {
+ // This is the common case.
+ Threads::clear_smr_delete_notify();
+ Threads::smr_delete_lock()->unlock();
+ break;
+ }
+ if (!has_logged_once) {
+ has_logged_once = true;
+ log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::smr_delete: thread=" INTPTR_FORMAT " is not deleted.", os::current_thread_id(), p2i(thread));
+ if (log_is_enabled(Debug, os, thread)) {
+ ScanHazardPtrPrintMatchingThreadsClosure scan_cl(thread);
+ Threads::threads_do(&scan_cl);
+ }
+ }
+ } // We have to drop the Threads_lock to wait or delete the thread
+
+ if (EnableThreadSMRStatistics) {
+ _smr_delete_lock_wait_cnt++;
+ if (_smr_delete_lock_wait_cnt > _smr_delete_lock_wait_max) {
+ _smr_delete_lock_wait_max = _smr_delete_lock_wait_cnt;
+ }
+ }
+ // Wait for a release_stable_list() call before we check again. No
+ // safepoint check, no timeout, and not as suspend equivalent flag
+ // because this JavaThread is not on the Threads list.
+ Threads::smr_delete_lock()->wait(Mutex::_no_safepoint_check_flag, 0,
+ !Mutex::_as_suspend_equivalent_flag);
+ if (EnableThreadSMRStatistics) {
+ _smr_delete_lock_wait_cnt--;
+ }
+
+ Threads::clear_smr_delete_notify();
+ Threads::smr_delete_lock()->unlock();
+ // Retry the whole scenario.
+ }
+
+ if (ThreadLocalHandshakes) {
+ // The thread is about to be deleted so cancel any handshake.
+ thread->cancel_handshake();
+ }
+
+ delete thread;
+ if (EnableThreadSMRStatistics) {
+ timer.stop();
+ uint millis = (uint)timer.milliseconds();
+ Threads::inc_smr_deleted_thread_cnt();
+ Threads::add_smr_deleted_thread_times(millis);
+ Threads::update_smr_deleted_thread_time_max(millis);
+ }
+
+ log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::smr_delete: thread=" INTPTR_FORMAT " is deleted.", os::current_thread_id(), p2i(thread));
+}
+
+bool Threads::smr_delete_notify() {
+ // Use load_acquire() in order to see any updates to _smr_delete_notify
+ // earlier than when smr_delete_lock is grabbed.
+ return (OrderAccess::load_acquire(&_smr_delete_notify) != 0);
+}
+
+// set_smr_delete_notify() and clear_smr_delete_notify() are called
+// under the protection of the smr_delete_lock, but we also use an
+// Atomic operation to ensure the memory update is seen earlier than
+// when the smr_delete_lock is dropped.
+//
+void Threads::set_smr_delete_notify() {
+ Atomic::inc(&_smr_delete_notify);
+}
+
+void Threads::clear_smr_delete_notify() {
+ Atomic::dec(&_smr_delete_notify);
+}
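
The set/clear functions above, together with smr_delete() and the hazard ptr
release path, form a double-checked handshake: a releasing thread only grabs
the delete lock and wakes waiters when the notify counter is non-zero. A
simplified, hypothetical sketch of that shape using standard C++ primitives
(it deliberately elides the Threads_lock interplay and the exact memory
ordering of the real code):

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    struct SmrCoordinator {             // illustrative, not the HotSpot code
      std::mutex              delete_lock;
      std::condition_variable delete_cv;
      std::atomic<unsigned>   delete_notify{0};

      // Deleter side: wait until no hazard pointer protects the victim.
      template <typename IsProtectedFn>
      void wait_until_unprotected(IsProtectedFn is_protected) {
        std::unique_lock<std::mutex> ml(delete_lock);
        delete_notify++;                // advertise that a deleter is waiting
        while (is_protected()) {
          delete_cv.wait(ml);           // woken by the releaser below
        }
        delete_notify--;
      }

      // Releaser side: after dropping a hazard pointer, check the counter
      // without the lock first (the double-check), then wake any waiter.
      void release_hazard_ptr_epilogue() {
        if (delete_notify.load(std::memory_order_acquire) == 0) {
          return;                       // common case: nobody is waiting
        }
        std::lock_guard<std::mutex> ml(delete_lock);
        delete_cv.notify_all();
      }
    };
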
+
+// Closure to gather hazard ptrs (ThreadsList references) into a hash table.
+//
+class ScanHazardPtrGatherThreadsListClosure : public ThreadClosure {
+ private:
+ ThreadScanHashtable *_table;
+ public:
+ ScanHazardPtrGatherThreadsListClosure(ThreadScanHashtable *table) : _table(table) {}
+
+ virtual void do_thread(Thread* thread) {
+ assert_locked_or_safepoint(Threads_lock);
+
+ if (thread == NULL) return;
+ ThreadsList *threads = thread->get_threads_hazard_ptr();
+ if (threads == NULL) {
+ assert(thread->get_nested_threads_hazard_ptr() == NULL,
+ "cannot have a nested hazard ptr with a NULL regular hazard ptr");
+ return;
+ }
+ // In this closure we always ignore the tag that might mark this
+ // hazard ptr as not yet verified. If we happen to catch an
+ // unverified hazard ptr that is subsequently discarded (not
+ // published), then the only side effect is that we might keep a
+ // to-be-deleted ThreadsList alive a little longer.
+ threads = Thread::untag_hazard_ptr(threads);
+ if (!_table->has_entry((void*)threads)) {
+ _table->add_entry((void*)threads);
+ }
+
+ // Any NestedThreadsLists are also protecting JavaThreads so
+ // gather those also; the ThreadsLists may be different.
+ for (NestedThreadsList* node = thread->get_nested_threads_hazard_ptr();
+ node != NULL; node = node->next()) {
+ threads = node->t_list();
+ if (!_table->has_entry((void*)threads)) {
+ _table->add_entry((void*)threads);
+ }
+ }
+ }
+};
+
+// Safely free a ThreadsList after a Threads::add() or Threads::remove().
+// The specified ThreadsList may not get deleted during this call if it
+// is still in-use (referenced by a hazard ptr). Other ThreadsLists
+// in the chain may get deleted by this call if they are no longer in-use.
+void Threads::smr_free_list(ThreadsList* threads) {
+ assert_locked_or_safepoint(Threads_lock);
+
+ threads->set_next_list(_smr_to_delete_list);
+ _smr_to_delete_list = threads;
+ if (EnableThreadSMRStatistics) {
+ _smr_to_delete_list_cnt++;
+ if (_smr_to_delete_list_cnt > _smr_to_delete_list_max) {
+ _smr_to_delete_list_max = _smr_to_delete_list_cnt;
+ }
+ }
+
+ // Hash table size should be the first power of two higher than twice the length of the ThreadsList.
+ int hash_table_size = MIN2(_number_of_threads, 32) << 1;
+ hash_table_size--;
+ hash_table_size |= hash_table_size >> 1;
+ hash_table_size |= hash_table_size >> 2;
+ hash_table_size |= hash_table_size >> 4;
+ hash_table_size |= hash_table_size >> 8;
+ hash_table_size |= hash_table_size >> 16;
+ hash_table_size++;
+
+ // Gather a hash table of the current hazard ptrs:
+ ThreadScanHashtable *scan_table = new ThreadScanHashtable(hash_table_size);
+ ScanHazardPtrGatherThreadsListClosure scan_cl(scan_table);
+ Threads::threads_do(&scan_cl);
+
+ // Walk through the linked list of pending freeable ThreadsLists
+ // and free the ones that are not referenced from hazard ptrs.
+ ThreadsList* current = _smr_to_delete_list;
+ ThreadsList* prev = NULL;
+ ThreadsList* next = NULL;
+ bool threads_is_freed = false;
+ while (current != NULL) {
+ next = current->next_list();
+ if (!scan_table->has_entry((void*)current)) {
+ // This ThreadsList is not referenced by a hazard ptr.
+ if (prev != NULL) {
+ prev->set_next_list(next);
+ }
+ if (_smr_to_delete_list == current) {
+ _smr_to_delete_list = next;
+ }
+
+ log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::smr_free_list: threads=" INTPTR_FORMAT " is freed.", os::current_thread_id(), p2i(current));
+ if (current == threads) threads_is_freed = true;
+ delete current;
+ if (EnableThreadSMRStatistics) {
+ _smr_java_thread_list_free_cnt++;
+ _smr_to_delete_list_cnt--;
+ }
+ } else {
+ prev = current;
+ }
+ current = next;
+ }
+
+ if (!threads_is_freed) {
+ // Only report "is not freed" on the original call to
+ // smr_free_list() for this ThreadsList.
+ log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::smr_free_list: threads=" INTPTR_FORMAT " is not freed.", os::current_thread_id(), p2i(threads));
+ }
+
+ delete scan_table;
+}
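
smr_free_list() is an instance of the usual hazard-pointer reclamation scheme:
retire the list onto a pending chain, gather every currently published hazard
pointer, and free only the pending nodes that nobody references. A generic
sketch of that pattern (Node and g_hazard_ptrs are illustrative stand-ins for
ThreadsList and the closure-based scan above):

    #include <set>

    struct Node { Node* next = nullptr; };

    Node* g_to_delete_list = nullptr;          // pending (retired) nodes
    std::set<const Node*> g_hazard_ptrs;       // nodes still referenced

    void deferred_free(Node* just_retired) {
      // Prepend the retired node to the pending list, like smr_free_list().
      just_retired->next = g_to_delete_list;
      g_to_delete_list = just_retired;

      // Free every pending node that no hazard pointer references.
      Node** prev_link = &g_to_delete_list;
      for (Node* cur = g_to_delete_list; cur != nullptr; ) {
        Node* next = cur->next;
        if (g_hazard_ptrs.count(cur) == 0) {
          *prev_link = next;                   // unlink ...
          delete cur;                          // ... and reclaim immediately
        } else {
          prev_link = &cur->next;              // still protected; keep pending
        }
        cur = next;
      }
    }
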
+
+// Remove a JavaThread from a ThreadsList. The returned ThreadsList is a
+// new copy of the specified ThreadsList with the specified JavaThread
+// removed.
+ThreadsList *ThreadsList::remove_thread(ThreadsList* list, JavaThread* java_thread) {
+ assert(list->_length > 0, "sanity");
+
+ uint i = 0;
+ DO_JAVA_THREADS(list, current) {
+ if (current == java_thread) {
+ break;
+ }
+ i++;
+ }
+ assert(i < list->_length, "did not find JavaThread on the list");
+ const uint index = i;
+ const uint new_length = list->_length - 1;
+ const uint head_length = index;
+ const uint tail_length = (new_length >= index) ? (new_length - index) : 0;
+ ThreadsList *const new_list = new ThreadsList(new_length);
+
+ if (head_length > 0) {
+ Copy::disjoint_words((HeapWord*)list->_threads, (HeapWord*)new_list->_threads, head_length);
+ }
+ if (tail_length > 0) {
+ Copy::disjoint_words((HeapWord*)list->_threads + index + 1, (HeapWord*)new_list->_threads + index, tail_length);
+ }
+
+ return new_list;
+}
+
+// Add a JavaThread to a ThreadsList. The returned ThreadsList is a
+// new copy of the specified ThreadsList with the specified JavaThread
+// appended to the end.
+ThreadsList *ThreadsList::add_thread(ThreadsList *list, JavaThread *java_thread) {
+ const uint index = list->_length;
+ const uint new_length = index + 1;
+ const uint head_length = index;
+ ThreadsList *const new_list = new ThreadsList(new_length);
+
+ if (head_length > 0) {
+ Copy::disjoint_words((HeapWord*)list->_threads, (HeapWord*)new_list->_threads, head_length);
+ }
+ *(JavaThread**)(new_list->_threads + index) = java_thread;
+
+ return new_list;
+}
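
Both add_thread() and remove_thread() leave the input ThreadsList untouched and
build a fresh snapshot by copying the surviving elements, which is what lets
readers keep using an older list through their hazard pointers. A simplified
sketch of the remove case with plain new[]/memcpy in place of the VM allocator
and Copy::disjoint_words() (JavaThreadStub and remove_snapshot are illustrative
names only):

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    struct JavaThreadStub {};

    // Return a freshly allocated array of length len - 1 with 'victim'
    // removed; the caller owns the result and 'old_list' is never modified.
    JavaThreadStub** remove_snapshot(JavaThreadStub* const* old_list, size_t len,
                                     JavaThreadStub* victim, size_t* new_len) {
      size_t index = 0;
      while (index < len && old_list[index] != victim) {
        index++;
      }
      assert(index < len && "victim must be on the list");

      *new_len = len - 1;
      JavaThreadStub** new_list = new JavaThreadStub*[*new_len];
      // Head: everything before the victim.
      std::memcpy(new_list, old_list, index * sizeof(JavaThreadStub*));
      // Tail: everything after the victim, shifted down one slot.
      std::memcpy(new_list + index, old_list + index + 1,
                  (*new_len - index) * sizeof(JavaThreadStub*));
      return new_list;
    }
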
+
+int ThreadsList::find_index_of_JavaThread(JavaThread *target) {
+ if (target == NULL) {
+ return -1;
+ }
+ for (uint i = 0; i < length(); i++) {
+ if (target == thread_at(i)) {
+ return (int)i;
+ }
+ }
+ return -1;
+}
+
+JavaThread* ThreadsList::find_JavaThread_from_java_tid(jlong java_tid) const {
+ DO_JAVA_THREADS(this, thread) {
+ oop tobj = thread->threadObj();
+ // Ignore the thread if it hasn't run yet, has exited
+ // or is starting to exit.
+ if (tobj != NULL && !thread->is_exiting() &&
+ java_tid == java_lang_Thread::thread_id(tobj)) {
+ // found a match
+ return thread;
+ }
+ }
+ return NULL;
+}
+
+bool ThreadsList::includes(const JavaThread * const p) const {
+ if (p == NULL) {
+ return false;
+ }
+ DO_JAVA_THREADS(this, q) {
+ if (q == p) {
+ return true;
+ }
+ }
+ return false;
+}
void Threads::add(JavaThread* p, bool force_daemon) {
// The threads lock must be owned at this point
@@ -4222,6 +5171,11 @@ void Threads::add(JavaThread* p, bool force_daemon) {
p->initialize_queues();
p->set_next(_thread_list);
_thread_list = p;
+
+ // Once a JavaThread is added to the Threads list, smr_delete() has
+ // to be used to delete it. Otherwise we can just delete it directly.
+ p->set_on_thread_list();
+
_number_of_threads++;
oop threadObj = p->threadObj();
bool daemon = true;
@@ -4234,6 +5188,20 @@ void Threads::add(JavaThread* p, bool force_daemon) {
ThreadService::add_thread(p, daemon);
+ // Maintain fast thread list
+ ThreadsList *new_list = ThreadsList::add_thread(get_smr_java_thread_list(), p);
+ if (EnableThreadSMRStatistics) {
+ _smr_java_thread_list_alloc_cnt++;
+ if (new_list->length() > _smr_java_thread_list_max) {
+ _smr_java_thread_list_max = new_list->length();
+ }
+ }
+ // Initial _smr_java_thread_list will not generate a "Threads::add" mesg.
+ log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::add: new ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(new_list));
+
+ ThreadsList *old_list = xchg_smr_java_thread_list(new_list);
+ smr_free_list(old_list);
+
// Possible GC point.
Events::log(p, "Thread added: " INTPTR_FORMAT, p2i(p));
}
@@ -4247,7 +5215,20 @@ void Threads::remove(JavaThread* p) {
// that we do not remove thread without safepoint code notice
{ MutexLocker ml(Threads_lock);
- assert(includes(p), "p must be present");
+ assert(get_smr_java_thread_list()->includes(p), "p must be present");
+
+ // Maintain fast thread list
+ ThreadsList *new_list = ThreadsList::remove_thread(get_smr_java_thread_list(), p);
+ if (EnableThreadSMRStatistics) {
+ _smr_java_thread_list_alloc_cnt++;
+ // This list is smaller so no need to check for a "longest" update.
+ }
+
+ // Final _smr_java_thread_list will not generate a "Threads::remove" mesg.
+ log_debug(thread, smr)("tid=" UINTX_FORMAT ": Threads::remove: new ThreadsList=" INTPTR_FORMAT, os::current_thread_id(), p2i(new_list));
+
+ ThreadsList *old_list = xchg_smr_java_thread_list(new_list);
+ smr_free_list(old_list);
JavaThread* current = _thread_list;
JavaThread* prev = NULL;
@@ -4262,6 +5243,7 @@ void Threads::remove(JavaThread* p) {
} else {
_thread_list = p->next();
}
+
_number_of_threads--;
oop threadObj = p->threadObj();
bool daemon = true;
@@ -4288,17 +5270,6 @@ void Threads::remove(JavaThread* p) {
Events::log(p, "Thread exited: " INTPTR_FORMAT, p2i(p));
}
-// Threads_lock must be held when this is called (or must be called during a safepoint)
-bool Threads::includes(JavaThread* p) {
- assert(Threads_lock->is_locked(), "sanity check");
- ALL_JAVA_THREADS(q) {
- if (q == p) {
- return true;
- }
- }
- return false;
-}
-
// Operations on the Threads list for GC. These are not explicitly locked,
// but the garbage collector must provide a safe context for them to run.
// In particular, these things should never be called when the Threads_lock
@@ -4411,47 +5382,36 @@ void Threads::deoptimized_wrt_marked_nmethods() {
// Get count Java threads that are waiting to enter the specified monitor.
-GrowableArray<JavaThread*>* Threads::get_pending_threads(int count,
- address monitor,
- bool doLock) {
- assert(doLock || SafepointSynchronize::is_at_safepoint(),
- "must grab Threads_lock or be at safepoint");
+GrowableArray<JavaThread*>* Threads::get_pending_threads(ThreadsList * t_list,
+ int count,
+ address monitor) {
GrowableArray<JavaThread*>* result = new GrowableArray<JavaThread*>(count);
int i = 0;
- {
- MutexLockerEx ml(doLock ? Threads_lock : NULL);
- ALL_JAVA_THREADS(p) {
- if (!p->can_call_java()) continue;
+ DO_JAVA_THREADS(t_list, p) {
+ if (!p->can_call_java()) continue;
- address pending = (address)p->current_pending_monitor();
- if (pending == monitor) { // found a match
- if (i < count) result->append(p); // save the first count matches
- i++;
- }
+ address pending = (address)p->current_pending_monitor();
+ if (pending == monitor) { // found a match
+ if (i < count) result->append(p); // save the first count matches
+ i++;
}
}
+
return result;
}
-JavaThread *Threads::owning_thread_from_monitor_owner(address owner,
- bool doLock) {
- assert(doLock ||
- Threads_lock->owned_by_self() ||
- SafepointSynchronize::is_at_safepoint(),
- "must grab Threads_lock or be at safepoint");
-
+JavaThread *Threads::owning_thread_from_monitor_owner(ThreadsList * t_list,
+ address owner) {
// NULL owner means not locked so we can skip the search
if (owner == NULL) return NULL;
- {
- MutexLockerEx ml(doLock ? Threads_lock : NULL);
- ALL_JAVA_THREADS(p) {
- // first, see if owner is the address of a Java thread
- if (owner == (address)p) return p;
- }
+ DO_JAVA_THREADS(t_list, p) {
+ // first, see if owner is the address of a Java thread
+ if (owner == (address)p) return p;
}
+
// Cannot assert on lack of success here since this function may be
// used by code that is trying to report useful problem information
// like deadlock detection.
@@ -4462,15 +5422,13 @@ JavaThread *Threads::owning_thread_from_monitor_owner(address owner,
// Lock Word in the owning Java thread's stack.
//
JavaThread* the_owner = NULL;
- {
- MutexLockerEx ml(doLock ? Threads_lock : NULL);
- ALL_JAVA_THREADS(q) {
- if (q->is_lock_owned(owner)) {
- the_owner = q;
- break;
- }
+ DO_JAVA_THREADS(t_list, q) {
+ if (q->is_lock_owned(owner)) {
+ the_owner = q;
+ break;
}
}
+
// cannot assert on lack of success here; see above comment
return the_owner;
}
@@ -4495,6 +5453,9 @@ void Threads::print_on(outputStream* st, bool print_stacks,
}
#endif // INCLUDE_SERVICES
+ print_smr_info_on(st);
+ st->cr();
+
ALL_JAVA_THREADS(p) {
ResourceMark rm;
p->print_on(st);
@@ -4521,9 +5482,105 @@ void Threads::print_on(outputStream* st, bool print_stacks,
wt->print_on(st);
st->cr();
}
+
st->flush();
}
+// Log Threads class SMR info.
+void Threads::log_smr_statistics() {
+ LogTarget(Info, thread, smr) log;
+ if (log.is_enabled()) {
+ LogStream out(log);
+ print_smr_info_on(&out);
+ }
+}
+
+// Print Threads class SMR info.
+void Threads::print_smr_info_on(outputStream* st) {
+ // Only grab the Threads_lock if we don't already own it
+ // and if we are not reporting an error.
+ MutexLockerEx ml((Threads_lock->owned_by_self() || VMError::is_error_reported()) ? NULL : Threads_lock);
+
+ st->print_cr("Threads class SMR info:");
+ st->print_cr("_smr_java_thread_list=" INTPTR_FORMAT ", length=%u, "
+ "elements={", p2i(_smr_java_thread_list),
+ _smr_java_thread_list->length());
+ print_smr_info_elements_on(st, _smr_java_thread_list);
+ st->print_cr("}");
+ if (_smr_to_delete_list != NULL) {
+ st->print_cr("_smr_to_delete_list=" INTPTR_FORMAT ", length=%u, "
+ "elements={", p2i(_smr_to_delete_list),
+ _smr_to_delete_list->length());
+ print_smr_info_elements_on(st, _smr_to_delete_list);
+ st->print_cr("}");
+ for (ThreadsList *t_list = _smr_to_delete_list->next_list();
+ t_list != NULL; t_list = t_list->next_list()) {
+ st->print("next-> " INTPTR_FORMAT ", length=%u, "
+ "elements={", p2i(t_list), t_list->length());
+ print_smr_info_elements_on(st, t_list);
+ st->print_cr("}");
+ }
+ }
+ if (!EnableThreadSMRStatistics) {
+ return;
+ }
+ st->print_cr("_smr_java_thread_list_alloc_cnt=" UINT64_FORMAT ","
+ "_smr_java_thread_list_free_cnt=" UINT64_FORMAT ","
+ "_smr_java_thread_list_max=%u, "
+ "_smr_nested_thread_list_max=%u",
+ _smr_java_thread_list_alloc_cnt,
+ _smr_java_thread_list_free_cnt,
+ _smr_java_thread_list_max,
+ _smr_nested_thread_list_max);
+ if (_smr_tlh_cnt > 0) {
+ st->print_cr("_smr_tlh_cnt=%u"
+ ", _smr_tlh_times=%u"
+ ", avg_smr_tlh_time=%0.2f"
+ ", _smr_tlh_time_max=%u",
+ _smr_tlh_cnt, _smr_tlh_times,
+ ((double) _smr_tlh_times / _smr_tlh_cnt),
+ _smr_tlh_time_max);
+ }
+ if (_smr_deleted_thread_cnt > 0) {
+ st->print_cr("_smr_deleted_thread_cnt=%u"
+ ", _smr_deleted_thread_times=%u"
+ ", avg_smr_deleted_thread_time=%0.2f"
+ ", _smr_deleted_thread_time_max=%u",
+ _smr_deleted_thread_cnt, _smr_deleted_thread_times,
+ ((double) _smr_deleted_thread_times / _smr_deleted_thread_cnt),
+ _smr_deleted_thread_time_max);
+ }
+ st->print_cr("_smr_delete_lock_wait_cnt=%u, _smr_delete_lock_wait_max=%u",
+ _smr_delete_lock_wait_cnt, _smr_delete_lock_wait_max);
+ st->print_cr("_smr_to_delete_list_cnt=%u, _smr_to_delete_list_max=%u",
+ _smr_to_delete_list_cnt, _smr_to_delete_list_max);
+}
+
+// Print ThreadsList elements (4 per line).
+void Threads::print_smr_info_elements_on(outputStream* st,
+ ThreadsList* t_list) {
+ uint cnt = 0;
+ JavaThreadIterator jti(t_list);
+ for (JavaThread *jt = jti.first(); jt != NULL; jt = jti.next()) {
+ st->print(INTPTR_FORMAT, p2i(jt));
+ if (cnt < t_list->length() - 1) {
+ // Separate with comma or comma-space except for the last one.
+ if (((cnt + 1) % 4) == 0) {
+ // Four INTPTR_FORMAT fit on an 80 column line so end the
+ // current line with just a comma.
+ st->print_cr(",");
+ } else {
+ // Not the last one on the current line so use comma-space:
+ st->print(", ");
+ }
+ } else {
+ // Last one so just end the current line.
+ st->cr();
+ }
+ cnt++;
+ }
+}
+
void Threads::print_on_error(Thread* this_thread, outputStream* st, Thread* current, char* buf,
int buflen, bool* found_current) {
if (this_thread != NULL) {
@@ -4560,6 +5617,9 @@ class PrintOnErrorClosure : public ThreadClosure {
// memory (even in resource area), it might deadlock the error handler.
void Threads::print_on_error(outputStream* st, Thread* current, char* buf,
int buflen) {
+ print_smr_info_on(st);
+ st->cr();
+
bool found_current = false;
st->print_cr("Java Threads: ( => current thread )");
ALL_JAVA_THREADS(thread) {
@@ -4581,6 +5641,7 @@ void Threads::print_on_error(outputStream* st, Thread* current, char* buf,
st->cr();
}
st->cr();
+
st->print_cr("Threads with active compile tasks:");
print_threads_compiling(st, buf, buflen);
}
diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp
index 65fa5aae1c5..db1d465c751 100644
--- a/src/hotspot/share/runtime/thread.hpp
+++ b/src/hotspot/share/runtime/thread.hpp
@@ -57,6 +57,8 @@
#endif
class ThreadSafepointState;
+class ThreadsList;
+class NestedThreadsList;
class JvmtiThreadState;
class JvmtiGetLoadedClassesClosure;
@@ -101,6 +103,7 @@ class WorkerThread;
// - WatcherThread
class Thread: public ThreadShadow {
+ friend class Threads;
friend class VMStructs;
friend class JVMCIVMStructs;
private:
@@ -118,6 +121,47 @@ class Thread: public ThreadShadow {
protected:
// Support for forcing alignment of thread objects for biased locking
void* _real_malloc_address;
+ // JavaThread lifecycle support:
+ friend class ScanHazardPtrGatherProtectedThreadsClosure;
+ friend class ScanHazardPtrGatherThreadsListClosure;
+ friend class ScanHazardPtrPrintMatchingThreadsClosure;
+ friend class ThreadsListHandle;
+ friend class ThreadsListSetter;
+ ThreadsList* volatile _threads_hazard_ptr;
+ ThreadsList* cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value);
+ ThreadsList* get_threads_hazard_ptr();
+ void set_threads_hazard_ptr(ThreadsList* new_list);
+ static bool is_hazard_ptr_tagged(ThreadsList* list) {
+ return (intptr_t(list) & intptr_t(1)) == intptr_t(1);
+ }
+ static ThreadsList* tag_hazard_ptr(ThreadsList* list) {
+ return (ThreadsList*)(intptr_t(list) | intptr_t(1));
+ }
+ static ThreadsList* untag_hazard_ptr(ThreadsList* list) {
+ return (ThreadsList*)(intptr_t(list) & ~intptr_t(1));
+ }
+ NestedThreadsList* _nested_threads_hazard_ptr;
+ NestedThreadsList* get_nested_threads_hazard_ptr() {
+ return _nested_threads_hazard_ptr;
+ }
+ void set_nested_threads_hazard_ptr(NestedThreadsList* value) {
+ assert(Threads_lock->owned_by_self(),
+ "must own Threads_lock for _nested_threads_hazard_ptr to be valid.");
+ _nested_threads_hazard_ptr = value;
+ }
+ // This field is enabled via -XX:+EnableThreadSMRStatistics:
+ uint _nested_threads_hazard_ptr_cnt;
+ void dec_nested_threads_hazard_ptr_cnt() {
+ assert(_nested_threads_hazard_ptr_cnt != 0, "mismatched {dec,inc}_nested_threads_hazard_ptr_cnt()");
+ _nested_threads_hazard_ptr_cnt--;
+ }
+ void inc_nested_threads_hazard_ptr_cnt() {
+ _nested_threads_hazard_ptr_cnt++;
+ }
+ uint nested_threads_hazard_ptr_cnt() {
+ return _nested_threads_hazard_ptr_cnt;
+ }
+
public:
void* operator new(size_t size) throw() { return allocate(size, true); }
void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
@@ -359,6 +403,9 @@ class Thread: public ThreadShadow {
static inline Thread* current_or_null_safe();
// Common thread operations
+#ifdef ASSERT
+ static void check_for_dangling_thread_pointer(Thread *thread);
+#endif
static void set_priority(Thread* thread, ThreadPriority priority);
static ThreadPriority get_priority(const Thread* const thread);
static void start(Thread* thread);
@@ -576,6 +623,7 @@ protected:
// Printing
virtual void print_on(outputStream* st) const;
+ virtual void print_nested_threads_hazard_ptrs_on(outputStream* st) const;
void print() const { print_on(tty); }
virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
void print_value_on(outputStream* st) const;
@@ -798,6 +846,7 @@ class JavaThread: public Thread {
friend class WhiteBox;
private:
JavaThread* _next; // The next thread in the Threads list
+ bool _on_thread_list; // Is set when this JavaThread is added to the Threads list
oop _threadObj; // The Java level thread object
#ifdef ASSERT
@@ -1125,15 +1174,23 @@ class JavaThread: public Thread {
void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; }
bool is_at_poll_safepoint() { return _safepoint_state->is_at_poll_safepoint(); }
+ // JavaThread termination and lifecycle support:
+ void smr_delete();
+ bool on_thread_list() const { return _on_thread_list; }
+ void set_on_thread_list() { _on_thread_list = true; }
+
// thread has called JavaThread::exit() or is terminated
- bool is_exiting() { return _terminated == _thread_exiting || is_terminated(); }
+ bool is_exiting() const;
// thread is terminated (no longer on the threads list); we compare
// against the two non-terminated values so that a freed JavaThread
// will also be considered terminated.
- bool is_terminated() { return _terminated != _not_terminated && _terminated != _thread_exiting; }
- void set_terminated(TerminatedTypes t) { _terminated = t; }
+ bool check_is_terminated(TerminatedTypes l_terminated) const {
+ return l_terminated != _not_terminated && l_terminated != _thread_exiting;
+ }
+ bool is_terminated() const;
+ void set_terminated(TerminatedTypes t);
// special for Threads::remove() which is static:
- void set_terminated_value() { _terminated = _thread_terminated; }
+ void set_terminated_value();
void block_if_vm_exited();
bool doing_unsafe_access() { return _doing_unsafe_access; }
@@ -1220,6 +1277,9 @@ class JavaThread: public Thread {
// via the appropriate -XX options.
bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits);
+ // test for suspend - most (all?) of these should go away
+ bool is_thread_fully_suspended(bool wait_for_suspend, uint32_t *bits);
+
inline void set_external_suspend();
inline void clear_external_suspend();
@@ -2066,28 +2126,84 @@ inline CompilerThread* CompilerThread::current() {
class Threads: AllStatic {
friend class VMStructs;
private:
- static JavaThread* _thread_list;
- static int _number_of_threads;
- static int _number_of_non_daemon_threads;
- static int _return_code;
- static int _thread_claim_parity;
+ // Safe Memory Reclamation (SMR) support:
+ // The coordination between Threads::release_stable_list() and
+ // Threads::smr_delete() uses the smr_delete_lock in order to
+ // reduce the traffic on the Threads_lock.
+ static Monitor* _smr_delete_lock;
+ // The '_cnt', '_max' and '_times' fields are enabled via
+ // -XX:+EnableThreadSMRStatistics (see thread.cpp for a
+ // description of each field):
+ static uint _smr_delete_lock_wait_cnt;
+ static uint _smr_delete_lock_wait_max;
+ // The smr_delete_notify flag is used for proper double-check
+ // locking in order to reduce the traffic on the smr_delete_lock.
+ static volatile uint _smr_delete_notify;
+ static volatile uint _smr_deleted_thread_cnt;
+ static volatile uint _smr_deleted_thread_time_max;
+ static volatile uint _smr_deleted_thread_times;
+ static ThreadsList* volatile _smr_java_thread_list;
+ static uint64_t _smr_java_thread_list_alloc_cnt;
+ static uint64_t _smr_java_thread_list_free_cnt;
+ static uint _smr_java_thread_list_max;
+ static uint _smr_nested_thread_list_max;
+ static volatile uint _smr_tlh_cnt;
+ static volatile uint _smr_tlh_time_max;
+ static volatile uint _smr_tlh_times;
+ static ThreadsList* _smr_to_delete_list;
+ static uint _smr_to_delete_list_cnt;
+ static uint _smr_to_delete_list_max;
+
+ static JavaThread* _thread_list;
+ static int _number_of_threads;
+ static int _number_of_non_daemon_threads;
+ static int _return_code;
+ static int _thread_claim_parity;
#ifdef ASSERT
- static bool _vm_complete;
+ static bool _vm_complete;
#endif
static void initialize_java_lang_classes(JavaThread* main_thread, TRAPS);
static void initialize_jsr292_core_classes(TRAPS);
+
+ static ThreadsList *acquire_stable_list_fast_path(Thread *self);
+ static ThreadsList *acquire_stable_list_nested_path(Thread *self);
+ static void add_smr_deleted_thread_times(uint add_value);
+ static void clear_smr_delete_notify();
+ static ThreadsList* get_smr_java_thread_list();
+ static void inc_smr_deleted_thread_cnt();
+ static void release_stable_list_fast_path(Thread *self);
+ static void release_stable_list_nested_path(Thread *self);
+ static void release_stable_list_wake_up(char *log_str);
+ static void set_smr_delete_notify();
+ static Monitor* smr_delete_lock() { return _smr_delete_lock; }
+ static bool smr_delete_notify();
+ static void smr_free_list(ThreadsList* threads);
+ static void update_smr_deleted_thread_time_max(uint new_value);
+ static ThreadsList* xchg_smr_java_thread_list(ThreadsList* new_list);
+
public:
// Thread management
// force_daemon is a concession to JNI, where we may need to add a
// thread to the thread list before allocating its thread object
static void add(JavaThread* p, bool force_daemon = false);
static void remove(JavaThread* p);
- static bool includes(JavaThread* p);
- static JavaThread* first() { return _thread_list; }
static void threads_do(ThreadClosure* tc);
static void possibly_parallel_threads_do(bool is_par, ThreadClosure* tc);
+ // SMR support:
+ static ThreadsList *acquire_stable_list(Thread *self, bool is_ThreadsListSetter);
+ static void release_stable_list(Thread *self);
+ static bool is_a_protected_JavaThread(JavaThread *thread);
+ static bool is_a_protected_JavaThread_with_lock(JavaThread *thread) {
+ MutexLockerEx ml(Threads_lock->owned_by_self() ? NULL : Threads_lock);
+ return is_a_protected_JavaThread(thread);
+ }
+ static void smr_delete(JavaThread *thread);
+ static void inc_smr_tlh_cnt();
+ static void update_smr_tlh_time_max(uint new_value);
+ static void add_smr_tlh_times(uint add_value);
+
// Initializes the vm and creates the vm thread
static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
static void convert_vm_init_libraries_to_agents();
@@ -2148,7 +2264,10 @@ class Threads: AllStatic {
// Verification
static void verify();
+ static void log_smr_statistics();
static void print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks);
+ static void print_smr_info_on(outputStream* st);
+ static void print_smr_info_elements_on(outputStream* st, ThreadsList* t_list);
static void print(bool print_stacks, bool internal_format) {
// this function is only used by debug.cpp
print_on(tty, print_stacks, internal_format, false /* no concurrent lock printed */);
@@ -2158,17 +2277,13 @@ class Threads: AllStatic {
int buflen, bool* found_current);
static void print_threads_compiling(outputStream* st, char* buf, int buflen);
- // Get Java threads that are waiting to enter a monitor. If doLock
- // is true, then Threads_lock is grabbed as needed. Otherwise, the
- // VM needs to be at a safepoint.
- static GrowableArray<JavaThread*>* get_pending_threads(int count,
- address monitor, bool doLock);
+ // Get Java threads that are waiting to enter a monitor.
+ static GrowableArray<JavaThread*>* get_pending_threads(ThreadsList * t_list,
+ int count, address monitor);
- // Get owning Java thread from the monitor's owner field. If doLock
- // is true, then Threads_lock is grabbed as needed. Otherwise, the
- // VM needs to be at a safepoint.
- static JavaThread *owning_thread_from_monitor_owner(address owner,
- bool doLock);
+ // Get owning Java thread from the monitor's owner field.
+ static JavaThread *owning_thread_from_monitor_owner(ThreadsList * t_list,
+ address owner);
// Number of threads on the active threads list
static int number_of_threads() { return _number_of_threads; }
@@ -2177,9 +2292,6 @@ class Threads: AllStatic {
// Deoptimizes all frames tied to marked nmethods
static void deoptimized_wrt_marked_nmethods();
-
- static JavaThread* find_java_thread_from_java_tid(jlong java_tid);
-
};
diff --git a/src/hotspot/share/runtime/thread.inline.hpp b/src/hotspot/share/runtime/thread.inline.hpp
index 5a664953b9a..3821d9317bf 100644
--- a/src/hotspot/share/runtime/thread.inline.hpp
+++ b/src/hotspot/share/runtime/thread.inline.hpp
@@ -25,13 +25,10 @@
#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP
#define SHARE_VM_RUNTIME_THREAD_INLINE_HPP
-#define SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
-
#include "runtime/atomic.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/thread.hpp"
-
-#undef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
+#include "runtime/threadSMR.hpp"
inline void Thread::set_suspend_flag(SuspendFlags f) {
assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
@@ -89,6 +86,18 @@ inline jlong Thread::cooked_allocated_bytes() {
return allocated_bytes;
}
+inline ThreadsList* Thread::cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value) {
+ return (ThreadsList*)Atomic::cmpxchg(exchange_value, &_threads_hazard_ptr, compare_value);
+}
+
+inline ThreadsList* Thread::get_threads_hazard_ptr() {
+ return (ThreadsList*)OrderAccess::load_acquire(&_threads_hazard_ptr);
+}
+
+inline void Thread::set_threads_hazard_ptr(ThreadsList* new_list) {
+ OrderAccess::release_store_fence(&_threads_hazard_ptr, new_list);
+}
+
inline void JavaThread::set_ext_suspended() {
set_suspend_flag (_ext_suspended);
}
@@ -176,4 +185,53 @@ inline volatile void* JavaThread::get_polling_page() {
return OrderAccess::load_acquire(polling_page_addr());
}
+inline bool JavaThread::is_exiting() const {
+ // Use load-acquire so that setting of _terminated by
+ // JavaThread::exit() is seen more quickly.
+ TerminatedTypes l_terminated = (TerminatedTypes)
+ OrderAccess::load_acquire((volatile jint *) &_terminated);
+ return l_terminated == _thread_exiting || check_is_terminated(l_terminated);
+}
+
+inline bool JavaThread::is_terminated() const {
+ // Use load-acquire so that setting of _terminated by
+ // JavaThread::exit() is seen more quickly.
+ TerminatedTypes l_terminated = (TerminatedTypes)
+ OrderAccess::load_acquire((volatile jint *) &_terminated);
+ return check_is_terminated(l_terminated);
+}
+
+inline void JavaThread::set_terminated(TerminatedTypes t) {
+ // use release-store so the setting of _terminated is seen more quickly
+ OrderAccess::release_store((volatile jint *) &_terminated, (jint) t);
+}
+
+// special for Threads::remove() which is static:
+inline void JavaThread::set_terminated_value() {
+ // use release-store so the setting of _terminated is seen more quickly
+ OrderAccess::release_store((volatile jint *) &_terminated, (jint) _thread_terminated);
+}
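
The is_exiting()/is_terminated() readers pair a load-acquire with the
release-store in set_terminated(), so a thread that observes the new state
also observes everything written before the state change. The same pairing
expressed with std::atomic, purely for illustration:

    #include <atomic>

    struct ExitState {                 // illustrative, not the HotSpot API
      int result_code = 0;             // ordinary data written first
      std::atomic<int> terminated{0};  // 0 = running, 1 = terminated

      void publish_exit(int code) {
        result_code = code;                               // plain store
        terminated.store(1, std::memory_order_release);   // release-store
      }

      bool try_read_exit(int* code_out) const {
        if (terminated.load(std::memory_order_acquire) == 0) {  // load-acquire
          return false;
        }
        *code_out = result_code;  // guaranteed to see the store made above
        return true;
      }
    };
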
+
+inline void Threads::add_smr_tlh_times(uint add_value) {
+ Atomic::add(add_value, &_smr_tlh_times);
+}
+
+inline void Threads::inc_smr_tlh_cnt() {
+ Atomic::inc(&_smr_tlh_cnt);
+}
+
+inline void Threads::update_smr_tlh_time_max(uint new_value) {
+ while (true) {
+ uint cur_value = _smr_tlh_time_max;
+ if (new_value <= cur_value) {
+ // No need to update max value so we're done.
+ break;
+ }
+ if (Atomic::cmpxchg(new_value, &_smr_tlh_time_max, cur_value) == cur_value) {
+ // Updated max value so we're done. Otherwise try it all again.
+ break;
+ }
+ }
+}
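
update_smr_tlh_time_max() maintains a lock-free running maximum via a
compare-and-swap retry loop. The same idea expressed with std::atomic, as an
illustrative helper rather than anything in this patch:

    #include <atomic>

    // Raise max_so_far to at least candidate without ever lowering it,
    // tolerating concurrent updaters; mirrors the cmpxchg loop above.
    inline void update_max(std::atomic<unsigned>& max_so_far, unsigned candidate) {
      unsigned cur = max_so_far.load(std::memory_order_relaxed);
      while (candidate > cur) {
        // On failure, compare_exchange_weak reloads cur, so we re-test and retry.
        if (max_so_far.compare_exchange_weak(cur, candidate)) {
          break;
        }
      }
    }
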
+
#endif // SHARE_VM_RUNTIME_THREAD_INLINE_HPP
diff --git a/src/hotspot/share/runtime/threadSMR.cpp b/src/hotspot/share/runtime/threadSMR.cpp
new file mode 100644
index 00000000000..82ecc2590eb
--- /dev/null
+++ b/src/hotspot/share/runtime/threadSMR.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
+#include "services/threadService.hpp"
+
+// 'entries + 1' so we always have at least one entry.
+ThreadsList::ThreadsList(int entries) : _length(entries), _threads(NEW_C_HEAP_ARRAY(JavaThread*, entries + 1, mtThread)), _next_list(NULL) {
+ *(JavaThread**)(_threads + entries) = NULL; // Make sure the extra entry is NULL.
+}
+
+ThreadsList::~ThreadsList() {
+ FREE_C_HEAP_ARRAY(JavaThread*, _threads);
+}
+
+ThreadsListSetter::~ThreadsListSetter() {
+ if (_target_needs_release) {
+ // The hazard ptr in the target needs to be released.
+ Threads::release_stable_list(_target);
+ }
+}
+
+void ThreadsListSetter::set() {
+ assert(_target->get_threads_hazard_ptr() == NULL, "hazard ptr should not already be set");
+ (void) Threads::acquire_stable_list(_target, /* is_ThreadsListSetter */ true);
+ _target_needs_release = true;
+}
+
+ThreadsListHandle::ThreadsListHandle(Thread *self) : _list(Threads::acquire_stable_list(self, /* is_ThreadsListSetter */ false)), _self(self) {
+ assert(self == Thread::current(), "sanity check");
+ if (EnableThreadSMRStatistics) {
+ _timer.start();
+ }
+}
+
+ThreadsListHandle::~ThreadsListHandle() {
+ Threads::release_stable_list(_self);
+ if (EnableThreadSMRStatistics) {
+ _timer.stop();
+ uint millis = (uint)_timer.milliseconds();
+ Threads::inc_smr_tlh_cnt();
+ Threads::add_smr_tlh_times(millis);
+ Threads::update_smr_tlh_time_max(millis);
+ }
+}
+
+// Convert an internal thread reference to a JavaThread found on the
+// associated ThreadsList. This ThreadsListHandle "protects" the
+// returned JavaThread *.
+//
+// If thread_oop_p is not NULL, then the caller wants to use the oop
+// after this call so the oop is returned. On success, *jt_pp is set
+// to the converted JavaThread * and true is returned. On error,
+// returns false.
+//
+bool ThreadsListHandle::cv_internal_thread_to_JavaThread(jobject jthread,
+ JavaThread ** jt_pp,
+ oop * thread_oop_p) {
+ assert(this->list() != NULL, "must have a ThreadsList");
+ assert(jt_pp != NULL, "must have a return JavaThread pointer");
+ // thread_oop_p is optional so no assert()
+
+ // The JVM_* interfaces don't allow a NULL thread parameter; JVM/TI
+ // allows a NULL thread parameter to signify "current thread" which
+ // allows us to avoid calling cv_external_thread_to_JavaThread().
+ // The JVM_* interfaces have no such leeway.
+
+ oop thread_oop = JNIHandles::resolve_non_null(jthread);
+ // Looks like an oop at this point.
+ if (thread_oop_p != NULL) {
+ // Return the oop to the caller; the caller may still want
+ // the oop even if this function returns false.
+ *thread_oop_p = thread_oop;
+ }
+
+ JavaThread *java_thread = java_lang_Thread::thread(thread_oop);
+ if (java_thread == NULL) {
+ // The java.lang.Thread does not contain a JavaThread * so it has
+ // not yet run or it has died.
+ return false;
+ }
+ // Looks like a live JavaThread at this point.
+
+ if (java_thread != JavaThread::current()) {
+ // jthread is not for the current JavaThread so have to verify
+ // the JavaThread * against the ThreadsList.
+ if (EnableThreadSMRExtraValidityChecks && !includes(java_thread)) {
+ // Not on the JavaThreads list so it is not alive.
+ return false;
+ }
+ }
+
+ // Return a live JavaThread that is "protected" by the
+ // ThreadsListHandle in the caller.
+ *jt_pp = java_thread;
+ return true;
+}
diff --git a/src/hotspot/share/runtime/threadSMR.hpp b/src/hotspot/share/runtime/threadSMR.hpp
new file mode 100644
index 00000000000..1e177b7f435
--- /dev/null
+++ b/src/hotspot/share/runtime/threadSMR.hpp
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_THREADSMR_HPP
+#define SHARE_VM_RUNTIME_THREADSMR_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/timer.hpp"
+
+// Thread Safe Memory Reclamation (Thread-SMR) support.
+//
+// ThreadsListHandles are used to safely perform operations on one or more
+// threads without the risk of the thread or threads exiting during the
+// operation. It is no longer necessary to hold the Threads_lock to safely
+// perform an operation on a target thread.
+//
+// There are several different ways to refer to java.lang.Thread objects
+// so we have a few ways to get a protected JavaThread *:
+//
+// JNI jobject example:
+// jobject jthread = ...;
+// :
+// ThreadsListHandle tlh;
+// JavaThread* jt = NULL;
+// bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &jt, NULL);
+// if (is_alive) {
+// : // do stuff with 'jt'...
+// }
+//
+// JVM/TI jthread example:
+// jthread thread = ...;
+// :
+// JavaThread* jt = NULL;
+// ThreadsListHandle tlh;
+// jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), thread, &jt, NULL);
+// if (err != JVMTI_ERROR_NONE) {
+// return err;
+// }
+// : // do stuff with 'jt'...
+//
+// JVM/TI oop example (this one should be very rare):
+// oop thread_obj = ...;
+// :
+// JavaThread *jt = NULL;
+// ThreadsListHandle tlh;
+// jvmtiError err = JvmtiExport::cv_oop_to_JavaThread(tlh.list(), thread_obj, &jt);
+// if (err != JVMTI_ERROR_NONE) {
+// return err;
+// }
+// : // do stuff with 'jt'...
+//
+// A JavaThread * that is included in the ThreadsList that is held by
+// a ThreadsListHandle is protected as long as the ThreadsListHandle
+// remains in scope. The target JavaThread * may have logically exited,
+// but that target JavaThread * will not be deleted until it is no
+// longer protected by a ThreadsListHandle.
+
+
+// A fast list of JavaThreads.
+//
+class ThreadsList : public CHeapObj<mtThread> {
+ friend class ScanHazardPtrGatherProtectedThreadsClosure;
+ friend class Threads;
+
+ const uint _length;
+ ThreadsList* _next_list;
+ JavaThread *const *const _threads;
+
+ template <typename T>
+ void threads_do_dispatch(T *cl, JavaThread *const thread) const;
+
+ ThreadsList *next_list() const { return _next_list; }
+ void set_next_list(ThreadsList *list) { _next_list = list; }
+
+public:
+ ThreadsList(int entries);
+ ~ThreadsList();
+
+ template <typename T>
+ void threads_do(T *cl) const;
+
+ uint length() const { return _length; }
+
+ JavaThread *const thread_at(uint i) const { return _threads[i]; }
+
+ JavaThread *const *threads() const { return _threads; }
+
+ // Returns -1 if target is not found.
+ int find_index_of_JavaThread(JavaThread* target);
+ JavaThread* find_JavaThread_from_java_tid(jlong java_tid) const;
+ bool includes(const JavaThread * const p) const;
+
+ static ThreadsList* add_thread(ThreadsList* list, JavaThread* java_thread);
+ static ThreadsList* remove_thread(ThreadsList* list, JavaThread* java_thread);
+};
+
+// Linked list of ThreadsLists to support nested ThreadsListHandles.
+class NestedThreadsList : public CHeapObj<mtThread> {
+ ThreadsList*const _t_list;
+ NestedThreadsList* _next;
+
+public:
+ NestedThreadsList(ThreadsList* t_list) : _t_list(t_list) {
+ assert(Threads_lock->owned_by_self(),
+ "must own Threads_lock for saved t_list to be valid.");
+ }
+
+ ThreadsList* t_list() { return _t_list; }
+ NestedThreadsList* next() { return _next; }
+ void set_next(NestedThreadsList* value) { _next = value; }
+};
+
+// A helper to optionally set the hazard ptr in ourself. This helper can
+// be used by ourself or by another thread. If the hazard ptr is set(),
+// then the destructor will release it.
+//
+class ThreadsListSetter : public StackObj {
+private:
+ bool _target_needs_release; // needs release only when set()
+ Thread * _target;
+
+public:
+ ThreadsListSetter() : _target_needs_release(false), _target(Thread::current()) {
+ }
+ ~ThreadsListSetter();
+ ThreadsList* list();
+ void set();
+ bool target_needs_release() { return _target_needs_release; }
+};
+
+// This stack allocated ThreadsListHandle keeps all JavaThreads in the
+// ThreadsList from being deleted until it is safe.
+//
+class ThreadsListHandle : public StackObj {
+ ThreadsList * _list;
+ Thread *const _self;
+ elapsedTimer _timer; // Enabled via -XX:+EnableThreadSMRStatistics.
+
+public:
+ ThreadsListHandle(Thread *self = Thread::current());
+ ~ThreadsListHandle();
+
+ ThreadsList *list() const {
+ return _list;
+ }
+
+ template <typename T>
+ void threads_do(T *cl) const {
+ return _list->threads_do(cl);
+ }
+
+ bool cv_internal_thread_to_JavaThread(jobject jthread, JavaThread ** jt_pp, oop * thread_oop_p);
+
+ bool includes(JavaThread* p) {
+ return _list->includes(p);
+ }
+
+ uint length() const {
+ return _list->length();
+ }
+};
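
As a usage sketch of the handle declared above (the surrounding function is
hypothetical; the called APIs are the ones introduced by this change and
assume the threadSMR.hpp declarations are in scope):

    // Look up a JavaThread by its java.lang.Thread tid while holding a
    // ThreadsListHandle so the result cannot be deleted underneath us.
    void example_with_protected_thread(jlong java_tid) {
      ThreadsListHandle tlh;   // acquires a stable ThreadsList for this thread
      JavaThread* jt = tlh.list()->find_JavaThread_from_java_tid(java_tid);
      if (jt != NULL) {
        // 'jt' is protected for as long as 'tlh' stays in scope.
        // ... do stuff with 'jt' ...
      }
    }   // ~ThreadsListHandle() releases the stable list
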
+
+// This stack allocated JavaThreadIterator is used to walk the
+// specified ThreadsList using the following style:
+//
+// JavaThreadIterator jti(t_list);
+// for (JavaThread *jt = jti.first(); jt != NULL; jt = jti.next()) {
+// ...
+// }
+//
+class JavaThreadIterator : public StackObj {
+ ThreadsList * _list;
+ uint _index;
+
+public:
+ JavaThreadIterator(ThreadsList *list) : _list(list), _index(0) {
+ assert(list != NULL, "ThreadsList must not be NULL.");
+ }
+
+ JavaThread *first() {
+ _index = 0;
+ return _list->thread_at(_index);
+ }
+
+ uint length() const {
+ return _list->length();
+ }
+
+ ThreadsList *list() const {
+ return _list;
+ }
+
+ JavaThread *next() {
+ if (++_index >= length()) {
+ return NULL;
+ }
+ return _list->thread_at(_index);
+ }
+};
+
+// This stack allocated ThreadsListHandle and JavaThreadIterator combo
+// is used to walk the ThreadsList in the included ThreadsListHandle
+// using the following style:
+//
+// for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
+// ...
+// }
+//
+class JavaThreadIteratorWithHandle : public StackObj {
+ ThreadsListHandle _tlh;
+ uint _index;
+
+public:
+ JavaThreadIteratorWithHandle() : _index(0) {}
+
+ uint length() const {
+ return _tlh.length();
+ }
+
+ ThreadsList *list() const {
+ return _tlh.list();
+ }
+
+ JavaThread *next() {
+ if (_index >= length()) {
+ return NULL;
+ }
+ return _tlh.list()->thread_at(_index++);
+ }
+
+ void rewind() {
+ _index = 0;
+ }
+};
+
+#endif // SHARE_VM_RUNTIME_THREADSMR_HPP
diff --git a/src/hotspot/share/runtime/threadSMR.inline.hpp b/src/hotspot/share/runtime/threadSMR.inline.hpp
new file mode 100644
index 00000000000..0203fc6e55f
--- /dev/null
+++ b/src/hotspot/share/runtime/threadSMR.inline.hpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_THREADSMR_INLINE_HPP
+#define SHARE_VM_RUNTIME_THREADSMR_INLINE_HPP
+
+#include "runtime/atomic.hpp"
+#include "runtime/prefetch.inline.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
+
+// Devirtualize known thread closure types.
+template <typename T>
+inline void ThreadsList::threads_do_dispatch(T *cl, JavaThread *const thread) const {
+ cl->T::do_thread(thread);
+}
+
+template <>
+inline void ThreadsList::threads_do_dispatch(ThreadClosure *cl, JavaThread *const thread) const {
+ cl->do_thread(thread);
+}
+
+template <typename T>
+inline void ThreadsList::threads_do(T *cl) const {
+ const intx scan_interval = PrefetchScanIntervalInBytes;
+ JavaThread *const *const end = _threads + _length;
+ for (JavaThread *const *current_p = _threads; current_p != end; current_p++) {
+ Prefetch::read((void*)current_p, scan_interval);
+ JavaThread *const current = *current_p;
+ threads_do_dispatch(cl, current);
+ }
+}
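
The threads_do_dispatch() specializations above devirtualize the closure call:
when the concrete closure type is known at the call site, the qualified call
cl->T::do_thread() binds statically (and can be inlined), while the
ThreadClosure specialization falls back to an ordinary virtual call. A
standalone illustration of that qualified-call technique with made-up types:

    #include <cstdio>

    struct Closure {
      virtual void apply(int v) { std::printf("base %d\n", v); }
    };

    struct CountingClosure : public Closure {
      int count = 0;
      void apply(int v) override { count += v; }
    };

    // Qualified call Cl::apply is resolved at compile time because the
    // dynamic type is known to be exactly Cl.
    template <typename Cl>
    void dispatch(Cl* cl, int v) { cl->Cl::apply(v); }

    // Generic fallback: an ordinary virtual call through the vtable.
    template <>
    void dispatch<Closure>(Closure* cl, int v) { cl->apply(v); }

    int main() {
      CountingClosure cc;
      dispatch(&cc, 41);     // static dispatch straight to CountingClosure::apply
      Closure* base = &cc;
      dispatch(base, 1);     // deduced as Closure, so a virtual call
      std::printf("%d\n", cc.count);   // prints 42
      return 0;
    }
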
+
+inline ThreadsList* ThreadsListSetter::list() {
+ ThreadsList *ret = _target->get_threads_hazard_ptr();
+ assert(ret != NULL, "hazard ptr should be set");
+ assert(!Thread::is_hazard_ptr_tagged(ret), "hazard ptr should be validated");
+ return ret;
+}
+
+#endif // SHARE_VM_RUNTIME_THREADSMR_INLINE_HPP
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
index 0466f7ae7db..9fb22233abb 100644
--- a/src/hotspot/share/runtime/vmStructs.cpp
+++ b/src/hotspot/share/runtime/vmStructs.cpp
@@ -830,7 +830,7 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
nonstatic_field(nmethod, _osr_link, nmethod*) \
nonstatic_field(nmethod, _scavenge_root_link, nmethod*) \
nonstatic_field(nmethod, _scavenge_root_state, jbyte) \
- nonstatic_field(nmethod, _state, volatile char) \
+ nonstatic_field(nmethod, _state, volatile signed char) \
nonstatic_field(nmethod, _exception_offset, int) \
nonstatic_field(nmethod, _orig_pc_offset, int) \
nonstatic_field(nmethod, _stub_offset, int) \
@@ -1350,8 +1350,8 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
declare_integer_type(int) \
declare_integer_type(long) \
declare_integer_type(char) \
+ declare_integer_type(volatile signed char) \
declare_unsigned_integer_type(unsigned char) \
- declare_unsigned_integer_type(volatile char) \
declare_unsigned_integer_type(u_char) \
declare_unsigned_integer_type(unsigned int) \
declare_unsigned_integer_type(uint) \
@@ -1534,6 +1534,7 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
declare_toplevel_type(PerfDataPrologue*) \
declare_toplevel_type(PerfDataEntry) \
declare_toplevel_type(PerfMemory) \
+ declare_type(PerfData, CHeapObj<mtInternal>) \
\
/*********************************/ \
/* SymbolTable, SystemDictionary */ \
@@ -1958,6 +1959,7 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
declare_c2_type(NegFNode, NegNode) \
declare_c2_type(NegDNode, NegNode) \
declare_c2_type(AtanDNode, Node) \
+ declare_c2_type(SqrtFNode, Node) \
declare_c2_type(SqrtDNode, Node) \
declare_c2_type(ReverseBytesINode, Node) \
declare_c2_type(ReverseBytesLNode, Node) \
@@ -2480,6 +2482,12 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
declare_constant(InstanceKlass::inner_class_access_flags_offset) \
declare_constant(InstanceKlass::inner_class_next_offset) \
\
+ /*****************************************************/ \
+ /* InstanceKlass EnclosingMethodAttributeOffset enum */ \
+ /*****************************************************/ \
+ \
+ declare_constant(InstanceKlass::enclosing_method_attribute_size) \
+ \
/*********************************/ \
/* InstanceKlass ClassState enum */ \
/*********************************/ \
@@ -2635,6 +2643,46 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
declare_constant(Deoptimization::_reason_shift) \
declare_constant(Deoptimization::_debug_id_shift) \
\
+ /******************************************/ \
+ /* BasicType enum (globalDefinitions.hpp) */ \
+ /******************************************/ \
+ \
+ declare_constant(T_BOOLEAN) \
+ declare_constant(T_CHAR) \
+ declare_constant(T_FLOAT) \
+ declare_constant(T_DOUBLE) \
+ declare_constant(T_BYTE) \
+ declare_constant(T_SHORT) \
+ declare_constant(T_INT) \
+ declare_constant(T_LONG) \
+ declare_constant(T_OBJECT) \
+ declare_constant(T_ARRAY) \
+ declare_constant(T_VOID) \
+ declare_constant(T_ADDRESS) \
+ declare_constant(T_NARROWOOP) \
+ declare_constant(T_METADATA) \
+ declare_constant(T_NARROWKLASS) \
+ declare_constant(T_CONFLICT) \
+ declare_constant(T_ILLEGAL) \
+ \
+ /**********************************************/ \
+ /* BasicTypeSize enum (globalDefinitions.hpp) */ \
+ /**********************************************/ \
+ \
+ declare_constant(T_BOOLEAN_size) \
+ declare_constant(T_CHAR_size) \
+ declare_constant(T_FLOAT_size) \
+ declare_constant(T_DOUBLE_size) \
+ declare_constant(T_BYTE_size) \
+ declare_constant(T_SHORT_size) \
+ declare_constant(T_INT_size) \
+ declare_constant(T_LONG_size) \
+ declare_constant(T_OBJECT_size) \
+ declare_constant(T_ARRAY_size) \
+ declare_constant(T_NARROWOOP_size) \
+ declare_constant(T_NARROWKLASS_size) \
+ declare_constant(T_VOID_size) \
+ \
/*********************/ \
/* Matcher (C2 only) */ \
/*********************/ \
@@ -2733,6 +2781,21 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
declare_c2_preprocessor_constant("SAVED_ON_ENTRY_REG_COUNT", SAVED_ON_ENTRY_REG_COUNT) \
declare_c2_preprocessor_constant("C_SAVED_ON_ENTRY_REG_COUNT", C_SAVED_ON_ENTRY_REG_COUNT) \
\
+ /************/ \
+ /* PerfData */ \
+ /************/ \
+ \
+ /***********************/ \
+ /* PerfData Units enum */ \
+ /***********************/ \
+ \
+ declare_constant(PerfData::U_None) \
+ declare_constant(PerfData::U_Bytes) \
+ declare_constant(PerfData::U_Ticks) \
+ declare_constant(PerfData::U_Events) \
+ declare_constant(PerfData::U_String) \
+ declare_constant(PerfData::U_Hertz) \
+ \
/****************/ \
/* JVMCI */ \
/****************/ \
diff --git a/src/hotspot/share/runtime/vm_operations.cpp b/src/hotspot/share/runtime/vm_operations.cpp
index e9ad8af4314..9e5608f6f06 100644
--- a/src/hotspot/share/runtime/vm_operations.cpp
+++ b/src/hotspot/share/runtime/vm_operations.cpp
@@ -38,6 +38,7 @@
#include "runtime/interfaceSupport.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.inline.hpp"
#include "runtime/vm_operations.hpp"
#include "services/threadService.hpp"
#include "trace/tracing.hpp"
@@ -96,11 +97,12 @@ void VM_Operation::print_on_error(outputStream* st) const {
void VM_ThreadStop::doit() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
+ ThreadsListHandle tlh;
JavaThread* target = java_lang_Thread::thread(target_thread());
// Note that this now allows multiple ThreadDeath exceptions to be
// thrown at a thread.
- if (target != NULL) {
- // the thread has run and is not already in the process of exiting
+ if (target != NULL && (!EnableThreadSMRExtraValidityChecks || tlh.includes(target))) {
+ // The target thread has run and has not exited yet.
target->send_thread_stop(throwable());
}
}
@@ -146,9 +148,10 @@ void VM_DeoptimizeFrame::doit() {
void VM_DeoptimizeAll::doit() {
DeoptimizationMarker dm;
+ JavaThreadIteratorWithHandle jtiwh;
// deoptimize all java threads in the system
if (DeoptimizeALot) {
- for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
+ for (; JavaThread *thread = jtiwh.next(); ) {
if (thread->has_last_Java_frame()) {
thread->deoptimize();
}
@@ -159,7 +162,7 @@ void VM_DeoptimizeAll::doit() {
int tnum = os::random() & 0x3;
int fnum = os::random() & 0x3;
int tcount = 0;
- for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
+ for (; JavaThread *thread = jtiwh.next(); ) {
if (thread->has_last_Java_frame()) {
if (tcount++ == tnum) {
tcount = 0;
@@ -259,12 +262,19 @@ bool VM_FindDeadlocks::doit_prologue() {
}
void VM_FindDeadlocks::doit() {
- _deadlocks = ThreadService::find_deadlocks_at_safepoint(_concurrent_locks);
+ // Update the hazard ptr in the originating thread to the current
+ // list of threads. This VM operation needs the current list of
+ // threads for proper deadlock detection and those are the
+ // JavaThreads we need to be protected when we return info to the
+ // originating thread.
+ _setter.set();
+
+ _deadlocks = ThreadService::find_deadlocks_at_safepoint(_setter.list(), _concurrent_locks);
if (_out != NULL) {
int num_deadlocks = 0;
for (DeadlockCycle* cycle = _deadlocks; cycle != NULL; cycle = cycle->next()) {
num_deadlocks++;
- cycle->print_on(_out);
+ cycle->print_on_with(_setter.list(), _out);
}
if (num_deadlocks == 1) {
@@ -331,6 +341,12 @@ void VM_ThreadDump::doit_epilogue() {
void VM_ThreadDump::doit() {
ResourceMark rm;
+ // Set the hazard ptr in the originating thread to protect the
+ // current list of threads. This VM operation needs the current list
+ // of threads for a proper dump and those are the JavaThreads we need
+ // to be protected when we return info to the originating thread.
+ _result->set_t_list();
+
ConcurrentLocksDump concurrent_locks(true);
if (_with_locked_synchronizers) {
concurrent_locks.dump_at_safepoint();
@@ -338,7 +354,9 @@ void VM_ThreadDump::doit() {
if (_num_threads == 0) {
// Snapshot all live threads
- for (JavaThread* jt = Threads::first(); jt != NULL; jt = jt->next()) {
+
+ for (uint i = 0; i < _result->t_list()->length(); i++) {
+ JavaThread* jt = _result->t_list()->thread_at(i);
if (jt->is_exiting() ||
jt->is_hidden_from_external_view()) {
// skip terminating threads and hidden threads
@@ -354,6 +372,7 @@ void VM_ThreadDump::doit() {
} else {
// Snapshot threads in the given _threads array
// A dummy snapshot is created if a thread doesn't exist
+
for (int i = 0; i < _num_threads; i++) {
instanceHandle th = _threads->at(i);
if (th() == NULL) {
@@ -366,6 +385,12 @@ void VM_ThreadDump::doit() {
// Dump thread stack only if the thread is alive and not exiting
// and not VM internal thread.
JavaThread* jt = java_lang_Thread::thread(th());
+ if (jt != NULL && !_result->t_list()->includes(jt)) {
+ // _threads[i] doesn't refer to a valid JavaThread; this check
+ // is primarily for JVM_DumpThreads() which doesn't have a good
+ // way to validate the _threads array.
+ jt = NULL;
+ }
if (jt == NULL || /* thread not alive */
jt->is_exiting() ||
jt->is_hidden_from_external_view()) {
@@ -384,7 +409,7 @@ void VM_ThreadDump::doit() {
}
ThreadSnapshot* VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl) {
- ThreadSnapshot* snapshot = new ThreadSnapshot(java_thread);
+ ThreadSnapshot* snapshot = new ThreadSnapshot(_result->t_list(), java_thread);
snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors);
snapshot->set_concurrent_locks(tcl);
return snapshot;
@@ -403,11 +428,12 @@ int VM_Exit::set_vm_exited() {
_shutdown_thread = thr_cur;
_vm_exited = true; // global flag
- for(JavaThread *thr = Threads::first(); thr != NULL; thr = thr->next())
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
if (thr!=thr_cur && thr->thread_state() == _thread_in_native) {
++num_active;
thr->set_terminated(JavaThread::_vm_exited); // per-thread flag
}
+ }
return num_active;
}
@@ -435,11 +461,13 @@ int VM_Exit::wait_for_threads_in_native_to_block() {
int max_wait = max_wait_compiler_thread;
int attempts = 0;
+ JavaThreadIteratorWithHandle jtiwh;
while (true) {
int num_active = 0;
int num_active_compiler_thread = 0;
- for(JavaThread *thr = Threads::first(); thr != NULL; thr = thr->next()) {
+ jtiwh.rewind();
+ for (; JavaThread *thr = jtiwh.next(); ) {
if (thr!=thr_cur && thr->thread_state() == _thread_in_native) {
num_active++;
if (thr->is_Compiler_thread()) {
diff --git a/src/hotspot/share/runtime/vm_operations.hpp b/src/hotspot/share/runtime/vm_operations.hpp
index 4bc2fdd7794..3311ee8be25 100644
--- a/src/hotspot/share/runtime/vm_operations.hpp
+++ b/src/hotspot/share/runtime/vm_operations.hpp
@@ -392,12 +392,14 @@ class VM_PrintMetadata : public VM_Operation {
class DeadlockCycle;
class VM_FindDeadlocks: public VM_Operation {
private:
- bool _concurrent_locks;
- DeadlockCycle* _deadlocks;
- outputStream* _out;
+ bool _concurrent_locks;
+ DeadlockCycle* _deadlocks;
+ outputStream* _out;
+ ThreadsListSetter _setter; // Helper to set hazard ptr in the originating thread
+ // which protects the JavaThreads in _deadlocks.
public:
- VM_FindDeadlocks(bool concurrent_locks) : _concurrent_locks(concurrent_locks), _out(NULL), _deadlocks(NULL) {};
+ VM_FindDeadlocks(bool concurrent_locks) : _concurrent_locks(concurrent_locks), _out(NULL), _deadlocks(NULL), _setter() {};
VM_FindDeadlocks(outputStream* st) : _concurrent_locks(true), _out(st), _deadlocks(NULL) {};
~VM_FindDeadlocks();
diff --git a/src/hotspot/share/services/diagnosticArgument.cpp b/src/hotspot/share/services/diagnosticArgument.cpp
index 691bd797dd1..4456260f29b 100644
--- a/src/hotspot/share/services/diagnosticArgument.cpp
+++ b/src/hotspot/share/services/diagnosticArgument.cpp
@@ -29,6 +29,29 @@
#include "runtime/thread.hpp"
#include "services/diagnosticArgument.hpp"
+StringArrayArgument::StringArrayArgument() {
+ _array = new(ResourceObj::C_HEAP, mtInternal)GrowableArray<char*>(32, true);
+ assert(_array != NULL, "Sanity check");
+}
+
+StringArrayArgument::~StringArrayArgument() {
+ for (int i=0; i<_array->length(); i++) {
+ if(_array->at(i) != NULL) { // Safety check
+ FREE_C_HEAP_ARRAY(char, _array->at(i));
+ }
+ }
+ delete _array;
+}
+
+void StringArrayArgument::add(const char* str, size_t len) {
+ if (str != NULL) {
+ char* ptr = NEW_C_HEAP_ARRAY(char, len+1, mtInternal);
+ strncpy(ptr, str, len);
+ ptr[len] = 0;
+ _array->append(ptr);
+ }
+}
+
void GenDCmdArgument::read_value(const char* str, size_t len, TRAPS) {
/* NOTE:Some argument types doesn't require a value,
* for instance boolean arguments: "enableFeatureX". is
diff --git a/src/hotspot/share/services/diagnosticArgument.hpp b/src/hotspot/share/services/diagnosticArgument.hpp
index d276d3b6294..654650ccd68 100644
--- a/src/hotspot/share/services/diagnosticArgument.hpp
+++ b/src/hotspot/share/services/diagnosticArgument.hpp
@@ -35,29 +35,14 @@ class StringArrayArgument : public CHeapObj<mtInternal> {
private:
GrowableArray<char*>* _array;
public:
- StringArrayArgument() {
- _array = new(ResourceObj::C_HEAP, mtInternal)GrowableArray<char*>(32, true);
- assert(_array != NULL, "Sanity check");
- }
- void add(const char* str, size_t len) {
- if (str != NULL) {
- char* ptr = NEW_C_HEAP_ARRAY(char, len+1, mtInternal);
- strncpy(ptr, str, len);
- ptr[len] = 0;
- _array->append(ptr);
- }
- }
+ StringArrayArgument();
+ ~StringArrayArgument();
+
+ void add(const char* str, size_t len);
+
GrowableArray<char*>* array() {
return _array;
}
- ~StringArrayArgument() {
- for (int i=0; i<_array->length(); i++) {
- if(_array->at(i) != NULL) { // Safety check
- FREE_C_HEAP_ARRAY(char, _array->at(i));
- }
- }
- delete _array;
- }
};
class NanoTimeArgument {
diff --git a/src/hotspot/share/services/heapDumper.cpp b/src/hotspot/share/services/heapDumper.cpp
index e931b2ec334..71dbe24f0b5 100644
--- a/src/hotspot/share/services/heapDumper.cpp
+++ b/src/hotspot/share/services/heapDumper.cpp
@@ -39,6 +39,8 @@
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
#include "runtime/reflectionUtils.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
@@ -1895,7 +1897,7 @@ void VM_HeapDumper::dump_stack_traces() {
_stack_traces = NEW_C_HEAP_ARRAY(ThreadStackTrace*, Threads::number_of_threads(), mtInternal);
int frame_serial_num = 0;
- for (JavaThread* thread = Threads::first(); thread != NULL ; thread = thread->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
oop threadObj = thread->threadObj();
if (threadObj != NULL && !thread->is_exiting() && !thread->is_hidden_from_external_view()) {
// dump thread stack trace
diff --git a/src/hotspot/share/services/jmm.h b/src/hotspot/share/services/jmm.h
deleted file mode 100644
index df232f6feec..00000000000
--- a/src/hotspot/share/services/jmm.h
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef _JAVA_JMM_H_
-#define _JAVA_JMM_H_
-
-/*
- * This is a private interface used by JDK for JVM monitoring
- * and management.
- *
- * Bump the version number when either of the following happens:
- *
- * 1. There is a change in functions in JmmInterface.
- *
- * 2. There is a change in the contract between VM and Java classes.
- */
-
-#include "jni.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-enum {
- JMM_VERSION_1 = 0x20010000,
- JMM_VERSION_1_0 = 0x20010000,
- JMM_VERSION_1_1 = 0x20010100, // JDK 6
- JMM_VERSION_1_2 = 0x20010200, // JDK 7
- JMM_VERSION_1_2_1 = 0x20010201, // JDK 7 GA
- JMM_VERSION_1_2_2 = 0x20010202,
- JMM_VERSION_2 = 0x20020000, // JDK 10
- JMM_VERSION = 0x20020000
-};
-
-typedef struct {
- unsigned int isLowMemoryDetectionSupported : 1;
- unsigned int isCompilationTimeMonitoringSupported : 1;
- unsigned int isThreadContentionMonitoringSupported : 1;
- unsigned int isCurrentThreadCpuTimeSupported : 1;
- unsigned int isOtherThreadCpuTimeSupported : 1;
- unsigned int isObjectMonitorUsageSupported : 1;
- unsigned int isSynchronizerUsageSupported : 1;
- unsigned int isThreadAllocatedMemorySupported : 1;
- unsigned int isRemoteDiagnosticCommandsSupported : 1;
- unsigned int : 22;
-} jmmOptionalSupport;
-
-typedef enum {
- JMM_CLASS_LOADED_COUNT = 1, /* Total number of loaded classes */
- JMM_CLASS_UNLOADED_COUNT = 2, /* Total number of unloaded classes */
- JMM_THREAD_TOTAL_COUNT = 3, /* Total number of threads that have been started */
- JMM_THREAD_LIVE_COUNT = 4, /* Current number of live threads */
- JMM_THREAD_PEAK_COUNT = 5, /* Peak number of live threads */
- JMM_THREAD_DAEMON_COUNT = 6, /* Current number of daemon threads */
- JMM_JVM_INIT_DONE_TIME_MS = 7, /* Time when the JVM finished initialization */
- JMM_COMPILE_TOTAL_TIME_MS = 8, /* Total accumulated time spent in compilation */
- JMM_GC_TIME_MS = 9, /* Total accumulated time spent in collection */
- JMM_GC_COUNT = 10, /* Total number of collections */
- JMM_JVM_UPTIME_MS = 11, /* The JVM uptime in milliseconds */
-
- JMM_INTERNAL_ATTRIBUTE_INDEX = 100,
- JMM_CLASS_LOADED_BYTES = 101, /* Number of bytes loaded instance classes */
- JMM_CLASS_UNLOADED_BYTES = 102, /* Number of bytes unloaded instance classes */
- JMM_TOTAL_CLASSLOAD_TIME_MS = 103, /* Accumulated VM class loader time (TraceClassLoadingTime) */
- JMM_VM_GLOBAL_COUNT = 104, /* Number of VM internal flags */
- JMM_SAFEPOINT_COUNT = 105, /* Total number of safepoints */
- JMM_TOTAL_SAFEPOINTSYNC_TIME_MS = 106, /* Accumulated time spent getting to safepoints */
- JMM_TOTAL_STOPPED_TIME_MS = 107, /* Accumulated time spent at safepoints */
- JMM_TOTAL_APP_TIME_MS = 108, /* Accumulated time spent in Java application */
- JMM_VM_THREAD_COUNT = 109, /* Current number of VM internal threads */
- JMM_CLASS_INIT_TOTAL_COUNT = 110, /* Number of classes for which initializers were run */
- JMM_CLASS_INIT_TOTAL_TIME_MS = 111, /* Accumulated time spent in class initializers */
- JMM_METHOD_DATA_SIZE_BYTES = 112, /* Size of method data in memory */
- JMM_CLASS_VERIFY_TOTAL_TIME_MS = 113, /* Accumulated time spent in class verifier */
- JMM_SHARED_CLASS_LOADED_COUNT = 114, /* Number of shared classes loaded */
- JMM_SHARED_CLASS_UNLOADED_COUNT = 115, /* Number of shared classes unloaded */
- JMM_SHARED_CLASS_LOADED_BYTES = 116, /* Number of bytes loaded shared classes */
- JMM_SHARED_CLASS_UNLOADED_BYTES = 117, /* Number of bytes unloaded shared classes */
-
- JMM_OS_ATTRIBUTE_INDEX = 200,
- JMM_OS_PROCESS_ID = 201, /* Process id of the JVM */
- JMM_OS_MEM_TOTAL_PHYSICAL_BYTES = 202, /* Physical memory size */
-
- JMM_GC_EXT_ATTRIBUTE_INFO_SIZE = 401 /* the size of the GC specific attributes for a given GC memory manager */
-} jmmLongAttribute;
-
-typedef enum {
- JMM_VERBOSE_GC = 21,
- JMM_VERBOSE_CLASS = 22,
- JMM_THREAD_CONTENTION_MONITORING = 23,
- JMM_THREAD_CPU_TIME = 24,
- JMM_THREAD_ALLOCATED_MEMORY = 25
-} jmmBoolAttribute;
-
-
-enum {
- JMM_THREAD_STATE_FLAG_SUSPENDED = 0x00100000,
- JMM_THREAD_STATE_FLAG_NATIVE = 0x00400000
-};
-
-#define JMM_THREAD_STATE_FLAG_MASK 0xFFF00000
-
-typedef enum {
- JMM_STAT_PEAK_THREAD_COUNT = 801,
- JMM_STAT_THREAD_CONTENTION_COUNT = 802,
- JMM_STAT_THREAD_CONTENTION_TIME = 803,
- JMM_STAT_THREAD_CONTENTION_STAT = 804,
- JMM_STAT_PEAK_POOL_USAGE = 805,
- JMM_STAT_GC_STAT = 806
-} jmmStatisticType;
-
-typedef enum {
- JMM_USAGE_THRESHOLD_HIGH = 901,
- JMM_USAGE_THRESHOLD_LOW = 902,
- JMM_COLLECTION_USAGE_THRESHOLD_HIGH = 903,
- JMM_COLLECTION_USAGE_THRESHOLD_LOW = 904
-} jmmThresholdType;
-
-/* Should match what is allowed in globals.hpp */
-typedef enum {
- JMM_VMGLOBAL_TYPE_UNKNOWN = 0,
- JMM_VMGLOBAL_TYPE_JBOOLEAN = 1,
- JMM_VMGLOBAL_TYPE_JSTRING = 2,
- JMM_VMGLOBAL_TYPE_JLONG = 3,
- JMM_VMGLOBAL_TYPE_JDOUBLE = 4
-} jmmVMGlobalType;
-
-typedef enum {
- JMM_VMGLOBAL_ORIGIN_DEFAULT = 1, /* Default value */
- JMM_VMGLOBAL_ORIGIN_COMMAND_LINE = 2, /* Set at command line (or JNI invocation) */
- JMM_VMGLOBAL_ORIGIN_MANAGEMENT = 3, /* Set via management interface */
- JMM_VMGLOBAL_ORIGIN_ENVIRON_VAR = 4, /* Set via environment variables */
- JMM_VMGLOBAL_ORIGIN_CONFIG_FILE = 5, /* Set via config file (such as .hotspotrc) */
- JMM_VMGLOBAL_ORIGIN_ERGONOMIC = 6, /* Set via ergonomic */
- JMM_VMGLOBAL_ORIGIN_ATTACH_ON_DEMAND = 7, /* Set via attach */
- JMM_VMGLOBAL_ORIGIN_OTHER = 99 /* Set via some other mechanism */
-} jmmVMGlobalOrigin;
-
-typedef struct {
- jstring name;
- jvalue value;
- jmmVMGlobalType type; /* Data type */
- jmmVMGlobalOrigin origin; /* Default or non-default value */
- unsigned int writeable : 1; /* dynamically writeable */
- unsigned int external : 1; /* external supported interface */
- unsigned int reserved : 30;
- void *reserved1;
- void *reserved2;
-} jmmVMGlobal;
-
-typedef struct {
- const char* name;
- char type;
- const char* description;
-} jmmExtAttributeInfo;
-
-/* Caller has to set the following fields before calling GetLastGCStat
- * o usage_before_gc - array of MemoryUsage objects
- * o usage_after_gc - array of MemoryUsage objects
- * o gc_ext_attribute_values_size - size of gc_ext_atttribute_values array
- * o gc_ext_attribtue_values - array of jvalues
- */
-typedef struct {
- jlong gc_index; /* Index of the collections */
- jlong start_time; /* Start time of the GC */
- jlong end_time; /* End time of the GC */
- jobjectArray usage_before_gc; /* Memory usage array before GC */
- jobjectArray usage_after_gc; /* Memory usage array after GC */
- jint gc_ext_attribute_values_size; /* set by the caller of GetGCStat */
- jvalue* gc_ext_attribute_values; /* Array of jvalue for GC extension attributes */
- jint num_gc_ext_attributes; /* number of GC extension attribute values s are filled */
- /* -1 indicates gc_ext_attribute_values is not big enough */
-} jmmGCStat;
-
-typedef struct {
- const char* name; /* Name of the diagnostic command */
- const char* description; /* Short description */
- const char* impact; /* Impact on the JVM */
- const char* permission_class; /* Class name of the required permission if any */
- const char* permission_name; /* Permission name of the required permission if any */
- const char* permission_action; /* Action name of the required permission if any*/
- int num_arguments; /* Number of supported options or arguments */
- jboolean enabled; /* True if the diagnostic command can be invoked, false otherwise */
-} dcmdInfo;
-
-typedef struct {
- const char* name; /* Option/Argument name*/
- const char* description; /* Short description */
- const char* type; /* Type: STRING, BOOLEAN, etc. */
- const char* default_string; /* Default value in a parsable string */
- jboolean mandatory; /* True if the option/argument is mandatory */
- jboolean option; /* True if it is an option, false if it is an argument */
- /* (see diagnosticFramework.hpp for option/argument definitions) */
- jboolean multiple; /* True if the option can be specified several time */
- int position; /* Expected position for this argument (this field is */
- /* meaningless for options) */
-} dcmdArgInfo;
-
-typedef struct jmmInterface_1_ {
- void* reserved1;
- void* reserved2;
-
- jint (JNICALL *GetVersion) (JNIEnv *env);
-
- jint (JNICALL *GetOptionalSupport) (JNIEnv *env,
- jmmOptionalSupport* support_ptr);
-
- jint (JNICALL *GetThreadInfo) (JNIEnv *env,
- jlongArray ids,
- jint maxDepth,
- jobjectArray infoArray);
-
- jobjectArray (JNICALL *GetMemoryPools) (JNIEnv* env, jobject mgr);
-
- jobjectArray (JNICALL *GetMemoryManagers) (JNIEnv* env, jobject pool);
-
- jobject (JNICALL *GetMemoryPoolUsage) (JNIEnv* env, jobject pool);
- jobject (JNICALL *GetPeakMemoryPoolUsage) (JNIEnv* env, jobject pool);
-
- void (JNICALL *GetThreadAllocatedMemory)
- (JNIEnv *env,
- jlongArray ids,
- jlongArray sizeArray);
-
- jobject (JNICALL *GetMemoryUsage) (JNIEnv* env, jboolean heap);
-
- jlong (JNICALL *GetLongAttribute) (JNIEnv *env, jobject obj, jmmLongAttribute att);
- jboolean (JNICALL *GetBoolAttribute) (JNIEnv *env, jmmBoolAttribute att);
- jboolean (JNICALL *SetBoolAttribute) (JNIEnv *env, jmmBoolAttribute att, jboolean flag);
-
- jint (JNICALL *GetLongAttributes) (JNIEnv *env,
- jobject obj,
- jmmLongAttribute* atts,
- jint count,
- jlong* result);
-
- jobjectArray (JNICALL *FindCircularBlockedThreads) (JNIEnv *env);
-
- // Not used in JDK 6 or JDK 7
- jlong (JNICALL *GetThreadCpuTime) (JNIEnv *env, jlong thread_id);
-
- jobjectArray (JNICALL *GetVMGlobalNames) (JNIEnv *env);
- jint (JNICALL *GetVMGlobals) (JNIEnv *env,
- jobjectArray names,
- jmmVMGlobal *globals,
- jint count);
-
- jint (JNICALL *GetInternalThreadTimes) (JNIEnv *env,
- jobjectArray names,
- jlongArray times);
-
- jboolean (JNICALL *ResetStatistic) (JNIEnv *env,
- jvalue obj,
- jmmStatisticType type);
-
- void (JNICALL *SetPoolSensor) (JNIEnv *env,
- jobject pool,
- jmmThresholdType type,
- jobject sensor);
-
- jlong (JNICALL *SetPoolThreshold) (JNIEnv *env,
- jobject pool,
- jmmThresholdType type,
- jlong threshold);
- jobject (JNICALL *GetPoolCollectionUsage) (JNIEnv* env, jobject pool);
-
- jint (JNICALL *GetGCExtAttributeInfo) (JNIEnv *env,
- jobject mgr,
- jmmExtAttributeInfo *ext_info,
- jint count);
- void (JNICALL *GetLastGCStat) (JNIEnv *env,
- jobject mgr,
- jmmGCStat *gc_stat);
-
- jlong (JNICALL *GetThreadCpuTimeWithKind)
- (JNIEnv *env,
- jlong thread_id,
- jboolean user_sys_cpu_time);
- void (JNICALL *GetThreadCpuTimesWithKind)
- (JNIEnv *env,
- jlongArray ids,
- jlongArray timeArray,
- jboolean user_sys_cpu_time);
-
- jint (JNICALL *DumpHeap0) (JNIEnv *env,
- jstring outputfile,
- jboolean live);
- jobjectArray (JNICALL *FindDeadlocks) (JNIEnv *env,
- jboolean object_monitors_only);
- void (JNICALL *SetVMGlobal) (JNIEnv *env,
- jstring flag_name,
- jvalue new_value);
- void* reserved6;
- jobjectArray (JNICALL *DumpThreads) (JNIEnv *env,
- jlongArray ids,
- jboolean lockedMonitors,
- jboolean lockedSynchronizers,
- jint maxDepth);
- void (JNICALL *SetGCNotificationEnabled) (JNIEnv *env,
- jobject mgr,
- jboolean enabled);
- jobjectArray (JNICALL *GetDiagnosticCommands) (JNIEnv *env);
- void (JNICALL *GetDiagnosticCommandInfo)
- (JNIEnv *env,
- jobjectArray cmds,
- dcmdInfo *infoArray);
- void (JNICALL *GetDiagnosticCommandArgumentsInfo)
- (JNIEnv *env,
- jstring commandName,
- dcmdArgInfo *infoArray);
- jstring (JNICALL *ExecuteDiagnosticCommand)
- (JNIEnv *env,
- jstring command);
- void (JNICALL *SetDiagnosticFrameworkNotificationEnabled)
- (JNIEnv *env,
- jboolean enabled);
-} JmmInterface;
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif /* __cplusplus */
-
-#endif /* !_JAVA_JMM_H_ */
diff --git a/src/hotspot/share/services/management.cpp b/src/hotspot/share/services/management.cpp
index bc50816d83d..8273fec1b2e 100644
--- a/src/hotspot/share/services/management.cpp
+++ b/src/hotspot/share/services/management.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "jmm.h"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileBroker.hpp"
#include "memory/iterator.hpp"
@@ -41,12 +42,12 @@
#include "runtime/os.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "services/classLoadingService.hpp"
#include "services/diagnosticCommand.hpp"
#include "services/diagnosticFramework.hpp"
#include "services/writeableFlags.hpp"
#include "services/heapDumper.hpp"
-#include "services/jmm.h"
#include "services/lowMemoryDetector.hpp"
#include "services/gcNotifier.hpp"
#include "services/nmtDCmd.hpp"
@@ -1025,11 +1026,15 @@ static void do_thread_dump(ThreadDumpResult* dump_result,
// First get an array of threadObj handles.
// A JavaThread may terminate before we get the stack trace.
GrowableArray<instanceHandle>* thread_handle_array = new GrowableArray<instanceHandle>(num_threads);
+
{
- MutexLockerEx ml(Threads_lock);
+ // Need this ThreadsListHandle for converting Java thread IDs into
+ // threadObj handles; dump_result->set_t_list() is called in the
+ // VM op below so we can't use it yet.
+ ThreadsListHandle tlh;
for (int i = 0; i < num_threads; i++) {
jlong tid = ids_ah->long_at(i);
- JavaThread* jt = Threads::find_java_thread_from_java_tid(tid);
+ JavaThread* jt = tlh.list()->find_JavaThread_from_java_tid(tid);
oop thread_obj = (jt != NULL ? jt->threadObj() : (oop)NULL);
instanceHandle threadObj_h(THREAD, (instanceOop) thread_obj);
thread_handle_array->append(threadObj_h);
@@ -1101,22 +1106,21 @@ JVM_ENTRY(jint, jmm_GetThreadInfo(JNIEnv *env, jlongArray ids, jint maxDepth, jo
ThreadDumpResult dump_result(num_threads);
if (maxDepth == 0) {
- // no stack trace dumped - do not need to stop the world
- {
- MutexLockerEx ml(Threads_lock);
- for (int i = 0; i < num_threads; i++) {
- jlong tid = ids_ah->long_at(i);
- JavaThread* jt = Threads::find_java_thread_from_java_tid(tid);
- ThreadSnapshot* ts;
- if (jt == NULL) {
- // if the thread does not exist or now it is terminated,
- // create dummy snapshot
- ts = new ThreadSnapshot();
- } else {
- ts = new ThreadSnapshot(jt);
- }
- dump_result.add_thread_snapshot(ts);
+ // No stack trace to dump so we do not need to stop the world.
+ // Since we never do the VM op here we must set the threads list.
+ dump_result.set_t_list();
+ for (int i = 0; i < num_threads; i++) {
+ jlong tid = ids_ah->long_at(i);
+ JavaThread* jt = dump_result.t_list()->find_JavaThread_from_java_tid(tid);
+ ThreadSnapshot* ts;
+ if (jt == NULL) {
+ // if the thread does not exist or now it is terminated,
+ // create dummy snapshot
+ ts = new ThreadSnapshot();
+ } else {
+ ts = new ThreadSnapshot(dump_result.t_list(), jt);
}
+ dump_result.add_thread_snapshot(ts);
}
} else {
// obtain thread dump with the specific list of threads with stack trace
@@ -1131,6 +1135,7 @@ JVM_ENTRY(jint, jmm_GetThreadInfo(JNIEnv *env, jlongArray ids, jint maxDepth, jo
int num_snapshots = dump_result.num_snapshots();
assert(num_snapshots == num_threads, "Must match the number of thread snapshots");
+ assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
int index = 0;
for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; index++, ts = ts->next()) {
// For each thread, create an java/lang/management/ThreadInfo object
@@ -1196,6 +1201,7 @@ JVM_ENTRY(jobjectArray, jmm_DumpThreads(JNIEnv *env, jlongArray thread_ids, jboo
}
int num_snapshots = dump_result.num_snapshots();
+ assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
// create the result ThreadInfo[] object
InstanceKlass* ik = Management::java_lang_management_ThreadInfo_klass(CHECK_NULL);
@@ -1319,10 +1325,10 @@ JVM_ENTRY(jboolean, jmm_ResetStatistic(JNIEnv *env, jvalue obj, jmmStatisticType
}
// Look for the JavaThread of this given tid
- MutexLockerEx ml(Threads_lock);
+ JavaThreadIteratorWithHandle jtiwh;
if (tid == 0) {
// reset contention statistics for all threads if tid == 0
- for (JavaThread* java_thread = Threads::first(); java_thread != NULL; java_thread = java_thread->next()) {
+ for (; JavaThread *java_thread = jtiwh.next(); ) {
if (type == JMM_STAT_THREAD_CONTENTION_COUNT) {
ThreadService::reset_contention_count_stat(java_thread);
} else {
@@ -1331,7 +1337,7 @@ JVM_ENTRY(jboolean, jmm_ResetStatistic(JNIEnv *env, jvalue obj, jmmStatisticType
}
} else {
// reset contention statistics for a given thread
- JavaThread* java_thread = Threads::find_java_thread_from_java_tid(tid);
+ JavaThread* java_thread = jtiwh.list()->find_JavaThread_from_java_tid(tid);
if (java_thread == NULL) {
return false;
}
@@ -1399,8 +1405,8 @@ JVM_ENTRY(jlong, jmm_GetThreadCpuTime(JNIEnv *env, jlong thread_id))
// current thread
return os::current_thread_cpu_time();
} else {
- MutexLockerEx ml(Threads_lock);
- java_thread = Threads::find_java_thread_from_java_tid(thread_id);
+ ThreadsListHandle tlh;
+ java_thread = tlh.list()->find_JavaThread_from_java_tid(thread_id);
if (java_thread != NULL) {
return os::thread_cpu_time((Thread*) java_thread);
}
@@ -1649,6 +1655,7 @@ ThreadTimesClosure::ThreadTimesClosure(objArrayHandle names,
// Called with Threads_lock held
//
void ThreadTimesClosure::do_thread(Thread* thread) {
+ assert(Threads_lock->owned_by_self(), "Must hold Threads_lock");
assert(thread != NULL, "thread was NULL");
// exclude externally visible JavaThreads
@@ -2109,9 +2116,9 @@ JVM_ENTRY(void, jmm_GetThreadAllocatedMemory(JNIEnv *env, jlongArray ids,
"the given array of thread IDs");
}
- MutexLockerEx ml(Threads_lock);
+ ThreadsListHandle tlh;
for (int i = 0; i < num_threads; i++) {
- JavaThread* java_thread = Threads::find_java_thread_from_java_tid(ids_ah->long_at(i));
+ JavaThread* java_thread = tlh.list()->find_JavaThread_from_java_tid(ids_ah->long_at(i));
if (java_thread != NULL) {
sizeArray_h->long_at_put(i, java_thread->cooked_allocated_bytes());
}
@@ -2138,8 +2145,8 @@ JVM_ENTRY(jlong, jmm_GetThreadCpuTimeWithKind(JNIEnv *env, jlong thread_id, jboo
// current thread
return os::current_thread_cpu_time(user_sys_cpu_time != 0);
} else {
- MutexLockerEx ml(Threads_lock);
- java_thread = Threads::find_java_thread_from_java_tid(thread_id);
+ ThreadsListHandle tlh;
+ java_thread = tlh.list()->find_JavaThread_from_java_tid(thread_id);
if (java_thread != NULL) {
return os::thread_cpu_time((Thread*) java_thread, user_sys_cpu_time != 0);
}
@@ -2180,9 +2187,9 @@ JVM_ENTRY(void, jmm_GetThreadCpuTimesWithKind(JNIEnv *env, jlongArray ids,
"the given array of thread IDs");
}
- MutexLockerEx ml(Threads_lock);
+ ThreadsListHandle tlh;
for (int i = 0; i < num_threads; i++) {
- JavaThread* java_thread = Threads::find_java_thread_from_java_tid(ids_ah->long_at(i));
+ JavaThread* java_thread = tlh.list()->find_JavaThread_from_java_tid(ids_ah->long_at(i));
if (java_thread != NULL) {
timeArray_h->long_at_put(i, os::thread_cpu_time((Thread*)java_thread,
user_sys_cpu_time != 0));
diff --git a/src/hotspot/share/services/management.hpp b/src/hotspot/share/services/management.hpp
index f1168166281..4f475060d67 100644
--- a/src/hotspot/share/services/management.hpp
+++ b/src/hotspot/share/services/management.hpp
@@ -25,10 +25,10 @@
#ifndef SHARE_VM_SERVICES_MANAGEMENT_HPP
#define SHARE_VM_SERVICES_MANAGEMENT_HPP
+#include "jmm.h"
#include "memory/allocation.hpp"
#include "runtime/handles.hpp"
#include "runtime/timer.hpp"
-#include "services/jmm.h"
class OopClosure;
class ThreadSnapshot;
diff --git a/src/hotspot/share/services/memoryManager.cpp b/src/hotspot/share/services/memoryManager.cpp
index 8c6fc6d6f1a..5bc16bfd837 100644
--- a/src/hotspot/share/services/memoryManager.cpp
+++ b/src/hotspot/share/services/memoryManager.cpp
@@ -37,7 +37,7 @@
#include "services/gcNotifier.hpp"
#include "utilities/dtrace.hpp"
-MemoryManager::MemoryManager() {
+MemoryManager::MemoryManager(const char* name) : _name(name) {
_num_pools = 0;
(void)const_cast<instanceOop&>(_memory_mgr_obj = instanceOop(NULL));
}
@@ -52,43 +52,11 @@ void MemoryManager::add_pool(MemoryPool* pool) {
}
MemoryManager* MemoryManager::get_code_cache_memory_manager() {
- return (MemoryManager*) new CodeCacheMemoryManager();
+ return new MemoryManager("CodeCacheManager");
}
MemoryManager* MemoryManager::get_metaspace_memory_manager() {
- return (MemoryManager*) new MetaspaceMemoryManager();
-}
-
-GCMemoryManager* MemoryManager::get_copy_memory_manager() {
- return (GCMemoryManager*) new CopyMemoryManager();
-}
-
-GCMemoryManager* MemoryManager::get_msc_memory_manager() {
- return (GCMemoryManager*) new MSCMemoryManager();
-}
-
-GCMemoryManager* MemoryManager::get_parnew_memory_manager() {
- return (GCMemoryManager*) new ParNewMemoryManager();
-}
-
-GCMemoryManager* MemoryManager::get_cms_memory_manager() {
- return (GCMemoryManager*) new CMSMemoryManager();
-}
-
-GCMemoryManager* MemoryManager::get_psScavenge_memory_manager() {
- return (GCMemoryManager*) new PSScavengeMemoryManager();
-}
-
-GCMemoryManager* MemoryManager::get_psMarkSweep_memory_manager() {
- return (GCMemoryManager*) new PSMarkSweepMemoryManager();
-}
-
-GCMemoryManager* MemoryManager::get_g1YoungGen_memory_manager() {
- return (GCMemoryManager*) new G1YoungGenMemoryManager();
-}
-
-GCMemoryManager* MemoryManager::get_g1OldGen_memory_manager() {
- return (GCMemoryManager*) new G1OldGenMemoryManager();
+ return new MemoryManager("Metaspace Manager");
}
instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
@@ -203,7 +171,8 @@ void GCStatInfo::clear() {
}
-GCMemoryManager::GCMemoryManager() : MemoryManager() {
+GCMemoryManager::GCMemoryManager(const char* name, const char* gc_end_message) :
+ MemoryManager(name), _gc_end_message(gc_end_message) {
_num_collections = 0;
_last_gc_stat = NULL;
_last_gc_lock = new Mutex(Mutex::leaf, "_last_gc_lock", true,
@@ -308,9 +277,7 @@ void GCMemoryManager::gc_end(bool recordPostGCUsage,
}
if (is_notification_enabled()) {
- bool isMajorGC = this == MemoryService::get_major_gc_manager();
- GCNotifier::pushNotification(this, isMajorGC ? "end of major GC" : "end of minor GC",
- GCCause::to_string(cause));
+ GCNotifier::pushNotification(this, _gc_end_message, GCCause::to_string(cause));
}
}
}
diff --git a/src/hotspot/share/services/memoryManager.hpp b/src/hotspot/share/services/memoryManager.hpp
index f2a7d4c3420..7d8ef854813 100644
--- a/src/hotspot/share/services/memoryManager.hpp
+++ b/src/hotspot/share/services/memoryManager.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,10 @@
#ifndef SHARE_VM_SERVICES_MEMORYMANAGER_HPP
#define SHARE_VM_SERVICES_MEMORYMANAGER_HPP
+#include "gc/shared/gcCause.hpp"
#include "memory/allocation.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "runtime/handles.hpp"
#include "runtime/timer.hpp"
#include "services/memoryUsage.hpp"
@@ -49,11 +52,13 @@ private:
MemoryPool* _pools[max_num_pools];
int _num_pools;
+ const char* _name;
+
protected:
volatile instanceOop _memory_mgr_obj;
public:
- MemoryManager();
+ MemoryManager(const char* name);
int num_memory_pools() const { return _num_pools; }
MemoryPool* get_memory_pool(int index) {
@@ -67,7 +72,8 @@ public:
virtual instanceOop get_memory_manager_instance(TRAPS);
virtual bool is_gc_memory_manager() { return false; }
- virtual const char* name() = 0;
+
+ const char* name() const { return _name; }
// GC support
void oops_do(OopClosure* f);
@@ -75,29 +81,6 @@ public:
// Static factory methods to get a memory manager of a specific type
static MemoryManager* get_code_cache_memory_manager();
static MemoryManager* get_metaspace_memory_manager();
- static GCMemoryManager* get_copy_memory_manager();
- static GCMemoryManager* get_msc_memory_manager();
- static GCMemoryManager* get_parnew_memory_manager();
- static GCMemoryManager* get_cms_memory_manager();
- static GCMemoryManager* get_psScavenge_memory_manager();
- static GCMemoryManager* get_psMarkSweep_memory_manager();
- static GCMemoryManager* get_g1YoungGen_memory_manager();
- static GCMemoryManager* get_g1OldGen_memory_manager();
-};
-
-class CodeCacheMemoryManager : public MemoryManager {
-private:
-public:
- CodeCacheMemoryManager() : MemoryManager() {}
-
- const char* name() { return "CodeCacheManager"; }
-};
-
-class MetaspaceMemoryManager : public MemoryManager {
-public:
- MetaspaceMemoryManager() : MemoryManager() {}
-
- const char* name() { return "Metaspace Manager"; }
};
class GCStatInfo : public ResourceObj {
@@ -159,8 +142,9 @@ private:
GCStatInfo* _current_gc_stat;
int _num_gc_threads;
volatile bool _notification_enabled;
+ const char* _gc_end_message;
public:
- GCMemoryManager();
+ GCMemoryManager(const char* name, const char* gc_end_message);
~GCMemoryManager();
void initialize_gc_stat_info();
@@ -186,71 +170,4 @@ public:
bool is_notification_enabled() { return _notification_enabled; }
};
-// These subclasses of GCMemoryManager are defined to include
-// GC-specific information.
-// TODO: Add GC-specific information
-class CopyMemoryManager : public GCMemoryManager {
-private:
-public:
- CopyMemoryManager() : GCMemoryManager() {}
-
- const char* name() { return "Copy"; }
-};
-
-class MSCMemoryManager : public GCMemoryManager {
-private:
-public:
- MSCMemoryManager() : GCMemoryManager() {}
-
- const char* name() { return "MarkSweepCompact"; }
-};
-
-class ParNewMemoryManager : public GCMemoryManager {
-private:
-public:
- ParNewMemoryManager() : GCMemoryManager() {}
-
- const char* name() { return "ParNew"; }
-};
-
-class CMSMemoryManager : public GCMemoryManager {
-private:
-public:
- CMSMemoryManager() : GCMemoryManager() {}
-
- const char* name() { return "ConcurrentMarkSweep";}
-};
-
-class PSScavengeMemoryManager : public GCMemoryManager {
-private:
-public:
- PSScavengeMemoryManager() : GCMemoryManager() {}
-
- const char* name() { return "PS Scavenge"; }
-};
-
-class PSMarkSweepMemoryManager : public GCMemoryManager {
-private:
-public:
- PSMarkSweepMemoryManager() : GCMemoryManager() {}
-
- const char* name() { return "PS MarkSweep"; }
-};
-
-class G1YoungGenMemoryManager : public GCMemoryManager {
-private:
-public:
- G1YoungGenMemoryManager() : GCMemoryManager() {}
-
- const char* name() { return "G1 Young Generation"; }
-};
-
-class G1OldGenMemoryManager : public GCMemoryManager {
-private:
-public:
- G1OldGenMemoryManager() : GCMemoryManager() {}
-
- const char* name() { return "G1 Old Generation"; }
-};
-
#endif // SHARE_VM_SERVICES_MEMORYMANAGER_HPP
diff --git a/src/hotspot/share/services/memoryPool.cpp b/src/hotspot/share/services/memoryPool.cpp
index cec78ae67a4..8248b472225 100644
--- a/src/hotspot/share/services/memoryPool.cpp
+++ b/src/hotspot/share/services/memoryPool.cpp
@@ -25,8 +25,6 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
-#include "gc/serial/defNewGeneration.hpp"
-#include "gc/shared/space.hpp"
#include "memory/metaspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
@@ -38,9 +36,6 @@
#include "services/memoryPool.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#endif
MemoryPool::MemoryPool(const char* name,
PoolType type,
@@ -182,95 +177,6 @@ void MemoryPool::oops_do(OopClosure* f) {
}
}
-ContiguousSpacePool::ContiguousSpacePool(ContiguousSpace* space,
- const char* name,
- PoolType type,
- size_t max_size,
- bool support_usage_threshold) :
- CollectedMemoryPool(name, type, space->capacity(), max_size,
- support_usage_threshold), _space(space) {
-}
-
-size_t ContiguousSpacePool::used_in_bytes() {
- return space()->used();
-}
-
-MemoryUsage ContiguousSpacePool::get_memory_usage() {
- size_t maxSize = (available_for_allocation() ? max_size() : 0);
- size_t used = used_in_bytes();
- size_t committed = _space->capacity();
-
- return MemoryUsage(initial_size(), used, committed, maxSize);
-}
-
-SurvivorContiguousSpacePool::SurvivorContiguousSpacePool(DefNewGeneration* young_gen,
- const char* name,
- PoolType type,
- size_t max_size,
- bool support_usage_threshold) :
- CollectedMemoryPool(name, type, young_gen->from()->capacity(), max_size,
- support_usage_threshold), _young_gen(young_gen) {
-}
-
-size_t SurvivorContiguousSpacePool::used_in_bytes() {
- return _young_gen->from()->used();
-}
-
-size_t SurvivorContiguousSpacePool::committed_in_bytes() {
- return _young_gen->from()->capacity();
-}
-
-MemoryUsage SurvivorContiguousSpacePool::get_memory_usage() {
- size_t maxSize = (available_for_allocation() ? max_size() : 0);
- size_t used = used_in_bytes();
- size_t committed = committed_in_bytes();
-
- return MemoryUsage(initial_size(), used, committed, maxSize);
-}
-
-#if INCLUDE_ALL_GCS
-CompactibleFreeListSpacePool::CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
- const char* name,
- PoolType type,
- size_t max_size,
- bool support_usage_threshold) :
- CollectedMemoryPool(name, type, space->capacity(), max_size,
- support_usage_threshold), _space(space) {
-}
-
-size_t CompactibleFreeListSpacePool::used_in_bytes() {
- return _space->used();
-}
-
-MemoryUsage CompactibleFreeListSpacePool::get_memory_usage() {
- size_t maxSize = (available_for_allocation() ? max_size() : 0);
- size_t used = used_in_bytes();
- size_t committed = _space->capacity();
-
- return MemoryUsage(initial_size(), used, committed, maxSize);
-}
-#endif // INCLUDE_ALL_GCS
-
-GenerationPool::GenerationPool(Generation* gen,
- const char* name,
- PoolType type,
- bool support_usage_threshold) :
- CollectedMemoryPool(name, type, gen->capacity(), gen->max_capacity(),
- support_usage_threshold), _gen(gen) {
-}
-
-size_t GenerationPool::used_in_bytes() {
- return _gen->used();
-}
-
-MemoryUsage GenerationPool::get_memory_usage() {
- size_t used = used_in_bytes();
- size_t committed = _gen->capacity();
- size_t maxSize = (available_for_allocation() ? max_size() : 0);
-
- return MemoryUsage(initial_size(), used, committed, maxSize);
-}
-
CodeHeapPool::CodeHeapPool(CodeHeap* codeHeap, const char* name, bool support_usage_threshold) :
MemoryPool(name, NonHeap, codeHeap->capacity(), codeHeap->max_capacity(),
support_usage_threshold, false), _codeHeap(codeHeap) {
diff --git a/src/hotspot/share/services/memoryPool.hpp b/src/hotspot/share/services/memoryPool.hpp
index 0144f61786c..b1c21580b43 100644
--- a/src/hotspot/share/services/memoryPool.hpp
+++ b/src/hotspot/share/services/memoryPool.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,12 +37,8 @@
// both heap and non-heap memory.
// Forward declaration
-class CompactibleFreeListSpace;
-class ContiguousSpace;
class MemoryManager;
class SensorInfo;
-class Generation;
-class DefNewGeneration;
class ThresholdSupport;
class MemoryPool : public CHeapObj<mtInternal> {
@@ -144,67 +140,11 @@ class MemoryPool : public CHeapObj<mtInternal> {
class CollectedMemoryPool : public MemoryPool {
public:
- CollectedMemoryPool(const char* name, PoolType type, size_t init_size, size_t max_size, bool support_usage_threshold) :
- MemoryPool(name, type, init_size, max_size, support_usage_threshold, true) {};
+ CollectedMemoryPool(const char* name, size_t init_size, size_t max_size, bool support_usage_threshold) :
+ MemoryPool(name, MemoryPool::Heap, init_size, max_size, support_usage_threshold, true) {};
bool is_collected_pool() { return true; }
};
-class ContiguousSpacePool : public CollectedMemoryPool {
-private:
- ContiguousSpace* _space;
-
-public:
- ContiguousSpacePool(ContiguousSpace* space, const char* name, PoolType type, size_t max_size, bool support_usage_threshold);
-
- ContiguousSpace* space() { return _space; }
- MemoryUsage get_memory_usage();
- size_t used_in_bytes();
-};
-
-class SurvivorContiguousSpacePool : public CollectedMemoryPool {
-private:
- DefNewGeneration* _young_gen;
-
-public:
- SurvivorContiguousSpacePool(DefNewGeneration* young_gen,
- const char* name,
- PoolType type,
- size_t max_size,
- bool support_usage_threshold);
-
- MemoryUsage get_memory_usage();
-
- size_t used_in_bytes();
- size_t committed_in_bytes();
-};
-
-#if INCLUDE_ALL_GCS
-class CompactibleFreeListSpacePool : public CollectedMemoryPool {
-private:
- CompactibleFreeListSpace* _space;
-public:
- CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
- const char* name,
- PoolType type,
- size_t max_size,
- bool support_usage_threshold);
-
- MemoryUsage get_memory_usage();
- size_t used_in_bytes();
-};
-#endif // INCLUDE_ALL_GCS
-
-
-class GenerationPool : public CollectedMemoryPool {
-private:
- Generation* _gen;
-public:
- GenerationPool(Generation* gen, const char* name, PoolType type, bool support_usage_threshold);
-
- MemoryUsage get_memory_usage();
- size_t used_in_bytes();
-};
-
class CodeHeapPool: public MemoryPool {
private:
CodeHeap* _codeHeap;
diff --git a/src/hotspot/share/services/memoryService.cpp b/src/hotspot/share/services/memoryService.cpp
index d579cd91738..e96e2a0d151 100644
--- a/src/hotspot/share/services/memoryService.cpp
+++ b/src/hotspot/share/services/memoryService.cpp
@@ -25,13 +25,7 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
-#include "gc/parallel/mutableSpace.hpp"
-#include "gc/serial/defNewGeneration.hpp"
-#include "gc/serial/tenuredGeneration.hpp"
-#include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/generation.hpp"
-#include "gc/shared/generationSpec.hpp"
+#include "gc/shared/collectedHeap.hpp"
#include "logging/logConfiguration.hpp"
#include "memory/heap.hpp"
#include "memory/memRegion.hpp"
@@ -46,24 +40,12 @@
#include "services/memoryService.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/psOldGen.hpp"
-#include "gc/parallel/psYoungGen.hpp"
-#include "services/g1MemoryPool.hpp"
-#include "services/psMemoryPool.hpp"
-#endif // INCLUDE_ALL_GCS
GrowableArray<MemoryPool*>* MemoryService::_pools_list =
new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryPool*>(init_pools_list_size, true);
GrowableArray<MemoryManager*>* MemoryService::_managers_list =
new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryManager*>(init_managers_list_size, true);
-GCMemoryManager* MemoryService::_minor_gc_manager = NULL;
-GCMemoryManager* MemoryService::_major_gc_manager = NULL;
MemoryManager* MemoryService::_code_cache_manager = NULL;
GrowableArray* MemoryService::_code_heap_pools =
new (ResourceObj::C_HEAP, mtInternal) GrowableArray(init_code_heap_pools_size, true);
@@ -84,311 +66,28 @@ void GcThreadCountClosure::do_thread(Thread* thread) {
}
void MemoryService::set_universe_heap(CollectedHeap* heap) {
- CollectedHeap::Name kind = heap->kind();
- switch (kind) {
- case CollectedHeap::SerialHeap :
- case CollectedHeap::CMSHeap : {
- add_gen_collected_heap_info(GenCollectedHeap::heap());
- break;
- }
-#if INCLUDE_ALL_GCS
- case CollectedHeap::ParallelScavengeHeap : {
- add_parallel_scavenge_heap_info(ParallelScavengeHeap::heap());
- break;
- }
- case CollectedHeap::G1CollectedHeap : {
- add_g1_heap_info(G1CollectedHeap::heap());
- break;
- }
-#endif // INCLUDE_ALL_GCS
- default: {
- guarantee(false, "Unrecognized kind of heap");
- }
- }
+ ResourceMark rm; // For internal allocations in GrowableArray.
+
+ GrowableArray<MemoryPool*> gc_mem_pools = heap->memory_pools();
+ _pools_list->appendAll(&gc_mem_pools);
// set the GC thread count
GcThreadCountClosure gctcc;
heap->gc_threads_do(&gctcc);
int count = gctcc.count();
- if (count > 0) {
- _minor_gc_manager->set_num_gc_threads(count);
- _major_gc_manager->set_num_gc_threads(count);
- }
- // All memory pools and memory managers are initialized.
- //
- _minor_gc_manager->initialize_gc_stat_info();
- _major_gc_manager->initialize_gc_stat_info();
-}
+ GrowableArray<GCMemoryManager*> gc_memory_managers = heap->memory_managers();
+ for (int i = 0; i < gc_memory_managers.length(); i++) {
+ GCMemoryManager* gc_manager = gc_memory_managers.at(i);
-// Add memory pools for GenCollectedHeap
-// This function currently only supports two generations collected heap.
-// The collector for GenCollectedHeap will have two memory managers.
-void MemoryService::add_gen_collected_heap_info(GenCollectedHeap* heap) {
- CollectorPolicy* policy = heap->collector_policy();
-
- assert(policy->is_generation_policy(), "Only support two generations");
- GenCollectorPolicy* gen_policy = policy->as_generation_policy();
- if (gen_policy != NULL) {
- Generation::Name kind = gen_policy->young_gen_spec()->name();
- switch (kind) {
- case Generation::DefNew:
- _minor_gc_manager = MemoryManager::get_copy_memory_manager();
- break;
-#if INCLUDE_ALL_GCS
- case Generation::ParNew:
- _minor_gc_manager = MemoryManager::get_parnew_memory_manager();
- break;
-#endif // INCLUDE_ALL_GCS
- default:
- guarantee(false, "Unrecognized generation spec");
- break;
- }
- if (policy->is_mark_sweep_policy()) {
- _major_gc_manager = MemoryManager::get_msc_memory_manager();
-#if INCLUDE_ALL_GCS
- } else if (policy->is_concurrent_mark_sweep_policy()) {
- _major_gc_manager = MemoryManager::get_cms_memory_manager();
-#endif // INCLUDE_ALL_GCS
- } else {
- guarantee(false, "Unknown two-gen policy");
- }
- } else {
- guarantee(false, "Non two-gen policy");
- }
- _managers_list->append(_minor_gc_manager);
- _managers_list->append(_major_gc_manager);
-
- add_generation_memory_pool(heap->young_gen(), _major_gc_manager, _minor_gc_manager);
- add_generation_memory_pool(heap->old_gen(), _major_gc_manager);
-}
-
-#if INCLUDE_ALL_GCS
-// Add memory pools for ParallelScavengeHeap
-// This function currently only supports two generations collected heap.
-// The collector for ParallelScavengeHeap will have two memory managers.
-void MemoryService::add_parallel_scavenge_heap_info(ParallelScavengeHeap* heap) {
- // Two managers to keep statistics about _minor_gc_manager and _major_gc_manager GC.
- _minor_gc_manager = MemoryManager::get_psScavenge_memory_manager();
- _major_gc_manager = MemoryManager::get_psMarkSweep_memory_manager();
- _managers_list->append(_minor_gc_manager);
- _managers_list->append(_major_gc_manager);
-
- add_psYoung_memory_pool(heap->young_gen(), _major_gc_manager, _minor_gc_manager);
- add_psOld_memory_pool(heap->old_gen(), _major_gc_manager);
-}
-
-void MemoryService::add_g1_heap_info(G1CollectedHeap* g1h) {
- assert(UseG1GC, "sanity");
-
- _minor_gc_manager = MemoryManager::get_g1YoungGen_memory_manager();
- _major_gc_manager = MemoryManager::get_g1OldGen_memory_manager();
- _managers_list->append(_minor_gc_manager);
- _managers_list->append(_major_gc_manager);
-
- add_g1YoungGen_memory_pool(g1h, _major_gc_manager, _minor_gc_manager);
- add_g1OldGen_memory_pool(g1h, _major_gc_manager);
-}
-#endif // INCLUDE_ALL_GCS
-
-MemoryPool* MemoryService::add_gen(Generation* gen,
- const char* name,
- bool is_heap,
- bool support_usage_threshold) {
-
- MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
- GenerationPool* pool = new GenerationPool(gen, name, type, support_usage_threshold);
- _pools_list->append(pool);
- return (MemoryPool*) pool;
-}
-
-MemoryPool* MemoryService::add_space(ContiguousSpace* space,
- const char* name,
- bool is_heap,
- size_t max_size,
- bool support_usage_threshold) {
- MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
- ContiguousSpacePool* pool = new ContiguousSpacePool(space, name, type, max_size, support_usage_threshold);
-
- _pools_list->append(pool);
- return (MemoryPool*) pool;
-}
-
-MemoryPool* MemoryService::add_survivor_spaces(DefNewGeneration* young_gen,
- const char* name,
- bool is_heap,
- size_t max_size,
- bool support_usage_threshold) {
- MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
- SurvivorContiguousSpacePool* pool = new SurvivorContiguousSpacePool(young_gen, name, type, max_size, support_usage_threshold);
-
- _pools_list->append(pool);
- return (MemoryPool*) pool;
-}
-
-#if INCLUDE_ALL_GCS
-MemoryPool* MemoryService::add_cms_space(CompactibleFreeListSpace* space,
- const char* name,
- bool is_heap,
- size_t max_size,
- bool support_usage_threshold) {
- MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
- CompactibleFreeListSpacePool* pool = new CompactibleFreeListSpacePool(space, name, type, max_size, support_usage_threshold);
- _pools_list->append(pool);
- return (MemoryPool*) pool;
-}
-#endif // INCLUDE_ALL_GCS
-
-// Add memory pool(s) for one generation
-void MemoryService::add_generation_memory_pool(Generation* gen,
- MemoryManager* major_mgr,
- MemoryManager* minor_mgr) {
- guarantee(gen != NULL, "No generation for memory pool");
- Generation::Name kind = gen->kind();
- int index = _pools_list->length();
-
- switch (kind) {
- case Generation::DefNew: {
- assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
- DefNewGeneration* young_gen = (DefNewGeneration*) gen;
- // Add a memory pool for each space and young gen doesn't
- // support low memory detection as it is expected to get filled up.
- MemoryPool* eden = add_space(young_gen->eden(),
- "Eden Space",
- true, /* is_heap */
- young_gen->max_eden_size(),
- false /* support_usage_threshold */);
- MemoryPool* survivor = add_survivor_spaces(young_gen,
- "Survivor Space",
- true, /* is_heap */
- young_gen->max_survivor_size(),
- false /* support_usage_threshold */);
- break;
- }
-
-#if INCLUDE_ALL_GCS
- case Generation::ParNew:
- {
- assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
- // Add a memory pool for each space and young gen doesn't
- // support low memory detection as it is expected to get filled up.
- ParNewGeneration* parnew_gen = (ParNewGeneration*) gen;
- MemoryPool* eden = add_space(parnew_gen->eden(),
- "Par Eden Space",
- true /* is_heap */,
- parnew_gen->max_eden_size(),
- false /* support_usage_threshold */);
- MemoryPool* survivor = add_survivor_spaces(parnew_gen,
- "Par Survivor Space",
- true, /* is_heap */
- parnew_gen->max_survivor_size(),
- false /* support_usage_threshold */);
-
- break;
- }
-#endif // INCLUDE_ALL_GCS
-
- case Generation::MarkSweepCompact: {
- assert(major_mgr != NULL && minor_mgr == NULL, "Should have only one manager");
- add_gen(gen,
- "Tenured Gen",
- true, /* is_heap */
- true /* support_usage_threshold */);
- break;
- }
-
-#if INCLUDE_ALL_GCS
- case Generation::ConcurrentMarkSweep:
- {
- assert(major_mgr != NULL && minor_mgr == NULL, "Should have only one manager");
- ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*) gen;
- MemoryPool* pool = add_cms_space(cms->cmsSpace(),
- "CMS Old Gen",
- true, /* is_heap */
- cms->reserved().byte_size(),
- true /* support_usage_threshold */);
- break;
- }
-#endif // INCLUDE_ALL_GCS
-
- default:
- assert(false, "should not reach here");
- // no memory pool added for others
- break;
- }
-
- assert(major_mgr != NULL, "Should have at least one manager");
- // Link managers and the memory pools together
- for (int i = index; i < _pools_list->length(); i++) {
- MemoryPool* pool = _pools_list->at(i);
- major_mgr->add_pool(pool);
- if (minor_mgr != NULL) {
- minor_mgr->add_pool(pool);
+ if (count > 0) {
+ gc_manager->set_num_gc_threads(count);
}
+ gc_manager->initialize_gc_stat_info();
+ _managers_list->append(gc_manager);
}
}
-
-#if INCLUDE_ALL_GCS
-void MemoryService::add_psYoung_memory_pool(PSYoungGen* young_gen, MemoryManager* major_mgr, MemoryManager* minor_mgr) {
- assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
-
- // Add a memory pool for each space and young gen doesn't
- // support low memory detection as it is expected to get filled up.
- EdenMutableSpacePool* eden = new EdenMutableSpacePool(young_gen,
- young_gen->eden_space(),
- "PS Eden Space",
- MemoryPool::Heap,
- false /* support_usage_threshold */);
-
- SurvivorMutableSpacePool* survivor = new SurvivorMutableSpacePool(young_gen,
- "PS Survivor Space",
- MemoryPool::Heap,
- false /* support_usage_threshold */);
-
- major_mgr->add_pool(eden);
- major_mgr->add_pool(survivor);
- minor_mgr->add_pool(eden);
- minor_mgr->add_pool(survivor);
- _pools_list->append(eden);
- _pools_list->append(survivor);
-}
-
-void MemoryService::add_psOld_memory_pool(PSOldGen* old_gen, MemoryManager* mgr) {
- PSGenerationPool* old_gen_pool = new PSGenerationPool(old_gen,
- "PS Old Gen",
- MemoryPool::Heap,
- true /* support_usage_threshold */);
- mgr->add_pool(old_gen_pool);
- _pools_list->append(old_gen_pool);
-}
-
-void MemoryService::add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
- MemoryManager* major_mgr,
- MemoryManager* minor_mgr) {
- assert(major_mgr != NULL && minor_mgr != NULL, "should have two managers");
-
- G1EdenPool* eden = new G1EdenPool(g1h);
- G1SurvivorPool* survivor = new G1SurvivorPool(g1h);
-
- major_mgr->add_pool(eden);
- major_mgr->add_pool(survivor);
- minor_mgr->add_pool(eden);
- minor_mgr->add_pool(survivor);
- _pools_list->append(eden);
- _pools_list->append(survivor);
-}
-
-void MemoryService::add_g1OldGen_memory_pool(G1CollectedHeap* g1h,
- MemoryManager* mgr) {
- assert(mgr != NULL, "should have one manager");
-
- G1OldGenPool* old_gen = new G1OldGenPool(g1h);
- mgr->add_pool(old_gen);
- _pools_list->append(old_gen);
-}
-#endif // INCLUDE_ALL_GCS
-
void MemoryService::add_code_heap_memory_pool(CodeHeap* heap, const char* name) {
// Create new memory pool for this heap
MemoryPool* code_heap_pool = new CodeHeapPool(heap, name, true /* support_usage_threshold */);
@@ -463,18 +162,11 @@ void MemoryService::track_memory_pool_usage(MemoryPool* pool) {
}
}
-void MemoryService::gc_begin(bool fullGC, bool recordGCBeginTime,
+void MemoryService::gc_begin(GCMemoryManager* manager, bool recordGCBeginTime,
bool recordAccumulatedGCTime,
bool recordPreGCUsage, bool recordPeakUsage) {
- GCMemoryManager* mgr;
- if (fullGC) {
- mgr = _major_gc_manager;
- } else {
- mgr = _minor_gc_manager;
- }
- assert(mgr->is_gc_memory_manager(), "Sanity check");
- mgr->gc_begin(recordGCBeginTime, recordPreGCUsage, recordAccumulatedGCTime);
+ manager->gc_begin(recordGCBeginTime, recordPreGCUsage, recordAccumulatedGCTime);
// Track the peak memory usage when GC begins
if (recordPeakUsage) {
@@ -485,22 +177,13 @@ void MemoryService::gc_begin(bool fullGC, bool recordGCBeginTime,
}
}
-void MemoryService::gc_end(bool fullGC, bool recordPostGCUsage,
+void MemoryService::gc_end(GCMemoryManager* manager, bool recordPostGCUsage,
bool recordAccumulatedGCTime,
bool recordGCEndTime, bool countCollection,
GCCause::Cause cause) {
-
- GCMemoryManager* mgr;
- if (fullGC) {
- mgr = (GCMemoryManager*) _major_gc_manager;
- } else {
- mgr = (GCMemoryManager*) _minor_gc_manager;
- }
- assert(mgr->is_gc_memory_manager(), "Sanity check");
-
// register the GC end statistics and memory usage
- mgr->gc_end(recordPostGCUsage, recordAccumulatedGCTime, recordGCEndTime,
- countCollection, cause);
+ manager->gc_end(recordPostGCUsage, recordAccumulatedGCTime, recordGCEndTime,
+ countCollection, cause);
}
void MemoryService::oops_do(OopClosure* f) {
@@ -551,36 +234,7 @@ Handle MemoryService::create_MemoryUsage_obj(MemoryUsage usage, TRAPS) {
return obj;
}
-// GC manager type depends on the type of Generation. Depending on the space
-// availability and vm options the gc uses major gc manager or minor gc
-// manager or both. The type of gc manager depends on the generation kind.
-// For DefNew and ParNew generation doing scavenge gc uses minor gc manager (so
-// _fullGC is set to false ) and for other generation kinds doing
-// mark-sweep-compact uses major gc manager (so _fullGC is set to true).
-TraceMemoryManagerStats::TraceMemoryManagerStats(Generation::Name kind, GCCause::Cause cause) {
- switch (kind) {
- case Generation::DefNew:
-#if INCLUDE_ALL_GCS
- case Generation::ParNew:
-#endif // INCLUDE_ALL_GCS
- _fullGC = false;
- break;
- case Generation::MarkSweepCompact:
-#if INCLUDE_ALL_GCS
- case Generation::ConcurrentMarkSweep:
-#endif // INCLUDE_ALL_GCS
- _fullGC = true;
- break;
- default:
- _fullGC = false;
- assert(false, "Unrecognized gc generation kind.");
- }
- // this has to be called in a stop the world pause and represent
- // an entire gc pause, start to finish:
- initialize(_fullGC, cause, true, true, true, true, true, true, true);
-}
-
-TraceMemoryManagerStats::TraceMemoryManagerStats(bool fullGC,
+TraceMemoryManagerStats::TraceMemoryManagerStats(GCMemoryManager* gc_memory_manager,
GCCause::Cause cause,
bool recordGCBeginTime,
bool recordPreGCUsage,
@@ -589,14 +243,14 @@ TraceMemoryManagerStats::TraceMemoryManagerStats(bool fullGC,
bool recordAccumulatedGCTime,
bool recordGCEndTime,
bool countCollection) {
- initialize(fullGC, cause, recordGCBeginTime, recordPreGCUsage, recordPeakUsage,
+ initialize(gc_memory_manager, cause, recordGCBeginTime, recordPreGCUsage, recordPeakUsage,
recordPostGCUsage, recordAccumulatedGCTime, recordGCEndTime,
countCollection);
}
// for a subclass to create then initialize an instance before invoking
// the MemoryService
-void TraceMemoryManagerStats::initialize(bool fullGC,
+void TraceMemoryManagerStats::initialize(GCMemoryManager* gc_memory_manager,
GCCause::Cause cause,
bool recordGCBeginTime,
bool recordPreGCUsage,
@@ -605,7 +259,7 @@ void TraceMemoryManagerStats::initialize(bool fullGC,
bool recordAccumulatedGCTime,
bool recordGCEndTime,
bool countCollection) {
- _fullGC = fullGC;
+ _gc_memory_manager = gc_memory_manager;
_recordGCBeginTime = recordGCBeginTime;
_recordPreGCUsage = recordPreGCUsage;
_recordPeakUsage = recordPeakUsage;
@@ -615,11 +269,11 @@ void TraceMemoryManagerStats::initialize(bool fullGC,
_countCollection = countCollection;
_cause = cause;
- MemoryService::gc_begin(_fullGC, _recordGCBeginTime, _recordAccumulatedGCTime,
+ MemoryService::gc_begin(_gc_memory_manager, _recordGCBeginTime, _recordAccumulatedGCTime,
_recordPreGCUsage, _recordPeakUsage);
}
TraceMemoryManagerStats::~TraceMemoryManagerStats() {
- MemoryService::gc_end(_fullGC, _recordPostGCUsage, _recordAccumulatedGCTime,
+ MemoryService::gc_end(_gc_memory_manager, _recordPostGCUsage, _recordAccumulatedGCTime,
_recordGCEndTime, _countCollection, _cause);
}
diff --git a/src/hotspot/share/services/memoryService.hpp b/src/hotspot/share/services/memoryService.hpp
index 86a6a95fa90..4ebc0813f23 100644
--- a/src/hotspot/share/services/memoryService.hpp
+++ b/src/hotspot/share/services/memoryService.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#define SHARE_VM_SERVICES_MEMORYSERVICE_HPP
#include "gc/shared/gcCause.hpp"
-#include "gc/shared/generation.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "runtime/handles.hpp"
@@ -37,16 +36,7 @@ class MemoryPool;
class MemoryManager;
class GCMemoryManager;
class CollectedHeap;
-class Generation;
-class DefNewGeneration;
-class PSYoungGen;
-class PSOldGen;
class CodeHeap;
-class ContiguousSpace;
-class CompactibleFreeListSpace;
-class GenCollectedHeap;
-class ParallelScavengeHeap;
-class G1CollectedHeap;
// VM Monitoring and Management Support
@@ -61,10 +51,6 @@ private:
static GrowableArray<MemoryPool*>* _pools_list;
static GrowableArray<MemoryManager*>* _managers_list;
- // memory managers for minor and major GC statistics
- static GCMemoryManager* _major_gc_manager;
- static GCMemoryManager* _minor_gc_manager;
-
// memory manager and code heap pools for the CodeCache
static MemoryManager* _code_cache_manager;
static GrowableArray<MemoryPool*>* _code_heap_pools;
@@ -72,51 +58,6 @@ private:
static MemoryPool* _metaspace_pool;
static MemoryPool* _compressed_class_pool;
- static void add_generation_memory_pool(Generation* gen,
- MemoryManager* major_mgr,
- MemoryManager* minor_mgr);
- static void add_generation_memory_pool(Generation* gen,
- MemoryManager* major_mgr) {
- add_generation_memory_pool(gen, major_mgr, NULL);
- }
-
-
- static void add_psYoung_memory_pool(PSYoungGen* young_gen,
- MemoryManager* major_mgr,
- MemoryManager* minor_mgr);
- static void add_psOld_memory_pool(PSOldGen* old_gen,
- MemoryManager* mgr);
-
- static void add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
- MemoryManager* major_mgr,
- MemoryManager* minor_mgr);
- static void add_g1OldGen_memory_pool(G1CollectedHeap* g1h,
- MemoryManager* mgr);
-
- static MemoryPool* add_space(ContiguousSpace* space,
- const char* name,
- bool is_heap,
- size_t max_size,
- bool support_usage_threshold);
- static MemoryPool* add_survivor_spaces(DefNewGeneration* young_gen,
- const char* name,
- bool is_heap,
- size_t max_size,
- bool support_usage_threshold);
- static MemoryPool* add_gen(Generation* gen,
- const char* name,
- bool is_heap,
- bool support_usage_threshold);
- static MemoryPool* add_cms_space(CompactibleFreeListSpace* space,
- const char* name,
- bool is_heap,
- size_t max_size,
- bool support_usage_threshold);
-
- static void add_gen_collected_heap_info(GenCollectedHeap* heap);
- static void add_parallel_scavenge_heap_info(ParallelScavengeHeap* heap);
- static void add_g1_heap_info(G1CollectedHeap* g1h);
-
public:
static void set_universe_heap(CollectedHeap* heap);
static void add_code_heap_memory_pool(CodeHeap* heap, const char* name);
@@ -155,10 +96,10 @@ public:
}
static void track_memory_pool_usage(MemoryPool* pool);
- static void gc_begin(bool fullGC, bool recordGCBeginTime,
+ static void gc_begin(GCMemoryManager* manager, bool recordGCBeginTime,
bool recordAccumulatedGCTime,
bool recordPreGCUsage, bool recordPeakUsage);
- static void gc_end(bool fullGC, bool recordPostGCUsage,
+ static void gc_end(GCMemoryManager* manager, bool recordPostGCUsage,
bool recordAccumulatedGCTime,
bool recordGCEndTime, bool countCollection,
GCCause::Cause cause);
@@ -170,19 +111,11 @@ public:
// Create an instance of java/lang/management/MemoryUsage
static Handle create_MemoryUsage_obj(MemoryUsage usage, TRAPS);
-
- static const GCMemoryManager* get_minor_gc_manager() {
- return _minor_gc_manager;
- }
-
- static const GCMemoryManager* get_major_gc_manager() {
- return _major_gc_manager;
- }
};
class TraceMemoryManagerStats : public StackObj {
private:
- bool _fullGC;
+ GCMemoryManager* _gc_memory_manager;
bool _recordGCBeginTime;
bool _recordPreGCUsage;
bool _recordPeakUsage;
@@ -193,7 +126,7 @@ private:
GCCause::Cause _cause;
public:
TraceMemoryManagerStats() {}
- TraceMemoryManagerStats(bool fullGC,
+ TraceMemoryManagerStats(GCMemoryManager* gc_memory_manager,
GCCause::Cause cause,
bool recordGCBeginTime = true,
bool recordPreGCUsage = true,
@@ -203,7 +136,7 @@ public:
bool recordGCEndTime = true,
bool countCollection = true);
- void initialize(bool fullGC,
+ void initialize(GCMemoryManager* gc_memory_manager,
GCCause::Cause cause,
bool recordGCBeginTime,
bool recordPreGCUsage,
@@ -213,7 +146,6 @@ public:
bool recordGCEndTime,
bool countCollection);
- TraceMemoryManagerStats(Generation::Name kind, GCCause::Cause cause);
~TraceMemoryManagerStats();
};
diff --git a/src/hotspot/share/services/threadService.cpp b/src/hotspot/share/services/threadService.cpp
index da25120bb37..9a1a4ae393d 100644
--- a/src/hotspot/share/services/threadService.cpp
+++ b/src/hotspot/share/services/threadService.cpp
@@ -34,9 +34,9 @@
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
-#include "runtime/thread.hpp"
-#include "runtime/vframe.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.inline.hpp"
+#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/threadService.hpp"
@@ -148,7 +148,7 @@ void ThreadService::current_thread_exiting(JavaThread* jt) {
// FIXME: JVMTI should call this function
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
assert(thread != NULL, "should be non-NULL");
- assert(Threads_lock->owned_by_self(), "must grab Threads_lock or be at safepoint");
+ debug_only(Thread::check_for_dangling_thread_pointer(thread);)
ObjectMonitor *wait_obj = thread->current_waiting_monitor();
@@ -266,6 +266,7 @@ Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
int num_snapshots = dump_result.num_snapshots();
assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
+ assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
int i = 0;
for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) {
ThreadStackTrace* stacktrace = ts->get_stack_trace();
@@ -297,7 +298,9 @@ void ThreadService::reset_contention_time_stat(JavaThread* thread) {
}
// Find deadlocks involving object monitors and concurrent locks if concurrent_locks is true
-DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(bool concurrent_locks) {
+DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) {
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+
// This code was modified from the original Threads::find_deadlocks code.
int globalDfn = 0, thisDfn;
ObjectMonitor* waitingToLockMonitor = NULL;
@@ -306,15 +309,16 @@ DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(bool concurrent_locks)
JavaThread *currentThread, *previousThread;
int num_deadlocks = 0;
- for (JavaThread* p = Threads::first(); p != NULL; p = p->next()) {
- // Initialize the depth-first-number
- p->set_depth_first_number(-1);
+ // Initialize the depth-first-number for each JavaThread.
+ JavaThreadIterator jti(t_list);
+ for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
+ jt->set_depth_first_number(-1);
}
DeadlockCycle* deadlocks = NULL;
DeadlockCycle* last = NULL;
DeadlockCycle* cycle = new DeadlockCycle();
- for (JavaThread* jt = Threads::first(); jt != NULL; jt = jt->next()) {
+ for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
if (jt->depth_first_number() >= 0) {
// this thread was already visited
continue;
@@ -339,9 +343,8 @@ DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(bool concurrent_locks)
if (waitingToLockMonitor != NULL) {
address currentOwner = (address)waitingToLockMonitor->owner();
if (currentOwner != NULL) {
- currentThread = Threads::owning_thread_from_monitor_owner(
- currentOwner,
- false /* no locking needed */);
+ currentThread = Threads::owning_thread_from_monitor_owner(t_list,
+ currentOwner);
if (currentThread == NULL) {
// This function is called at a safepoint so the JavaThread
// that owns waitingToLockMonitor should be findable, but
@@ -366,6 +369,8 @@ DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(bool concurrent_locks)
if (concurrent_locks) {
if (waitingToLockBlocker->is_a(SystemDictionary::abstract_ownable_synchronizer_klass())) {
oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
+ // This JavaThread (if there is one) is protected by the
+ // ThreadsListSetter in VM_FindDeadlocks::doit().
currentThread = threadObj != NULL ? java_lang_Thread::thread(threadObj) : NULL;
} else {
currentThread = NULL;
@@ -414,7 +419,7 @@ DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(bool concurrent_locks)
return deadlocks;
}
-ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(NULL), _next(NULL), _last(NULL) {
+ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(NULL), _next(NULL), _last(NULL), _setter() {
// Create a new ThreadDumpResult object and append to the list.
// If GC happens before this function returns, Method*
@@ -422,7 +427,7 @@ ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snap
ThreadService::add_thread_dump(this);
}
-ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(NULL), _next(NULL), _last(NULL) {
+ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(NULL), _next(NULL), _last(NULL), _setter() {
// Create a new ThreadDumpResult object and append to the list.
// If GC happens before this function returns, oops
// will be visited.
@@ -467,6 +472,10 @@ void ThreadDumpResult::metadata_do(void f(Metadata*)) {
}
}
+ThreadsList* ThreadDumpResult::t_list() {
+ return _setter.list();
+}
+
StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
_method = jvf->method();
_bci = jvf->bci();
@@ -683,6 +692,8 @@ void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
oop o = aos_objects->at(i);
oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o);
if (owner_thread_obj != NULL) {
+ // See comments in ThreadConcurrentLocks to see how this
+ // JavaThread* is protected.
JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
assert(o->is_instance(), "Must be an instanceOop");
add_lock(thread, (instanceOop) o);
@@ -764,7 +775,7 @@ ThreadStatistics::ThreadStatistics() {
memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}
-ThreadSnapshot::ThreadSnapshot(JavaThread* thread) {
+ThreadSnapshot::ThreadSnapshot(ThreadsList * t_list, JavaThread* thread) {
_thread = thread;
_threadObj = thread->threadObj();
_stack_trace = NULL;
@@ -796,7 +807,7 @@ ThreadSnapshot::ThreadSnapshot(JavaThread* thread) {
_thread_status = java_lang_Thread::RUNNABLE;
} else {
_blocker_object = obj();
- JavaThread* owner = ObjectSynchronizer::get_lock_owner(obj, false);
+ JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj);
if ((owner == NULL && _thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER)
|| (owner != NULL && owner->is_attaching_via_jni())) {
// ownership information of the monitor is not available
@@ -865,7 +876,7 @@ DeadlockCycle::~DeadlockCycle() {
delete _threads;
}
-void DeadlockCycle::print_on(outputStream* st) const {
+void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const {
st->cr();
st->print_cr("Found one Java-level deadlock:");
st->print("=============================");
@@ -895,9 +906,8 @@ void DeadlockCycle::print_on(outputStream* st) const {
// No Java object associated - a JVMTI raw monitor
owner_desc = " (JVMTI raw monitor),\n which is held by";
}
- currentThread = Threads::owning_thread_from_monitor_owner(
- (address)waitingToLockMonitor->owner(),
- false /* no locking needed */);
+ currentThread = Threads::owning_thread_from_monitor_owner(t_list,
+ (address)waitingToLockMonitor->owner());
if (currentThread == NULL) {
// The deadlock was detected at a safepoint so the JavaThread
// that owns waitingToLockMonitor should be findable, but
@@ -915,6 +925,7 @@ void DeadlockCycle::print_on(outputStream* st) const {
"Must be an AbstractOwnableSynchronizer");
oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
currentThread = java_lang_Thread::thread(ownerObj);
+ assert(currentThread != NULL, "AbstractOwnableSynchronizer owning thread is unexpectedly NULL");
}
st->print("%s \"%s\"", owner_desc, currentThread->get_thread_name());
}
@@ -943,9 +954,7 @@ ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread,
int init_size = ThreadService::get_live_thread_count();
_threads_array = new GrowableArray<instanceHandle>(init_size);
- MutexLockerEx ml(Threads_lock);
-
- for (JavaThread* jt = Threads::first(); jt != NULL; jt = jt->next()) {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
// skips JavaThreads in the process of exiting
// and also skips VM internal JavaThreads
// Threads in _thread_new or _thread_new_trans state are included.
diff --git a/src/hotspot/share/services/threadService.hpp b/src/hotspot/share/services/threadService.hpp
index 46bc012f73e..950482e462d 100644
--- a/src/hotspot/share/services/threadService.hpp
+++ b/src/hotspot/share/services/threadService.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/perfData.hpp"
+#include "runtime/thread.hpp"
#include "services/management.hpp"
#include "services/serviceUtil.hpp"
@@ -109,7 +110,7 @@ public:
static void reset_contention_count_stat(JavaThread* thread);
static void reset_contention_time_stat(JavaThread* thread);
- static DeadlockCycle* find_deadlocks_at_safepoint(bool object_monitors_only);
+ static DeadlockCycle* find_deadlocks_at_safepoint(ThreadsList * t_list, bool object_monitors_only);
// GC support
static void oops_do(OopClosure* f);
@@ -189,6 +190,8 @@ public:
// Thread snapshot to represent the thread state and statistics
class ThreadSnapshot : public CHeapObj<mtInternal> {
private:
+ // This JavaThread* is protected by being stored in objects that are
+ // protected by a ThreadsListSetter (ThreadDumpResult).
JavaThread* _thread;
oop _threadObj;
java_lang_Thread::ThreadStatus _thread_status;
@@ -213,7 +216,7 @@ public:
// Dummy snapshot
ThreadSnapshot() : _thread(NULL), _threadObj(NULL), _stack_trace(NULL), _concurrent_locks(NULL), _next(NULL),
_blocker_object(NULL), _blocker_object_owner(NULL) {};
- ThreadSnapshot(JavaThread* thread);
+ ThreadSnapshot(ThreadsList * t_list, JavaThread* thread);
~ThreadSnapshot();
java_lang_Thread::ThreadStatus thread_status() { return _thread_status; }
@@ -310,6 +313,12 @@ class ThreadConcurrentLocks : public CHeapObj<mtInternal> {
private:
GrowableArray<instanceOop>* _owned_locks;
ThreadConcurrentLocks* _next;
+ // This JavaThread* is protected in one of two different ways
+ // depending on the usage of the ThreadConcurrentLocks object:
+ // 1) by being stored in objects that are only allocated and used at a
+ // safepoint (ConcurrentLocksDump), or 2) by being stored in objects
+ // that are protected by a ThreadsListSetter (ThreadSnapshot inside
+ // ThreadDumpResult).
JavaThread* _thread;
public:
ThreadConcurrentLocks(JavaThread* thread);
@@ -333,8 +342,12 @@ class ConcurrentLocksDump : public StackObj {
void add_lock(JavaThread* thread, instanceOop o);
public:
- ConcurrentLocksDump(bool retain_map_on_free) : _map(NULL), _last(NULL), _retain_map_on_free(retain_map_on_free) {};
- ConcurrentLocksDump() : _map(NULL), _last(NULL), _retain_map_on_free(false) {};
+ ConcurrentLocksDump(bool retain_map_on_free) : _map(NULL), _last(NULL), _retain_map_on_free(retain_map_on_free) {
+ assert(SafepointSynchronize::is_at_safepoint(), "Must be constructed at a safepoint.");
+ };
+ ConcurrentLocksDump() : _map(NULL), _last(NULL), _retain_map_on_free(false) {
+ assert(SafepointSynchronize::is_at_safepoint(), "Must be constructed at a safepoint.");
+ };
~ConcurrentLocksDump();
void dump_at_safepoint();
@@ -349,6 +362,9 @@ class ThreadDumpResult : public StackObj {
ThreadSnapshot* _snapshots;
ThreadSnapshot* _last;
ThreadDumpResult* _next;
+ ThreadsListSetter _setter; // Helper to set hazard ptr in the originating thread
+ // which protects the JavaThreads in _snapshots.
+
public:
ThreadDumpResult();
ThreadDumpResult(int num_threads);
@@ -360,6 +376,9 @@ class ThreadDumpResult : public StackObj {
int num_threads() { return _num_threads; }
int num_snapshots() { return _num_snapshots; }
ThreadSnapshot* snapshots() { return _snapshots; }
+ void set_t_list() { _setter.set(); }
+ ThreadsList* t_list();
+ bool t_list_has_been_set() { return _setter.target_needs_release(); }
void oops_do(OopClosure* f);
void metadata_do(void f(Metadata*));
};
@@ -381,7 +400,7 @@ class DeadlockCycle : public CHeapObj<mtInternal> {
bool is_deadlock() { return _is_deadlock; }
int num_threads() { return _threads->length(); }
GrowableArray<JavaThread*>* threads() { return _threads; }
- void print_on(outputStream* st) const;
+ void print_on_with(ThreadsList * t_list, outputStream* st) const;
};
// Utility class to get list of java threads.
diff --git a/src/hotspot/share/utilities/decoder.cpp b/src/hotspot/share/utilities/decoder.cpp
index 61a274c68fb..38ba9decb6e 100644
--- a/src/hotspot/share/utilities/decoder.cpp
+++ b/src/hotspot/share/utilities/decoder.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
+#include "memory/allocation.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/decoder.hpp"
#include "utilities/vmError.hpp"
diff --git a/src/hotspot/share/utilities/decoder_elf.cpp b/src/hotspot/share/utilities/decoder_elf.cpp
index cd438dc1c49..c8639c2b972 100644
--- a/src/hotspot/share/utilities/decoder_elf.cpp
+++ b/src/hotspot/share/utilities/decoder_elf.cpp
@@ -26,6 +26,7 @@
#if !defined(_WINDOWS) && !defined(__APPLE__)
#include "decoder_elf.hpp"
+#include "memory/allocation.inline.hpp"
ElfDecoder::~ElfDecoder() {
if (_opened_elf_files != NULL) {
diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
index 33f9962da15..e98ec391144 100644
--- a/src/hotspot/share/utilities/globalDefinitions.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp
@@ -951,7 +951,6 @@ const int badResourceValue = 0xAB; // value used to zap
const int freeBlockPad = 0xBA; // value used to pad freed blocks.
const int uninitBlockPad = 0xF1; // value used to zap newly malloc'd blocks.
const juint uninitMetaWordVal= 0xf7f7f7f7; // value used to zap newly allocated metachunk
-const intptr_t badJNIHandleVal = (intptr_t) UCONST64(0xFEFEFEFEFEFEFEFE); // value used to zap jni handle area
const juint badHeapWordVal = 0xBAADBABE; // value used to zap heap after GC
const juint badMetaWordVal = 0xBAADFADE; // value used to zap metadata heap after GC
const int badCodeHeapNewVal= 0xCC; // value used to zap Code heap at allocation
@@ -963,7 +962,6 @@ const int badCodeHeapFreeVal = 0xDD; // value used to zap
#define badAddress ((address)::badAddressVal)
#define badOop (cast_to_oop(::badOopVal))
#define badHeapWord (::badHeapWordVal)
-#define badJNIHandle (cast_to_oop(::badJNIHandleVal))
// Default TaskQueue size is 16K (32-bit) or 128K (64-bit)
#define TASKQUEUE_SIZE (NOT_LP64(1<<14) LP64_ONLY(1<<17))
diff --git a/src/hotspot/share/utilities/growableArray.cpp b/src/hotspot/share/utilities/growableArray.cpp
index d6dc256a2c7..076529897c6 100644
--- a/src/hotspot/share/utilities/growableArray.cpp
+++ b/src/hotspot/share/utilities/growableArray.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/growableArray.hpp"
@@ -56,3 +57,7 @@ void* GenericGrowableArray::raw_allocate(int elementSize) {
return _arena->Amalloc(byte_size);
}
}
+
+void GenericGrowableArray::free_C_heap(void* elements) {
+ FreeHeap(elements);
+}
diff --git a/src/hotspot/share/utilities/growableArray.hpp b/src/hotspot/share/utilities/growableArray.hpp
index f65f45cede7..4c5a4914db7 100644
--- a/src/hotspot/share/utilities/growableArray.hpp
+++ b/src/hotspot/share/utilities/growableArray.hpp
@@ -26,9 +26,9 @@
#define SHARE_VM_UTILITIES_GROWABLEARRAY_HPP
#include "memory/allocation.hpp"
-#include "memory/allocation.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
// A growable array.
@@ -144,6 +144,8 @@ class GenericGrowableArray : public ResourceObj {
assert(on_stack(), "fast ResourceObj path only");
return (void*)resource_allocate_bytes(thread, elementSize * _max);
}
+
+ void free_C_heap(void* elements);
};
template<class E> class GrowableArrayIterator;
@@ -451,7 +453,7 @@ template<class E> void GrowableArray<E>::grow(int j) {
for ( ; i < _max; i++) ::new ((void*)&newData[i]) E();
for (i = 0; i < old_max; i++) _data[i].~E();
if (on_C_heap() && _data != NULL) {
- FreeHeap(_data);
+ free_C_heap(_data);
}
_data = newData;
}
@@ -475,7 +477,7 @@ template<class E> void GrowableArray<E>::clear_and_deallocate() {
clear();
if (_data != NULL) {
for (int i = 0; i < _max; i++) _data[i].~E();
- FreeHeap(_data);
+ free_C_heap(_data);
_data = NULL;
}
}
diff --git a/src/hotspot/share/utilities/stack.hpp b/src/hotspot/share/utilities/stack.hpp
index 7c866897691..b606bce533a 100644
--- a/src/hotspot/share/utilities/stack.hpp
+++ b/src/hotspot/share/utilities/stack.hpp
@@ -26,7 +26,6 @@
#define SHARE_VM_UTILITIES_STACK_HPP
#include "memory/allocation.hpp"
-#include "memory/allocation.inline.hpp"
// Class Stack (below) grows and shrinks by linking together "segments" which
// are allocated on demand. Segments are arrays of the element type (E) plus an
diff --git a/src/hotspot/share/utilities/vmError.cpp b/src/hotspot/share/utilities/vmError.cpp
index 580fc86e89e..8d127605e13 100644
--- a/src/hotspot/share/utilities/vmError.cpp
+++ b/src/hotspot/share/utilities/vmError.cpp
@@ -36,6 +36,7 @@
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "runtime/vm_version.hpp"
@@ -1655,7 +1656,12 @@ void VMError::controlled_crash(int how) {
char * const dataPtr = NULL; // bad data pointer
const void (*funcPtr)(void) = (const void(*)()) 0xF; // bad function pointer
- // Keep this in sync with test/runtime/ErrorHandling/ErrorHandler.java
+ // Keep this in sync with test/hotspot/jtreg/runtime/ErrorHandling/ErrorHandler.java
+ // which tests cases 1 thru 13.
+ // Case 14 is tested by test/hotspot/jtreg/runtime/ErrorHandling/SafeFetchInErrorHandlingTest.java.
+ // Case 15 is tested by test/hotspot/jtreg/runtime/ErrorHandling/SecondaryErrorTest.java.
+ // Case 16 is tested by test/hotspot/jtreg/runtime/ErrorHandling/ThreadsListHandleInErrorHandlingTest.java.
+ // Case 17 is tested by test/hotspot/jtreg/runtime/ErrorHandling/NestedThreadsListHandleInErrorHandlingTest.java.
switch (how) {
case 1: vmassert(str == NULL, "expected null");
case 2: vmassert(num == 1023 && *str == 'X',
@@ -1683,6 +1689,17 @@ void VMError::controlled_crash(int how) {
case 13: (*funcPtr)(); break;
case 14: crash_with_segfault(); break;
case 15: crash_with_sigfpe(); break;
+ case 16: {
+ ThreadsListHandle tlh;
+ fatal("Force crash with an active ThreadsListHandle.");
+ }
+ case 17: {
+ ThreadsListHandle tlh;
+ {
+ ThreadsListHandle tlh2;
+ fatal("Force crash with a nested ThreadsListHandle.");
+ }
+ }
default: tty->print_cr("ERROR: %d: unexpected test_num value.", how);
}
diff --git a/src/java.base/share/classes/com/sun/crypto/provider/SunJCE.java b/src/java.base/share/classes/com/sun/crypto/provider/SunJCE.java
index e44a8476dc2..3e681a57285 100644
--- a/src/java.base/share/classes/com/sun/crypto/provider/SunJCE.java
+++ b/src/java.base/share/classes/com/sun/crypto/provider/SunJCE.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -769,6 +769,8 @@ public final class SunJCE extends Provider {
"com.sun.crypto.provider.TlsMasterSecretGenerator");
put("Alg.Alias.KeyGenerator.SunTls12MasterSecret",
"SunTlsMasterSecret");
+ put("Alg.Alias.KeyGenerator.SunTlsExtendedMasterSecret",
+ "SunTlsMasterSecret");
put("KeyGenerator.SunTlsKeyMaterial",
"com.sun.crypto.provider.TlsKeyMaterialGenerator");
diff --git a/src/java.base/share/classes/com/sun/crypto/provider/TlsMasterSecretGenerator.java b/src/java.base/share/classes/com/sun/crypto/provider/TlsMasterSecretGenerator.java
index c772396e990..fda0828f6d4 100644
--- a/src/java.base/share/classes/com/sun/crypto/provider/TlsMasterSecretGenerator.java
+++ b/src/java.base/share/classes/com/sun/crypto/provider/TlsMasterSecretGenerator.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -102,21 +102,32 @@ public final class TlsMasterSecretGenerator extends KeyGeneratorSpi {
try {
byte[] master;
- byte[] clientRandom = spec.getClientRandom();
- byte[] serverRandom = spec.getServerRandom();
-
if (protocolVersion >= 0x0301) {
- byte[] seed = concat(clientRandom, serverRandom);
+ byte[] label;
+ byte[] seed;
+ byte[] extendedMasterSecretSessionHash =
+ spec.getExtendedMasterSecretSessionHash();
+ if (extendedMasterSecretSessionHash.length != 0) {
+ label = LABEL_EXTENDED_MASTER_SECRET;
+ seed = extendedMasterSecretSessionHash;
+ } else {
+ byte[] clientRandom = spec.getClientRandom();
+ byte[] serverRandom = spec.getServerRandom();
+ label = LABEL_MASTER_SECRET;
+ seed = concat(clientRandom, serverRandom);
+ }
master = ((protocolVersion >= 0x0303) ?
- doTLS12PRF(premaster, LABEL_MASTER_SECRET, seed, 48,
- spec.getPRFHashAlg(), spec.getPRFHashLength(),
- spec.getPRFBlockSize()) :
- doTLS10PRF(premaster, LABEL_MASTER_SECRET, seed, 48));
+ doTLS12PRF(premaster, label, seed, 48,
+ spec.getPRFHashAlg(), spec.getPRFHashLength(),
+ spec.getPRFBlockSize()) :
+ doTLS10PRF(premaster, label, seed, 48));
} else {
master = new byte[48];
MessageDigest md5 = MessageDigest.getInstance("MD5");
MessageDigest sha = MessageDigest.getInstance("SHA");
+ byte[] clientRandom = spec.getClientRandom();
+ byte[] serverRandom = spec.getServerRandom();
byte[] tmp = new byte[20];
for (int i = 0; i < 3; i++) {
sha.update(SSL3_CONST[i]);
@@ -175,5 +186,5 @@ public final class TlsMasterSecretGenerator extends KeyGeneratorSpi {
}
}
-
}
+
diff --git a/src/java.base/share/classes/com/sun/crypto/provider/TlsPrfGenerator.java b/src/java.base/share/classes/com/sun/crypto/provider/TlsPrfGenerator.java
index 90cde7c6bfb..2f945361c3f 100644
--- a/src/java.base/share/classes/com/sun/crypto/provider/TlsPrfGenerator.java
+++ b/src/java.base/share/classes/com/sun/crypto/provider/TlsPrfGenerator.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,6 +55,11 @@ abstract class TlsPrfGenerator extends KeyGeneratorSpi {
static final byte[] LABEL_MASTER_SECRET = // "master secret"
{ 109, 97, 115, 116, 101, 114, 32, 115, 101, 99, 114, 101, 116 };
+ static final byte[] LABEL_EXTENDED_MASTER_SECRET =
+ // "extended master secret"
+ { 101, 120, 116, 101, 110, 100, 101, 100, 32, 109, 97, 115, 116,
+ 101, 114, 32, 115, 101, 99, 114, 101, 116 };
+
static final byte[] LABEL_KEY_EXPANSION = // "key expansion"
{ 107, 101, 121, 32, 101, 120, 112, 97, 110, 115, 105, 111, 110 };
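(Aside, not part of the changeset: the two label arrays above are simply the ASCII bytes of the TLS PRF labels, "master secret" from RFC 5246 and "extended master secret" from RFC 7627. A standalone sketch that reproduces them for verification:)

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class TlsLabelCheck {
    public static void main(String[] args) {
        // ASCII encodings of the PRF labels; these match the hard-coded
        // byte arrays added to TlsPrfGenerator above.
        byte[] master = "master secret".getBytes(StandardCharsets.US_ASCII);
        byte[] extended = "extended master secret".getBytes(StandardCharsets.US_ASCII);
        System.out.println(Arrays.toString(master));
        System.out.println(Arrays.toString(extended));
    }
}
```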
diff --git a/src/java.base/share/classes/com/sun/java/util/jar/pack/Constants.java b/src/java.base/share/classes/com/sun/java/util/jar/pack/Constants.java
index 50dc8020b25..5eb79adb6ae 100644
--- a/src/java.base/share/classes/com/sun/java/util/jar/pack/Constants.java
+++ b/src/java.base/share/classes/com/sun/java/util/jar/pack/Constants.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,7 @@ class Constants {
1.7 to 1.7.X 51,0
1.8 to 1.8.X 52,0
1.9 to 1.9.X 53,0
+ 1.10 to 1.10.X 54,0
*/
public static final Package.Version JAVA_MIN_CLASS_VERSION =
@@ -67,6 +68,9 @@ class Constants {
public static final Package.Version JAVA9_MAX_CLASS_VERSION =
Package.Version.of(53, 00);
+ public static final Package.Version JAVA10_MAX_CLASS_VERSION =
+ Package.Version.of(54, 00);
+
public static final int JAVA_PACKAGE_MAGIC = 0xCAFED00D;
public static final Package.Version JAVA5_PACKAGE_VERSION =
@@ -83,7 +87,7 @@ class Constants {
// upper limit, should point to the latest class version
public static final Package.Version JAVA_MAX_CLASS_VERSION =
- JAVA9_MAX_CLASS_VERSION;
+ JAVA10_MAX_CLASS_VERSION;
// upper limit should point to the latest package version, for version info!.
public static final Package.Version MAX_PACKAGE_VERSION =
diff --git a/src/java.base/share/classes/java/io/FileInputStream.java b/src/java.base/share/classes/java/io/FileInputStream.java
index 5dfe71907fe..72a12ff76de 100644
--- a/src/java.base/share/classes/java/io/FileInputStream.java
+++ b/src/java.base/share/classes/java/io/FileInputStream.java
@@ -25,6 +25,7 @@
package java.io;
+import java.lang.reflect.Method;
import java.nio.channels.FileChannel;
import sun.nio.ch.FileChannelImpl;
@@ -37,6 +38,22 @@ import sun.nio.ch.FileChannelImpl;
* FileInputStream is meant for reading streams of raw bytes
* such as image data. For reading streams of characters, consider using
* FileReader.
+ *
+ * @apiNote
+ * To release resources used by this stream {@link #close} should be called
+ * directly or by try-with-resources. Subclasses are responsible for the cleanup
+ * of resources acquired by the subclass.
+ * Subclasses that override {@link #finalize} in order to perform cleanup
+ * should be modified to use alternative cleanup mechanisms such as
+ * {@link java.lang.ref.Cleaner} and remove the overriding {@code finalize} method.
+ *
+ * @implSpec
+ * If this FileInputStream has been subclassed and the {@link #close}
+ * method has been overridden, the {@link #close} method will be
+ * called when the FileInputStream is unreachable.
+ * Otherwise, it is implementation specific how the resource cleanup described in
+ * {@link #close} is performed.
+
*
* @author Arthur van Hoff
* @see java.io.File
@@ -63,6 +80,8 @@ class FileInputStream extends InputStream
private volatile boolean closed;
+ private final AltFinalizer altFinalizer;
+
/**
* Creates a FileInputStream by
* opening a connection to an actual file,
@@ -137,6 +156,10 @@ class FileInputStream extends InputStream
fd.attach(this);
path = name;
open(name);
+ altFinalizer = AltFinalizer.get(this);
+ if (altFinalizer == null) {
+ fd.registerCleanup(); // open set the fd, register the cleanup
+ }
}
/**
@@ -173,6 +196,7 @@ class FileInputStream extends InputStream
}
fd = fdObj;
path = null;
+ altFinalizer = null;
/*
* FileDescriptor is being shared by streams.
@@ -316,6 +340,14 @@ class FileInputStream extends InputStream
*
* If this stream has an associated channel then the channel is closed
* as well.
*
+ * @apiNote
+ * Overriding {@link #close} to perform cleanup actions is reliable
+ * only when called directly or when called by try-with-resources.
+ * Do not depend on finalization to invoke {@code close};
+ * finalization is not reliable and is deprecated.
+ * If cleanup of native resources is needed, other mechanisms such as
+ * {@linkplain java.lang.ref.Cleaner} should be used.
+ *
* @exception IOException if an I/O error occurs.
*
* @revised 1.4
@@ -404,16 +436,27 @@ class FileInputStream extends InputStream
private static native void initIDs();
-
static {
initIDs();
}
/**
- * Ensures that the close method of this file input stream is
+ * Ensures that the {@link #close} method of this file input stream is
* called when there are no more references to it.
+ * The {@link #finalize} method does not call {@link #close} directly.
*
- * @deprecated The {@code finalize} method has been deprecated.
+ * @apiNote
+ * To release resources used by this stream {@link #close} should be called
+ * directly or by try-with-resources.
+ *
+ * @implSpec
+ * If this FileInputStream has been subclassed and the {@link #close}
+ * method has been overridden, the {@link #close} method will be
+ * called when the FileInputStream is unreachable.
+ * Otherwise, it is implementation specific how the resource cleanup described in
+ * {@link #close} is performed.
+ *
+ * @deprecated The {@code finalize} method has been deprecated and will be removed.
* Subclasses that override {@code finalize} in order to perform cleanup
* should be modified to use alternative cleanup mechanisms and
* to remove the overriding {@code finalize} method.
@@ -425,15 +468,57 @@ class FileInputStream extends InputStream
* @exception IOException if an I/O error occurs.
* @see java.io.FileInputStream#close()
*/
- @Deprecated(since="9")
+ @Deprecated(since="9", forRemoval = true)
protected void finalize() throws IOException {
- if ((fd != null) && (fd != FileDescriptor.in)) {
- /* if fd is shared, the references in FileDescriptor
- * will ensure that finalizer is only called when
- * safe to do so. All references using the fd have
- * become unreachable. We can call close()
- */
- close();
+ }
+
+ /**
+ * Class to call {@code FileInputStream.close} when finalized.
+ * If finalization of the stream is needed, an instance is created
+ * in its constructor(s). When the set of instances
+ * related to the stream is unreachable, the AltFinalizer performs
+ * the needed call to the stream's {@code close} method.
+ */
+ static class AltFinalizer {
+ private final FileInputStream fis;
+
+ /*
+ * Returns a finalizer object if the FIS needs a finalizer; otherwise null.
+ * If the FIS has a close method; it needs an AltFinalizer.
+ */
+ static AltFinalizer get(FileInputStream fis) {
+ Class<?> clazz = fis.getClass();
+ while (clazz != FileInputStream.class) {
+ try {
+ clazz.getDeclaredMethod("close");
+ return new AltFinalizer(fis);
+ } catch (NoSuchMethodException nsme) {
+ // ignore
+ }
+ clazz = clazz.getSuperclass();
+ }
+ return null;
+ }
+
+ private AltFinalizer(FileInputStream fis) {
+ this.fis = fis;
+ }
+
+ @Override
+ @SuppressWarnings("deprecation")
+ protected final void finalize() {
+ try {
+ if ((fis.fd != null) && (fis.fd != FileDescriptor.in)) {
+ /* if fd is shared, the references in FileDescriptor
+ * will ensure that finalizer is only called when
+ * safe to do so. All references using the fd have
+ * become unreachable. We can call close()
+ */
+ fis.close();
+ }
+ } catch (IOException ioe) {
+ // ignore
+ }
}
}
}
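(Aside, not part of the changeset: as the new @apiNote advises, callers should close the stream explicitly or via try-with-resources rather than rely on finalization, which the AltFinalizer now provides only for subclasses that override close(). A minimal usage sketch; the file name is a placeholder:)

```java
import java.io.FileInputStream;
import java.io.IOException;

public class ReadFirstByte {
    public static void main(String[] args) throws IOException {
        // try-with-resources releases the file descriptor deterministically;
        // no dependence on finalize(), which is now deprecated for removal.
        try (FileInputStream in = new FileInputStream("data.bin")) {
            int b = in.read();   // first byte, or -1 if the file is empty
            System.out.println("first byte: " + b);
        }
    }
}
```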
diff --git a/src/java.base/share/classes/java/io/FileOutputStream.java b/src/java.base/share/classes/java/io/FileOutputStream.java
index f9c1baa3518..cb9c8cda0f3 100644
--- a/src/java.base/share/classes/java/io/FileOutputStream.java
+++ b/src/java.base/share/classes/java/io/FileOutputStream.java
@@ -25,6 +25,7 @@
package java.io;
+import java.lang.reflect.Method;
import java.nio.channels.FileChannel;
import jdk.internal.misc.SharedSecrets;
import jdk.internal.misc.JavaIOFileDescriptorAccess;
@@ -44,6 +45,21 @@ import sun.nio.ch.FileChannelImpl;
* such as image data. For writing streams of characters, consider using
* FileWriter.
*
+ * @apiNote
+ * To release resources used by this stream {@link #close} should be called
+ * directly or by try-with-resources. Subclasses are responsible for the cleanup
+ * of resources acquired by the subclass.
+ * Subclasses that override {@link #finalize} in order to perform cleanup
+ * should be modified to use alternative cleanup mechanisms such as
+ * {@link java.lang.ref.Cleaner} and remove the overriding {@code finalize} method.
+ *
+ * @implSpec
+ * If this FileOutputStream has been subclassed and the {@link #close}
+ * method has been overridden, the {@link #close} method will be
+ * called when the FileOutputStream is unreachable.
+ * Otherwise, it is implementation specific how the resource cleanup described in
+ * {@link #close} is performed.
+ *
* @author Arthur van Hoff
* @see java.io.File
* @see java.io.FileDescriptor
@@ -80,6 +96,8 @@ class FileOutputStream extends OutputStream
private volatile boolean closed;
+ private final AltFinalizer altFinalizer;
+
/**
* Creates a file output stream to write to the file with the
* specified name. A new FileDescriptor object is
@@ -218,6 +236,10 @@ class FileOutputStream extends OutputStream
this.path = name;
open(name, append);
+ altFinalizer = AltFinalizer.get(this);
+ if (altFinalizer == null) {
+ fd.registerCleanup(); // open set the fd, register the cleanup
+ }
}
/**
@@ -253,6 +275,7 @@ class FileOutputStream extends OutputStream
}
this.fd = fdObj;
this.path = null;
+ this.altFinalizer = null;
fd.attach(this);
}
@@ -340,6 +363,14 @@ class FileOutputStream extends OutputStream
*
* If this stream has an associated channel then the channel is closed
* as well.
*
+ * @apiNote
+ * Overriding {@link #close} to perform cleanup actions is reliable
+ * only when called directly or when called by try-with-resources.
+ * Do not depend on finalization to invoke {@code close};
+ * finalization is not reliable and is deprecated.
+ * If cleanup of native resources is needed, other mechanisms such as
+ * {@linkplain java.lang.ref.Cleaner} should be used.
+ *
* @exception IOException if an I/O error occurs.
*
* @revised 1.4
@@ -429,34 +460,35 @@ class FileOutputStream extends OutputStream
/**
* Cleans up the connection to the file, and ensures that the
- * close method of this file output stream is
+ * {@link #close} method of this file output stream is
* called when there are no more references to this stream.
+ * The {@link #finalize} method does not call {@link #close} directly.
+ *
+ * @apiNote
+ * To release resources used by this stream {@link #close} should be called
+ * directly or by try-with-resources.
+ *
+ * @implSpec
+ * If this FileOutputStream has been subclassed and the {@link #close}
+ * method has been overridden, the {@link #close} method will be
+ * called when the FileOutputStream is unreachable.
+ * Otherwise, it is implementation specific how the resource cleanup described in
+ * {@link #close} is performed.
+ *
+ * @deprecated The {@code finalize} method has been deprecated and will be removed.
+ * Subclasses that override {@code finalize} in order to perform cleanup
+ * should be modified to use alternative cleanup mechanisms and
+ * to remove the overriding {@code finalize} method.
+ * When overriding the {@code finalize} method, its implementation must explicitly
+ * ensure that {@code super.finalize()} is invoked as described in {@link Object#finalize}.
+ * See the specification for {@link Object#finalize()} for further
+ * information about migration options.
*
- * @deprecated The {@code finalize} method has been deprecated.
- * Subclasses that override {@code finalize} in order to perform cleanup
- * should be modified to use alternative cleanup mechanisms and
- * to remove the overriding {@code finalize} method.
- * When overriding the {@code finalize} method, its implementation must explicitly
- * ensure that {@code super.finalize()} is invoked as described in {@link Object#finalize}.
- * See the specification for {@link Object#finalize()} for further
- * information about migration options.
* @exception IOException if an I/O error occurs.
* @see java.io.FileInputStream#close()
*/
- @Deprecated(since="9")
+ @Deprecated(since="9", forRemoval = true)
protected void finalize() throws IOException {
- if (fd != null) {
- if (fd == FileDescriptor.out || fd == FileDescriptor.err) {
- flush();
- } else {
- /* if fd is shared, the references in FileDescriptor
- * will ensure that finalizer is only called when
- * safe to do so. All references using the fd have
- * become unreachable. We can call close()
- */
- close();
- }
- }
}
private static native void initIDs();
@@ -465,4 +497,59 @@ class FileOutputStream extends OutputStream
initIDs();
}
+ /**
+ * Class to call {@code FileOutputStream.close} when finalized.
+ * If finalization of the stream is needed, an instance is created
+ * in its constructor(s). When the set of instances
+ * related to the stream is unreachable, the AltFinalizer performs
+ * the needed call to the stream's {@code close} method.
+ */
+ static class AltFinalizer {
+ private final FileOutputStream fos;
+
+ /*
+ * Returns a finalizer object if the FOS needs a finalizer; otherwise null.
+ * If the FOS has a close method; it needs an AltFinalizer.
+ */
+ static AltFinalizer get(FileOutputStream fos) {
+ Class<?> clazz = fos.getClass();
+ while (clazz != FileOutputStream.class) {
+ try {
+ clazz.getDeclaredMethod("close");
+ return new AltFinalizer(fos);
+ } catch (NoSuchMethodException nsme) {
+ // ignore
+ }
+ clazz = clazz.getSuperclass();
+ }
+ return null;
+ }
+
+ private AltFinalizer(FileOutputStream fos) {
+ this.fos = fos;
+ }
+
+ @Override
+ @SuppressWarnings("deprecation")
+ protected final void finalize() {
+ try {
+ if (fos.fd != null) {
+ if (fos.fd == FileDescriptor.out || fos.fd == FileDescriptor.err) {
+ // Subclass may override flush; otherwise it is no-op
+ fos.flush();
+ } else {
+ /* if fd is shared, the references in FileDescriptor
+ * will ensure that finalizer is only called when
+ * safe to do so. All references using the fd have
+ * become unreachable. We can call close()
+ */
+ fos.close();
+ }
+ }
+ } catch (IOException ioe) {
+ // ignore
+ }
+ }
+ }
+
}
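(Aside, not part of the changeset: the deprecation notes point subclasses that used finalize() for cleanup at java.lang.ref.Cleaner. A self-contained sketch of that pattern; the NativeBuffer/handle names are hypothetical and only illustrate the idiom:)

```java
import java.lang.ref.Cleaner;

public class NativeBuffer implements AutoCloseable {
    private static final Cleaner CLEANER = Cleaner.create();

    // The cleaning action must not reference the NativeBuffer itself,
    // otherwise the object could never become phantom reachable.
    private static final class State implements Runnable {
        private final long handle;          // hypothetical native handle
        State(long handle) { this.handle = handle; }
        @Override public void run() {
            System.out.println("released handle " + handle);  // stand-in for a native release
        }
    }

    private final Cleaner.Cleanable cleanable;

    public NativeBuffer(long handle) {
        this.cleanable = CLEANER.register(this, new State(handle));
    }

    @Override public void close() {
        // Runs the action at most once; the Cleaner invokes it itself
        // if close() is never called and the object becomes unreachable.
        cleanable.clean();
    }
}
```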
diff --git a/src/java.base/share/classes/java/io/RandomAccessFile.java b/src/java.base/share/classes/java/io/RandomAccessFile.java
index 5c946d1d715..4b6e66ff140 100644
--- a/src/java.base/share/classes/java/io/RandomAccessFile.java
+++ b/src/java.base/share/classes/java/io/RandomAccessFile.java
@@ -257,6 +257,7 @@ public class RandomAccessFile implements DataOutput, DataInput, Closeable {
fd.attach(this);
path = name;
open(name, imode);
+ fd.registerCleanup(); // open sets the fd, register the cleanup
}
/**
diff --git a/src/java.base/share/classes/java/io/Reader.java b/src/java.base/share/classes/java/io/Reader.java
index 13e90d76ea6..a1fada4952e 100644
--- a/src/java.base/share/classes/java/io/Reader.java
+++ b/src/java.base/share/classes/java/io/Reader.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1996, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,8 @@
package java.io;
+import java.util.Objects;
+
/**
* Abstract class for reading character streams. The only methods that a
* subclass must implement are read(char[], int, int) and close(). Most
@@ -50,6 +52,8 @@ package java.io;
public abstract class Reader implements Readable, Closeable {
+ private static final int TRANSFER_BUFFER_SIZE = 8192;
+
/**
* The object used to synchronize operations on this stream. For
* efficiency, a character-stream object may use an object other than
@@ -262,4 +266,41 @@ public abstract class Reader implements Readable, Closeable {
*/
public abstract void close() throws IOException;
+ /**
+ * Reads all characters from this reader and writes the characters to the
+ * given writer in the order that they are read. On return, this reader
+ * will be at end of the stream. This method does not close either reader
+ * or writer.
+ *
+ * This method may block indefinitely reading from the reader, or
+ * writing to the writer. The behavior for the case where the reader
+ * and/or writer is asynchronously closed, or the thread
+ * interrupted during the transfer, is highly reader and writer
+ * specific, and therefore not specified.
+ *
+ * If an I/O error occurs reading from the reader or writing to the
+ * writer, then it may do so after some characters have been read or
+ * written. Consequently the reader may not be at end of the stream and
+ * one, or both, streams may be in an inconsistent state. It is strongly
+ * recommended that both streams be promptly closed if an I/O error occurs.
+ *
+ * @param out the writer, non-null
+ * @return the number of characters transferred
+ * @throws IOException if an I/O error occurs when reading or writing
+ * @throws NullPointerException if {@code out} is {@code null}
+ *
+ * @since 10
+ */
+ public long transferTo(Writer out) throws IOException {
+ Objects.requireNonNull(out, "out");
+ long transferred = 0;
+ char[] buffer = new char[TRANSFER_BUFFER_SIZE];
+ int nRead;
+ while ((nRead = read(buffer, 0, TRANSFER_BUFFER_SIZE)) >= 0) {
+ out.write(buffer, 0, nRead);
+ transferred += nRead;
+ }
+ return transferred;
+ }
+
}
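(Aside, not part of the changeset: a short standalone usage sketch of the new Reader.transferTo(Writer) added above:)

```java
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.io.StringWriter;
import java.io.Writer;

public class TransferToDemo {
    public static void main(String[] args) throws IOException {
        try (Reader in = new StringReader("hello, transferTo");
             Writer out = new StringWriter()) {
            long n = in.transferTo(out);   // copies all remaining characters
            System.out.println(n + " chars copied: " + out);
        }
    }
}
```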
diff --git a/src/java.base/share/classes/java/lang/ClassLoader.java b/src/java.base/share/classes/java/lang/ClassLoader.java
index 74c46417631..c18a9ad9774 100644
--- a/src/java.base/share/classes/java/lang/ClassLoader.java
+++ b/src/java.base/share/classes/java/lang/ClassLoader.java
@@ -30,6 +30,7 @@ import java.io.IOException;
import java.io.UncheckedIOException;
import java.io.File;
import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
import java.net.URL;
import java.security.AccessController;
import java.security.AccessControlContext;
@@ -37,6 +38,7 @@ import java.security.CodeSource;
import java.security.PrivilegedAction;
import java.security.ProtectionDomain;
import java.security.cert.Certificate;
+import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
@@ -44,7 +46,6 @@ import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
-import java.util.LinkedList;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Objects;
@@ -1867,7 +1868,7 @@ public abstract class ClassLoader {
* to be the system class loader. During construction, the class loader
* should take great care to avoid calling {@code getSystemClassLoader()}.
* If circular initialization of the system class loader is detected then
- * an unspecified error or exception is thrown.
+ * an {@code IllegalStateException} is thrown.
*
* @implNote The system property to override the system class loader is not
* examined until the VM is almost fully initialized. Code that executes
@@ -1918,8 +1919,8 @@ public abstract class ClassLoader {
// the system class loader is the built-in app class loader during startup
return getBuiltinAppClassLoader();
case 3:
- String msg = "getSystemClassLoader should only be called after VM booted";
- throw new InternalError(msg);
+ String msg = "getSystemClassLoader cannot be called during the system class loader instantiation";
+ throw new IllegalStateException(msg);
case 4:
// system fully initialized
assert VM.isBooted() && scl != null;
@@ -1969,7 +1970,17 @@ public abstract class ClassLoader {
.getDeclaredConstructor(ClassLoader.class);
scl = (ClassLoader) ctor.newInstance(builtinLoader);
} catch (Exception e) {
- throw new Error(e);
+ Throwable cause = e;
+ if (e instanceof InvocationTargetException) {
+ cause = e.getCause();
+ if (cause instanceof Error) {
+ throw (Error) cause;
+ }
+ }
+ if (cause instanceof RuntimeException) {
+ throw (RuntimeException) cause;
+ }
+ throw new Error(cause.getMessage(), cause);
}
} else {
scl = builtinLoader;
@@ -2485,7 +2496,7 @@ public abstract class ClassLoader {
}
// native libraries being loaded
- static Deque<NativeLibrary> nativeLibraryContext = new LinkedList<>();
+ static Deque<NativeLibrary> nativeLibraryContext = new ArrayDeque<>(8);
/*
* The run() method will be invoked when this class loader becomes
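(Aside, not part of the changeset: the reworded javadoc and the new IllegalStateException affect custom system class loaders installed via -Djava.system.class.loader. A hedged sketch of such a loader; the class name is illustrative:)

```java
// Installed with: java -Djava.system.class.loader=MySystemClassLoader ...
public class MySystemClassLoader extends ClassLoader {
    // The VM looks for a public constructor taking the default system
    // class loader as the parent.
    public MySystemClassLoader(ClassLoader parent) {
        super(parent);
        // Do NOT call ClassLoader.getSystemClassLoader() here: circular
        // initialization of the system class loader now fails with an
        // IllegalStateException instead of an unspecified error.
    }
}
```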
diff --git a/src/java.base/share/classes/java/lang/ModuleLayer.java b/src/java.base/share/classes/java/lang/ModuleLayer.java
index d9b1b3a3714..3dc809632c7 100644
--- a/src/java.base/share/classes/java/lang/ModuleLayer.java
+++ b/src/java.base/share/classes/java/lang/ModuleLayer.java
@@ -322,8 +322,8 @@ public final class ModuleLayer {
* @return The newly created layer
*
* @throws IllegalArgumentException
- * If the parent of the given configuration is not the configuration
- * for this layer
+ * If the given configuration has more than one parent or the parent
+ * of the configuration is not the configuration for this layer
* @throws LayerInstantiationException
* If the layer cannot be created for any of the reasons specified
* by the static {@code defineModulesWithOneLoader} method
@@ -364,8 +364,8 @@ public final class ModuleLayer {
* @return The newly created layer
*
* @throws IllegalArgumentException
- * If the parent of the given configuration is not the configuration
- * for this layer
+ * If the given configuration has more than one parent or the parent
+ * of the configuration is not the configuration for this layer
* @throws LayerInstantiationException
* If the layer cannot be created for any of the reasons specified
* by the static {@code defineModulesWithManyLoaders} method
@@ -403,8 +403,8 @@ public final class ModuleLayer {
* @return The newly created layer
*
* @throws IllegalArgumentException
- * If the parent of the given configuration is not the configuration
- * for this layer
+ * If the given configuration has more than one parent or the parent
+ * of the configuration is not the configuration for this layer
* @throws LayerInstantiationException
* If the layer cannot be created for any of the reasons specified
* by the static {@code defineModules} method
@@ -473,8 +473,8 @@ public final class ModuleLayer {
* @return A controller that controls the newly created layer
*
* @throws IllegalArgumentException
- * If the parent configurations do not match the configuration of
- * the parent layers, including order
+ * If the parent(s) of the given configuration do not match the
+ * configuration of the parent layers, including order
* @throws LayerInstantiationException
* If all modules cannot be defined to the same class loader for any
* of the reasons listed above
@@ -546,8 +546,8 @@ public final class ModuleLayer {
* @return A controller that controls the newly created layer
*
* @throws IllegalArgumentException
- * If the parent configurations do not match the configuration of
- * the parent layers, including order
+ * If the parent(s) of the given configuration do not match the
+ * configuration of the parent layers, including order
* @throws LayerInstantiationException
* If the layer cannot be created because the configuration contains
* a module named "{@code java.base}" or a module contains a package
@@ -637,8 +637,8 @@ public final class ModuleLayer {
* @return A controller that controls the newly created layer
*
* @throws IllegalArgumentException
- * If the parent configurations do not match the configuration of
- * the parent layers, including order
+ * If the parent(s) of the given configuration do not match the
+ * configuration of the parent layers, including order
* @throws LayerInstantiationException
* If creating the layer fails for any of the reasons listed above
* @throws SecurityException
@@ -845,9 +845,8 @@ public final class ModuleLayer {
return layers()
.skip(1) // skip this layer
- .map(l -> l.nameToModule)
- .filter(map -> map.containsKey(name))
- .map(map -> map.get(name))
+ .map(l -> l.nameToModule.get(name))
+ .filter(Objects::nonNull)
.findAny();
}
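(Aside, not part of the changeset: the findModule cleanup above replaces a containsKey-then-get pair with a single get followed by a null filter. The same idiom in isolation, as a sketch rather than the JDK code:)

```java
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;

public class LookupIdiom {
    // One map lookup per element instead of containsKey(name) + get(name).
    static Optional<String> find(List<Map<String, String>> maps, String name) {
        return maps.stream()
                   .map(m -> m.get(name))
                   .filter(Objects::nonNull)
                   .findFirst();
    }

    public static void main(String[] args) {
        List<Map<String, String>> maps = List.of(Map.of("a", "1"), Map.of("b", "2"));
        System.out.println(find(maps, "b"));   // Optional[2]
    }
}
```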
diff --git a/src/java.base/share/classes/java/lang/Runtime.java b/src/java.base/share/classes/java/lang/Runtime.java
index dc36b769a07..b3f5c08b659 100644
--- a/src/java.base/share/classes/java/lang/Runtime.java
+++ b/src/java.base/share/classes/java/lang/Runtime.java
@@ -876,62 +876,6 @@ public class Runtime {
ClassLoader.loadLibrary(fromClass, libname, false);
}
- /**
- * Creates a localized version of an input stream. This method takes
- * an {@code InputStream} and returns an {@code InputStream}
- * equivalent to the argument in all respects except that it is
- * localized: as characters in the local character set are read from
- * the stream, they are automatically converted from the local
- * character set to Unicode.
- *
- * If the argument is already a localized stream, it may be returned
- * as the result.
- *
- * @param in InputStream to localize
- * @return a localized input stream
- * @see java.io.InputStream
- * @see java.io.BufferedReader#BufferedReader(java.io.Reader)
- * @see java.io.InputStreamReader#InputStreamReader(java.io.InputStream)
- * @deprecated As of JDK 1.1, the preferred way to translate a byte
- * stream in the local encoding into a character stream in Unicode is via
- * the {@code InputStreamReader} and {@code BufferedReader}
- * classes.
- * This method is subject to removal in a future version of Java SE.
- */
- @Deprecated(since="1.1", forRemoval=true)
- public InputStream getLocalizedInputStream(InputStream in) {
- return in;
- }
-
- /**
- * Creates a localized version of an output stream. This method
- * takes an {@code OutputStream} and returns an
- * {@code OutputStream} equivalent to the argument in all respects
- * except that it is localized: as Unicode characters are written to
- * the stream, they are automatically converted to the local
- * character set.
- *
- * If the argument is already a localized stream, it may be returned
- * as the result.
- *
- * @deprecated As of JDK 1.1, the preferred way to translate a
- * Unicode character stream into a byte stream in the local encoding is via
- * the {@code OutputStreamWriter}, {@code BufferedWriter}, and
- * {@code PrintWriter} classes.
- * This method is subject to removal in a future version of Java SE.
- *
- * @param out OutputStream to localize
- * @return a localized output stream
- * @see java.io.OutputStream
- * @see java.io.BufferedWriter#BufferedWriter(java.io.Writer)
- * @see java.io.OutputStreamWriter#OutputStreamWriter(java.io.OutputStream)
- * @see java.io.PrintWriter#PrintWriter(java.io.OutputStream)
- */
- @Deprecated(since="1.1", forRemoval=true)
- public OutputStream getLocalizedOutputStream(OutputStream out) {
- return out;
- }
-
/**
* Returns the version of the Java Runtime Environment as a {@link Version}.
*
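(Aside, not part of the changeset: the removed getLocalizedInputStream/getLocalizedOutputStream methods had pointed callers at InputStreamReader and OutputStreamWriter since JDK 1.1. A minimal sketch of that replacement, with the platform default charset standing in for the "local encoding":)

```java
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;

public class LocalizedStreamReplacement {
    // Decode bytes in the platform's default charset into characters.
    static BufferedReader readerFor(InputStream in) {
        return new BufferedReader(new InputStreamReader(in));
    }

    // Encode characters into bytes in the platform's default charset.
    static PrintWriter writerFor(OutputStream out) {
        return new PrintWriter(new OutputStreamWriter(out));
    }
}
```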
diff --git a/src/java.base/share/classes/java/lang/String.java b/src/java.base/share/classes/java/lang/String.java
index db55267ee83..e3da498617c 100644
--- a/src/java.base/share/classes/java/lang/String.java
+++ b/src/java.base/share/classes/java/lang/String.java
@@ -645,19 +645,6 @@ public final class String
this(builder, null);
}
- /*
- * Package private constructor which shares value array for speed.
- * this constructor is always expected to be called with share==true.
- * a separate constructor is needed because we already have a public
- * String(char[]) constructor that makes a copy of the given char[].
- */
- // TBD: this is kept for package internal use (Thread/System),
- // should be removed if they all have a byte[] version
- String(char[] val, boolean share) {
- // assert share : "unshared not supported";
- this(val, 0, val.length, null);
- }
-
/**
* Returns the length of this string.
* The length is equal to the number of Unicode
diff --git a/src/java.base/share/classes/java/lang/System.java b/src/java.base/share/classes/java/lang/System.java
index 0232642b42c..6271f87a6f0 100644
--- a/src/java.base/share/classes/java/lang/System.java
+++ b/src/java.base/share/classes/java/lang/System.java
@@ -2109,9 +2109,6 @@ public final class System {
public void registerShutdownHook(int slot, boolean registerShutdownInProgress, Runnable hook) {
Shutdown.add(slot, registerShutdownInProgress, hook);
}
- public String newStringUnsafe(char[] chars) {
- return new String(chars, true);
- }
public Thread newThreadWithAcc(Runnable target, AccessControlContext acc) {
return new Thread(target, acc);
}
diff --git a/src/java.base/share/classes/java/lang/module/Configuration.java b/src/java.base/share/classes/java/lang/module/Configuration.java
index 042a1362dd9..988d5c7a40a 100644
--- a/src/java.base/share/classes/java/lang/module/Configuration.java
+++ b/src/java.base/share/classes/java/lang/module/Configuration.java
@@ -543,9 +543,8 @@ public final class Configuration {
if (!parents.isEmpty()) {
return configurations()
.skip(1) // skip this configuration
- .map(cf -> cf.nameToModule)
- .filter(map -> map.containsKey(name))
- .map(map -> map.get(name))
+ .map(cf -> cf.nameToModule.get(name))
+ .filter(Objects::nonNull)
.findFirst();
}
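The new pipeline performs a single map lookup per parent configuration and discards the misses, which is equivalent to the old containsKey/get pair as long as the map never stores null values. A standalone sketch with plain maps standing in for the per-configuration nameToModule maps (data is hypothetical):

    import java.util.*;

    public class FirstMatchLookup {
        public static void main(String[] args) {
            List<Map<String, String>> parents = List.of(
                    Map.of("java.base", "from-parent-1"),
                    Map.of("java.xml",  "from-parent-2"));
            String name = "java.xml";

            // Old shape: keep maps containing the key, then look the value up again.
            Optional<String> twoLookups = parents.stream()
                    .filter(m -> m.containsKey(name))
                    .map(m -> m.get(name))
                    .findFirst();

            // New shape: look the value up once and drop the nulls.
            Optional<String> oneLookup = parents.stream()
                    .map(m -> m.get(name))
                    .filter(Objects::nonNull)
                    .findFirst();

            System.out.println(twoLookups.equals(oneLookup));   // true
        }
    }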
diff --git a/src/java.base/share/classes/java/lang/module/package-info.java b/src/java.base/share/classes/java/lang/module/package-info.java
index 74021cd0bef..41278ffda0f 100644
--- a/src/java.base/share/classes/java/lang/module/package-info.java
+++ b/src/java.base/share/classes/java/lang/module/package-info.java
@@ -34,7 +34,7 @@
* will cause a {@code NullPointerException}, unless otherwise specified.
*
*
- * Resolution
+ * {@index "Module Resolution"}
*
* Resolution is the process of computing how modules depend on each other.
* The process occurs at compile time and run time.
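The {@index} inline tag (available in javadoc since JDK 9) renders its phrase as plain text while also registering it with the generated documentation's search index, which is presumably the point of replacing the bare heading word. A minimal, hypothetical usage sketch:

    /**
     * Resolves module dependences.
     *
     * <p>{@index "Module Resolution"} is the process of computing how modules
     * depend on each other.
     */
    public class ResolutionDemo { }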
diff --git a/src/java.base/share/classes/java/net/SocketInputStream.java b/src/java.base/share/classes/java/net/SocketInputStream.java
index 04ee3378eaa..bb3481b7164 100644
--- a/src/java.base/share/classes/java/net/SocketInputStream.java
+++ b/src/java.base/share/classes/java/net/SocketInputStream.java
@@ -283,7 +283,7 @@ class SocketInputStream extends FileInputStream
/**
* Overrides finalize, the fd is closed by the Socket.
*/
- @SuppressWarnings("deprecation")
+ @SuppressWarnings({"deprecation", "removal"})
protected void finalize() {}
/**
diff --git a/src/java.base/share/classes/java/net/SocketOutputStream.java b/src/java.base/share/classes/java/net/SocketOutputStream.java
index 0ba877bf56e..ddf3dcf9bb5 100644
--- a/src/java.base/share/classes/java/net/SocketOutputStream.java
+++ b/src/java.base/share/classes/java/net/SocketOutputStream.java
@@ -175,7 +175,7 @@ class SocketOutputStream extends FileOutputStream
/**
* Overrides finalize, the fd is closed by the Socket.
*/
- @SuppressWarnings("deprecation")
+ @SuppressWarnings({"deprecation", "removal"})
protected void finalize() {}
/**
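The added "removal" key suppresses the warning javac emits for uses of elements deprecated with forRemoval=true, a category separate from plain "deprecation" (both exist since JDK 9). A small self-contained illustration with a hypothetical legacy API, not taken from the patch:

    class LegacyApi {
        /** A stand-in for an API that is deprecated for eventual removal. */
        @Deprecated(since = "1.1", forRemoval = true)
        static void legacy() { }
    }

    public class RemovalWarningDemo {
        // Uses of forRemoval elements are reported under the "removal" key;
        // ordinarily deprecated elements are reported under "deprecation".
        @SuppressWarnings({"deprecation", "removal"})
        public static void main(String[] args) {
            LegacyApi.legacy();
        }
    }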
diff --git a/src/java.base/share/classes/java/security/IdentityScope.java b/src/java.base/share/classes/java/security/IdentityScope.java
index 49c7c180322..2bd517cd422 100644
--- a/src/java.base/share/classes/java/security/IdentityScope.java
+++ b/src/java.base/share/classes/java/security/IdentityScope.java
@@ -90,8 +90,8 @@ class IdentityScope extends Identity {
try {
Class.forName(classname);
} catch (ClassNotFoundException e) {
- //Security.error("unable to establish a system scope from " +
- // classname);
+ System.err.println("unable to establish a system scope from " +
+ classname);
e.printStackTrace();
}
}
diff --git a/src/java.base/share/classes/java/security/ProtectionDomain.java b/src/java.base/share/classes/java/security/ProtectionDomain.java
index 365096f619a..7830ee4d8f9 100644
--- a/src/java.base/share/classes/java/security/ProtectionDomain.java
+++ b/src/java.base/share/classes/java/security/ProtectionDomain.java
@@ -25,15 +25,13 @@
package java.security;
-import java.lang.ref.Reference;
-import java.lang.ref.ReferenceQueue;
-import java.lang.ref.SoftReference;
-import java.lang.ref.WeakReference;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
+import java.util.Map;
import java.util.Objects;
-import java.util.concurrent.ConcurrentHashMap;
+import java.util.WeakHashMap;
import jdk.internal.misc.JavaSecurityAccess;
import jdk.internal.misc.JavaSecurityProtectionDomainAccess;
import static jdk.internal.misc.JavaSecurityProtectionDomainAccess.ProtectionDomainCache;
@@ -115,23 +113,10 @@ public class ProtectionDomain {
}
static {
- // setup SharedSecrets to allow access to doIntersectionPrivilege
- // methods and ProtectionDomain cache
+ // Set up JavaSecurityAccess in SharedSecrets
SharedSecrets.setJavaSecurityAccess(new JavaSecurityAccessImpl());
- SharedSecrets.setJavaSecurityProtectionDomainAccess(
- new JavaSecurityProtectionDomainAccess() {
- @Override
- public ProtectionDomainCache getProtectionDomainCache() {
- return new PDCache();
- }
- });
}
- /**
- * Used for storing ProtectionDomains as keys in a Map.
- */
- static final class Key {}
-
/* CodeSource */
private CodeSource codesource ;
@@ -571,117 +556,27 @@ public class ProtectionDomain {
}
/**
- * A cache of ProtectionDomains and their Permissions.
- *
- * This class stores ProtectionDomains as weak keys in a ConcurrentHashMap
- * with additional support for checking and removing weak keys that are no
- * longer in use. There can be cases where the permission collection may
- * have a chain of strong references back to the ProtectionDomain, which
- * ordinarily would prevent the entry from being removed from the map. To
- * address that, we wrap the permission collection in a SoftReference so
- * that it can be reclaimed by the garbage collector due to memory demand.
+ * Used for storing ProtectionDomains as keys in a Map.
*/
- private static class PDCache implements ProtectionDomainCache {
- private final ConcurrentHashMap<WeakProtectionDomainKey, SoftReference<PermissionCollection>>
- pdMap = new ConcurrentHashMap<>();
- private final ReferenceQueue<Key> queue = new ReferenceQueue<>();
+ final class Key {}
- @Override
- public void put(ProtectionDomain pd, PermissionCollection pc) {
- processQueue(queue, pdMap);
- WeakProtectionDomainKey weakPd =
- new WeakProtectionDomainKey(pd, queue);
- pdMap.put(weakPd, new SoftReference<>(pc));
- }
-
- @Override
- public PermissionCollection get(ProtectionDomain pd) {
- processQueue(queue, pdMap);
- WeakProtectionDomainKey weakPd = new WeakProtectionDomainKey(pd);
- SoftReference<PermissionCollection> sr = pdMap.get(weakPd);
- return (sr == null) ? null : sr.get();
- }
-
- /**
- * Removes weak keys from the map that have been enqueued
- * on the reference queue and are no longer in use.
- */
- private static void processQueue(ReferenceQueue<Key> queue,
- ConcurrentHashMap<? extends
- WeakReference<Key>, ?> pdMap) {
- Reference<? extends Key> ref;
- while ((ref = queue.poll()) != null) {
- pdMap.remove(ref);
- }
- }
- }
-
- /**
- * A weak key for a ProtectionDomain.
- */
- private static class WeakProtectionDomainKey extends WeakReference<Key> {
- /**
- * Saved value of the referent's identity hash code, to maintain
- * a consistent hash code after the referent has been cleared
- */
- private final int hash;
-
- /**
- * A key representing a null ProtectionDomain.
- */
- private static final Key NULL_KEY = new Key();
-
- /**
- * Create a new WeakProtectionDomain with the specified domain and
- * registered with a queue.
- */
- WeakProtectionDomainKey(ProtectionDomain pd, ReferenceQueue<Key> rq) {
- this((pd == null ? NULL_KEY : pd.key), rq);
- }
-
- WeakProtectionDomainKey(ProtectionDomain pd) {
- this(pd == null ? NULL_KEY : pd.key);
- }
-
- private WeakProtectionDomainKey(Key key, ReferenceQueue<Key> rq) {
- super(key, rq);
- hash = key.hashCode();
- }
-
- private WeakProtectionDomainKey(Key key) {
- super(key);
- hash = key.hashCode();
- }
-
- /**
- * Returns the identity hash code of the original referent.
- */
- @Override
- public int hashCode() {
- return hash;
- }
-
- /**
- * Returns true if the given object is an identical
- * WeakProtectionDomainKey instance, or, if this object's referent
- * has not been cleared and the given object is another
- * WeakProtectionDomainKey instance with an identical non-null
- * referent as this one.
- */
- @Override
- public boolean equals(Object obj) {
- if (obj == this) {
- return true;
- }
-
- if (obj instanceof WeakProtectionDomainKey) {
- Object referent = get();
- return (referent != null) &&
- (referent == ((WeakProtectionDomainKey)obj).get());
- } else {
- return false;
- }
- }
+ static {
+ SharedSecrets.setJavaSecurityProtectionDomainAccess(
+ new JavaSecurityProtectionDomainAccess() {
+ public ProtectionDomainCache getProtectionDomainCache() {
+ return new ProtectionDomainCache() {
+ private final Map<Key, PermissionCollection> map =
+ Collections.synchronizedMap
+ (new WeakHashMap<Key, PermissionCollection>());
+ public void put(ProtectionDomain pd,
+ PermissionCollection pc) {
+ map.put((pd == null ? null : pd.key), pc);
+ }
+ public PermissionCollection get(ProtectionDomain pd) {
+ return pd == null ? map.get(null) : map.get(pd.key);
+ }
+ };
+ }
+ });
}
}
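The restored cache keys each entry on the domain's internal Key object inside a synchronized WeakHashMap, so an entry can be dropped once its ProtectionDomain becomes unreachable. A standalone sketch of that idiom with illustrative names (GC is best-effort, so the final size is only typically zero):

    import java.util.Collections;
    import java.util.Map;
    import java.util.WeakHashMap;

    public class WeakKeyCacheDemo {
        // Per-owner identity key, playing the role of ProtectionDomain.Key.
        static final class Key { }

        static final Map<Key, String> CACHE =
                Collections.synchronizedMap(new WeakHashMap<>());

        public static void main(String[] args) throws InterruptedException {
            Key key = new Key();
            CACHE.put(key, "cached permissions");
            System.out.println(CACHE.size());   // 1 while the key is strongly reachable

            key = null;                         // drop the only strong reference
            System.gc();                        // request collection (not guaranteed)
            Thread.sleep(100);
            System.out.println(CACHE.size());   // typically 0 after the key is collected
        }
    }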
diff --git a/src/java.base/share/classes/java/security/Signature.java b/src/java.base/share/classes/java/security/Signature.java
index 3f0a9d96771..83a154ed216 100644
--- a/src/java.base/share/classes/java/security/Signature.java
+++ b/src/java.base/share/classes/java/security/Signature.java
@@ -1074,7 +1074,7 @@ public abstract class Signature extends SignatureSpi {
debug.println("Further warnings of this type will "
+ "be suppressed");
}
- new Exception("Call trace").printStackTrace();
+ new Exception("Debug call trace").printStackTrace();
}
}
Exception lastException = null;
diff --git a/src/java.base/share/classes/java/text/MessageFormat.java b/src/java.base/share/classes/java/text/MessageFormat.java
index 5dd78c87432..4560abd822b 100644
--- a/src/java.base/share/classes/java/text/MessageFormat.java
+++ b/src/java.base/share/classes/java/text/MessageFormat.java
@@ -701,6 +701,10 @@ public class MessageFormat extends Format {
* larger than the number of format elements in the pattern string
*/
public void setFormat(int formatElementIndex, Format newFormat) {
+
+ if (formatElementIndex > maxOffset) {
+ throw new ArrayIndexOutOfBoundsException(formatElementIndex);
+ }
formats[formatElementIndex] = newFormat;
}
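With the added guard, an index past the last format element now fails fast with the documented ArrayIndexOutOfBoundsException instead of possibly writing into spare capacity of the internal array. A small illustration of the behaviour with this fix applied (not part of the patch):

    import java.text.MessageFormat;
    import java.text.NumberFormat;

    public class SetFormatBoundsDemo {
        public static void main(String[] args) {
            MessageFormat mf = new MessageFormat("{0} file(s)");     // one format element

            mf.setFormat(0, NumberFormat.getIntegerInstance());      // index 0 is valid

            try {
                mf.setFormat(1, NumberFormat.getIntegerInstance());  // past the last element
            } catch (ArrayIndexOutOfBoundsException expected) {
                System.out.println("rejected index 1");
            }
        }
    }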
diff --git a/src/java.base/share/classes/java/util/Collection.java b/src/java.base/share/classes/java/util/Collection.java
index e66bd49aa78..d016f8c374b 100644
--- a/src/java.base/share/classes/java/util/Collection.java
+++ b/src/java.base/share/classes/java/util/Collection.java
@@ -54,19 +54,15 @@ import java.util.stream.StreamSupport;
* constructors) but all of the general-purpose {@code Collection}
* implementations in the Java platform libraries comply.
*
- * The "destructive" methods contained in this interface, that is, the
- * methods that modify the collection on which they operate, are specified to
- * throw {@code UnsupportedOperationException} if this collection does not
- * support the operation. If this is the case, these methods may, but are not
- * required to, throw an {@code UnsupportedOperationException} if the
- * invocation would have no effect on the collection. For example, invoking
- * the {@link #addAll(Collection)} method on an unmodifiable collection may,
- * but is not required to, throw the exception if the collection to be added
- * is empty.
+ * <p>Certain methods are specified to be
+ * optional. If a collection implementation doesn't implement a
+ * particular operation, it should define the corresponding method to throw
+ * {@code UnsupportedOperationException}. Such methods are marked "optional
+ * operation" in method specifications of the collections interfaces.
*
- *
- * Some collection implementations have restrictions on the elements that
- * they may contain. For example, some implementations prohibit null elements,
+ * <p>Some collection implementations
+ * have restrictions on the elements that they may contain.
+ * For example, some implementations prohibit null elements,
* and some have restrictions on the types of their elements. Attempting to
* add an ineligible element throws an unchecked exception, typically
* {@code NullPointerException} or {@code ClassCastException}. Attempting
@@ -111,6 +107,86 @@ import java.util.stream.StreamSupport;
* methods. Implementations may optionally handle the self-referential scenario,
* however most current implementations do not do so.
*
+ * <h2>View Collections</h2>
+ *
+ * Most collections manage storage for elements they contain. By contrast, view
+ * collections themselves do not store elements, but instead they rely on a
+ * backing collection to store the actual elements. Operations that are not handled
+ * by the view collection itself are delegated to the backing collection. Examples of
+ * view collections include the wrapper collections returned by methods such as
+ * {@link Collections#checkedCollection Collections.checkedCollection},
+ * {@link Collections#synchronizedCollection Collections.synchronizedCollection}, and
+ * {@link Collections#unmodifiableCollection Collections.unmodifiableCollection}.
+ * Other examples of view collections include collections that provide a
+ * different representation of the same elements, for example, as
+ * provided by {@link List#subList List.subList},
+ * {@link NavigableSet#subSet NavigableSet.subSet}, or
+ * {@link Map#entrySet Map.entrySet}.
+ * Any changes made to the backing collection are visible in the view collection.
+ * Correspondingly, any changes made to the view collection &mdash; if changes
+ * are permitted &mdash; are written through to the backing collection.
+ * Although they technically aren't collections, instances of
+ * {@link Iterator} and {@link ListIterator} can also allow modifications
+ * to be written through to the backing collection, and in some cases,
+ * modifications to the backing collection will be visible to the Iterator
+ * during iteration.
+ *
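As a quick illustration of the write-through behaviour described above, a sketch using List.subList (not part of the patch):

    import java.util.ArrayList;
    import java.util.List;

    public class SubListViewDemo {
        public static void main(String[] args) {
            List<String> backing = new ArrayList<>(List.of("a", "b", "c", "d"));
            List<String> view = backing.subList(1, 3);   // view of ["b", "c"]

            view.set(0, "B");                            // written through to the backing list
            System.out.println(backing);                 // [a, B, c, d]

            backing.set(2, "C");                         // non-structural change to the backing list...
            System.out.println(view);                    // [B, C] ...is visible in the view
        }
    }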
+ * <h2>Unmodifiable Collections</h2>
+ *
+ * Certain methods of this interface are considered "destructive" and are called
+ * "mutator" methods in that they modify the group of objects contained within
+ * the collection on which they operate. They can be specified to throw
+ * {@code UnsupportedOperationException} if this collection implementation
+ * does not support the operation. Such methods should (but are not required
+ * to) throw an {@code UnsupportedOperationException} if the invocation would
+ * have no effect on the collection. For example, consider a collection that
+ * does not support the {@link #add add} operation. What will happen if the
+ * {@link #addAll addAll} method is invoked on this collection, with an empty
+ * collection as the argument? The addition of zero elements has no effect,
+ * so it is permissible for this collection simply to do nothing and not to throw
+ * an exception. However, it is recommended that such cases throw an exception
+ * unconditionally, as throwing only in certain cases can lead to
+ * programming errors.
+ *
+ * <p>An unmodifiable collection is a collection, all of whose
+ * mutator methods (as defined above) are specified to throw
+ * {@code UnsupportedOperationException}. Such a collection thus cannot be
+ * modified by calling any methods on it. For a collection to be properly
+ * unmodifiable, any view collections derived from it must also be unmodifiable.
+ * For example, if a List is unmodifiable, the List returned by
+ * {@link List#subList List.subList} is also unmodifiable.
+ *
+ *
+ * <p>An unmodifiable collection is not necessarily immutable. If the
+ * contained elements are mutable, the entire collection is clearly
+ * mutable, even though it might be unmodifiable. For example, consider
+ * two unmodifiable lists containing mutable elements. The result of calling
+ * {@code list1.equals(list2)} might differ from one call to the next if
+ * the elements had been mutated, even though both lists are unmodifiable.
+ * However, if an unmodifiable collection contains all immutable elements,
+ * it can be considered effectively immutable.
+ *
+ *
+ * <h2>Unmodifiable View Collections</h2>
+ *
+ * An unmodifiable view collection is a collection that is unmodifiable
+ * and that is also a view onto a backing collection. Its mutator methods throw
+ * {@code UnsupportedOperationException}, as described above, while
+ * reading and querying methods are delegated to the backing collection.
+ * The effect is to provide read-only access to the backing collection.
+ * This is useful for a component to provide users with read access to
+ * an internal collection, while preventing them from modifying such
+ * collections unexpectedly. Examples of unmodifiable view collections
+ * are those returned by the
+ * {@link Collections#unmodifiableCollection Collections.unmodifiableCollection},
+ * {@link Collections#unmodifiableList Collections.unmodifiableList}, and
+ * related methods.
+ *
+ *
+ * <p>Note that changes to the backing collection might still be possible,
+ * and if they occur, they are visible through the unmodifiable view. Thus,
+ * an unmodifiable view collection is not necessarily immutable. However,
+ * if the backing collection of an unmodifiable view is effectively immutable,
+ * or if the only reference to the backing collection is through an
+ * unmodifiable view, the view can be considered effectively immutable.
+ *
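A brief sketch of an unmodifiable view in action, using Collections.unmodifiableList (illustrative only):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class UnmodifiableViewDemo {
        public static void main(String[] args) {
            List<String> backing = new ArrayList<>(List.of("a", "b"));
            List<String> view = Collections.unmodifiableList(backing);

            // view.add("c");              // would throw UnsupportedOperationException

            backing.add("c");              // the backing list itself is still modifiable...
            System.out.println(view);      // [a, b, c] ...and the change shows through the view
        }
    }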
*
* <p>This interface is a member of the
*
* Java Collections Framework.
diff --git a/src/java.base/share/classes/java/util/Collections.java b/src/java.base/share/classes/java/util/Collections.java
index 8aa9c53c455..4904ef56c18 100644
--- a/src/java.base/share/classes/java/util/Collections.java
+++ b/src/java.base/share/classes/java/util/Collections.java
@@ -989,9 +989,8 @@ public class Collections {
// Unmodifiable Wrappers
/**
- * Returns an unmodifiable view of the specified collection. This method
- * allows modules to provide users with "read-only" access to internal
- * collections. Query operations on the returned collection "read through"
+ * Returns an unmodifiable view of the
+ * specified collection. Query operations on the returned collection "read through"
* to the specified collection, and attempts to modify the returned
* collection, whether direct or via its iterator, result in an
* {@code UnsupportedOperationException}.
@@ -1102,9 +1101,8 @@ public class Collections {
}
/**
- * Returns an unmodifiable view of the specified set. This method allows
- * modules to provide users with "read-only" access to internal sets.
- * Query operations on the returned set "read through" to the specified
+ * Returns an unmodifiable view of the
+ * specified set. Query operations on the returned set "read through" to the specified
* set, and attempts to modify the returned set, whether direct or via its
* iterator, result in an {@code UnsupportedOperationException}.
*
@@ -1132,9 +1130,8 @@ public class Collections {
}
/**
- * Returns an unmodifiable view of the specified sorted set. This method
- * allows modules to provide users with "read-only" access to internal
- * sorted sets. Query operations on the returned sorted set "read
+ * Returns an unmodifiable view of the
+ * specified sorted set. Query operations on the returned sorted set "read
* through" to the specified sorted set. Attempts to modify the returned
* sorted set, whether direct, via its iterator, or via its
* {@code subSet}, {@code headSet}, or {@code tailSet} views, result in
@@ -1180,9 +1177,8 @@ public class Collections {
}
/**
- * Returns an unmodifiable view of the specified navigable set. This method
- * allows modules to provide users with "read-only" access to internal
- * navigable sets. Query operations on the returned navigable set "read
+ * Returns an unmodifiable view of the
+ * specified navigable set. Query operations on the returned navigable set "read
* through" to the specified navigable set. Attempts to modify the returned
* navigable set, whether direct, via its iterator, or via its
* {@code subSet}, {@code headSet}, or {@code tailSet} views, result in
@@ -1269,9 +1265,8 @@ public class Collections {
}
/**
- * Returns an unmodifiable view of the specified list. This method allows
- * modules to provide users with "read-only" access to internal
- * lists. Query operations on the returned list "read through" to the
+ * Returns an unmodifiable view of the
+ * specified list. Query operations on the returned list "read through" to the
* specified list, and attempts to modify the returned list, whether
* direct or via its iterator, result in an
* {@code UnsupportedOperationException}.
@@ -1415,9 +1410,8 @@ public class Collections {
}
/**
- * Returns an unmodifiable view of the specified map. This method
- * allows modules to provide users with "read-only" access to internal
- * maps. Query operations on the returned map "read through"
+ * Returns an unmodifiable view of the
+ * specified map. Query operations on the returned map "read through"
* to the specified map, and attempts to modify the returned
* map, whether direct or via its collection views, result in an
* {@code UnsupportedOperationException}.
@@ -1765,9 +1759,8 @@ public class Collections {
}
/**
- * Returns an unmodifiable view of the specified sorted map. This method
- * allows modules to provide users with "read-only" access to internal
- * sorted maps. Query operations on the returned sorted map "read through"
+ * Returns an unmodifiable view of the
+ * specified sorted map. Query operations on the returned sorted map "read through"
* to the specified sorted map. Attempts to modify the returned
* sorted map, whether direct, via its collection views, or via its
* {@code subMap}, {@code headMap}, or {@code tailMap} views, result in
@@ -1809,9 +1802,8 @@ public class Collections {
}
/**
- * Returns an unmodifiable view of the specified navigable map. This method
- * allows modules to provide users with "read-only" access to internal
- * navigable maps. Query operations on the returned navigable map "read
+ * Returns an unmodifiable view of the
+ * specified navigable map. Query operations on the returned navigable map "read
* through" to the specified navigable map. Attempts to modify the returned
* navigable map, whether direct, via its collection views, or via its
* {@code subMap}, {@code headMap}, or {@code tailMap} views, result in
diff --git a/src/java.base/share/classes/java/util/EnumSet.java b/src/java.base/share/classes/java/util/EnumSet.java
index 0adeab410b0..88c3cb9056a 100644
--- a/src/java.base/share/classes/java/util/EnumSet.java
+++ b/src/java.base/share/classes/java/util/EnumSet.java
@@ -75,7 +75,6 @@ import jdk.internal.misc.SharedSecrets;
* @author Josh Bloch
* @since 1.5
* @see EnumMap
- * @serial exclude
*/
@SuppressWarnings("serial") // No serialVersionUID due to usage of
// serial proxy pattern
@@ -85,12 +84,12 @@ public abstract class EnumSet<E extends Enum<E>> extends AbstractSet<E>
/**
* The class of all the elements of this set.
*/
- final Class<E> elementType;
+ final transient Class<E> elementType;
/**
- * All of the values comprising T. (Cached for performance.)
+ * All of the values comprising E. (Cached for performance.)
*/
- final Enum<?>[] universe;
+ final transient Enum<?>[] universe;
EnumSet(Class<E>elementType, Enum<?>[] universe) {
this.elementType = elementType;
@@ -416,7 +415,7 @@ public abstract class EnumSet<E extends Enum<E>> extends AbstractSet<E>