diff --git a/doc/testing.md b/doc/testing.md
index 61302e23b8f..6f0a23df4b5 100644
--- a/doc/testing.md
+++ b/doc/testing.md
@@ -373,6 +373,21 @@ Additional VM arguments to provide to forked off VMs. Same as `-jvmArgs <args>`
#### OPTIONS
Additional arguments to send to JMH.
+## Notes for Specific Tests
+
+### Docker Tests
+
+Docker tests with default parameters may fail on systems with glibc versions not
+compatible with the one used in the default docker image (e.g., Oracle Linux 7.6 for x86).
+For example, they pass on Ubuntu 16.04 but fail on Ubuntu 18.04 if run like this on x86:
+
+ $ make test TEST="jtreg:test/hotspot/jtreg/runtime/containers/docker"
+
+To run these tests correctly on Ubuntu 18.04, additional parameters selecting a
+compatible docker image are required, passed via `JAVA_OPTIONS`.
+
+ $ make test TEST="jtreg:test/hotspot/jtreg/runtime/containers/docker" JTREG="JAVA_OPTIONS=-Djdk.test.docker.image.name=ubuntu -Djdk.test.docker.image.version=latest"
+
---
# Override some definitions in the global css file that are not optimal for
# this document.
diff --git a/make/Init.gmk b/make/Init.gmk
index b8b78fbd5e0..d801499298b 100644
--- a/make/Init.gmk
+++ b/make/Init.gmk
@@ -238,11 +238,13 @@ else # HAS_SPEC=true
ifeq ($(LOG_NOFILE), true)
# Disable build log if LOG=[level,]nofile was given
override BUILD_LOG_PIPE :=
+ override BUILD_LOG_PIPE_SIMPLE :=
endif
ifeq ($(filter dist-clean, $(SEQUENTIAL_TARGETS)), dist-clean)
# We can't have a log file if we're about to remove it.
override BUILD_LOG_PIPE :=
+ override BUILD_LOG_PIPE_SIMPLE :=
endif
ifeq ($(OUTPUT_SYNC_SUPPORTED), true)
@@ -277,7 +279,7 @@ else # HAS_SPEC=true
else
$(ECHO) "Re-running configure using default settings"
endif
- ( cd $(OUTPUTDIR) && PATH="$(ORIGINAL_PATH)" AUTOCONF="$(AUTOCONF)" \
+ ( cd $(CONFIGURE_START_DIR) && PATH="$(ORIGINAL_PATH)" AUTOCONF="$(AUTOCONF)" \
CUSTOM_ROOT="$(CUSTOM_ROOT)" \
CUSTOM_CONFIG_DIR="$(CUSTOM_CONFIG_DIR)" \
$(BASH) $(TOPDIR)/configure $(CONFIGURE_COMMAND_LINE) )
@@ -303,7 +305,7 @@ else # HAS_SPEC=true
main: $(INIT_TARGETS)
ifneq ($(SEQUENTIAL_TARGETS)$(PARALLEL_TARGETS), )
$(call RotateLogFiles)
- $(PRINTF) "Building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE)
+ $(PRINTF) "Building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE_SIMPLE)
ifneq ($(SEQUENTIAL_TARGETS), )
# Don't touch build output dir since we might be cleaning. That
# means no log pipe.
@@ -325,7 +327,7 @@ else # HAS_SPEC=true
$(PARALLEL_TARGETS) $(COMPARE_BUILD_MAKE) $(BUILD_LOG_PIPE) || \
( exitcode=$$? && \
$(PRINTF) "\nERROR: Build failed for $(TARGET_DESCRIPTION) (exit code $$exitcode) \n" \
- $(BUILD_LOG_PIPE) && \
+ $(BUILD_LOG_PIPE_SIMPLE) && \
cd $(TOPDIR) && $(MAKE) $(MAKE_ARGS) -j 1 -f make/Init.gmk \
HAS_SPEC=true on-failure ; \
exit $$exitcode ) )
@@ -336,7 +338,7 @@ else # HAS_SPEC=true
if test -f $(MAKESUPPORT_OUTPUTDIR)/exit-with-error ; then \
exit 1 ; \
fi
- $(PRINTF) "Finished building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE)
+ $(PRINTF) "Finished building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE_SIMPLE)
$(call ReportProfileTimes)
endif
diff --git a/make/InitSupport.gmk b/make/InitSupport.gmk
index 005d12dae63..57d22812ad5 100644
--- a/make/InitSupport.gmk
+++ b/make/InitSupport.gmk
@@ -296,6 +296,9 @@ else # $(HAS_SPEC)=true
BUILD_PROFILE_LOG := $(OUTPUTDIR)/build-profile.log
BUILD_LOG_PIPE := > >($(TEE) -a $(BUILD_LOG)) 2> >($(TEE) -a $(BUILD_LOG) >&2) && wait
+ # Use this for simple echo/printf commands that are never expected to print
+ # to stderr.
+ BUILD_LOG_PIPE_SIMPLE := | $(TEE) -a $(BUILD_LOG)
ifneq ($(CUSTOM_ROOT), )
topdir=$(CUSTOM_ROOT)
@@ -514,7 +517,7 @@ else # $(HAS_SPEC)=true
"`$(LS) $(BUILDTIMESDIR)/build_time_diff_* | $(GREP) -v _TOTAL | \
$(XARGS) $(CAT) | $(SORT) -k 2`" \
"`$(CAT) $(BUILDTIMESDIR)/build_time_diff_TOTAL`" \
- $(BUILD_LOG_PIPE)
+ $(BUILD_LOG_PIPE_SIMPLE)
endef
define ReportProfileTimes
@@ -524,7 +527,7 @@ else # $(HAS_SPEC)=true
$(CAT) $(BUILD_PROFILE_LOG) && \
$(ECHO) End $(notdir $(BUILD_PROFILE_LOG)); \
} \
- $(BUILD_LOG_PIPE)
+ $(BUILD_LOG_PIPE_SIMPLE)
)
endef
diff --git a/make/Main.gmk b/make/Main.gmk
index 0e5ff72b2a6..4da01b131d5 100644
--- a/make/Main.gmk
+++ b/make/Main.gmk
@@ -335,6 +335,7 @@ BOOTCYCLE_TARGET := product-images
bootcycle-images:
ifneq ($(COMPILE_TYPE), cross)
$(call LogWarn, Boot cycle build step 2: Building a new JDK image using previously built image)
+ $(call MakeDir, $(OUTPUTDIR)/bootcycle-build)
+$(MAKE) $(MAKE_ARGS) -f $(TOPDIR)/make/Init.gmk PARALLEL_TARGETS=$(BOOTCYCLE_TARGET) \
JOBS= SPEC=$(dir $(SPEC))bootcycle-spec.gmk main
else
@@ -650,7 +651,6 @@ else
# Declare dependencies between hotspot-* targets
$(foreach v, $(JVM_VARIANTS), \
- $(eval hotspot-$v: hotspot-$v-gensrc hotspot-$v-libs) \
$(eval hotspot-$v-gensrc: java.base-copy) \
$(eval hotspot-$v-libs: hotspot-$v-gensrc java.base-copy) \
)
@@ -943,6 +943,10 @@ JVM_TOOLS_TARGETS ?= buildtools-hotspot
buildtools: buildtools-langtools interim-langtools interim-rmic \
buildtools-jdk $(JVM_TOOLS_TARGETS)
+# Declare dependencies from hotspot- targets
+$(foreach v, $(JVM_VARIANTS), \
+ $(eval hotspot-$v: hotspot-$v-gensrc hotspot-$v-libs) \
+)
hotspot: $(HOTSPOT_VARIANT_TARGETS)
# Create targets hotspot-libs and hotspot-gensrc.
diff --git a/make/autoconf/basics.m4 b/make/autoconf/basics.m4
index 9bc0d0c337e..c57dc5e560a 100644
--- a/make/autoconf/basics.m4
+++ b/make/autoconf/basics.m4
@@ -627,7 +627,7 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
AC_DEFUN_ONCE([BASIC_SETUP_PATHS],
[
# Save the current directory this script was started from
- CURDIR="$PWD"
+ CONFIGURE_START_DIR="$PWD"
# We might need to rewrite ORIGINAL_PATH, if it includes "#", to quote them
# for make. We couldn't do this when we retrieved ORIGINAL_PATH, since SED
@@ -653,9 +653,10 @@ AC_DEFUN_ONCE([BASIC_SETUP_PATHS],
AC_MSG_CHECKING([for top-level directory])
AC_MSG_RESULT([$TOPDIR])
AC_SUBST(TOPDIR)
+ AC_SUBST(CONFIGURE_START_DIR)
# We can only call BASIC_FIXUP_PATH after BASIC_CHECK_PATHS_WINDOWS.
- BASIC_FIXUP_PATH(CURDIR)
+ BASIC_FIXUP_PATH(CONFIGURE_START_DIR)
BASIC_FIXUP_PATH(TOPDIR)
# Locate the directory of this script.
@@ -868,9 +869,10 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
# Test from where we are running configure, in or outside of src root.
AC_MSG_CHECKING([where to store configuration])
- if test "x$CURDIR" = "x$TOPDIR" || test "x$CURDIR" = "x$CUSTOM_ROOT" \
- || test "x$CURDIR" = "x$TOPDIR/make/autoconf" \
- || test "x$CURDIR" = "x$TOPDIR/make" ; then
+ if test "x$CONFIGURE_START_DIR" = "x$TOPDIR" \
+ || test "x$CONFIGURE_START_DIR" = "x$CUSTOM_ROOT" \
+ || test "x$CONFIGURE_START_DIR" = "x$TOPDIR/make/autoconf" \
+ || test "x$CONFIGURE_START_DIR" = "x$TOPDIR/make" ; then
# We are running configure from the src root.
# Create a default ./build/target-variant-debuglevel output root.
if test "x${CONF_NAME}" = x; then
@@ -895,9 +897,9 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
# If configuration is situated in normal build directory, just use the build
# directory name as configuration name, otherwise use the complete path.
if test "x${CONF_NAME}" = x; then
- CONF_NAME=`$ECHO $CURDIR | $SED -e "s!^${TOPDIR}/build/!!"`
+ CONF_NAME=`$ECHO $CONFIGURE_START_DIR | $SED -e "s!^${TOPDIR}/build/!!"`
fi
- OUTPUTDIR="$CURDIR"
+ OUTPUTDIR="$CONFIGURE_START_DIR"
AC_MSG_RESULT([in current directory])
# WARNING: This might be a bad thing to do. You need to be sure you want to
@@ -917,14 +919,14 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
-e 's/ //g' \
| $TR -d '\n'`
if test "x$filtered_files" != x; then
- AC_MSG_NOTICE([Current directory is $CURDIR.])
+ AC_MSG_NOTICE([Current directory is $CONFIGURE_START_DIR.])
AC_MSG_NOTICE([Since this is not the source root, configure will output the configuration here])
AC_MSG_NOTICE([(as opposed to creating a configuration in /build/).])
AC_MSG_NOTICE([However, this directory is not empty. This is not allowed, since it could])
AC_MSG_NOTICE([seriously mess up just about everything.])
AC_MSG_NOTICE([Try 'cd $TOPDIR' and restart configure])
AC_MSG_NOTICE([(or create a new empty directory and cd to it).])
- AC_MSG_ERROR([Will not continue creating configuration in $CURDIR])
+ AC_MSG_ERROR([Will not continue creating configuration in $CONFIGURE_START_DIR])
fi
fi
fi
diff --git a/make/autoconf/basics_windows.m4 b/make/autoconf/basics_windows.m4
index 995ed661ed7..fd95d404fe5 100644
--- a/make/autoconf/basics_windows.m4
+++ b/make/autoconf/basics_windows.m4
@@ -551,7 +551,7 @@ AC_DEFUN_ONCE([BASIC_COMPILE_FIXPATH],
$MKDIR -p $FIXPATH_DIR $CONFIGURESUPPORT_OUTPUTDIR/bin
cd $FIXPATH_DIR
$CC $FIXPATH_SRC_W -Fe$FIXPATH_BIN_W > $FIXPATH_DIR/fixpath1.log 2>&1
- cd $CURDIR
+ cd $CONFIGURE_START_DIR
if test ! -x $FIXPATH_BIN; then
AC_MSG_RESULT([no])
@@ -574,7 +574,7 @@ AC_DEFUN_ONCE([BASIC_COMPILE_FIXPATH],
cd $FIXPATH_DIR
$FIXPATH $CC $FIXPATH_SRC -Fe$FIXPATH_DIR/fixpath2.exe \
> $FIXPATH_DIR/fixpath2.log 2>&1
- cd $CURDIR
+ cd $CONFIGURE_START_DIR
if test ! -x $FIXPATH_DIR/fixpath2.exe; then
AC_MSG_RESULT([no])
cat $FIXPATH_DIR/fixpath2.log
diff --git a/make/autoconf/flags-other.m4 b/make/autoconf/flags-other.m4
index dd5d2ae4fe9..8263b3142a4 100644
--- a/make/autoconf/flags-other.m4
+++ b/make/autoconf/flags-other.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -81,10 +81,10 @@ AC_DEFUN([FLAGS_SETUP_RCFLAGS],
RC_FLAGS="$RC_FLAGS \
-D\"JDK_VERSION_STRING=\$(VERSION_STRING)\" \
-D\"JDK_COMPANY=\$(COMPANY_NAME)\" \
- -D\"JDK_COMPONENT=\$(PRODUCT_NAME) \$(JDK_RC_PLATFORM_NAME) binary\" \
+ -D\"JDK_COMPONENT=\$(JDK_RC_NAME) binary\" \
-D\"JDK_VER=\$(VERSION_NUMBER)\" \
-D\"JDK_COPYRIGHT=Copyright \xA9 $COPYRIGHT_YEAR\" \
- -D\"JDK_NAME=\$(PRODUCT_NAME) \$(JDK_RC_PLATFORM_NAME) \$(VERSION_FEATURE)\" \
+ -D\"JDK_NAME=\$(JDK_RC_NAME) \$(VERSION_FEATURE)\" \
-D\"JDK_FVER=\$(subst .,\$(COMMA),\$(VERSION_NUMBER_FOUR_POSITIONS))\""
JVM_RCFLAGS="$JVM_RCFLAGS \
diff --git a/make/autoconf/jdk-version.m4 b/make/autoconf/jdk-version.m4
index 48a5e079091..f1ba26699b2 100644
--- a/make/autoconf/jdk-version.m4
+++ b/make/autoconf/jdk-version.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -69,6 +69,23 @@ AC_DEFUN_ONCE([JDKVER_SETUP_JDK_VERSION_NUMBERS],
AC_SUBST(MACOSX_BUNDLE_NAME_BASE)
AC_SUBST(MACOSX_BUNDLE_ID_BASE)
+ # Set the JDK RC name
+ AC_ARG_WITH(jdk-rc-name, [AS_HELP_STRING([--with-jdk-rc-name],
+ [Set JDK RC name. This is used for FileDescription and ProductName properties
+ of MS Windows binaries. @<:@not specified@:>@])])
+ if test "x$with_jdk_rc_name" = xyes; then
+ AC_MSG_ERROR([--with-jdk-rc-name must have a value])
+ elif [ ! [[ $with_jdk_rc_name =~ ^[[:print:]]*$ ]] ]; then
+ AC_MSG_ERROR([--with-jdk-rc-name contains non-printing characters: $with_jdk_rc_name])
+ elif test "x$with_jdk_rc_name" != x; then
+ # Set JDK_RC_NAME to a custom value if '--with-jdk-rc-name' was used and is not empty.
+ JDK_RC_NAME="$with_jdk_rc_name"
+ else
+ # Otherwise calculate from "version-numbers" included above.
+ JDK_RC_NAME="$PRODUCT_NAME $JDK_RC_PLATFORM_NAME"
+ fi
+ AC_SUBST(JDK_RC_NAME)
+
# The vendor name, if any
AC_ARG_WITH(vendor-name, [AS_HELP_STRING([--with-vendor-name],
[Set vendor name. Among others, used to set the 'java.vendor'
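The new `--with-jdk-rc-name` logic above reduces to a small decision: a bare flag or a value with non-printing characters is an error, a non-empty value is used verbatim, and otherwise the name is derived as `$PRODUCT_NAME $JDK_RC_PLATFORM_NAME`. A minimal C++ restatement of that selection, for clarity only (the function name and use of exceptions are assumptions; the real logic is the m4 above):

```cpp
#include <cctype>
#include <stdexcept>
#include <string>

// Sketch of the JDK_RC_NAME selection performed by jdk-version.m4.
std::string compute_jdk_rc_name(const std::string& with_jdk_rc_name,
                                const std::string& product_name,
                                const std::string& jdk_rc_platform_name) {
  // "--with-jdk-rc-name" given without "=value" shows up as "yes": reject it.
  if (with_jdk_rc_name == "yes") {
    throw std::invalid_argument("--with-jdk-rc-name must have a value");
  }
  // Mirror the [[:print:]] check: only printable characters are accepted.
  for (unsigned char c : with_jdk_rc_name) {
    if (!std::isprint(c)) {
      throw std::invalid_argument("--with-jdk-rc-name contains non-printing characters");
    }
  }
  if (!with_jdk_rc_name.empty()) {
    return with_jdk_rc_name;                          // explicit override
  }
  return product_name + " " + jdk_rc_platform_name;   // default from version-numbers
}
```

The chosen value then feeds the Windows RC `JDK_COMPONENT`/`JDK_NAME` properties via the `flags-other.m4` change above.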
diff --git a/make/autoconf/spec.gmk.in b/make/autoconf/spec.gmk.in
index bdb628e120e..43e84640d32 100644
--- a/make/autoconf/spec.gmk.in
+++ b/make/autoconf/spec.gmk.in
@@ -32,6 +32,8 @@
# The command line given to configure.
CONFIGURE_COMMAND_LINE:=@CONFIGURE_COMMAND_LINE@
+# The current directory when configure was run
+CONFIGURE_START_DIR:=@CONFIGURE_START_DIR@
# A self-referential reference to this file.
SPEC:=@SPEC@
@@ -139,7 +141,6 @@ SYSROOT_LDFLAGS := @SYSROOT_LDFLAGS@
# The top-level directory of the source repository
TOPDIR:=@TOPDIR@
-
IMPORT_MODULES_CLASSES:=@IMPORT_MODULES_CLASSES@
IMPORT_MODULES_CMDS:=@IMPORT_MODULES_CMDS@
IMPORT_MODULES_LIBS:=@IMPORT_MODULES_LIBS@
@@ -156,6 +157,7 @@ LAUNCHER_NAME:=@LAUNCHER_NAME@
PRODUCT_NAME:=@PRODUCT_NAME@
PRODUCT_SUFFIX:=@PRODUCT_SUFFIX@
JDK_RC_PLATFORM_NAME:=@JDK_RC_PLATFORM_NAME@
+JDK_RC_NAME:=@JDK_RC_NAME@
COMPANY_NAME:=@COMPANY_NAME@
HOTSPOT_VM_DISTRO:=@HOTSPOT_VM_DISTRO@
MACOSX_BUNDLE_NAME_BASE=@MACOSX_BUNDLE_NAME_BASE@
diff --git a/make/autoconf/toolchain_windows.m4 b/make/autoconf/toolchain_windows.m4
index 4c533f52161..e83efcfa676 100644
--- a/make/autoconf/toolchain_windows.m4
+++ b/make/autoconf/toolchain_windows.m4
@@ -472,7 +472,7 @@ AC_DEFUN([TOOLCHAIN_SETUP_VISUAL_STUDIO_ENV],
# Change directory so we don't need to mess with Windows paths in redirects.
cd $VS_ENV_TMP_DIR
$CMD /c extract-vs-env.bat | $CAT
- cd $CURDIR
+ cd $CONFIGURE_START_DIR
if test ! -s $VS_ENV_TMP_DIR/set-vs-env.sh; then
AC_MSG_NOTICE([Could not succesfully extract the environment variables needed for the VS setup.])
diff --git a/make/common/NativeCompilation.gmk b/make/common/NativeCompilation.gmk
index 8c93d310cc4..479eaa33ec4 100644
--- a/make/common/NativeCompilation.gmk
+++ b/make/common/NativeCompilation.gmk
@@ -231,8 +231,8 @@ define SetupCompileNativeFileBody
# Only continue if this object file hasn't been processed already. This lets
# the first found source file override any other with the same name.
- ifeq ($$(findstring $$($1_OBJ), $$($$($1_BASE)_OBJS_SO_FAR)), )
- $$($1_BASE)_OBJS_SO_FAR += $$($1_OBJ)
+ ifeq ($$($1_OBJ_PROCESSED), )
+ $1_OBJ_PROCESSED := true
# This is the definite source file to use for $1_FILENAME.
$1_SRC_FILE := $$($1_FILE)
@@ -308,14 +308,18 @@ define SetupCompileNativeFileBody
ifeq ($$(filter %.s %.S, $$($1_FILENAME)), )
# And this is the dependency file for this obj file.
- $1_DEP := $$(patsubst %$(OBJ_SUFFIX),%.d,$$($1_OBJ))
+ $1_DEPS_FILE := $$(patsubst %$(OBJ_SUFFIX),%.d,$$($1_OBJ))
# The dependency target file lists all dependencies as empty targets to
# avoid make error "No rule to make target" for removed files
- $1_DEP_TARGETS := $$(patsubst %$(OBJ_SUFFIX),%.d.targets,$$($1_OBJ))
+ $1_DEPS_TARGETS_FILE := $$(patsubst %$(OBJ_SUFFIX),%.d.targets,$$($1_OBJ))
- # Include previously generated dependency information. (if it exists)
- -include $$($1_DEP)
- -include $$($1_DEP_TARGETS)
+ # Only try to load individual dependency information files if the global
+ # file hasn't been loaded (could happen if make was interrupted).
+ ifneq ($$($$($1_BASE)_DEPS_FILE_LOADED), true)
+ # Include previously generated dependency information. (if it exists)
+ -include $$($1_DEPS_FILE)
+ -include $$($1_DEPS_TARGETS_FILE)
+ endif
endif
ifneq ($$(strip $$($1_CFLAGS) $$($1_CXXFLAGS) $$($1_OPTIMIZATION)), )
@@ -340,16 +344,16 @@ define SetupCompileNativeFileBody
# object file in the generated deps files. Fixing it with sed. If
# compiling assembly, don't try this.
$$(call ExecuteWithLog, $$@, \
- $$($1_COMPILER) $$($1_DEP_FLAG) $$($1_DEP).tmp $$($1_COMPILE_OPTIONS))
- $(SED) 's|^$$(@F):|$$@:|' $$($1_DEP).tmp > $$($1_DEP)
+ $$($1_COMPILER) $$($1_DEP_FLAG) $$($1_DEPS_FILE).tmp $$($1_COMPILE_OPTIONS))
+ $(SED) 's|^$$(@F):|$$@:|' $$($1_DEPS_FILE).tmp > $$($1_DEPS_FILE)
else
$$(call ExecuteWithLog, $$@, \
- $$($1_COMPILER) $$($1_DEP_FLAG) $$($1_DEP) $$($1_COMPILE_OPTIONS))
+ $$($1_COMPILER) $$($1_DEP_FLAG) $$($1_DEPS_FILE) $$($1_COMPILE_OPTIONS))
endif
# Create a dependency target file from the dependency file.
# Solution suggested by http://make.mad-scientist.net/papers/advanced-auto-dependency-generation/
- ifneq ($$($1_DEP), )
- $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEP) > $$($1_DEP_TARGETS)
+ ifneq ($$($1_DEPS_FILE), )
+ $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEPS_FILE) > $$($1_DEPS_TARGETS_FILE)
endif
else
# The Visual Studio compiler lacks a feature for generating make
@@ -363,10 +367,10 @@ define SetupCompileNativeFileBody
$$($1_COMPILER) -showIncludes $$($1_COMPILE_OPTIONS)) \
| $(TR) -d '\r' | $(GREP) -v -e "^Note: including file:" \
-e "^$$($1_FILENAME)$$$$" || test "$$$$?" = "1" ; \
- $(ECHO) $$@: \\ > $$($1_DEP) ; \
+ $(ECHO) $$@: \\ > $$($1_DEPS_FILE) ; \
$(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_OBJ).log \
- | $(SORT) -u >> $$($1_DEP) ; \
- $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEP) > $$($1_DEP_TARGETS)
+ | $(SORT) -u >> $$($1_DEPS_FILE) ; \
+ $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEPS_FILE) > $$($1_DEPS_TARGETS_FILE)
endif
endif
endef
@@ -486,6 +490,9 @@ define SetupNativeCompilationBody
$1_NOSUFFIX := $$($1_PREFIX)$$($1_NAME)
$1_SAFE_NAME := $$(strip $$(subst /,_, $1))
+# Need to make sure TARGET is first on list
+ $1 := $$($1_TARGET)
+
# Setup the toolchain to be used
$$(call SetIfEmpty, $1_TOOLCHAIN, TOOLCHAIN_DEFAULT)
$$(call SetIfEmpty, $1_CC, $$($$($1_TOOLCHAIN)_CC))
@@ -719,20 +726,21 @@ define SetupNativeCompilationBody
$1_PCH_FILE := $$($1_OBJECT_DIR)/precompiled/$$(notdir $$($1_PRECOMPILED_HEADER)).pch
$1_USE_PCH_FLAGS := -include-pch $$($1_PCH_FILE)
endif
- $1_PCH_DEP := $$($1_PCH_FILE).d
- $1_PCH_DEP_TARGETS := $$($1_PCH_FILE).d.targets
+ $1_PCH_DEPS_FILE := $$($1_PCH_FILE).d
+ $1_PCH_DEPS_TARGETS_FILE := $$($1_PCH_FILE).d.targets
- -include $$($1_PCH_DEP)
- -include $$($1_PCH_DEP_TARGETS)
+ -include $$($1_PCH_DEPS_FILE)
+ -include $$($1_PCH_DEPS_TARGETS_FILE)
$1_PCH_COMMAND := $$($1_CC) $$($1_CFLAGS) $$($1_EXTRA_CFLAGS) $$($1_SYSROOT_CFLAGS) \
- $$($1_OPT_CFLAGS) -x c++-header -c $(C_FLAG_DEPS) $$($1_PCH_DEP)
+ $$($1_OPT_CFLAGS) -x c++-header -c $(C_FLAG_DEPS) $$($1_PCH_DEPS_FILE)
$$($1_PCH_FILE): $$($1_PRECOMPILED_HEADER) $$($1_COMPILE_VARDEPS_FILE)
$$(call LogInfo, Generating precompiled header)
$$(call MakeDir, $$(@D))
$$(call ExecuteWithLog, $$@, $$($1_PCH_COMMAND) $$< -o $$@)
- $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_PCH_DEP) > $$($1_PCH_DEP_TARGETS)
+ $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_PCH_DEPS_FILE) \
+ > $$($1_PCH_DEPS_TARGETS_FILE)
$$($1_ALL_OBJS): $$($1_PCH_FILE)
@@ -748,6 +756,34 @@ define SetupNativeCompilationBody
endif
endif
+ # Create a rule to collect all the individual make dependency files into a
+ # single makefile.
+ $1_DEPS_FILE := $$($1_OBJECT_DIR)/$1.d
+
+ $$($1_DEPS_FILE): $$($1_ALL_OBJS)
+ $(RM) $$@
+ # CD into dir to reduce risk of hitting command length limits, which
+ # could otherwise happen if TOPDIR is a very long path.
+ $(CD) $$($1_OBJECT_DIR) && $(CAT) *.d > $$@.tmp
+ $(CD) $$($1_OBJECT_DIR) && $(CAT) *.d.targets | $(SORT) -u >> $$@.tmp
+ # After generating the file, which happens after all objects have been
+ # compiled, copy it to .old extension. On the next make invocation, this
+ # .old file will be included by make.
+ $(CP) $$@.tmp $$@.old
+ $(MV) $$@.tmp $$@
+
+ $1 += $$($1_DEPS_FILE)
+
+ # The include must be on the .old file, which represents the state from the
+ # previous invocation of make. The file being included must not have a rule
+ # defined for it as otherwise make will think it has to run the rule before
+ # being able to include the file, which would be wrong since we specifically
+ # need the file as it was generated by a previous make invocation.
+ ifneq ($$(wildcard $$($1_DEPS_FILE).old), )
+ $1_DEPS_FILE_LOADED := true
+ -include $$($1_DEPS_FILE).old
+ endif
+
# Now call SetupCompileNativeFile for each source file we are going to compile.
$$(foreach file, $$($1_SRCS), \
$$(eval $$(call SetupCompileNativeFile, $1_$$(notdir $$(file)),\
@@ -774,10 +810,10 @@ define SetupNativeCompilationBody
ifeq ($(call isTargetOs, windows), true)
ifneq ($$($1_VERSIONINFO_RESOURCE), )
$1_RES := $$($1_OBJECT_DIR)/$$($1_BASENAME).res
- $1_RES_DEP := $$($1_RES).d
- $1_RES_DEP_TARGETS := $$($1_RES).d.targets
- -include $$($1_RES_DEP)
- -include $$($1_RES_DEP_TARGETS)
+ $1_RES_DEPS_FILE := $$($1_RES).d
+ $1_RES_DEPS_TARGETS_FILE := $$($1_RES).d.targets
+ -include $$($1_RES_DEPS_FILE)
+ -include $$($1_RES_DEPS_TARGETS_FILE)
$1_RES_VARDEPS := $$($1_RC) $$($1_RC_FLAGS)
$1_RES_VARDEPS_FILE := $$(call DependOnVariable, $1_RES_VARDEPS, \
@@ -794,16 +830,18 @@ define SetupNativeCompilationBody
# For some unknown reason, in this case CL actually outputs the show
# includes to stderr so need to redirect it to hide the output from the
# main log.
- $$(call ExecuteWithLog, $$($1_RES_DEP).obj, \
+ $$(call ExecuteWithLog, $$($1_RES_DEPS_FILE).obj, \
$$($1_CC) $$(filter-out -l%, $$($1_RC_FLAGS)) \
$$($1_SYSROOT_CFLAGS) -showIncludes -nologo -TC \
- $(CC_OUT_OPTION)$$($1_RES_DEP).obj -P -Fi$$($1_RES_DEP).pp \
+ $(CC_OUT_OPTION)$$($1_RES_DEPS_FILE).obj -P -Fi$$($1_RES_DEPS_FILE).pp \
$$($1_VERSIONINFO_RESOURCE)) 2>&1 \
| $(TR) -d '\r' | $(GREP) -v -e "^Note: including file:" \
-e "^$$(notdir $$($1_VERSIONINFO_RESOURCE))$$$$" || test "$$$$?" = "1" ; \
- $(ECHO) $$($1_RES): \\ > $$($1_RES_DEP) ; \
- $(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_RES_DEP).obj.log >> $$($1_RES_DEP) ; \
- $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_RES_DEP) > $$($1_RES_DEP_TARGETS)
+ $(ECHO) $$($1_RES): \\ > $$($1_RES_DEPS_FILE) ; \
+ $(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_RES_DEPS_FILE).obj.log \
+ >> $$($1_RES_DEPS_FILE) ; \
+ $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_RES_DEPS_FILE) \
+ > $$($1_RES_DEPS_TARGETS_FILE)
endif
endif
@@ -830,9 +868,6 @@ define SetupNativeCompilationBody
$1_EXTRA_LDFLAGS += $(call SET_SHARED_LIBRARY_MAPFILE,$$($1_REAL_MAPFILE))
endif
- # Need to make sure TARGET is first on list
- $1 := $$($1_TARGET)
-
ifneq ($$($1_COPY_DEBUG_SYMBOLS), false)
$1_COPY_DEBUG_SYMBOLS := $(COPY_DEBUG_SYMBOLS)
endif
diff --git a/make/hotspot/lib/JvmFeatures.gmk b/make/hotspot/lib/JvmFeatures.gmk
index 7c855492e53..4cf0f29fd89 100644
--- a/make/hotspot/lib/JvmFeatures.gmk
+++ b/make/hotspot/lib/JvmFeatures.gmk
@@ -172,8 +172,6 @@ endif
ifneq ($(call check-jvm-feature, shenandoahgc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_SHENANDOAHGC=0
JVM_EXCLUDE_PATTERNS += gc/shenandoah
-else
- JVM_CFLAGS_FEATURES += -DSUPPORT_BARRIER_ON_PRIMITIVES -DSUPPORT_NOT_TO_SPACE_INVARIANT
endif
ifneq ($(call check-jvm-feature, jfr), true)
diff --git a/make/test/JtregNativeJdk.gmk b/make/test/JtregNativeJdk.gmk
index c9841ca198c..cfe0300475e 100644
--- a/make/test/JtregNativeJdk.gmk
+++ b/make/test/JtregNativeJdk.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -61,6 +61,7 @@ ifeq ($(call isTargetOs, windows), true)
BUILD_JDK_JTREG_LIBRARIES_LIBS_libstringPlatformChars := $(WIN_LIB_JAVA)
WIN_LIB_JLI := $(SUPPORT_OUTPUTDIR)/native/java.base/libjli/jli.lib
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeJliLaunchTest := $(WIN_LIB_JLI)
+ BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeCallerAccessTest := jvm.lib
else
BUILD_JDK_JTREG_LIBRARIES_LIBS_libstringPlatformChars := -ljava
BUILD_JDK_JTREG_LIBRARIES_LIBS_libDirectIO := -ljava
@@ -70,6 +71,7 @@ else
BUILD_JDK_JTREG_LIBRARIES_LIBS_libInheritedChannel := -ljava -lsocket -lnsl
endif
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeJliLaunchTest := -ljli
+ BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeCallerAccessTest := -ljvm
endif
ifeq ($(call isTargetOs, macosx), true)
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index d4bb3b2d28f..e683e84465a 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -3932,7 +3932,8 @@ operand immL_4294967295()
operand immL_bitmask()
%{
- predicate(((n->get_long() & 0xc000000000000000l) == 0)
+ predicate((n->get_long() != 0)
+ && ((n->get_long() & 0xc000000000000000l) == 0)
&& is_power_of_2(n->get_long() + 1));
match(ConL);
@@ -3943,7 +3944,8 @@ operand immL_bitmask()
operand immI_bitmask()
%{
- predicate(((n->get_int() & 0xc0000000) == 0)
+ predicate((n->get_int() != 0)
+ && ((n->get_int() & 0xc0000000) == 0)
&& is_power_of_2(n->get_int() + 1));
match(ConI);
@@ -11432,11 +11434,13 @@ instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_co
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
match(Set dst (AndI (URShiftI src rshift) mask));
+ // Make sure we are not going to exceed what ubfxw can do.
+ predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
ins_cost(INSN_COST);
format %{ "ubfxw $dst, $src, $rshift, $mask" %}
ins_encode %{
- int rshift = $rshift$$constant;
+ int rshift = $rshift$$constant & 31;
long mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfxw(as_Register($dst$$reg),
@@ -11447,13 +11451,15 @@ instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
match(Set dst (AndL (URShiftL src rshift) mask));
+ // Make sure we are not going to exceed what ubfx can do.
+ predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));
ins_cost(INSN_COST);
format %{ "ubfx $dst, $src, $rshift, $mask" %}
ins_encode %{
- int rshift = $rshift$$constant;
+ int rshift = $rshift$$constant & 63;
long mask = $mask$$constant;
- int width = exact_log2(mask+1);
+ int width = exact_log2_long(mask+1);
__ ubfx(as_Register($dst$$reg),
as_Register($src$$reg), rshift, width);
%}
@@ -11465,11 +11471,13 @@ instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
+ // Make sure we are not going to exceed what ubfxw can do.
+ predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
ins_cost(INSN_COST * 2);
format %{ "ubfx $dst, $src, $rshift, $mask" %}
ins_encode %{
- int rshift = $rshift$$constant;
+ int rshift = $rshift$$constant & 31;
long mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfx(as_Register($dst$$reg),
@@ -11510,7 +11518,7 @@ instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
ins_encode %{
int lshift = $lshift$$constant;
long mask = $mask$$constant;
- int width = exact_log2(mask+1);
+ int width = exact_log2_long(mask+1);
__ ubfiz(as_Register($dst$$reg),
as_Register($src$$reg), lshift, width);
%}
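The predicates added to the `ubfx` patterns above encode one legality rule: for `(src >>> rshift) & mask` with `mask = 2^width - 1`, UBFX extracts `width` bits starting at bit `rshift`, and that field must fit in the register. A rough C++ restatement of the check (illustrative only; `can_use_ubfx` is not a HotSpot function):

```cpp
#include <cstdint>

// Width of the extracted field for a mask of the form 2^k - 1, i.e. exact_log2(mask + 1).
static int field_width(uint64_t mask) {
  int width = 0;
  while (mask != 0) { mask >>= 1; width++; }
  return width;
}

// Mirrors the new .ad predicates: the shift is taken modulo the register size,
// the mask may not be 0 (immI_bitmask/immL_bitmask now exclude it), and the
// field must not run past the top bit of the register.
static bool can_use_ubfx(uint64_t mask, int rshift, int reg_bits /* 32 or 64 */) {
  if (mask == 0) return false;
  int width = field_width(mask);
  int lsb = rshift & (reg_bits - 1);      // rshift & 31 / rshift & 63
  return lsb + width <= reg_bits;         // exact_log2(mask+1) + (rshift & N) <= N + 1
}
```

For example, `mask = 0xff` (width 8) is accepted at `rshift = 24` but rejected at `rshift = 25` for the 32-bit form, which is exactly the overflow case the new predicates guard against.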
diff --git a/src/hotspot/cpu/aarch64/aarch64_ad.m4 b/src/hotspot/cpu/aarch64/aarch64_ad.m4
index 0dd69df6579..d2b6fb0de3b 100644
--- a/src/hotspot/cpu/aarch64/aarch64_ad.m4
+++ b/src/hotspot/cpu/aarch64/aarch64_ad.m4
@@ -181,31 +181,35 @@ define(`BFX_INSN',
`instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
%{
match(Set dst (And$1 ($2$1 src rshift) mask));
+ // Make sure we are not going to exceed what $3 can do.
+ predicate((exact_log2$6(n->in(2)->get_$5() + 1) + (n->in(1)->in(2)->get_int() & $4)) <= ($4 + 1));
ins_cost(INSN_COST);
format %{ "$3 $dst, $src, $rshift, $mask" %}
ins_encode %{
- int rshift = $rshift$$constant;
+ int rshift = $rshift$$constant & $4;
long mask = $mask$$constant;
- int width = exact_log2(mask+1);
+ int width = exact_log2$6(mask+1);
__ $3(as_Register($dst$$reg),
as_Register($src$$reg), rshift, width);
%}
ins_pipe(ialu_reg_shift);
%}')
-BFX_INSN(I,URShift,ubfxw)
-BFX_INSN(L,URShift,ubfx)
+BFX_INSN(I, URShift, ubfxw, 31, int)
+BFX_INSN(L, URShift, ubfx, 63, long, _long)
// We can use ubfx when extending an And with a mask when we know mask
// is positive. We know that because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
+ // Make sure we are not going to exceed what ubfxw can do.
+ predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
ins_cost(INSN_COST * 2);
format %{ "ubfx $dst, $src, $rshift, $mask" %}
ins_encode %{
- int rshift = $rshift$$constant;
+ int rshift = $rshift$$constant & 31;
long mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfx(as_Register($dst$$reg),
@@ -228,7 +232,7 @@ define(`UBFIZ_INSN',
ins_encode %{
int lshift = $lshift$$constant;
long mask = $mask$$constant;
- int width = exact_log2(mask+1);
+ int width = exact_log2$5(mask+1);
__ $2(as_Register($dst$$reg),
as_Register($src$$reg), lshift, width);
%}
diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
index dc9a6e64174..fdf5ea313df 100644
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
@@ -1211,8 +1211,8 @@ public:
/* The size bit is in bit 30, not 31 */
sz = (operand_size)(sz == word ? 0b00:0b01);
}
- f(sz, 31, 30), f(0b001000, 29, 24), f(1, 23), f(a, 22), f(1, 21);
- rf(Rs, 16), f(r, 15), f(0b11111, 14, 10), rf(Rn, 5), rf(Rt, 0);
+ f(sz, 31, 30), f(0b001000, 29, 24), f(not_pair ? 1 : 0, 23), f(a, 22), f(1, 21);
+ zrf(Rs, 16), f(r, 15), f(0b11111, 14, 10), srf(Rn, 5), zrf(Rt, 0);
}
// CAS
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index 17480052d43..7febbe72eee 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -34,6 +34,7 @@
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
+#include "code/compiledIC.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
@@ -2063,11 +2064,10 @@ void LIR_Assembler::emit_static_call_stub() {
int start = __ offset();
__ relocate(static_stub_Relocation::spec(call_pc));
- __ mov_metadata(rmethod, (Metadata*)NULL);
- __ movptr(rscratch1, 0);
- __ br(rscratch1);
+ __ emit_static_call_stub();
- assert(__ offset() - start <= call_stub_size(), "stub too big");
+ assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
+ <= call_stub_size(), "stub too big");
__ end_a_stub();
}
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
index 3f577251b76..8e663ba270f 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
@@ -69,7 +69,9 @@ friend class ArrayCopyStub;
void deoptimize_trap(CodeEmitInfo *info);
enum {
- _call_stub_size = 12 * NativeInstruction::instruction_size,
+ // call stub: CompiledStaticCall::to_interp_stub_size() +
+ // CompiledStaticCall::to_trampoline_stub_size()
+ _call_stub_size = 13 * NativeInstruction::instruction_size,
_call_aot_stub_size = 0,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
_deopt_handler_size = 7 * NativeInstruction::instruction_size
diff --git a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
index ca3cc39a0be..5b1e2dad4eb 100644
--- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
@@ -61,14 +61,14 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
// Don't create a Metadata reloc if we're generating immutable PIC.
if (cbuf.immutable_PIC()) {
__ movptr(rmethod, 0);
- } else {
- __ mov_metadata(rmethod, (Metadata*)NULL);
- }
-#else
- __ mov_metadata(rmethod, (Metadata*)NULL);
+ __ movptr(rscratch1, 0);
+ __ br(rscratch1);
+
+ } else
#endif
- __ movptr(rscratch1, 0);
- __ br(rscratch1);
+ {
+ __ emit_static_call_stub();
+ }
assert((__ offset() - offset) <= (int)to_interp_stub_size(), "stub too big");
__ end_a_stub();
@@ -77,7 +77,8 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
#undef __
int CompiledStaticCall::to_interp_stub_size() {
- return 7 * NativeInstruction::instruction_size;
+ // isb; movk; movz; movz; movk; movz; movz; br
+ return 8 * NativeInstruction::instruction_size;
}
int CompiledStaticCall::to_trampoline_stub_size() {
@@ -159,7 +160,8 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
}
// Creation also verifies the object.
- NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+ NativeMovConstReg* method_holder
+ = nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
#ifndef PRODUCT
NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());
@@ -184,7 +186,8 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
assert(stub != NULL, "stub not found");
assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
// Creation also verifies the object.
- NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+ NativeMovConstReg* method_holder
+ = nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
method_holder->set_data(0);
}
@@ -201,8 +204,9 @@ void CompiledDirectStaticCall::verify() {
address stub = find_stub(false /* is_aot */);
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
- NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
- NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
+ NativeMovConstReg* method_holder
+ = nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
+ NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
// Verify state.
assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
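The `emit_static_call_stub()` introduced in `macroAssembler_aarch64` further down gives the to-interpreter stub a fixed shape that `set_to_interpreted`, `set_stub_to_clean` and `verify` above now rely on: an `isb`, a three-instruction metadata move into `rmethod`, a three-instruction address move into `rscratch1`, and a `br` — eight instructions, matching the new `to_interp_stub_size()`. A hedged sketch of how the offsets line up (the `StubLayout` helper is purely illustrative):

```cpp
// Offsets within the aarch64 to-interpreter stub, in instruction slots:
//
//   +0  isb                       ; emitted first by emit_static_call_stub()
//   +1  mov_metadata(rmethod, 0)  ; 3 instructions, patched with the Method*
//   +4  movptr(rscratch1, 0)      ; 3 instructions, patched with the i2c entry
//   +7  br rscratch1
//
struct StubLayout {                        // hypothetical helper, for illustration
  static const int insn_size = 4;          // NativeInstruction::instruction_size
  // The patchable NativeMovConstReg starts after the isb, hence the
  // "stub + NativeInstruction::instruction_size" lookups above.
  static int method_holder_offset() { return 1 * insn_size; }
  static int to_interp_stub_size()  { return 8 * insn_size; }
};
```

The C1 `emit_static_call_stub()` earlier asserts that this stub plus `to_trampoline_stub_size()` still fit within the enlarged `call_stub_size()` (now 13 instructions).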
diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.cpp b/src/hotspot/cpu/aarch64/frame_aarch64.cpp
index d3a6feedd86..419dde8111b 100644
--- a/src/hotspot/cpu/aarch64/frame_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/frame_aarch64.cpp
@@ -559,7 +559,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// validate constantPoolCache*
ConstantPoolCache* cp = *interpreter_frame_cache_addr();
- if (cp == NULL || !cp->is_metaspace_object()) return false;
+ if (MetaspaceObj::is_valid(cp) == false) return false;
// validate locals
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
index c063ce7afa2..a104748dbf6 100644
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
@@ -40,7 +40,7 @@
#define __ masm->
-address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;
+address ShenandoahBarrierSetAssembler::_shenandoah_lrb = NULL;
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register addr, Register count, RegSet saved_regs) {
@@ -87,6 +87,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, Dec
void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register start, Register count, Register scratch, RegSet saved_regs) {
if (is_oop) {
+ Label done;
+
+ // Avoid calling runtime if count == 0
+ __ cbz(count, done);
+
+ // Is updating references?
+ Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+ __ ldrb(rscratch1, gc_state);
+ __ tbz(rscratch1, ShenandoahHeap::UPDATEREFS_BITPOS, done);
+
__ push(saved_regs, sp);
assert_different_registers(start, count, scratch);
assert_different_registers(c_rarg0, count);
@@ -94,6 +104,8 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
__ mov(c_rarg1, count);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), 2);
__ pop(saved_regs, sp);
+
+ __ bind(done);
}
}
@@ -198,60 +210,31 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
__ bind(done);
}
-void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) {
- if (ShenandoahReadBarrier) {
- read_barrier_impl(masm, dst);
- }
-}
-
-void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) {
- assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
+void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst) {
+ assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
Label is_null;
__ cbz(dst, is_null);
- read_barrier_not_null_impl(masm, dst);
+ resolve_forward_pointer_not_null(masm, dst);
__ bind(is_null);
}
-void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) {
- if (ShenandoahReadBarrier) {
- read_barrier_not_null_impl(masm, dst);
- }
-}
-
-
-void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) {
- assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
+// IMPORTANT: This must preserve all registers, even rscratch1 and rscratch2.
+void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst) {
+ assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
__ ldr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
}
-void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) {
- if (ShenandoahWriteBarrier) {
- write_barrier_impl(masm, dst);
- }
-}
-
-void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) {
- assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
- assert(dst != rscratch1, "need rscratch1");
+void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Register tmp) {
+ assert(ShenandoahLoadRefBarrier, "Should be enabled");
assert(dst != rscratch2, "need rscratch2");
Label done;
-
+ __ enter();
Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
- __ ldrb(rscratch1, gc_state);
+ __ ldrb(rscratch2, gc_state);
// Check for heap stability
- __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
- __ tst(rscratch1, rscratch2);
- __ br(Assembler::EQ, done);
-
- // Heap is unstable, need to perform the read-barrier even if WB is inactive
- __ ldr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
-
- // Check for evacuation-in-progress and jump to WB slow-path if needed
- __ mov(rscratch2, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
- __ tst(rscratch1, rscratch2);
- __ br(Assembler::EQ, done);
+ __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
RegSet to_save = RegSet::of(r0);
if (dst != r0) {
@@ -259,7 +242,7 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
__ mov(r0, dst);
}
- __ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_wb())));
+ __ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb())));
if (dst != r0) {
__ mov(dst, r0);
@@ -267,14 +250,11 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
}
__ bind(done);
+ __ leave();
}
void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
if (ShenandoahStoreValEnqueueBarrier) {
- Label is_null;
- __ cbz(dst, is_null);
- write_barrier_impl(masm, dst);
- __ bind(is_null);
// Save possibly live regs.
RegSet live_regs = RegSet::range(r0, r4) - dst;
__ push(live_regs, sp);
@@ -286,44 +266,45 @@ void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Regis
__ ldrd(v0, __ post(sp, 2 * wordSize));
__ pop(live_regs, sp);
}
- if (ShenandoahStoreValReadBarrier) {
- read_barrier_impl(masm, dst);
+}
+
+void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Register tmp) {
+ if (ShenandoahLoadRefBarrier) {
+ Label is_null;
+ __ cbz(dst, is_null);
+ load_reference_barrier_not_null(masm, dst, tmp);
+ __ bind(is_null);
}
}
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp_thread) {
bool on_oop = type == T_OBJECT || type == T_ARRAY;
- bool in_heap = (decorators & IN_HEAP) != 0;
bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
bool on_reference = on_weak || on_phantom;
- if (in_heap) {
- read_barrier_not_null(masm, src.base());
- }
-
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
- if (ShenandoahKeepAliveBarrier && on_oop && on_reference) {
- __ enter();
- satb_write_barrier_pre(masm /* masm */,
- noreg /* obj */,
- dst /* pre_val */,
- rthread /* thread */,
- tmp1 /* tmp */,
- true /* tosca_live */,
- true /* expand_call */);
- __ leave();
+ if (on_oop) {
+ load_reference_barrier(masm, dst, tmp1);
+
+ if (ShenandoahKeepAliveBarrier && on_reference) {
+ __ enter();
+ satb_write_barrier_pre(masm /* masm */,
+ noreg /* obj */,
+ dst /* pre_val */,
+ rthread /* thread */,
+ tmp1 /* tmp */,
+ true /* tosca_live */,
+ true /* expand_call */);
+ __ leave();
+ }
}
}
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2) {
bool on_oop = type == T_OBJECT || type == T_ARRAY;
- bool in_heap = (decorators & IN_HEAP) != 0;
- if (in_heap) {
- write_barrier(masm, dst.base());
- }
if (!on_oop) {
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
return;
@@ -361,21 +342,6 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
}
-void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register op1, Register op2) {
- __ cmp(op1, op2);
- if (ShenandoahAcmpBarrier) {
- Label done;
- __ br(Assembler::EQ, done);
- // The object may have been evacuated, but we won't see it without a
- // membar here.
- __ membar(Assembler::LoadStore| Assembler::LoadLoad);
- read_barrier(masm, op1);
- read_barrier(masm, op2);
- __ cmp(op1, op2);
- __ bind(done);
- }
-}
-
void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
Register var_size_in_bytes,
int con_size_in_bytes,
@@ -410,27 +376,6 @@ void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register
}
}
-void ShenandoahBarrierSetAssembler::resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
- bool oop_not_null = (decorators & IS_NOT_NULL) != 0;
- bool is_write = (decorators & ACCESS_WRITE) != 0;
- if (is_write) {
- if (oop_not_null) {
- write_barrier(masm, obj);
- } else {
- Label done;
- __ cbz(obj, done);
- write_barrier(masm, obj);
- __ bind(done);
- }
- } else {
- if (oop_not_null) {
- read_barrier_not_null(masm, obj);
- } else {
- read_barrier(masm, obj);
- }
- }
-}
-
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
bool acquire, bool release, bool weak, bool is_cae,
Register result) {
@@ -469,8 +414,8 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register a
__ decode_heap_oop(tmp1, tmp1);
__ decode_heap_oop(tmp2, tmp2);
}
- read_barrier_impl(masm, tmp1);
- read_barrier_impl(masm, tmp2);
+ resolve_forward_pointer(masm, tmp1);
+ resolve_forward_pointer(masm, tmp2);
__ cmp(tmp1, tmp2);
// Retry with expected now being the value we just loaded from addr.
__ br(Assembler::EQ, retry);
@@ -515,7 +460,7 @@ void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, Shen
__ b(*stub->continuation());
}
-void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub) {
+void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
Register obj = stub->obj()->as_register();
Register res = stub->result()->as_register();
@@ -532,7 +477,7 @@ void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, Sh
__ cbz(res, done);
}
- write_barrier(ce->masm(), res);
+ load_reference_barrier_not_null(ce->masm(), res, rscratch1);
__ bind(done);
__ b(*stub->continuation());
@@ -592,14 +537,14 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
#endif // COMPILER1
-address ShenandoahBarrierSetAssembler::shenandoah_wb() {
- assert(_shenandoah_wb != NULL, "need write barrier stub");
- return _shenandoah_wb;
+address ShenandoahBarrierSetAssembler::shenandoah_lrb() {
+ assert(_shenandoah_lrb != NULL, "need load reference barrier stub");
+ return _shenandoah_lrb;
}
#define __ cgen->assembler()->
-// Shenandoah write barrier.
+// Shenandoah load reference barrier.
//
// Input:
// r0: OOP to evacuate. Not null.
@@ -608,13 +553,13 @@ address ShenandoahBarrierSetAssembler::shenandoah_wb() {
// r0: Pointer to evacuated OOP.
//
// Trash rscratch1, rscratch2. Preserve everything else.
-address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator* cgen) {
+address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator* cgen) {
__ align(6);
- StubCodeMark mark(cgen, "StubRoutines", "shenandoah_wb");
+ StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb");
address start = __ pc();
- Label work;
+ Label work, done;
__ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
__ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
__ ldrb(rscratch2, Address(rscratch2, rscratch1));
@@ -622,19 +567,23 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator*
__ ret(lr);
__ bind(work);
- Register obj = r0;
+ __ mov(rscratch2, r0);
+ resolve_forward_pointer_not_null(cgen->assembler(), r0);
+ __ cmp(rscratch2, r0);
+ __ br(Assembler::NE, done);
__ enter(); // required for proper stackwalking of RuntimeStub frame
__ push_call_clobbered_registers();
- __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT));
+ __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT));
__ blrt(lr, 1, 0, MacroAssembler::ret_type_integral);
- __ mov(rscratch1, obj);
+ __ mov(rscratch1, r0);
__ pop_call_clobbered_registers();
- __ mov(obj, rscratch1);
+ __ mov(r0, rscratch1);
__ leave(); // required for proper stackwalking of RuntimeStub frame
+ __ bind(done);
__ ret(lr);
return start;
@@ -643,12 +592,12 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator*
#undef __
void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
- if (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier) {
+ if (ShenandoahLoadRefBarrier) {
int stub_code_size = 2048;
ResourceMark rm;
BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
CodeBuffer buf(bb);
StubCodeGenerator cgen(&buf);
- _shenandoah_wb = generate_shenandoah_wb(&cgen);
+ _shenandoah_lrb = generate_shenandoah_lrb(&cgen);
}
}
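Taken together, the renamed stub above implements the load-reference-barrier contract from its header comment: it takes an oop in `r0` and returns the to-space copy in `r0`, calling into the runtime only when the object sits in the collection set and has not already been forwarded. A compact C++ pseudocode restatement (a sketch of the stated semantics, not the emitted assembly; the helper declarations are stand-ins for HotSpot internals):

```cpp
typedef void* oop;
extern bool in_collection_set(oop obj);                 // in_cset_fast_test byte lookup
extern oop  resolve_forward_pointer_not_null(oop obj);  // Brooks pointer load
extern oop  evacuate_via_runtime(oop obj);              // ShenandoahRuntime::load_reference_barrier_JRT

oop load_reference_barrier(oop obj) {
  if (obj == nullptr) return obj;        // null check done by the non-_not_null entry
  // The inline part first tests gc_state for HAS_FORWARDED; with a stable heap
  // the whole barrier is skipped. What follows is the shenandoah_lrb stub.
  if (!in_collection_set(obj)) {
    return obj;                          // fast path 1: outside the collection set
  }
  oop fwd = resolve_forward_pointer_not_null(obj);
  if (fwd != obj) {
    return fwd;                          // fast path 2: already evacuated
  }
  return evacuate_via_runtime(obj);      // slow path: evacuate, preserving registers
}
```

The same short-circuit idea appears in `arraycopy_epilogue` above, where the post-barrier runtime call is now skipped when `count == 0` or the `UPDATEREFS` bit of `gc_state` is clear.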
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp
index 69e0782cf44..86b8e3503cd 100644
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp
@@ -29,7 +29,7 @@
#ifdef COMPILER1
class LIR_Assembler;
class ShenandoahPreBarrierStub;
-class ShenandoahWriteBarrierStub;
+class ShenandoahLoadReferenceBarrierStub;
class StubAssembler;
class StubCodeGenerator;
#endif
@@ -37,7 +37,7 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
- static address _shenandoah_wb;
+ static address _shenandoah_lrb;
void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
@@ -54,24 +54,21 @@ private:
bool tosca_live,
bool expand_call);
- void read_barrier(MacroAssembler* masm, Register dst);
- void read_barrier_impl(MacroAssembler* masm, Register dst);
- void read_barrier_not_null(MacroAssembler* masm, Register dst);
- void read_barrier_not_null_impl(MacroAssembler* masm, Register dst);
- void write_barrier(MacroAssembler* masm, Register dst);
- void write_barrier_impl(MacroAssembler* masm, Register dst);
- void asm_acmp_barrier(MacroAssembler* masm, Register op1, Register op2);
+ void resolve_forward_pointer(MacroAssembler* masm, Register dst);
+ void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst);
+ void load_reference_barrier(MacroAssembler* masm, Register dst, Register tmp);
+ void load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Register tmp);
- address generate_shenandoah_wb(StubCodeGenerator* cgen);
+ address generate_shenandoah_lrb(StubCodeGenerator* cgen);
public:
- static address shenandoah_wb();
+ static address shenandoah_lrb();
void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp);
#ifdef COMPILER1
void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
- void gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub);
+ void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub);
void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
#endif
@@ -83,8 +80,6 @@ public:
Register dst, Address src, Register tmp1, Register tmp_thread);
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2);
- virtual void obj_equals(MacroAssembler* masm, Register src1, Register src2);
- virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj);
virtual void tlab_allocate(MacroAssembler* masm, Register obj,
Register var_size_in_bytes,
int con_size_in_bytes,
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetC1_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetC1_aarch64.cpp
index c78392a1220..e057c1691d3 100644
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetC1_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetC1_aarch64.cpp
@@ -99,6 +99,7 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRIt
__ xchg(access.resolved_addr(), value_opr, result, tmp);
if (access.is_oop()) {
+ result = load_reference_barrier(access.gen(), result, access.access_emit_info(), true);
if (ShenandoahSATBBarrier) {
pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
result /* pre_val */);
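The one-line C1 change above fixes the ordering for oop `xchg`: the previous value read from the field must pass through the load reference barrier before it is handed to the SATB pre-barrier as `pre_val`. A sketch of the resulting sequence (illustrative pseudocode, not the LIR actually generated; helper names are assumptions):

```cpp
typedef void* oop;
extern oop  raw_atomic_xchg(oop* addr, oop new_val);  // the underlying __ xchg
extern oop  load_reference_barrier(oop prev);         // see the sketch above
extern void satb_pre_barrier(oop pre_val);            // SATB enqueue of the old value
extern bool ShenandoahSATBBarrier;

oop atomic_xchg_oop(oop* addr, oop new_val) {
  oop result = raw_atomic_xchg(addr, new_val);
  result = load_reference_barrier(result);   // added by this change
  if (ShenandoahSATBBarrier) {
    satb_pre_barrier(result);
  }
  return result;
}
```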
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad
index c65fac0b724..99c97d48054 100644
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad
@@ -45,18 +45,6 @@ encode %{
%}
%}
-instruct shenandoahRB(iRegPNoSp dst, iRegP src, rFlagsReg cr) %{
- match(Set dst (ShenandoahReadBarrier src));
- format %{ "shenandoah_rb $dst,$src" %}
- ins_encode %{
- Register s = $src$$Register;
- Register d = $dst$$Register;
- __ ldr(d, Address(s, ShenandoahBrooksPointer::byte_offset()));
- %}
- ins_pipe(pipe_class_memory);
-%}
-
-
instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval)));
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index f5130b23a89..e617b2d7c4e 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -812,6 +812,18 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
return stub_start_addr;
}
+void MacroAssembler::emit_static_call_stub() {
+ // CompiledDirectStaticCall::set_to_interpreted knows the
+ // exact layout of this stub.
+
+ isb();
+ mov_metadata(rmethod, (Metadata*)NULL);
+
+ // Jump to the entry point of the i2c stub.
+ movptr(rscratch1, 0);
+ br(rscratch1);
+}
+
void MacroAssembler::c2bool(Register x) {
// implements x == 0 ? 0 : 1
// note: must only look at least-significant byte of x
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index 7b9f62f554b..9cae321a2da 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -607,6 +607,7 @@ public:
static int patch_narrow_klass(address insn_addr, narrowKlass n);
address emit_trampoline_stub(int insts_call_instruction_offset, address target);
+ void emit_static_call_stub();
// The following 4 methods return the offset of the appropriate move instruction
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
index 668795050ea..4cf08059a67 100644
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
@@ -232,7 +232,11 @@ void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }
//-------------------------------------------------------------------
void NativeMovConstReg::verify() {
- // make sure code pattern is actually mov reg64, imm64 instructions
+ if (! (nativeInstruction_at(instruction_address())->is_movz() ||
+ is_adrp_at(instruction_address()) ||
+ is_ldr_literal_at(instruction_address())) ) {
+ fatal("should be MOVZ or ADRP or LDR (literal)");
+ }
}
diff --git a/src/hotspot/cpu/arm/frame_arm.cpp b/src/hotspot/cpu/arm/frame_arm.cpp
index a664a7aa337..0e054f377d9 100644
--- a/src/hotspot/cpu/arm/frame_arm.cpp
+++ b/src/hotspot/cpu/arm/frame_arm.cpp
@@ -494,7 +494,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// validate ConstantPoolCache*
ConstantPoolCache* cp = *interpreter_frame_cache_addr();
- if (cp == NULL || !cp->is_metaspace_object()) return false;
+ if (MetaspaceObj::is_valid(cp) == false) return false;
// validate locals
diff --git a/src/hotspot/cpu/sparc/frame_sparc.cpp b/src/hotspot/cpu/sparc/frame_sparc.cpp
index 2fbca263743..6f5ab67126a 100644
--- a/src/hotspot/cpu/sparc/frame_sparc.cpp
+++ b/src/hotspot/cpu/sparc/frame_sparc.cpp
@@ -665,7 +665,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// validate ConstantPoolCache*
ConstantPoolCache* cp = *interpreter_frame_cache_addr();
- if (cp == NULL || !cp->is_metaspace_object()) return false;
+ if (MetaspaceObj::is_valid(cp) == false) return false;
// validate locals
diff --git a/src/hotspot/cpu/x86/frame_x86.cpp b/src/hotspot/cpu/x86/frame_x86.cpp
index acedaf827b7..34a1a73b4c7 100644
--- a/src/hotspot/cpu/x86/frame_x86.cpp
+++ b/src/hotspot/cpu/x86/frame_x86.cpp
@@ -546,7 +546,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// validate ConstantPoolCache*
ConstantPoolCache* cp = *interpreter_frame_cache_addr();
- if (cp == NULL || !cp->is_metaspace_object()) return false;
+ if (MetaspaceObj::is_valid(cp) == false) return false;
// validate locals
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
index 8cced4100b1..d9fd352ec90 100644
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
@@ -41,7 +41,7 @@
#define __ masm->
-address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;
+address ShenandoahBarrierSetAssembler::_shenandoah_lrb = NULL;
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count) {
@@ -138,6 +138,22 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
}
#endif
+ Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
+#ifndef _LP64
+ __ push(thread);
+ __ get_thread(thread);
+#endif
+
+ // Short-circuit if count == 0.
+ Label done;
+ __ testptr(count, count);
+ __ jcc(Assembler::zero, done);
+
+ // Skip runtime call if no forwarded objects.
+ Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+ __ testb(gc_state, ShenandoahHeap::UPDATEREFS);
+ __ jcc(Assembler::zero, done);
+
__ pusha(); // push registers (overkill)
#ifdef _LP64
if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
@@ -155,6 +171,9 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
dst, count);
#endif
__ popa();
+
+ __ bind(done);
+ NOT_LP64(__ pop(thread);)
}
}
@@ -293,41 +312,23 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
__ bind(done);
}
-void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) {
- if (ShenandoahReadBarrier) {
- read_barrier_impl(masm, dst);
- }
-}
-
-void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) {
- assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
+void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst) {
+ assert(ShenandoahCASBarrier, "should be enabled");
Label is_null;
__ testptr(dst, dst);
__ jcc(Assembler::zero, is_null);
- read_barrier_not_null_impl(masm, dst);
+ resolve_forward_pointer_not_null(masm, dst);
__ bind(is_null);
}
-void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) {
- if (ShenandoahReadBarrier) {
- read_barrier_not_null_impl(masm, dst);
- }
-}
-
-void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) {
- assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
+void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst) {
+ assert(ShenandoahCASBarrier || ShenandoahLoadRefBarrier, "should be enabled");
__ movptr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
}
-void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) {
- if (ShenandoahWriteBarrier) {
- write_barrier_impl(masm, dst);
- }
-}
-
-void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) {
- assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
+void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst) {
+ assert(ShenandoahLoadRefBarrier, "Should be enabled");
#ifdef _LP64
Label done;
@@ -335,8 +336,8 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
__ testb(gc_state, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
__ jccb(Assembler::zero, done);
- // Heap is unstable, need to perform the read-barrier even if WB is inactive
- read_barrier_not_null(masm, dst);
+ // Heap is unstable, need to perform the resolve even if LRB is inactive
+ resolve_forward_pointer_not_null(masm, dst);
__ testb(gc_state, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
__ jccb(Assembler::zero, done);
@@ -345,7 +346,7 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
__ xchgptr(dst, rax); // Move obj into rax and save rax into obj.
}
- __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_wb())));
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb())));
if (dst != rax) {
__ xchgptr(rax, dst); // Swap back obj with rax.
@@ -358,24 +359,18 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
}
void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
- if (ShenandoahStoreValReadBarrier || ShenandoahStoreValEnqueueBarrier) {
+ if (ShenandoahStoreValEnqueueBarrier) {
storeval_barrier_impl(masm, dst, tmp);
}
}
void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp) {
- assert(UseShenandoahGC && (ShenandoahStoreValReadBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled");
+ assert(ShenandoahStoreValEnqueueBarrier, "should be enabled");
if (dst == noreg) return;
#ifdef _LP64
if (ShenandoahStoreValEnqueueBarrier) {
- Label is_null;
- __ testptr(dst, dst);
- __ jcc(Assembler::zero, is_null);
- write_barrier_impl(masm, dst);
- __ bind(is_null);
-
// The set of registers to be saved+restored is the same as in the write-barrier above.
// Those are the commonly used registers in the interpreter.
__ pusha();
@@ -389,50 +384,54 @@ void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm,
//__ pop_callee_saved_registers();
__ popa();
}
- if (ShenandoahStoreValReadBarrier) {
- read_barrier_impl(masm, dst);
- }
#else
Unimplemented();
#endif
}
+void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst) {
+ if (ShenandoahLoadRefBarrier) {
+ Label done;
+ __ testptr(dst, dst);
+ __ jcc(Assembler::zero, done);
+ load_reference_barrier_not_null(masm, dst);
+ __ bind(done);
+ }
+}
+
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp_thread) {
bool on_oop = type == T_OBJECT || type == T_ARRAY;
- bool in_heap = (decorators & IN_HEAP) != 0;
bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
bool on_reference = on_weak || on_phantom;
- if (in_heap) {
- read_barrier_not_null(masm, src.base());
- }
- BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
- if (ShenandoahKeepAliveBarrier && on_oop && on_reference) {
- const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
- NOT_LP64(__ get_thread(thread));
+ BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+ if (on_oop) {
+ load_reference_barrier(masm, dst);
- // Generate the SATB pre-barrier code to log the value of
- // the referent field in an SATB buffer.
- shenandoah_write_barrier_pre(masm /* masm */,
- noreg /* obj */,
- dst /* pre_val */,
- thread /* thread */,
- tmp1 /* tmp */,
- true /* tosca_live */,
- true /* expand_call */);
+ if (ShenandoahKeepAliveBarrier && on_reference) {
+ const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
+ NOT_LP64(__ get_thread(thread));
+ // Generate the SATB pre-barrier code to log the value of
+ // the referent field in an SATB buffer.
+ shenandoah_write_barrier_pre(masm /* masm */,
+ noreg /* obj */,
+ dst /* pre_val */,
+ thread /* thread */,
+ tmp1 /* tmp */,
+ true /* tosca_live */,
+ true /* expand_call */);
+ }
}
}
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2) {
+ bool on_oop = type == T_OBJECT || type == T_ARRAY;
bool in_heap = (decorators & IN_HEAP) != 0;
bool as_normal = (decorators & AS_NORMAL) != 0;
- if (in_heap) {
- write_barrier(masm, dst.base());
- }
- if (type == T_OBJECT || type == T_ARRAY) {
+ if (on_oop && in_heap) {
bool needs_pre_barrier = as_normal;
Register tmp3 = LP64_ONLY(r8) NOT_LP64(rsi);
@@ -475,44 +474,6 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
}
}
-#ifndef _LP64
-void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm,
- Address obj1, jobject obj2) {
- Unimplemented();
-}
-
-void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm,
- Register obj1, jobject obj2) {
- Unimplemented();
-}
-#endif
-
-
-void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register op1, Register op2) {
- __ cmpptr(op1, op2);
- if (ShenandoahAcmpBarrier) {
- Label done;
- __ jccb(Assembler::equal, done);
- read_barrier(masm, op1);
- read_barrier(masm, op2);
- __ cmpptr(op1, op2);
- __ bind(done);
- }
-}
-
-void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register src1, Address src2) {
- __ cmpptr(src1, src2);
- if (ShenandoahAcmpBarrier) {
- Label done;
- __ jccb(Assembler::equal, done);
- __ movptr(rscratch2, src2);
- read_barrier(masm, src1);
- read_barrier(masm, rscratch2);
- __ cmpptr(src1, rscratch2);
- __ bind(done);
- }
-}
-
void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm,
Register thread, Register obj,
Register var_size_in_bytes,
@@ -562,28 +523,6 @@ void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm,
__ verify_tlab();
}
-void ShenandoahBarrierSetAssembler::resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
- bool oop_not_null = (decorators & IS_NOT_NULL) != 0;
- bool is_write = (decorators & ACCESS_WRITE) != 0;
- if (is_write) {
- if (oop_not_null) {
- write_barrier(masm, obj);
- } else {
- Label done;
- __ testptr(obj, obj);
- __ jcc(Assembler::zero, done);
- write_barrier(masm, obj);
- __ bind(done);
- }
- } else {
- if (oop_not_null) {
- read_barrier_not_null(masm, obj);
- } else {
- read_barrier(masm, obj);
- }
- }
-}
-
// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
#ifndef _LP64
@@ -622,14 +561,14 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
// Step 2. CAS had failed. This may be a false negative.
//
// The trouble comes when we compare the to-space pointer with the from-space
- // pointer to the same object. To resolve this, it will suffice to read both
- // oldval and the value from memory through the read barriers -- this will give
- // both to-space pointers. If they mismatch, then it was a legitimate failure.
+ // pointer to the same object. To resolve this, it will suffice to resolve both
+ // oldval and the value from memory -- this will give both to-space pointers.
+ // If they mismatch, then it was a legitimate failure.
//
if (UseCompressedOops) {
__ decode_heap_oop(tmp1);
}
- read_barrier_impl(masm, tmp1);
+ resolve_forward_pointer(masm, tmp1);
if (UseCompressedOops) {
__ movl(tmp2, oldval);
@@ -637,7 +576,7 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
} else {
__ movptr(tmp2, oldval);
}
- read_barrier_impl(masm, tmp2);
+ resolve_forward_pointer(masm, tmp2);
__ cmpptr(tmp1, tmp2);
__ jcc(Assembler::notEqual, done, true);
@@ -646,8 +585,8 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
//
// Corner case: it may happen that somebody stored the from-space pointer
// to memory while we were preparing for retry. Therefore, we can fail again
- // on retry, and so need to do this in loop, always re-reading the failure
- // witness through the read barrier.
+ // on retry, and so need to do this in loop, always resolving the failure
+ // witness.
__ bind(retry);
if (os::is_MP()) __ lock();
if (UseCompressedOops) {
@@ -663,7 +602,7 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
} else {
__ movptr(tmp2, oldval);
}
- read_barrier_impl(masm, tmp2);
+ resolve_forward_pointer(masm, tmp2);
__ cmpptr(tmp1, tmp2);
__ jcc(Assembler::equal, retry, true);
@@ -811,7 +750,7 @@ void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, Shen
}
-void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub) {
+void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
__ bind(*stub->entry());
Label done;
@@ -828,7 +767,7 @@ void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, Sh
__ jcc(Assembler::zero, done);
}
- write_barrier(ce->masm(), res);
+ load_reference_barrier_not_null(ce->masm(), res);
__ bind(done);
__ jmp(*stub->continuation());
@@ -898,16 +837,16 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
#endif // COMPILER1
-address ShenandoahBarrierSetAssembler::shenandoah_wb() {
- assert(_shenandoah_wb != NULL, "need write barrier stub");
- return _shenandoah_wb;
+address ShenandoahBarrierSetAssembler::shenandoah_lrb() {
+ assert(_shenandoah_lrb != NULL, "need load reference barrier stub");
+ return _shenandoah_lrb;
}
#define __ cgen->assembler()->
-address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator* cgen) {
+address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator* cgen) {
__ align(CodeEntryAlignment);
- StubCodeMark mark(cgen, "StubRoutines", "shenandoah_wb");
+ StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb");
address start = __ pc();
#ifdef _LP64
@@ -955,7 +894,7 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator*
__ push(r15);
save_vector_registers(cgen->assembler());
__ movptr(rdi, rax);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT), rdi);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT), rdi);
restore_vector_registers(cgen->assembler());
__ pop(r15);
__ pop(r14);
@@ -982,12 +921,12 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator*
#undef __
void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
- if (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier) {
+ if (ShenandoahLoadRefBarrier) {
int stub_code_size = 4096;
ResourceMark rm;
BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
CodeBuffer buf(bb);
StubCodeGenerator cgen(&buf);
- _shenandoah_wb = generate_shenandoah_wb(&cgen);
+ _shenandoah_lrb = generate_shenandoah_lrb(&cgen);
}
}
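
Taken together, the renamed helpers above emit a barrier whose fast paths mirror the old write barrier. The following is an illustrative C++ sketch of that control flow only, not part of the patch; the types and helper functions are invented stand-ins (the real code emits x86 assembly and reads the gc_state byte from ShenandoahThreadLocalData):

    // Sketch of the logic emitted by load_reference_barrier() (assumed names).
    struct oopDesc;
    typedef oopDesc* oop;

    bool heap_has_forwarded_or_evac_or_traversal();  // gc_state & (HAS_FORWARDED | EVACUATION | TRAVERSAL)
    bool heap_in_evac_or_traversal();                // gc_state & (EVACUATION | TRAVERSAL)
    oop  resolve_forward_pointer_not_null(oop obj);  // load of the Brooks forwarding pointer
    oop  shenandoah_lrb_slow(oop obj);               // the generated shenandoah_lrb stub / runtime call

    oop load_reference_barrier_sketch(oop obj) {
      if (obj == nullptr) return obj;                  // nulls never need fixing
      if (!heap_has_forwarded_or_evac_or_traversal()) {
        return obj;                                    // heap is stable: fast exit
      }
      obj = resolve_forward_pointer_not_null(obj);     // follow the forwarding pointer
      if (!heap_in_evac_or_traversal()) {
        return obj;                                    // resolved copy is sufficient
      }
      return shenandoah_lrb_slow(obj);                 // slow path: runtime may evacuate
    }
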
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp
index afbd204d7be..c748ce62da1 100644
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp
@@ -29,7 +29,7 @@
#ifdef COMPILER1
class LIR_Assembler;
class ShenandoahPreBarrierStub;
-class ShenandoahWriteBarrierStub;
+class ShenandoahLoadReferenceBarrierStub;
class StubAssembler;
class StubCodeGenerator;
#endif
@@ -37,7 +37,7 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:
- static address _shenandoah_wb;
+ static address _shenandoah_lrb;
void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
@@ -55,32 +55,30 @@ private:
bool tosca_live,
bool expand_call);
- void read_barrier(MacroAssembler* masm, Register dst);
- void read_barrier_impl(MacroAssembler* masm, Register dst);
+ void resolve_forward_pointer(MacroAssembler* masm, Register dst);
+ void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst);
- void read_barrier_not_null(MacroAssembler* masm, Register dst);
- void read_barrier_not_null_impl(MacroAssembler* masm, Register dst);
-
- void write_barrier(MacroAssembler* masm, Register dst);
- void write_barrier_impl(MacroAssembler* masm, Register dst);
+ void load_reference_barrier_not_null(MacroAssembler* masm, Register dst);
void storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp);
- address generate_shenandoah_wb(StubCodeGenerator* cgen);
+ address generate_shenandoah_lrb(StubCodeGenerator* cgen);
void save_vector_registers(MacroAssembler* masm);
void restore_vector_registers(MacroAssembler* masm);
public:
- static address shenandoah_wb();
+ static address shenandoah_lrb();
void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp);
#ifdef COMPILER1
void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
- void gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub);
+ void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub);
void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
#endif
+ void load_reference_barrier(MacroAssembler* masm, Register dst);
+
void cmpxchg_oop(MacroAssembler* masm,
Register res, Address addr, Register oldval, Register newval,
bool exchange, Register tmp1, Register tmp2);
@@ -93,16 +91,6 @@ public:
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2);
-#ifndef _LP64
- virtual void obj_equals(MacroAssembler* masm,
- Address obj1, jobject obj2);
- virtual void obj_equals(MacroAssembler* masm,
- Register obj1, jobject obj2);
-#endif
-
- virtual void obj_equals(MacroAssembler* masm, Register src1, Register src2);
- virtual void obj_equals(MacroAssembler* masm, Register src1, Address src2);
-
virtual void tlab_allocate(MacroAssembler* masm,
Register thread, Register obj,
Register var_size_in_bytes,
@@ -110,8 +98,6 @@ public:
Register t1, Register t2,
Label& slow_case);
- virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj);
-
virtual void barrier_stubs_init();
};
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetC1_x86.cpp b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetC1_x86.cpp
index c2a73d2b9a7..38ebe0af106 100644
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetC1_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetC1_x86.cpp
@@ -107,6 +107,7 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRIt
__ xchg(access.resolved_addr(), result, result, LIR_OprFact::illegalOpr);
if (access.is_oop()) {
+ result = load_reference_barrier(access.gen(), result, access.access_emit_info(), true);
if (ShenandoahSATBBarrier) {
pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
result /* pre_val */);
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad b/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad
index 40387d0c0d1..90481bcef88 100644
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad
@@ -23,47 +23,7 @@
source_hpp %{
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
-%}
-
-instruct shenandoahRB(rRegP dst, rRegP src, rFlagsReg cr) %{
- match(Set dst (ShenandoahReadBarrier src));
- effect(DEF dst, USE src);
- ins_cost(125); // XXX
- format %{ "shenandoah_rb $dst, $src" %}
- ins_encode %{
- Register d = $dst$$Register;
- Register s = $src$$Register;
- __ movptr(d, Address(s, ShenandoahBrooksPointer::byte_offset()));
- %}
- ins_pipe(ialu_reg_mem);
-%}
-
-instruct shenandoahRBNarrow(rRegP dst, rRegN src) %{
- predicate(UseCompressedOops && (Universe::narrow_oop_shift() == 0));
- match(Set dst (ShenandoahReadBarrier (DecodeN src)));
- effect(DEF dst, USE src);
- ins_cost(125); // XXX
- format %{ "shenandoah_rb $dst, $src" %}
- ins_encode %{
- Register d = $dst$$Register;
- Register s = $src$$Register;
- __ movptr(d, Address(r12, s, Address::times_1, ShenandoahBrooksPointer::byte_offset()));
- %}
- ins_pipe(ialu_reg_mem);
-%}
-
-instruct shenandoahRBNarrowShift(rRegP dst, rRegN src) %{
- predicate(UseCompressedOops && (Universe::narrow_oop_shift() == Address::times_8));
- match(Set dst (ShenandoahReadBarrier (DecodeN src)));
- effect(DEF dst, USE src);
- ins_cost(125); // XXX
- format %{ "shenandoah_rb $dst, $src" %}
- ins_encode %{
- Register d = $dst$$Register;
- Register s = $src$$Register;
- __ movptr(d, Address(r12, s, Address::times_8, ShenandoahBrooksPointer::byte_offset()));
- %}
- ins_pipe(ialu_reg_mem);
+#include "gc/shenandoah/c2/shenandoahSupport.hpp"
%}
instruct compareAndSwapP_shenandoah(rRegI res,
diff --git a/src/hotspot/cpu/x86/x86_32.ad b/src/hotspot/cpu/x86/x86_32.ad
index 34679f2cd23..ef5125499ff 100644
--- a/src/hotspot/cpu/x86/x86_32.ad
+++ b/src/hotspot/cpu/x86/x86_32.ad
@@ -1309,7 +1309,7 @@ void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
- return implementation( NULL, ra_, true, NULL );
+ return MachNode::size(ra_);
}
diff --git a/src/hotspot/os/aix/os_perf_aix.cpp b/src/hotspot/os/aix/os_perf_aix.cpp
index 033bf843095..c50019c7176 100644
--- a/src/hotspot/os/aix/os_perf_aix.cpp
+++ b/src/hotspot/os/aix/os_perf_aix.cpp
@@ -336,20 +336,8 @@ static OSReturn get_total_ticks(int which_logical_cpu, CPUPerfTicks* pticks) {
fclose(fh);
if (n < expected_assign_count || logical_cpu != which_logical_cpu) {
-#ifdef DEBUG_LINUX_PROC_STAT
- vm_fprintf(stderr, "[stat] read failed");
-#endif
return OS_ERR;
}
-
-#ifdef DEBUG_LINUX_PROC_STAT
- vm_fprintf(stderr, "[stat] read "
- UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
- UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " \n",
- userTicks, niceTicks, systemTicks, idleTicks,
- iowTicks, irqTicks, sirqTicks);
-#endif
-
pticks->used = userTicks + niceTicks;
pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
pticks->total = userTicks + niceTicks + systemTicks + idleTicks +
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index 30ed4d859ca..69bc3e764f1 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -1598,6 +1598,8 @@ void os::get_summary_cpu_info(char* buf, size_t buflen) {
}
void os::print_memory_info(outputStream* st) {
+ xsw_usage swap_usage;
+ size_t size = sizeof(swap_usage);
st->print("Memory:");
st->print(" %dk page", os::vm_page_size()>>10);
@@ -1606,6 +1608,16 @@ void os::print_memory_info(outputStream* st) {
os::physical_memory() >> 10);
st->print("(" UINT64_FORMAT "k free)",
os::available_memory() >> 10);
+
+ if((sysctlbyname("vm.swapusage", &swap_usage, &size, NULL, 0) == 0) || (errno == ENOMEM)) {
+ if (size >= offset_of(xsw_usage, xsu_used)) {
+ st->print(", swap " UINT64_FORMAT "k",
+ ((julong) swap_usage.xsu_total) >> 10);
+ st->print("(" UINT64_FORMAT "k free)",
+ ((julong) swap_usage.xsu_avail) >> 10);
+ }
+ }
+
st->cr();
}
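
The hunk above extends print_memory_info() with the vm.swapusage sysctl. A minimal standalone sketch of that query (macOS-style xsw_usage; other BSD variants may not expose this sysctl):

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <cstdio>

    int main() {
      xsw_usage swap_usage;
      size_t size = sizeof(swap_usage);
      // Same call the patch adds; xsu_total and xsu_avail are byte counts.
      if (sysctlbyname("vm.swapusage", &swap_usage, &size, NULL, 0) == 0) {
        std::printf("swap %lluk (%lluk free)\n",
                    (unsigned long long)(swap_usage.xsu_total >> 10),
                    (unsigned long long)(swap_usage.xsu_avail >> 10));
      }
      return 0;
    }
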
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index 515ce5b48cb..184e75a8cd4 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -227,6 +227,82 @@ julong os::physical_memory() {
return phys_mem;
}
+static uint64_t initial_total_ticks = 0;
+static uint64_t initial_steal_ticks = 0;
+static bool has_initial_tick_info = false;
+
+static void next_line(FILE *f) {
+ int c;
+ do {
+ c = fgetc(f);
+ } while (c != '\n' && c != EOF);
+}
+
+bool os::Linux::get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu) {
+ FILE* fh;
+ uint64_t userTicks, niceTicks, systemTicks, idleTicks;
+ // since at least kernel 2.6 : iowait: time waiting for I/O to complete
+ // irq: time servicing interrupts; softirq: time servicing softirqs
+ uint64_t iowTicks = 0, irqTicks = 0, sirqTicks= 0;
+ // steal (since kernel 2.6.11): time spent in other OS when running in a virtualized environment
+ uint64_t stealTicks = 0;
+ // guest (since kernel 2.6.24): time spent running a virtual CPU for guest OS under the
+ // control of the Linux kernel
+ uint64_t guestNiceTicks = 0;
+ int logical_cpu = -1;
+ const int required_tickinfo_count = (which_logical_cpu == -1) ? 4 : 5;
+ int n;
+
+ memset(pticks, 0, sizeof(CPUPerfTicks));
+
+ if ((fh = fopen("/proc/stat", "r")) == NULL) {
+ return false;
+ }
+
+ if (which_logical_cpu == -1) {
+ n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
+ UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
+ UINT64_FORMAT " " UINT64_FORMAT " ",
+ &userTicks, &niceTicks, &systemTicks, &idleTicks,
+ &iowTicks, &irqTicks, &sirqTicks,
+ &stealTicks, &guestNiceTicks);
+ } else {
+ // Move to next line
+ next_line(fh);
+
+ // find the line for requested cpu faster to just iterate linefeeds?
+ for (int i = 0; i < which_logical_cpu; i++) {
+ next_line(fh);
+ }
+
+ n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
+ UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
+ UINT64_FORMAT " " UINT64_FORMAT " ",
+ &logical_cpu, &userTicks, &niceTicks,
+ &systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks,
+ &stealTicks, &guestNiceTicks);
+ }
+
+ fclose(fh);
+ if (n < required_tickinfo_count || logical_cpu != which_logical_cpu) {
+ return false;
+ }
+ pticks->used = userTicks + niceTicks;
+ pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
+ pticks->total = userTicks + niceTicks + systemTicks + idleTicks +
+ iowTicks + irqTicks + sirqTicks + stealTicks + guestNiceTicks;
+
+ if (n > required_tickinfo_count + 3) {
+ pticks->steal = stealTicks;
+ pticks->has_steal_ticks = true;
+ } else {
+ pticks->steal = 0;
+ pticks->has_steal_ticks = false;
+ }
+
+ return true;
+}
+
// Return true if user is running as root.
bool os::have_special_privileges() {
@@ -1977,6 +2053,8 @@ void os::print_os_info(outputStream* st) {
os::Linux::print_container_info(st);
os::Linux::print_virtualization_info(st);
+
+ os::Linux::print_steal_info(st);
}
// Try to identify popular distros.
@@ -2265,6 +2343,24 @@ void os::Linux::print_virtualization_info(outputStream* st) {
#endif
}
+void os::Linux::print_steal_info(outputStream* st) {
+ if (has_initial_tick_info) {
+ CPUPerfTicks pticks;
+ bool res = os::Linux::get_tick_information(&pticks, -1);
+
+ if (res && pticks.has_steal_ticks) {
+ uint64_t steal_ticks_difference = pticks.steal - initial_steal_ticks;
+ uint64_t total_ticks_difference = pticks.total - initial_total_ticks;
+ double steal_ticks_perc = 0.0;
+ if (total_ticks_difference != 0) {
+ steal_ticks_perc = (double) steal_ticks_difference / total_ticks_difference;
+ }
+ st->print_cr("Steal ticks since vm start: " UINT64_FORMAT, steal_ticks_difference);
+ st->print_cr("Steal ticks percentage since vm start:%7.3f", steal_ticks_perc);
+ }
+ }
+}
+
void os::print_memory_info(outputStream* st) {
st->print("Memory:");
@@ -4989,6 +5085,15 @@ void os::init(void) {
Linux::initialize_os_info();
+ os::Linux::CPUPerfTicks pticks;
+ bool res = os::Linux::get_tick_information(&pticks, -1);
+
+ if (res && pticks.has_steal_ticks) {
+ has_initial_tick_info = true;
+ initial_total_ticks = pticks.total;
+ initial_steal_ticks = pticks.steal;
+ }
+
// _main_thread points to the thread that created/loaded the JVM.
Linux::_main_thread = pthread_self();
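
The steal-time reporting added above samples /proc/stat once in os::init() and once when the hs_err/os info is printed, then reports the difference. A standalone sketch of that calculation, mirroring get_tick_information() and print_steal_info() (illustrative only, not part of the patch):

    #include <cstdio>
    #include <cstring>

    struct Sample {
      unsigned long long total;
      unsigned long long steal;
    };

    // Read the aggregate "cpu" line; field order is user nice system idle
    // iowait irq softirq steal guest guest_nice (steal needs kernel >= 2.6.11).
    static bool read_cpu_sample(Sample* s) {
      FILE* fh = std::fopen("/proc/stat", "r");
      if (fh == NULL) return false;
      unsigned long long v[10];
      std::memset(v, 0, sizeof(v));
      int n = std::fscanf(fh, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
                          &v[0], &v[1], &v[2], &v[3], &v[4],
                          &v[5], &v[6], &v[7], &v[8], &v[9]);
      std::fclose(fh);
      if (n < 8) return false;  // steal field not available
      // Mirror the patch: total excludes guest time (v[8]) but includes guest_nice.
      s->total = v[0] + v[1] + v[2] + v[3] + v[4] + v[5] + v[6] + v[7] + v[9];
      s->steal = v[7];
      return true;
    }

    int main() {
      Sample start, now;
      if (!read_cpu_sample(&start)) return 1;
      // ... time passes (the VM samples at startup and again when printing) ...
      if (!read_cpu_sample(&now)) return 1;
      unsigned long long steal_diff = now.steal - start.steal;
      unsigned long long total_diff = now.total - start.total;
      double steal_frac = (total_diff != 0) ? (double) steal_diff / total_diff : 0.0;
      std::printf("Steal ticks: %llu (%.3f of all ticks)\n", steal_diff, steal_frac);
      return 0;
    }
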
diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp
index 2b5cac7883d..2919f03a30d 100644
--- a/src/hotspot/os/linux/os_linux.hpp
+++ b/src/hotspot/os/linux/os_linux.hpp
@@ -109,12 +109,23 @@ class Linux {
static void print_full_memory_info(outputStream* st);
static void print_container_info(outputStream* st);
static void print_virtualization_info(outputStream* st);
+ static void print_steal_info(outputStream* st);
static void print_distro_info(outputStream* st);
static void print_libversion_info(outputStream* st);
static void print_proc_sys_info(outputStream* st);
static void print_ld_preload_file(outputStream* st);
public:
+ struct CPUPerfTicks {
+ uint64_t used;
+ uint64_t usedKernel;
+ uint64_t total;
+ uint64_t steal;
+ bool has_steal_ticks;
+ };
+
+ // which_logical_cpu=-1 returns accumulated ticks for all cpus.
+ static bool get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu);
static bool _stack_is_executable;
static void *dlopen_helper(const char *name, char *ebuf, int ebuflen);
static void *dll_load_in_vmthread(const char *name, char *ebuf, int ebuflen);
diff --git a/src/hotspot/os/linux/os_perf_linux.cpp b/src/hotspot/os/linux/os_perf_linux.cpp
index 9572921f994..2b35dab9cf0 100644
--- a/src/hotspot/os/linux/os_perf_linux.cpp
+++ b/src/hotspot/os/linux/os_perf_linux.cpp
@@ -206,13 +206,6 @@ format: %d %s %c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %l
# define _SCANFMT_
#endif
-
-struct CPUPerfTicks {
- uint64_t used;
- uint64_t usedKernel;
- uint64_t total;
-};
-
typedef enum {
CPU_LOAD_VM_ONLY,
CPU_LOAD_GLOBAL,
@@ -227,8 +220,8 @@ enum {
struct CPUPerfCounters {
int nProcs;
- CPUPerfTicks jvmTicks;
- CPUPerfTicks* cpus;
+ os::Linux::CPUPerfTicks jvmTicks;
+ os::Linux::CPUPerfTicks* cpus;
};
static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters, double* pkernelLoad, CpuLoadTarget target);
@@ -287,80 +280,6 @@ static FILE* open_statfile(void) {
return f;
}
-static void
-next_line(FILE *f) {
- int c;
- do {
- c = fgetc(f);
- } while (c != '\n' && c != EOF);
-}
-
-/**
- * Return the total number of ticks since the system was booted.
- * If the usedTicks parameter is not NULL, it will be filled with
- * the number of ticks spent on actual processes (user, system or
- * nice processes) since system boot. Note that this is the total number
- * of "executed" ticks on _all_ CPU:s, that is on a n-way system it is
- * n times the number of ticks that has passed in clock time.
- *
- * Returns a negative value if the reading of the ticks failed.
- */
-static OSReturn get_total_ticks(int which_logical_cpu, CPUPerfTicks* pticks) {
- FILE* fh;
- uint64_t userTicks, niceTicks, systemTicks, idleTicks;
- uint64_t iowTicks = 0, irqTicks = 0, sirqTicks= 0;
- int logical_cpu = -1;
- const int expected_assign_count = (-1 == which_logical_cpu) ? 4 : 5;
- int n;
-
- if ((fh = open_statfile()) == NULL) {
- return OS_ERR;
- }
- if (-1 == which_logical_cpu) {
- n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
- UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT,
- &userTicks, &niceTicks, &systemTicks, &idleTicks,
- &iowTicks, &irqTicks, &sirqTicks);
- } else {
- // Move to next line
- next_line(fh);
-
- // find the line for requested cpu faster to just iterate linefeeds?
- for (int i = 0; i < which_logical_cpu; i++) {
- next_line(fh);
- }
-
- n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
- UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT,
- &logical_cpu, &userTicks, &niceTicks,
- &systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks);
- }
-
- fclose(fh);
- if (n < expected_assign_count || logical_cpu != which_logical_cpu) {
-#ifdef DEBUG_LINUX_PROC_STAT
- vm_fprintf(stderr, "[stat] read failed");
-#endif
- return OS_ERR;
- }
-
-#ifdef DEBUG_LINUX_PROC_STAT
- vm_fprintf(stderr, "[stat] read "
- UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
- UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " \n",
- userTicks, niceTicks, systemTicks, idleTicks,
- iowTicks, irqTicks, sirqTicks);
-#endif
-
- pticks->used = userTicks + niceTicks;
- pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
- pticks->total = userTicks + niceTicks + systemTicks + idleTicks +
- iowTicks + irqTicks + sirqTicks;
-
- return OS_OK;
-}
-
-
static int get_systemtype(void) {
static int procEntriesType = UNDETECTED;
DIR *taskDir;
@@ -391,7 +310,7 @@ static int read_ticks(const char* procfile, uint64_t* userTicks, uint64_t* syste
* Return the number of ticks spent in any of the processes belonging
* to the JVM on any CPU.
*/
-static OSReturn get_jvm_ticks(CPUPerfTicks* pticks) {
+static OSReturn get_jvm_ticks(os::Linux::CPUPerfTicks* pticks) {
uint64_t userTicks;
uint64_t systemTicks;
@@ -404,7 +323,7 @@ static OSReturn get_jvm_ticks(CPUPerfTicks* pticks) {
}
// get the total
- if (get_total_ticks(-1, pticks) != OS_OK) {
+ if (! os::Linux::get_tick_information(pticks, -1)) {
return OS_ERR;
}
@@ -423,8 +342,8 @@ static OSReturn get_jvm_ticks(CPUPerfTicks* pticks) {
*/
static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters, double* pkernelLoad, CpuLoadTarget target) {
uint64_t udiff, kdiff, tdiff;
- CPUPerfTicks* pticks;
- CPUPerfTicks tmp;
+ os::Linux::CPUPerfTicks* pticks;
+ os::Linux::CPUPerfTicks tmp;
double user_load;
*pkernelLoad = 0.0;
@@ -443,7 +362,7 @@ static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters, dou
if (get_jvm_ticks(pticks) != OS_OK) {
return -1.0;
}
- } else if (get_total_ticks(which_logical_cpu, pticks) != OS_OK) {
+ } else if (! os::Linux::get_tick_information(pticks, which_logical_cpu)) {
return -1.0;
}
@@ -584,19 +503,19 @@ CPUPerformanceInterface::CPUPerformance::CPUPerformance() {
}
bool CPUPerformanceInterface::CPUPerformance::initialize() {
- size_t tick_array_size = (_counters.nProcs +1) * sizeof(CPUPerfTicks);
- _counters.cpus = (CPUPerfTicks*)NEW_C_HEAP_ARRAY(char, tick_array_size, mtInternal);
+ size_t tick_array_size = (_counters.nProcs +1) * sizeof(os::Linux::CPUPerfTicks);
+ _counters.cpus = (os::Linux::CPUPerfTicks*)NEW_C_HEAP_ARRAY(char, tick_array_size, mtInternal);
if (NULL == _counters.cpus) {
return false;
}
memset(_counters.cpus, 0, tick_array_size);
// For the CPU load total
- get_total_ticks(-1, &_counters.cpus[_counters.nProcs]);
+ os::Linux::get_tick_information(&_counters.cpus[_counters.nProcs], -1);
// For each CPU
for (int i = 0; i < _counters.nProcs; i++) {
- get_total_ticks(i, &_counters.cpus[i]);
+ os::Linux::get_tick_information(&_counters.cpus[i], i);
}
// For JVM load
get_jvm_ticks(&_counters.jvmTicks);
diff --git a/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp
index 9213c8772da..0a0633a71ce 100644
--- a/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp
+++ b/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp
@@ -66,7 +66,7 @@ bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext,
if (ret_frame.is_interpreted_frame()) {
frame::ijava_state* istate = ret_frame.get_ijava_state();
- if (!((Method*)(istate->method))->is_metaspace_object()) {
+ if (MetaspaceObj::is_valid((Method*)(istate->method)) == false) {
return false;
}
uint64_t reg_bcp = uc->uc_mcontext.regs->gpr[14/*R14_bcp*/];
diff --git a/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp b/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp
index c3379f5049e..9695b9a898f 100644
--- a/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp
+++ b/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp
@@ -63,7 +63,8 @@ bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext,
if (ret_frame.is_interpreted_frame()) {
frame::z_ijava_state* istate = ret_frame.ijava_state_unchecked();
- if ((stack_base() >= (address)istate && (address)istate > stack_end()) || !((Method*)(istate->method))->is_metaspace_object()) {
+ if ((stack_base() >= (address)istate && (address)istate > stack_end()) ||
+ MetaspaceObj::is_valid((Method*)(istate->method)) == false) {
return false;
}
uint64_t reg_bcp = uc->uc_mcontext.gregs[13/*Z_BCP*/];
diff --git a/src/hotspot/share/adlc/formssel.cpp b/src/hotspot/share/adlc/formssel.cpp
index 3ddf421e7c3..d4a32b561d8 100644
--- a/src/hotspot/share/adlc/formssel.cpp
+++ b/src/hotspot/share/adlc/formssel.cpp
@@ -777,8 +777,7 @@ bool InstructForm::captures_bottom_type(FormDict &globals) const {
!strcmp(_matrule->_rChild->_opType,"CompareAndExchangeP") ||
!strcmp(_matrule->_rChild->_opType,"CompareAndExchangeN") ||
!strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") ||
- !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN") ||
- !strcmp(_matrule->_rChild->_opType,"ShenandoahReadBarrier"))) return true;
+ !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN"))) return true;
else if ( is_ideal_load() == Form::idealP ) return true;
else if ( is_ideal_store() != Form::none ) return true;
@@ -3506,7 +3505,6 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
"ClearArray",
"GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
"GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
- "ShenandoahReadBarrier",
"LoadBarrierSlowReg", "LoadBarrierWeakSlowReg"
};
int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp
index d76751f1bef..193fed590ba 100644
--- a/src/hotspot/share/classfile/javaClasses.cpp
+++ b/src/hotspot/share/classfile/javaClasses.cpp
@@ -218,7 +218,7 @@ public:
void java_lang_String::set_compact_strings(bool value) {
CompactStringsFixup fix(value);
- InstanceKlass::cast(SystemDictionary::String_klass())->do_local_static_fields(&fix);
+ SystemDictionary::String_klass()->do_local_static_fields(&fix);
}
Handle java_lang_String::basic_create(int length, bool is_latin1, TRAPS) {
@@ -3991,6 +3991,48 @@ int java_lang_System::in_offset_in_bytes() { return static_in_offset; }
int java_lang_System::out_offset_in_bytes() { return static_out_offset; }
int java_lang_System::err_offset_in_bytes() { return static_err_offset; }
+// Support for jdk_internal_misc_UnsafeConstants
+//
+class UnsafeConstantsFixup : public FieldClosure {
+private:
+ int _address_size;
+ int _page_size;
+ bool _big_endian;
+ bool _use_unaligned_access;
+public:
+ UnsafeConstantsFixup() {
+ // round up values for all static final fields
+ _address_size = sizeof(void*);
+ _page_size = os::vm_page_size();
+ _big_endian = LITTLE_ENDIAN_ONLY(false) BIG_ENDIAN_ONLY(true);
+ _use_unaligned_access = UseUnalignedAccesses;
+ }
+
+ void do_field(fieldDescriptor* fd) {
+ oop mirror = fd->field_holder()->java_mirror();
+ assert(mirror != NULL, "UnsafeConstants must have mirror already");
+ assert(fd->field_holder() == SystemDictionary::UnsafeConstants_klass(), "Should be UnsafeConstants");
+ assert(fd->is_final(), "fields of UnsafeConstants must be final");
+ assert(fd->is_static(), "fields of UnsafeConstants must be static");
+ if (fd->name() == vmSymbols::address_size_name()) {
+ mirror->int_field_put(fd->offset(), _address_size);
+ } else if (fd->name() == vmSymbols::page_size_name()) {
+ mirror->int_field_put(fd->offset(), _page_size);
+ } else if (fd->name() == vmSymbols::big_endian_name()) {
+ mirror->bool_field_put(fd->offset(), _big_endian);
+ } else if (fd->name() == vmSymbols::use_unaligned_access_name()) {
+ mirror->bool_field_put(fd->offset(), _use_unaligned_access);
+ } else {
+ assert(false, "unexpected UnsafeConstants field");
+ }
+ }
+};
+
+void jdk_internal_misc_UnsafeConstants::set_unsafe_constants() {
+ UnsafeConstantsFixup fixup;
+ SystemDictionary::UnsafeConstants_klass()->do_local_static_fields(&fixup);
+}
+
int java_lang_Class::_klass_offset;
int java_lang_Class::_array_klass_offset;
int java_lang_Class::_oop_size_offset;
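
UnsafeConstantsFixup injects values that the VM already knows at startup into the static final fields of jdk.internal.misc.UnsafeConstants. A small standalone sketch of how two of those values are derived (the VM uses sizeof(void*) and a compile-time endianness macro; the runtime probe below is only for illustration):

    #include <cstdio>
    #include <cstring>
    #include <cstdint>

    int main() {
      // ADDRESS_SIZE0: width of a native pointer, in bytes.
      int address_size = (int) sizeof(void*);

      // BIG_ENDIAN: the VM bakes this in via LITTLE_ENDIAN_ONLY/BIG_ENDIAN_ONLY;
      // a runtime probe gives the same answer.
      uint16_t probe = 0x0102;
      uint8_t first_byte;
      std::memcpy(&first_byte, &probe, 1);
      bool big_endian = (first_byte == 0x01);

      std::printf("ADDRESS_SIZE0=%d BIG_ENDIAN=%s\n",
                  address_size, big_endian ? "true" : "false");
      return 0;
    }
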
diff --git a/src/hotspot/share/classfile/javaClasses.hpp b/src/hotspot/share/classfile/javaClasses.hpp
index df164ce4f9c..b7682befc29 100644
--- a/src/hotspot/share/classfile/javaClasses.hpp
+++ b/src/hotspot/share/classfile/javaClasses.hpp
@@ -81,6 +81,7 @@
f(java_lang_StackFrameInfo) \
f(java_lang_LiveStackFrameInfo) \
f(java_util_concurrent_locks_AbstractOwnableSynchronizer) \
+ f(jdk_internal_misc_UnsafeConstants) \
//end
#define BASIC_JAVA_CLASSES_DO(f) \
@@ -1483,6 +1484,15 @@ class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
};
+ // Interface to jdk.internal.misc.UnsafeConsants
+
+class jdk_internal_misc_UnsafeConstants : AllStatic {
+ public:
+ static void set_unsafe_constants();
+ static void compute_offsets() { }
+ static void serialize_offsets(SerializeClosure* f) { }
+};
+
// Use to declare fields that need to be injected into Java classes
// for the JVM to use. The name_index and signature_index are
// declared in vmSymbols. The may_be_java flag is used to declare
diff --git a/src/hotspot/share/classfile/protectionDomainCache.cpp b/src/hotspot/share/classfile/protectionDomainCache.cpp
index f2a2b59206a..0b2faaa3163 100644
--- a/src/hotspot/share/classfile/protectionDomainCache.cpp
+++ b/src/hotspot/share/classfile/protectionDomainCache.cpp
@@ -45,7 +45,7 @@ int ProtectionDomainCacheTable::index_for(Handle protection_domain) {
}
ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size)
- : Hashtable<ClassLoaderWeakHandle, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry))
+ : Hashtable<WeakHandle<vm_class_loader_data>, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry))
{ _dead_entries = false;
_total_oops_removed = 0;
}
@@ -180,8 +180,8 @@ ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, uns
protection_domain->print_value_on(&ls);
ls.cr();
}
- ClassLoaderWeakHandle w = ClassLoaderWeakHandle::create(protection_domain);
+ WeakHandle<vm_class_loader_data> w = WeakHandle<vm_class_loader_data>::create(protection_domain);
ProtectionDomainCacheEntry* p = new_entry(hash, w);
- Hashtable<ClassLoaderWeakHandle, mtClass>::add_entry(index, p);
+ Hashtable<WeakHandle<vm_class_loader_data>, mtClass>::add_entry(index, p);
return p;
}
diff --git a/src/hotspot/share/classfile/protectionDomainCache.hpp b/src/hotspot/share/classfile/protectionDomainCache.hpp
index e3a1cdf5497..c3d27f01097 100644
--- a/src/hotspot/share/classfile/protectionDomainCache.hpp
+++ b/src/hotspot/share/classfile/protectionDomainCache.hpp
@@ -35,18 +35,18 @@
// to dictionary.hpp pd_set for more information about how protection domain entries
// are used.
// This table is walked during GC, rather than the class loader data graph dictionaries.
-class ProtectionDomainCacheEntry : public HashtableEntry<ClassLoaderWeakHandle, mtClass> {
+class ProtectionDomainCacheEntry : public HashtableEntry<WeakHandle<vm_class_loader_data>, mtClass> {
friend class VMStructs;
public:
oop object();
oop object_no_keepalive();
ProtectionDomainCacheEntry* next() {
- return (ProtectionDomainCacheEntry*)HashtableEntry<ClassLoaderWeakHandle, mtClass>::next();
+ return (ProtectionDomainCacheEntry*)HashtableEntry<WeakHandle<vm_class_loader_data>, mtClass>::next();
}
ProtectionDomainCacheEntry** next_addr() {
- return (ProtectionDomainCacheEntry**)HashtableEntry<ClassLoaderWeakHandle, mtClass>::next_addr();
+ return (ProtectionDomainCacheEntry**)HashtableEntry<WeakHandle<vm_class_loader_data>, mtClass>::next_addr();
}
void verify();
@@ -61,21 +61,21 @@ class ProtectionDomainCacheEntry : public HashtableEntry<ClassLoaderWeakHandle, mtClass> {
-class ProtectionDomainCacheTable : public Hashtable<ClassLoaderWeakHandle, mtClass> {
+class ProtectionDomainCacheTable : public Hashtable<WeakHandle<vm_class_loader_data>, mtClass> {
friend class VMStructs;
private:
ProtectionDomainCacheEntry* bucket(int i) const {
- return (ProtectionDomainCacheEntry*) Hashtable<ClassLoaderWeakHandle, mtClass>::bucket(i);
+ return (ProtectionDomainCacheEntry*) Hashtable<WeakHandle<vm_class_loader_data>, mtClass>::bucket(i);
}
// The following method is not MT-safe and must be done under lock.
ProtectionDomainCacheEntry** bucket_addr(int i) {
- return (ProtectionDomainCacheEntry**) Hashtable<ClassLoaderWeakHandle, mtClass>::bucket_addr(i);
+ return (ProtectionDomainCacheEntry**) Hashtable<WeakHandle<vm_class_loader_data>, mtClass>::bucket_addr(i);
}
- ProtectionDomainCacheEntry* new_entry(unsigned int hash, ClassLoaderWeakHandle protection_domain) {
+ ProtectionDomainCacheEntry* new_entry(unsigned int hash, WeakHandle<vm_class_loader_data> protection_domain) {
ProtectionDomainCacheEntry* entry = (ProtectionDomainCacheEntry*)
- Hashtable<ClassLoaderWeakHandle, mtClass>::new_entry(hash, protection_domain);
+ Hashtable<WeakHandle<vm_class_loader_data>, mtClass>::new_entry(hash, protection_domain);
return entry;
}
diff --git a/src/hotspot/share/classfile/systemDictionary.hpp b/src/hotspot/share/classfile/systemDictionary.hpp
index e62752fe1ed..a0879378310 100644
--- a/src/hotspot/share/classfile/systemDictionary.hpp
+++ b/src/hotspot/share/classfile/systemDictionary.hpp
@@ -177,6 +177,7 @@ class OopStorage;
do_klass(AssertionStatusDirectives_klass, java_lang_AssertionStatusDirectives ) \
do_klass(StringBuffer_klass, java_lang_StringBuffer ) \
do_klass(StringBuilder_klass, java_lang_StringBuilder ) \
+ do_klass(UnsafeConstants_klass, jdk_internal_misc_UnsafeConstants ) \
do_klass(internal_Unsafe_klass, jdk_internal_misc_Unsafe ) \
do_klass(module_Modules_klass, jdk_internal_module_Modules ) \
\
diff --git a/src/hotspot/share/classfile/verifier.cpp b/src/hotspot/share/classfile/verifier.cpp
index ad9644b48b8..b480039db20 100644
--- a/src/hotspot/share/classfile/verifier.cpp
+++ b/src/hotspot/share/classfile/verifier.cpp
@@ -2060,7 +2060,9 @@ void ClassVerifier::class_format_error(const char* msg, ...) {
ss.vprint(msg, va);
va_end(va);
if (!_method.is_null()) {
- ss.print(" in method %s", _method->name_and_sig_as_C_string());
+ ss.print(" in method '");
+ _method->print_external_name(&ss);
+ ss.print("'");
}
_message = ss.as_string();
}
diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp
index c6d4c7fdb02..142ddf749a9 100644
--- a/src/hotspot/share/classfile/vmSymbols.hpp
+++ b/src/hotspot/share/classfile/vmSymbols.hpp
@@ -446,6 +446,10 @@
template(module_entry_name, "module_entry") \
template(resolved_references_name, "<resolved_references>") \
template(init_lock_name, "<init_lock>") \
+ template(address_size_name, "ADDRESS_SIZE0") \
+ template(page_size_name, "PAGE_SIZE") \
+ template(big_endian_name, "BIG_ENDIAN") \
+ template(use_unaligned_access_name, "UNALIGNED_ACCESS") \
\
/* name symbols needed by intrinsics */ \
VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, template, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \
@@ -1070,6 +1074,9 @@
do_intrinsic(_updateByteBufferAdler32, java_util_zip_Adler32, updateByteBuffer_A_name, updateByteBuffer_signature, F_SN) \
do_name( updateByteBuffer_A_name, "updateByteBuffer") \
\
+ /* support for UnsafeConstants */ \
+ do_class(jdk_internal_misc_UnsafeConstants, "jdk/internal/misc/UnsafeConstants") \
+ \
/* support for Unsafe */ \
do_class(jdk_internal_misc_Unsafe, "jdk/internal/misc/Unsafe") \
\
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
index 8d3395cdac4..5bd8e0d7b30 100644
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -780,13 +780,14 @@ void CodeCache::increment_unloading_cycle() {
CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
: _is_unloading_behaviour(is_alive)
{
+ _saved_behaviour = IsUnloadingBehaviour::current();
IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
increment_unloading_cycle();
DependencyContext::cleaning_start();
}
CodeCache::UnloadingScope::~UnloadingScope() {
- IsUnloadingBehaviour::set_current(NULL);
+ IsUnloadingBehaviour::set_current(_saved_behaviour);
DependencyContext::cleaning_end();
}
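
The UnloadingScope change records whichever IsUnloadingBehaviour was current when the scope was entered and reinstalls it in the destructor, rather than clearing the slot to NULL. A generic sketch of that save/restore idiom (illustrative only, not HotSpot API):

    template <typename T>
    class ScopedOverride {
      T* _slot;     // location being overridden
      T  _saved;    // value to restore on scope exit
    public:
      ScopedOverride(T* slot, T new_value) : _slot(slot), _saved(*slot) {
        *_slot = new_value;   // install the override for the duration of the scope
      }
      ~ScopedOverride() {
        *_slot = _saved;      // restore the previous value, even if it was non-NULL
      }
    };
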
diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp
index eb6ebd59242..1013163c5ba 100644
--- a/src/hotspot/share/code/codeCache.hpp
+++ b/src/hotspot/share/code/codeCache.hpp
@@ -170,6 +170,7 @@ class CodeCache : AllStatic {
// "unloading_occurred" controls whether metadata should be cleaned because of class unloading.
class UnloadingScope: StackObj {
ClosureIsUnloadingBehaviour _is_unloading_behaviour;
+ IsUnloadingBehaviour* _saved_behaviour;
public:
UnloadingScope(BoolObjectClosure* is_alive);
diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp
index 566113240e8..d8e6066452f 100644
--- a/src/hotspot/share/compiler/compileBroker.cpp
+++ b/src/hotspot/share/compiler/compileBroker.cpp
@@ -135,11 +135,6 @@ CompileLog** CompileBroker::_compiler2_logs = NULL;
volatile jint CompileBroker::_compilation_id = 0;
volatile jint CompileBroker::_osr_compilation_id = 0;
-// Debugging information
-int CompileBroker::_last_compile_type = no_compile;
-int CompileBroker::_last_compile_level = CompLevel_none;
-char CompileBroker::_last_method_compiled[CompileBroker::name_buffer_length];
-
// Performance counters
PerfCounter* CompileBroker::_perf_total_compilation = NULL;
PerfCounter* CompileBroker::_perf_osr_compilation = NULL;
@@ -577,8 +572,6 @@ CompilerCounters::CompilerCounters() {
//
// Initialize the Compilation object
void CompileBroker::compilation_init_phase1(TRAPS) {
- _last_method_compiled[0] = '\0';
-
// No need to initialize compilation system if we do not use it.
if (!UseCompiler) {
return;
@@ -2032,8 +2025,10 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
// Look up matching directives
directive = DirectivesStack::getMatchingDirective(method, comp);
- // Save information about this method in case of failure.
- set_last_compile(thread, method, is_osr, task_level);
+ // Update compile information when using perfdata.
+ if (UsePerfData) {
+ update_compile_perf_data(thread, method, is_osr);
+ }
DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level));
}
@@ -2264,58 +2259,49 @@ void CompileBroker::handle_full_code_cache(int code_blob_type) {
}
// ------------------------------------------------------------------
-// CompileBroker::set_last_compile
+// CompileBroker::update_compile_perf_data
//
// Record this compilation for debugging purposes.
-void CompileBroker::set_last_compile(CompilerThread* thread, const methodHandle& method, bool is_osr, int comp_level) {
+void CompileBroker::update_compile_perf_data(CompilerThread* thread, const methodHandle& method, bool is_osr) {
ResourceMark rm;
char* method_name = method->name()->as_C_string();
- strncpy(_last_method_compiled, method_name, CompileBroker::name_buffer_length);
- _last_method_compiled[CompileBroker::name_buffer_length - 1] = '\0'; // ensure null terminated
char current_method[CompilerCounters::cmname_buffer_length];
size_t maxLen = CompilerCounters::cmname_buffer_length;
- if (UsePerfData) {
- const char* class_name = method->method_holder()->name()->as_C_string();
+ const char* class_name = method->method_holder()->name()->as_C_string();
- size_t s1len = strlen(class_name);
- size_t s2len = strlen(method_name);
+ size_t s1len = strlen(class_name);
+ size_t s2len = strlen(method_name);
- // check if we need to truncate the string
- if (s1len + s2len + 2 > maxLen) {
+ // check if we need to truncate the string
+ if (s1len + s2len + 2 > maxLen) {
- // the strategy is to lop off the leading characters of the
- // class name and the trailing characters of the method name.
+ // the strategy is to lop off the leading characters of the
+ // class name and the trailing characters of the method name.
- if (s2len + 2 > maxLen) {
- // lop of the entire class name string, let snprintf handle
- // truncation of the method name.
- class_name += s1len; // null string
- }
- else {
- // lop off the extra characters from the front of the class name
- class_name += ((s1len + s2len + 2) - maxLen);
- }
+ if (s2len + 2 > maxLen) {
+ // lop of the entire class name string, let snprintf handle
+ // truncation of the method name.
+ class_name += s1len; // null string
+ }
+ else {
+ // lop off the extra characters from the front of the class name
+ class_name += ((s1len + s2len + 2) - maxLen);
}
-
- jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name);
}
+ jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name);
+
+ int last_compile_type = normal_compile;
if (CICountOSR && is_osr) {
- _last_compile_type = osr_compile;
- } else {
- _last_compile_type = normal_compile;
+ last_compile_type = osr_compile;
}
- _last_compile_level = comp_level;
- if (UsePerfData) {
- CompilerCounters* counters = thread->counters();
- counters->set_current_method(current_method);
- counters->set_compile_type((jlong)_last_compile_type);
- }
+ CompilerCounters* counters = thread->counters();
+ counters->set_current_method(current_method);
+ counters->set_compile_type((jlong) last_compile_type);
}
-
// ------------------------------------------------------------------
// CompileBroker::push_jni_handle_block
//
@@ -2618,21 +2604,6 @@ void CompileBroker::print_times(bool per_compiler, bool aggregate) {
tty->print_cr(" nmethod total size : %8d bytes", nmethods_size);
}
-// Debugging output for failure
-void CompileBroker::print_last_compile() {
- if (_last_compile_level != CompLevel_none &&
- compiler(_last_compile_level) != NULL &&
- _last_compile_type != no_compile) {
- if (_last_compile_type == osr_compile) {
- tty->print_cr("Last parse: [osr]%d+++(%d) %s",
- _osr_compilation_id, _last_compile_level, _last_method_compiled);
- } else {
- tty->print_cr("Last parse: %d+++(%d) %s",
- _compilation_id, _last_compile_level, _last_method_compiled);
- }
- }
-}
-
// Print general/accumulated JIT information.
void CompileBroker::print_info(outputStream *out) {
if (out == NULL) out = tty;
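
The truncation logic kept in update_compile_perf_data() drops leading characters of the class name so that the tail of "class method" still fits the fixed perf-counter buffer. A self-contained sketch of just that strategy (illustrative, with invented names):

    #include <cstdio>
    #include <cstring>

    static void format_current_method(char* buf, size_t maxLen,
                                      const char* class_name, const char* method_name) {
      size_t s1len = std::strlen(class_name);
      size_t s2len = std::strlen(method_name);
      if (s1len + s2len + 2 > maxLen) {                  // "<class> <method>\0" does not fit
        if (s2len + 2 > maxLen) {
          class_name += s1len;                           // drop the class name entirely,
                                                         // snprintf truncates the method name
        } else {
          class_name += (s1len + s2len + 2) - maxLen;    // drop leading chars of the class name
        }
      }
      std::snprintf(buf, maxLen, "%s %s", class_name, method_name);
    }
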
diff --git a/src/hotspot/share/compiler/compileBroker.hpp b/src/hotspot/share/compiler/compileBroker.hpp
index 71be6382f9e..fb13fa4ff5b 100644
--- a/src/hotspot/share/compiler/compileBroker.hpp
+++ b/src/hotspot/share/compiler/compileBroker.hpp
@@ -173,10 +173,6 @@ class CompileBroker: AllStatic {
static volatile jint _compilation_id;
static volatile jint _osr_compilation_id;
- static int _last_compile_type;
- static int _last_compile_level;
- static char _last_method_compiled[name_buffer_length];
-
static CompileQueue* _c2_compile_queue;
static CompileQueue* _c1_compile_queue;
@@ -254,7 +250,8 @@ class CompileBroker: AllStatic {
static void invoke_compiler_on_method(CompileTask* task);
static void post_compile(CompilerThread* thread, CompileTask* task, bool success, ciEnv* ci_env,
int compilable, const char* failure_reason);
- static void set_last_compile(CompilerThread *thread, const methodHandle& method, bool is_osr, int comp_level);
+ static void update_compile_perf_data(CompilerThread *thread, const methodHandle& method, bool is_osr);
+
static void push_jni_handle_block();
static void pop_jni_handle_block();
static void collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task);
@@ -382,9 +379,6 @@ public:
// Print a detailed accounting of compilation time
static void print_times(bool per_compiler = true, bool aggregate = true);
- // Debugging output for failure
- static void print_last_compile();
-
// compiler name for debugging
static const char* compiler_name(int comp_level);
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index db01f8a7800..ca5edbd6f9f 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -154,6 +154,11 @@ void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_region
reset_from_card_cache(start_idx, num_regions);
}
+Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
+ Ticks start = Ticks::now();
+ workers()->run_task(task, workers()->active_workers());
+ return Ticks::now() - start;
+}
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
MemRegion mr) {
@@ -2242,12 +2247,12 @@ void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
_hrm->par_iterate(cl, hrclaimer, 0);
}
-void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
+void G1CollectedHeap::collection_set_iterate_all(HeapRegionClosure* cl) {
_collection_set.iterate(cl);
}
-void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
- _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
+void G1CollectedHeap::collection_set_iterate_increment_from(HeapRegionClosure *cl, uint worker_id) {
+ _collection_set.iterate_incremental_part_from(cl, worker_id, workers()->active_workers());
}
HeapWord* G1CollectedHeap::block_start(const void* addr) const {
@@ -2484,7 +2489,7 @@ public:
void G1CollectedHeap::print_cset_rsets() {
PrintRSetsClosure cl("Printing CSet RSets");
- collection_set_iterate(&cl);
+ collection_set_iterate_all(&cl);
}
void G1CollectedHeap::print_all_rsets() {
@@ -2495,8 +2500,8 @@ void G1CollectedHeap::print_all_rsets() {
G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
- size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;
- size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;
+ size_t eden_used_bytes = _eden.used_bytes();
+ size_t survivor_used_bytes = _survivor.used_bytes();
size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
size_t eden_capacity_bytes =
@@ -2880,15 +2885,18 @@ void G1CollectedHeap::start_new_collection_set() {
phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
}
-void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms){
- policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
- evacuation_info.set_collectionset_regions(collection_set()->region_length());
+void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms) {
+
+ _collection_set.finalize_initial_collection_set(target_pause_time_ms, &_survivor);
+ evacuation_info.set_collectionset_regions(collection_set()->region_length() +
+ collection_set()->optional_region_length());
_cm->verify_no_collection_set_oops();
if (_hr_printer.is_active()) {
G1PrintCollectionSetClosure cl(&_hr_printer);
_collection_set.iterate(&cl);
+ _collection_set.iterate_optional(&cl);
}
}
@@ -3060,9 +3068,10 @@ bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_
pre_evacuate_collection_set(evacuation_info);
// Actually do the work...
- evacuate_collection_set(&per_thread_states);
- evacuate_optional_collection_set(&per_thread_states);
-
+ evacuate_initial_collection_set(&per_thread_states);
+ if (_collection_set.optional_region_length() != 0) {
+ evacuate_optional_collection_set(&per_thread_states);
+ }
post_evacuate_collection_set(evacuation_info, &per_thread_states);
start_new_collection_set();
@@ -3088,7 +3097,8 @@ bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_
double sample_end_time_sec = os::elapsedTime();
double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
- size_t total_cards_scanned = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards);
+ size_t total_cards_scanned = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards) +
+ phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanRS, G1GCPhaseTimes::ScanRSScannedCards);
policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
}
@@ -3192,86 +3202,6 @@ void G1ParEvacuateFollowersClosure::do_void() {
} while (!offer_termination());
}
-class G1ParTask : public AbstractGangTask {
-protected:
- G1CollectedHeap* _g1h;
- G1ParScanThreadStateSet* _pss;
- RefToScanQueueSet* _queues;
- G1RootProcessor* _root_processor;
- TaskTerminator _terminator;
- uint _n_workers;
-
-public:
- G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
- : AbstractGangTask("G1 collection"),
- _g1h(g1h),
- _pss(per_thread_states),
- _queues(task_queues),
- _root_processor(root_processor),
- _terminator(n_workers, _queues),
- _n_workers(n_workers)
- {}
-
- void work(uint worker_id) {
- if (worker_id >= _n_workers) return; // no work needed this round
-
- double start_sec = os::elapsedTime();
- _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, start_sec);
-
- {
- ResourceMark rm;
- HandleMark hm;
-
- ReferenceProcessor* rp = _g1h->ref_processor_stw();
-
- G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
- pss->set_ref_discoverer(rp);
-
- double start_strong_roots_sec = os::elapsedTime();
-
- _root_processor->evacuate_roots(pss, worker_id);
-
- _g1h->rem_set()->oops_into_collection_set_do(pss, worker_id);
-
- double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
-
- double term_sec = 0.0;
- size_t evac_term_attempts = 0;
- {
- double start = os::elapsedTime();
- G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, _terminator.terminator(), G1GCPhaseTimes::ObjCopy);
- evac.do_void();
-
- evac_term_attempts = evac.term_attempts();
- term_sec = evac.term_time();
- double elapsed_sec = os::elapsedTime() - start;
-
- G1GCPhaseTimes* p = _g1h->phase_times();
- p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
-
- p->record_or_add_thread_work_item(G1GCPhaseTimes::ObjCopy,
- worker_id,
- pss->lab_waste_words() * HeapWordSize,
- G1GCPhaseTimes::ObjCopyLABWaste);
- p->record_or_add_thread_work_item(G1GCPhaseTimes::ObjCopy,
- worker_id,
- pss->lab_undo_waste_words() * HeapWordSize,
- G1GCPhaseTimes::ObjCopyLABUndoWaste);
-
- p->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
- p->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
- }
-
- assert(pss->queue_is_empty(), "should be empty");
-
- // Close the inner scope so that the ResourceMark and HandleMark
- // destructors are executed here and are included as part of the
- // "GC Worker Time".
- }
- _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
- }
-};
-
void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
bool class_unloading_occurred) {
uint num_workers = workers()->active_workers();
@@ -3675,176 +3605,196 @@ void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_i
double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
phase_times()->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
}
-}
-void G1CollectedHeap::evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states) {
// Should G1EvacuationFailureALot be in effect for this GC?
NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
-
- double start_par_time_sec = os::elapsedTime();
- double end_par_time_sec;
-
- {
- const uint n_workers = workers()->active_workers();
- G1RootProcessor root_processor(this, n_workers);
- G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
-
- workers()->run_task(&g1_par_task);
- end_par_time_sec = os::elapsedTime();
-
- // Closing the inner scope will execute the destructor
- // for the G1RootProcessor object. We record the current
- // elapsed time before closing the scope so that time
- // taken for the destructor is NOT included in the
- // reported parallel time.
- }
-
- double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
- phase_times()->record_par_time(par_time_ms);
-
- double code_root_fixup_time_ms =
- (os::elapsedTime() - end_par_time_sec) * 1000.0;
- phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
}
-class G1EvacuateOptionalRegionTask : public AbstractGangTask {
+class G1EvacuateRegionsBaseTask : public AbstractGangTask {
+protected:
G1CollectedHeap* _g1h;
G1ParScanThreadStateSet* _per_thread_states;
- G1OptionalCSet* _optional;
- RefToScanQueueSet* _queues;
- ParallelTaskTerminator _terminator;
+ RefToScanQueueSet* _task_queues;
+ TaskTerminator _terminator;
+ uint _num_workers;
- Tickspan trim_ticks(G1ParScanThreadState* pss) {
- Tickspan copy_time = pss->trim_ticks();
- pss->reset_trim_ticks();
- return copy_time;
- }
-
- void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
- G1EvacuationRootClosures* root_cls = pss->closures();
- G1ScanObjsDuringScanRSClosure obj_cl(_g1h, pss);
-
- size_t scanned = 0;
- size_t claimed = 0;
- size_t skipped = 0;
- size_t used_memory = 0;
-
- Ticks start = Ticks::now();
- Tickspan copy_time;
-
- for (uint i = _optional->current_index(); i < _optional->current_limit(); i++) {
- HeapRegion* hr = _optional->region_at(i);
- G1ScanRSForOptionalClosure scan_opt_cl(&obj_cl);
- pss->oops_into_optional_region(hr)->oops_do(&scan_opt_cl, root_cls->raw_strong_oops());
- copy_time += trim_ticks(pss);
-
- G1ScanRSForRegionClosure scan_rs_cl(_g1h->rem_set()->scan_state(), &obj_cl, pss, G1GCPhaseTimes::OptScanRS, worker_id);
- scan_rs_cl.do_heap_region(hr);
- copy_time += trim_ticks(pss);
- scanned += scan_rs_cl.cards_scanned();
- claimed += scan_rs_cl.cards_claimed();
- skipped += scan_rs_cl.cards_skipped();
-
- // Chunk lists for this region is no longer needed.
- used_memory += pss->oops_into_optional_region(hr)->used_memory();
- }
-
- Tickspan scan_time = (Ticks::now() - start) - copy_time;
+ void evacuate_live_objects(G1ParScanThreadState* pss,
+ uint worker_id,
+ G1GCPhaseTimes::GCParPhases objcopy_phase,
+ G1GCPhaseTimes::GCParPhases termination_phase) {
G1GCPhaseTimes* p = _g1h->phase_times();
- p->record_or_add_time_secs(G1GCPhaseTimes::OptScanRS, worker_id, scan_time.seconds());
- p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, copy_time.seconds());
- p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, scanned, G1GCPhaseTimes::OptCSetScannedCards);
- p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, claimed, G1GCPhaseTimes::OptCSetClaimedCards);
- p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, skipped, G1GCPhaseTimes::OptCSetSkippedCards);
- p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
- }
-
- void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
Ticks start = Ticks::now();
- G1ParEvacuateFollowersClosure cl(_g1h, pss, _queues, &_terminator, G1GCPhaseTimes::OptObjCopy);
+ G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, _terminator.terminator(), objcopy_phase);
cl.do_void();
+ assert(pss->queue_is_empty(), "should be empty");
+
Tickspan evac_time = (Ticks::now() - start);
- G1GCPhaseTimes* p = _g1h->phase_times();
- p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, evac_time.seconds());
- assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming done during optional evacuation");
+ p->record_or_add_time_secs(objcopy_phase, worker_id, evac_time.seconds() - cl.term_time());
+
+ p->record_or_add_thread_work_item(objcopy_phase, worker_id, pss->lab_waste_words() * HeapWordSize, G1GCPhaseTimes::ObjCopyLABWaste);
+ p->record_or_add_thread_work_item(objcopy_phase, worker_id, pss->lab_undo_waste_words() * HeapWordSize, G1GCPhaseTimes::ObjCopyLABUndoWaste);
+
+ if (termination_phase == G1GCPhaseTimes::Termination) {
+ p->record_time_secs(termination_phase, worker_id, cl.term_time());
+ p->record_thread_work_item(termination_phase, worker_id, cl.term_attempts());
+ } else {
+ p->record_or_add_time_secs(termination_phase, worker_id, cl.term_time());
+ p->record_or_add_thread_work_item(termination_phase, worker_id, cl.term_attempts());
+ }
+ assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming during evacuation");
}
- public:
- G1EvacuateOptionalRegionTask(G1CollectedHeap* g1h,
- G1ParScanThreadStateSet* per_thread_states,
- G1OptionalCSet* cset,
- RefToScanQueueSet* queues,
- uint n_workers) :
- AbstractGangTask("G1 Evacuation Optional Region Task"),
- _g1h(g1h),
+ virtual void start_work(uint worker_id) { }
+
+ virtual void end_work(uint worker_id) { }
+
+ virtual void scan_roots(G1ParScanThreadState* pss, uint worker_id) = 0;
+
+ virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;
+
+public:
+ G1EvacuateRegionsBaseTask(const char* name, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet* task_queues, uint num_workers) :
+ AbstractGangTask(name),
+ _g1h(G1CollectedHeap::heap()),
_per_thread_states(per_thread_states),
- _optional(cset),
- _queues(queues),
- _terminator(n_workers, _queues) {
- }
+ _task_queues(task_queues),
+ _terminator(num_workers, _task_queues),
+ _num_workers(num_workers)
+ { }
void work(uint worker_id) {
- ResourceMark rm;
- HandleMark hm;
+ start_work(worker_id);
- G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
- pss->set_ref_discoverer(_g1h->ref_processor_stw());
+ {
+ ResourceMark rm;
+ HandleMark hm;
- scan_roots(pss, worker_id);
- evacuate_live_objects(pss, worker_id);
+ G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
+ pss->set_ref_discoverer(_g1h->ref_processor_stw());
+
+ scan_roots(pss, worker_id);
+ evacuate_live_objects(pss, worker_id);
+ }
+
+ end_work(worker_id);
}
};
-void G1CollectedHeap::evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset) {
- class G1MarkScope : public MarkScope {};
- G1MarkScope code_mark_scope;
+class G1EvacuateRegionsTask : public G1EvacuateRegionsBaseTask {
+ G1RootProcessor* _root_processor;
- G1EvacuateOptionalRegionTask task(this, per_thread_states, ocset, _task_queues, workers()->active_workers());
- workers()->run_task(&task);
+ void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
+ _root_processor->evacuate_roots(pss, worker_id);
+ _g1h->rem_set()->update_rem_set(pss, worker_id);
+ _g1h->rem_set()->scan_rem_set(pss, worker_id, G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::CodeRoots);
+ }
+
+ void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
+ G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::Termination);
+ }
+
+ void start_work(uint worker_id) {
+ _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, Ticks::now().seconds());
+ }
+
+ void end_work(uint worker_id) {
+ _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, Ticks::now().seconds());
+ }
+
+public:
+ G1EvacuateRegionsTask(G1CollectedHeap* g1h,
+ G1ParScanThreadStateSet* per_thread_states,
+ RefToScanQueueSet* task_queues,
+ G1RootProcessor* root_processor,
+ uint num_workers) :
+ G1EvacuateRegionsBaseTask("G1 Evacuate Regions", per_thread_states, task_queues, num_workers),
+ _root_processor(root_processor)
+ { }
+};
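
The new G1EvacuateRegionsBaseTask above fixes the per-worker skeleton (start_work, scan_roots, evacuate_live_objects, end_work) and lets the initial and optional evacuation tasks plug in only what differs: which roots are scanned and which phase-times entries are updated. The following standalone C++ sketch (not HotSpot code; class and function names are illustrative only) shows the same template-method split:

    // Standalone sketch (not HotSpot code) of the template-method split used by
    // G1EvacuateRegionsBaseTask: the base class owns the per-worker skeleton,
    // subclasses only provide root scanning and optional start/end bookkeeping.
    #include <cstdio>

    class EvacuateRegionsBaseTask {
    protected:
      virtual void start_work(unsigned worker_id) { }            // optional hook
      virtual void end_work(unsigned worker_id) { }               // optional hook
      virtual void scan_roots(unsigned worker_id) = 0;            // differs per task
      virtual void evacuate_live_objects(unsigned worker_id) = 0; // differs per task

    public:
      virtual ~EvacuateRegionsBaseTask() { }

      // Fixed skeleton every worker runs; mirrors work(uint) in the patch.
      void work(unsigned worker_id) {
        start_work(worker_id);
        scan_roots(worker_id);
        evacuate_live_objects(worker_id);
        end_work(worker_id);
      }
    };

    class EvacuateInitialTask : public EvacuateRegionsBaseTask {
      void start_work(unsigned w) override { std::printf("worker %u: record GCWorkerStart\n", w); }
      void end_work(unsigned w) override   { std::printf("worker %u: record GCWorkerEnd\n", w); }
      void scan_roots(unsigned w) override { std::printf("worker %u: strong roots + rem set (ScanRS)\n", w); }
      void evacuate_live_objects(unsigned w) override { std::printf("worker %u: drain queues (ObjCopy)\n", w); }
    };

    class EvacuateOptionalTask : public EvacuateRegionsBaseTask {
      // No per-worker start/end records; only the optional regions' rem sets are scanned.
      void scan_roots(unsigned w) override { std::printf("worker %u: rem set only (OptScanRS)\n", w); }
      void evacuate_live_objects(unsigned w) override { std::printf("worker %u: drain queues (OptObjCopy)\n", w); }
    };

    int main() {
      EvacuateInitialTask initial;
      EvacuateOptionalTask optional;
      for (unsigned w = 0; w < 2; w++) initial.work(w);
      for (unsigned w = 0; w < 2; w++) optional.work(w);
      return 0;
    }

Keeping the skeleton in the base class is what lets the optional rounds reuse the queue-draining and termination bookkeeping without duplicating the G1ParTask code that this patch removes.
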
+
+void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states) {
+ Tickspan task_time;
+ const uint num_workers = workers()->active_workers();
+
+ Ticks start_processing = Ticks::now();
+ {
+ G1RootProcessor root_processor(this, num_workers);
+ G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers);
+ task_time = run_task(&g1_par_task);
+ // Closing the inner scope will execute the destructor for the G1RootProcessor object.
+    // To extract its code root fixup time we measure the total time of this scope
+    // and subtract the time the WorkGang task took from it.
+ }
+ Tickspan total_processing = Ticks::now() - start_processing;
+
+ G1GCPhaseTimes* p = phase_times();
+ p->record_initial_evac_time(task_time.seconds() * 1000.0);
+ p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
+}
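
run_task() returns the wall-clock time the gang task itself took; evacuate_initial_collection_set() additionally times the enclosing scope so that the G1RootProcessor destructor work (code root fixup) can be attributed separately. A minimal standalone sketch of that arithmetic, using std::chrono in place of Ticks/Tickspan (the sleeps are stand-ins, not HotSpot behavior):

    // Illustrative sketch (not HotSpot code): the gang task is timed on its own,
    // the surrounding scope is timed as well, and the difference is attributed to
    // the G1RootProcessor destructor (code root fixup).
    #include <chrono>
    #include <cstdio>
    #include <thread>

    int main() {
      using Clock = std::chrono::steady_clock;

      Clock::time_point start_processing = Clock::now();
      std::chrono::duration<double> task_time(0.0);
      {
        Clock::time_point task_start = Clock::now();
        std::this_thread::sleep_for(std::chrono::milliseconds(20)); // the "WorkGang task"
        task_time = Clock::now() - task_start;
        std::this_thread::sleep_for(std::chrono::milliseconds(5));  // the "destructor" work
      }
      std::chrono::duration<double> total_processing = Clock::now() - start_processing;

      std::printf("initial evacuation: %.1f ms\n", task_time.count() * 1000.0);
      std::printf("code root fixup:    %.1f ms\n", (total_processing - task_time).count() * 1000.0);
      return 0;
    }
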
+
+class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {
+
+ void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
+ _g1h->rem_set()->scan_rem_set(pss, worker_id, G1GCPhaseTimes::OptScanRS, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptCodeRoots);
+ }
+
+ void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
+ G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
+ }
+
+public:
+ G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
+ RefToScanQueueSet* queues,
+ uint num_workers) :
+ G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
+ }
+};
+
+void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
+ class G1MarkScope : public MarkScope { };
+
+ Tickspan task_time;
+
+ Ticks start_processing = Ticks::now();
+ {
+ G1MarkScope code_mark_scope;
+ G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
+ task_time = run_task(&task);
+    // See comment in evacuate_initial_collection_set() for the reason for this scope.
+ }
+ Tickspan total_processing = Ticks::now() - start_processing;
+
+ G1GCPhaseTimes* p = phase_times();
+ p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
}
void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
- G1OptionalCSet optional_cset(&_collection_set, per_thread_states);
- if (optional_cset.is_empty()) {
- return;
- }
-
- if (evacuation_failed()) {
- return;
- }
-
const double gc_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;
- double start_time_sec = os::elapsedTime();
+ Ticks start = Ticks::now();
+
+ while (!evacuation_failed() && _collection_set.optional_region_length() > 0) {
- do {
double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
double time_left_ms = MaxGCPauseMillis - time_used_ms;
- if (time_left_ms < 0) {
- log_trace(gc, ergo, cset)("Skipping %u optional regions, pause time exceeded %.3fms", optional_cset.size(), time_used_ms);
+ if (time_left_ms < 0 ||
+ !_collection_set.finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
+ log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
+ _collection_set.optional_region_length(), time_left_ms);
break;
}
- optional_cset.prepare_evacuation(time_left_ms * _policy->optional_evacuation_fraction());
- if (optional_cset.prepare_failed()) {
- log_trace(gc, ergo, cset)("Skipping %u optional regions, no regions can be evacuated in %.3fms", optional_cset.size(), time_left_ms);
- break;
- }
+ evacuate_next_optional_regions(per_thread_states);
+ }
- evacuate_optional_regions(per_thread_states, &optional_cset);
+ _collection_set.abandon_optional_collection_set(per_thread_states);
- optional_cset.complete_evacuation();
- if (optional_cset.evacuation_failed()) {
- break;
- }
- } while (!optional_cset.is_empty());
-
- phase_times()->record_optional_evacuation((os::elapsedTime() - start_time_sec) * 1000.0);
+ phase_times()->record_or_add_optional_evac_time((Ticks::now() - start).seconds() * 1000.0);
}
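
Each round of the loop above recomputes the remaining pause-time budget from MaxGCPauseMillis, hands a fraction of it to finalize_optional_for_evacuation(), and stops as soon as the policy cannot fit another region; whatever is left is abandoned back to the candidates. A simplified standalone model of that budgeting (the fraction, predicted costs and elapsed times are made-up numbers, not values from the patch):

    // Simplified model (not HotSpot code) of the optional-evacuation loop: keep
    // selecting optional regions while the remaining pause-time budget, scaled by
    // an assumed "optional evacuation fraction", still fits at least one region.
    #include <cstdio>
    #include <vector>

    int main() {
      const double max_pause_ms = 200.0;
      const double optional_evacuation_fraction = 0.2;                    // assumed policy knob
      std::vector<double> predicted_region_cost_ms = { 6.0, 5.0, 7.0, 9.0 }; // optional regions

      double time_used_ms = 170.0;   // time already spent in this pause (assumed)
      size_t next = 0;
      while (next < predicted_region_cost_ms.size()) {
        double time_left_ms = max_pause_ms - time_used_ms;
        double budget_ms = time_left_ms * optional_evacuation_fraction;

        // Select as many of the remaining optional regions as fit this round's budget.
        size_t selected = 0;
        double selected_cost_ms = 0.0;
        while (next + selected < predicted_region_cost_ms.size() &&
               selected_cost_ms + predicted_region_cost_ms[next + selected] <= budget_ms) {
          selected_cost_ms += predicted_region_cost_ms[next + selected];
          selected++;
        }

        if (time_left_ms < 0 || selected == 0) {
          std::printf("abandoning %zu optional region(s), %.1f ms left\n",
                      predicted_region_cost_ms.size() - next, time_left_ms);
          break;
        }

        std::printf("round: evacuating %zu region(s), predicted %.1f ms\n", selected, selected_cost_ms);
        time_used_ms += selected_cost_ms;   // stand-in for the actual evacuation work
        next += selected;
      }
      return 0;
    }
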
void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
@@ -4259,15 +4209,14 @@ void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, G1Eva
double free_cset_start_time = os::elapsedTime();
{
- uint const num_chunks = MAX2(_collection_set.region_length() / G1FreeCollectionSetTask::chunk_size(), 1U);
+ uint const num_regions = _collection_set.region_length();
+ uint const num_chunks = MAX2(num_regions / G1FreeCollectionSetTask::chunk_size(), 1U);
uint const num_workers = MIN2(workers()->active_workers(), num_chunks);
G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);
log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",
- cl.name(),
- num_workers,
- _collection_set.region_length());
+ cl.name(), num_workers, num_regions);
workers()->run_task(&cl, num_workers);
}
phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
@@ -4436,7 +4385,7 @@ public:
void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
G1AbandonCollectionSetClosure cl;
- collection_set->iterate(&cl);
+ collection_set_iterate_all(&cl);
collection_set->clear();
collection_set->stop_incremental_building();
@@ -4636,7 +4585,9 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
collection_set()->add_eden_region(alloc_region);
increase_used(allocated_bytes);
+ _eden.add_used_bytes(allocated_bytes);
_hr_printer.retire(alloc_region);
+
// We update the eden sizes here, when the region is retired,
// instead of when it's allocated, since this is the point that its
// used space has been recorded in _summary_bytes_used.
@@ -4693,6 +4644,9 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
policy()->record_bytes_copied_during_gc(allocated_bytes);
if (dest.is_old()) {
old_set_add(alloc_region);
+ } else {
+ assert(dest.is_young(), "Retiring alloc region should be young(%d)", dest.value());
+ _survivor.add_used_bytes(allocated_bytes);
}
bool const during_im = collector_state()->in_initial_mark_gc();
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index b71a04047da..7ba5eb895c4 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -143,9 +143,8 @@ class G1CollectedHeap : public CollectedHeap {
// Closures used in implementation.
friend class G1ParScanThreadState;
friend class G1ParScanThreadStateSet;
- friend class G1ParTask;
+ friend class G1EvacuateRegionsTask;
friend class G1PLABAllocator;
- friend class G1PrepareCompactClosure;
// Other related classes.
friend class HeapRegionClaimer;
@@ -206,7 +205,7 @@ private:
// Outside of GC pauses, the number of bytes used in all regions other
// than the current allocation region(s).
- size_t _summary_bytes_used;
+ volatile size_t _summary_bytes_used;
void increase_used(size_t bytes);
void decrease_used(size_t bytes);
@@ -519,6 +518,10 @@ public:
WorkGang* workers() const { return _workers; }
+ // Runs the given AbstractGangTask with the current active workers, returning the
+ // total time taken.
+ Tickspan run_task(AbstractGangTask* task);
+
G1Allocator* allocator() {
return _allocator;
}
@@ -738,11 +741,14 @@ private:
void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
- // Actually do the work of evacuating the collection set.
- void evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states);
+ // Actually do the work of evacuating the parts of the collection set.
+ void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
- void evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset);
+private:
+ // Evacuate the next set of optional regions.
+ void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
+public:
void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info);
void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
@@ -1165,14 +1171,14 @@ public:
void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
HeapRegionClaimer* hrclaimer) const;
- // Iterate over the regions (if any) in the current collection set.
- void collection_set_iterate(HeapRegionClosure* blk);
+ // Iterate over all regions currently in the current collection set.
+ void collection_set_iterate_all(HeapRegionClosure* blk);
- // Iterate over the regions (if any) in the current collection set. Starts the
- // iteration over the entire collection set so that the start regions of a given
- // worker id over the set active_workers are evenly spread across the set of
- // collection set regions.
- void collection_set_iterate_from(HeapRegionClosure *blk, uint worker_id);
+ // Iterate over the regions in the current increment of the collection set.
+  // Starts the iteration so that the starting regions of the active_workers workers
+  // are evenly spread across the set of collection set regions to be iterated.
+ void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id);
// Returns the HeapRegion that contains addr. addr must not be NULL.
  template <class T>
@@ -1252,6 +1258,8 @@ public:
uint eden_regions_count() const { return _eden.length(); }
uint survivor_regions_count() const { return _survivor.length(); }
+ size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
+ size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
uint young_regions_count() const { return _eden.length() + _survivor.length(); }
uint old_regions_count() const { return _old_set.length(); }
uint archive_regions_count() const { return _archive_set.length(); }
@@ -1420,7 +1428,7 @@ private:
size_t _term_attempts;
void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
- void end_term_time() { _term_time += os::elapsedTime() - _start_term; }
+ void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
protected:
G1CollectedHeap* _g1h;
G1ParScanThreadState* _par_scan_state;
diff --git a/src/hotspot/share/gc/g1/g1CollectionSet.cpp b/src/hotspot/share/gc/g1/g1CollectionSet.cpp
index d67fb10829a..835866a9857 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp
@@ -59,12 +59,11 @@ G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
_collection_set_regions(NULL),
_collection_set_cur_length(0),
_collection_set_max_length(0),
- _optional_regions(NULL),
- _optional_region_length(0),
- _optional_region_max_length(0),
+ _num_optional_regions(0),
_bytes_used_before(0),
_recorded_rs_lengths(0),
_inc_build_state(Inactive),
+ _inc_part_start(0),
_inc_bytes_used_before(0),
_inc_recorded_rs_lengths(0),
_inc_recorded_rs_lengths_diffs(0),
@@ -90,8 +89,8 @@ void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
assert((size_t) young_region_length() == _collection_set_cur_length,
"Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);
- _old_region_length = 0;
- _optional_region_length = 0;
+ _old_region_length = 0;
+ free_optional_regions();
}
void G1CollectionSet::initialize(uint max_region_length) {
@@ -100,21 +99,8 @@ void G1CollectionSet::initialize(uint max_region_length) {
_collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
}
-void G1CollectionSet::initialize_optional(uint max_length) {
- assert(_optional_regions == NULL, "Already initialized");
- assert(_optional_region_length == 0, "Already initialized");
- assert(_optional_region_max_length == 0, "Already initialized");
- _optional_region_max_length = max_length;
- _optional_regions = NEW_C_HEAP_ARRAY(HeapRegion*, _optional_region_max_length, mtGC);
-}
-
void G1CollectionSet::free_optional_regions() {
- _optional_region_length = 0;
- _optional_region_max_length = 0;
- if (_optional_regions != NULL) {
- FREE_C_HEAP_ARRAY(HeapRegion*, _optional_regions);
- _optional_regions = NULL;
- }
+ _num_optional_regions = 0;
}
void G1CollectionSet::clear_candidates() {
@@ -130,39 +116,32 @@ void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
void G1CollectionSet::add_old_region(HeapRegion* hr) {
assert_at_safepoint_on_vm_thread();
- assert(_inc_build_state == Active || hr->index_in_opt_cset() != G1OptionalCSet::InvalidCSetIndex,
+ assert(_inc_build_state == Active,
"Precondition, actively building cset or adding optional later on");
assert(hr->is_old(), "the region should be old");
- assert(!hr->in_collection_set(), "should not already be in the CSet");
+ assert(!hr->in_collection_set(), "should not already be in the collection set");
_g1h->register_old_region_with_cset(hr);
_collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");
_bytes_used_before += hr->used();
- size_t rs_length = hr->rem_set()->occupied();
- _recorded_rs_lengths += rs_length;
- _old_region_length += 1;
+ _recorded_rs_lengths += hr->rem_set()->occupied();
+ _old_region_length++;
- log_trace(gc, cset)("Added old region %d to collection set", hr->hrm_index());
+ _g1h->old_set_remove(hr);
}
void G1CollectionSet::add_optional_region(HeapRegion* hr) {
- assert(!optional_is_full(), "Precondition, must have room left for this region");
assert(hr->is_old(), "the region should be old");
assert(!hr->in_collection_set(), "should not already be in the CSet");
_g1h->register_optional_region_with_cset(hr);
- _optional_regions[_optional_region_length] = hr;
- uint index = _optional_region_length++;
- hr->set_index_in_opt_cset(index);
-
- log_trace(gc, cset)("Added region %d to optional collection set (%u)", hr->hrm_index(), _optional_region_length);
+ hr->set_index_in_opt_cset(_num_optional_regions++);
}
-// Initialize the per-collection-set information
void G1CollectionSet::start_incremental_building() {
assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
assert(_inc_build_state == Inactive, "Precondition");
@@ -173,7 +152,8 @@ void G1CollectionSet::start_incremental_building() {
_inc_recorded_rs_lengths_diffs = 0;
_inc_predicted_elapsed_time_ms = 0.0;
_inc_predicted_elapsed_time_ms_diffs = 0.0;
- _inc_build_state = Active;
+
+ update_incremental_marker();
}
void G1CollectionSet::finalize_incremental_building() {
@@ -211,29 +191,48 @@ void G1CollectionSet::finalize_incremental_building() {
void G1CollectionSet::clear() {
assert_at_safepoint_on_vm_thread();
_collection_set_cur_length = 0;
- _optional_region_length = 0;
}
void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
- iterate_from(cl, 0, 1);
-}
-
-void G1CollectionSet::iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const {
size_t len = _collection_set_cur_length;
OrderAccess::loadload();
- if (len == 0) {
- return;
- }
- size_t start_pos = (worker_id * len) / total_workers;
- size_t cur_pos = start_pos;
- do {
- HeapRegion* r = _g1h->region_at(_collection_set_regions[cur_pos]);
+ for (uint i = 0; i < len; i++) {
+ HeapRegion* r = _g1h->region_at(_collection_set_regions[i]);
bool result = cl->do_heap_region(r);
if (result) {
cl->set_incomplete();
return;
}
+ }
+}
+
+void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {
+ assert_at_safepoint();
+
+ for (uint i = 0; i < _num_optional_regions; i++) {
+ HeapRegion* r = _candidates->at(i);
+ bool result = cl->do_heap_region(r);
+ guarantee(!result, "Must not cancel iteration");
+ }
+}
+
+void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const {
+ assert_at_safepoint();
+
+ size_t len = _collection_set_cur_length - _inc_part_start;
+ if (len == 0) {
+ return;
+ }
+
+ size_t start_pos = (worker_id * len) / total_workers;
+ size_t cur_pos = start_pos;
+
+ do {
+ HeapRegion* r = _g1h->region_at(_collection_set_regions[cur_pos + _inc_part_start]);
+ bool result = cl->do_heap_region(r);
+ guarantee(!result, "Must not cancel iteration");
+
cur_pos++;
if (cur_pos == len) {
cur_pos = 0;
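
iterate_incremental_part_from() only walks the regions added since the last increment marker, and gives each worker a different starting offset, (worker_id * len) / total_workers, wrapping around at the end of the increment. A standalone sketch of that distribution (in the real code the closures claim finer-grained work so regions are not processed twice; here every worker just prints what it visits):

    // Standalone sketch (not HotSpot code) of the worker start-position spread used
    // by iterate_incremental_part_from().
    #include <cstdio>
    #include <vector>

    static void iterate_increment(const std::vector<int>& regions, size_t part_start,
                                  unsigned worker_id, unsigned total_workers) {
      size_t len = regions.size() - part_start;
      if (len == 0) return;

      size_t start_pos = (static_cast<size_t>(worker_id) * len) / total_workers;
      size_t cur_pos = start_pos;
      do {
        std::printf("worker %u visits region %d\n", worker_id, regions[part_start + cur_pos]);
        if (++cur_pos == len) cur_pos = 0;   // wrap around to the increment's start
      } while (cur_pos != start_pos);
    }

    int main() {
      // Region indices; the last four form the current increment (part_start = 2).
      std::vector<int> collection_set = { 10, 11, 20, 21, 22, 23 };
      for (unsigned w = 0; w < 2; w++) {
        iterate_increment(collection_set, 2, w, 2);
      }
      return 0;
    }
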
@@ -440,30 +439,6 @@ double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1Survi
return time_remaining_ms;
}
-void G1CollectionSet::add_as_old(HeapRegion* hr) {
- candidates()->pop_front(); // already have region via peek()
- _g1h->old_set_remove(hr);
- add_old_region(hr);
-}
-
-void G1CollectionSet::add_as_optional(HeapRegion* hr) {
- assert(_optional_regions != NULL, "Must not be called before array is allocated");
- candidates()->pop_front(); // already have region via peek()
- _g1h->old_set_remove(hr);
- add_optional_region(hr);
-}
-
-bool G1CollectionSet::optional_is_full() {
- assert(_optional_region_length <= _optional_region_max_length, "Invariant");
- return _optional_region_length == _optional_region_max_length;
-}
-
-void G1CollectionSet::clear_optional_region(const HeapRegion* hr) {
- assert(_optional_regions != NULL, "Must not be called before array is allocated");
- uint index = hr->index_in_opt_cset();
- _optional_regions[index] = NULL;
-}
-
static int compare_region_idx(const uint a, const uint b) {
if (a > b) {
return 1;
@@ -476,87 +451,25 @@ static int compare_region_idx(const uint a, const uint b) {
void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
double non_young_start_time_sec = os::elapsedTime();
- double predicted_old_time_ms = 0.0;
- double predicted_optional_time_ms = 0.0;
- double optional_threshold_ms = time_remaining_ms * _policy->optional_prediction_fraction();
- uint expensive_region_num = 0;
if (collector_state()->in_mixed_phase()) {
candidates()->verify();
- const uint min_old_cset_length = _policy->calc_min_old_cset_length();
- const uint max_old_cset_length = MAX2(min_old_cset_length, _policy->calc_max_old_cset_length());
- bool check_time_remaining = _policy->adaptive_young_list_length();
- initialize_optional(max_old_cset_length - min_old_cset_length);
- log_debug(gc, ergo, cset)("Start adding old regions for mixed gc. min %u regions, max %u regions, "
- "time remaining %1.2fms, optional threshold %1.2fms",
- min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms);
+ uint num_initial_old_regions;
+ uint num_optional_old_regions;
- HeapRegion* hr = candidates()->peek_front();
- while (hr != NULL) {
- if (old_region_length() + optional_region_length() >= max_old_cset_length) {
- // Added maximum number of old regions to the CSet.
- log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). "
- "old %u regions, optional %u regions",
- old_region_length(), optional_region_length());
- break;
- }
+ _policy->calculate_old_collection_set_regions(candidates(),
+ time_remaining_ms,
+ num_initial_old_regions,
+ num_optional_old_regions);
- // Stop adding regions if the remaining reclaimable space is
- // not above G1HeapWastePercent.
- size_t reclaimable_bytes = candidates()->remaining_reclaimable_bytes();
- double reclaimable_percent = _policy->reclaimable_bytes_percent(reclaimable_bytes);
- double threshold = (double) G1HeapWastePercent;
- if (reclaimable_percent <= threshold) {
- // We've added enough old regions that the amount of uncollected
- // reclaimable space is at or below the waste threshold. Stop
- // adding old regions to the CSet.
- log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
- "reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%",
- byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes),
- reclaimable_percent, G1HeapWastePercent);
- break;
- }
+ // Prepare initial old regions.
+ move_candidates_to_collection_set(num_initial_old_regions);
- double predicted_time_ms = predict_region_elapsed_time_ms(hr);
- time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
- // Add regions to old set until we reach minimum amount
- if (old_region_length() < min_old_cset_length) {
- predicted_old_time_ms += predicted_time_ms;
- add_as_old(hr);
- // Record the number of regions added when no time remaining
- if (time_remaining_ms == 0.0) {
- expensive_region_num++;
- }
- } else {
- // In the non-auto-tuning case, we'll finish adding regions
- // to the CSet if we reach the minimum.
- if (!check_time_remaining) {
- log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min).");
- break;
- }
- // Keep adding regions to old set until we reach optional threshold
- if (time_remaining_ms > optional_threshold_ms) {
- predicted_old_time_ms += predicted_time_ms;
- add_as_old(hr);
- } else if (time_remaining_ms > 0) {
- // Keep adding optional regions until time is up
- if (!optional_is_full()) {
- predicted_optional_time_ms += predicted_time_ms;
- add_as_optional(hr);
- } else {
- log_debug(gc, ergo, cset)("Finish adding old regions to CSet (optional set full).");
- break;
- }
- } else {
- log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high).");
- break;
- }
- }
- hr = candidates()->peek_front();
- }
- if (hr == NULL) {
- log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
+ // Prepare optional old regions for evacuation.
+ uint candidate_idx = candidates()->cur_idx();
+ for (uint i = 0; i < num_optional_old_regions; i++) {
+ add_optional_region(candidates()->at(candidate_idx + i));
}
candidates()->verify();
@@ -564,99 +477,59 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
stop_incremental_building();
- log_debug(gc, ergo, cset)("Finish choosing CSet regions old: %u, optional: %u, "
- "predicted old time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2f",
- old_region_length(), optional_region_length(),
- predicted_old_time_ms, predicted_optional_time_ms, time_remaining_ms);
- if (expensive_region_num > 0) {
- log_debug(gc, ergo, cset)("CSet contains %u old regions that were added although the predicted time was too high.",
- expensive_region_num);
- }
-
double non_young_end_time_sec = os::elapsedTime();
phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
QuickSort::sort(_collection_set_regions, _collection_set_cur_length, compare_region_idx, true);
}
-HeapRegion* G1OptionalCSet::region_at(uint index) {
- return _cset->optional_region_at(index);
-}
-
-void G1OptionalCSet::prepare_evacuation(double time_limit) {
- assert(_current_index == _current_limit, "Before prepare no regions should be ready for evac");
-
- uint prepared_regions = 0;
- double prediction_ms = 0;
-
- _prepare_failed = true;
- for (uint i = _current_index; i < _cset->optional_region_length(); i++) {
- HeapRegion* hr = region_at(i);
- prediction_ms += _cset->predict_region_elapsed_time_ms(hr);
- if (prediction_ms > time_limit) {
- log_debug(gc, cset)("Prepared %u regions for optional evacuation. Predicted time: %.3fms", prepared_regions, prediction_ms);
- return;
- }
-
- // This region will be included in the next optional evacuation.
- prepare_to_evacuate_optional_region(hr);
- prepared_regions++;
- _current_limit++;
- _prepare_failed = false;
+void G1CollectionSet::move_candidates_to_collection_set(uint num_old_candidate_regions) {
+ if (num_old_candidate_regions == 0) {
+ return;
}
-
- log_debug(gc, cset)("Prepared all %u regions for optional evacuation. Predicted time: %.3fms",
- prepared_regions, prediction_ms);
-}
-
-bool G1OptionalCSet::prepare_failed() {
- return _prepare_failed;
-}
-
-void G1OptionalCSet::complete_evacuation() {
- _evacuation_failed = false;
- for (uint i = _current_index; i < _current_limit; i++) {
- HeapRegion* hr = region_at(i);
- _cset->clear_optional_region(hr);
- if (hr->evacuation_failed()){
- _evacuation_failed = true;
- }
+ uint candidate_idx = candidates()->cur_idx();
+ for (uint i = 0; i < num_old_candidate_regions; i++) {
+ HeapRegion* r = candidates()->at(candidate_idx + i);
+ // This potentially optional candidate region is going to be an actual collection
+ // set region. Clear cset marker.
+ _g1h->clear_in_cset(r);
+ add_old_region(r);
}
- _current_index = _current_limit;
+ candidates()->remove(num_old_candidate_regions);
+
+ candidates()->verify();
}
-bool G1OptionalCSet::evacuation_failed() {
- return _evacuation_failed;
+void G1CollectionSet::finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
+ double time_remaining_ms = finalize_young_part(target_pause_time_ms, survivor);
+ finalize_old_part(time_remaining_ms);
}
-G1OptionalCSet::~G1OptionalCSet() {
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
- while (!is_empty()) {
- // We want to return regions not evacuated to the collection set candidates
- // in reverse order to maintain the old order.
- HeapRegion* hr = _cset->remove_last_optional_region();
- assert(hr != NULL, "Should be valid region left");
- _pset->record_unused_optional_region(hr);
- g1h->old_set_add(hr);
- g1h->clear_in_cset(hr);
- hr->set_index_in_opt_cset(InvalidCSetIndex);
- _cset->candidates()->push_front(hr);
+bool G1CollectionSet::finalize_optional_for_evacuation(double remaining_pause_time) {
+ update_incremental_marker();
+
+ uint num_selected_regions;
+ _policy->calculate_optional_collection_set_regions(candidates(),
+ _num_optional_regions,
+ remaining_pause_time,
+ num_selected_regions);
+
+ move_candidates_to_collection_set(num_selected_regions);
+
+ _num_optional_regions -= num_selected_regions;
+
+ stop_incremental_building();
+ return num_selected_regions > 0;
+}
+
+void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* pss) {
+ for (uint i = 0; i < _num_optional_regions; i++) {
+ HeapRegion* r = candidates()->at(candidates()->cur_idx() + i);
+ pss->record_unused_optional_region(r);
+ _g1h->clear_in_cset(r);
+ r->clear_index_in_opt_cset();
}
- _cset->free_optional_regions();
-}
-
-uint G1OptionalCSet::size() {
- return _cset->optional_region_length() - _current_index;
-}
-
-bool G1OptionalCSet::is_empty() {
- return size() == 0;
-}
-
-void G1OptionalCSet::prepare_to_evacuate_optional_region(HeapRegion* hr) {
- log_trace(gc, cset)("Adding region %u for optional evacuation", hr->hrm_index());
- G1CollectedHeap::heap()->clear_in_cset(hr);
- _cset->add_old_region(hr);
+ free_optional_regions();
}
#ifdef ASSERT
diff --git a/src/hotspot/share/gc/g1/g1CollectionSet.hpp b/src/hotspot/share/gc/g1/g1CollectionSet.hpp
index aa800980510..66ee01e4bec 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSet.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.hpp
@@ -38,11 +38,101 @@ class G1SurvivorRegions;
class HeapRegion;
class HeapRegionClosure;
+// The collection set.
+//
+// The set of regions that are evacuated during an evacuation pause.
+//
+// At the end of a collection, before freeing the collection set, this set
+// contains all regions that were evacuated during this collection:
+//
+// - survivor regions from the last collection (if any)
+// - eden regions allocated by the mutator
+// - old gen regions evacuated during mixed gc
+//
+// This set is built incrementally at mutator time as regions are retired. If
+// this is a mixed gc, additional old regions from the collection set candidates
+// built during the concurrent cycle are added incrementally during the gc.
+//
+// A more detailed overview of how the collection set changes over time follows:
+//
+// 0) at the end of GC the survivor regions are added to this collection set.
+// 1) the mutator incrementally adds eden regions as they retire
+//
+// ----- gc starts
+//
+// 2) prepare (finalize) young regions of the collection set for collection
+// - relabel the survivors as eden
+// - finish up the incremental building that happened at mutator time
+//
+// iff this is a young-only collection:
+//
+// a3) evacuate the current collection set in one "initial evacuation" phase
+//
+// iff this is a mixed collection:
+//
+// b3) calculate the set of old gen regions we may be able to collect in this
+// collection from the list of collection set candidates.
+// - one part is added to the current collection set
+//    - the remaining regions are labeled as optional, and NOT yet added to the
+//      collection set.
+// b4) evacuate the current collection set in the "initial evacuation" phase
+// b5) evacuate the optional regions in the "optional evacuation" phase. This is
+// done in increments (or rounds).
+// b5-1) add a few of the optional regions to the current collection set
+// b5-2) evacuate only these newly added optional regions. For this mechanism we
+// reuse the incremental collection set building infrastructure (used also at
+// mutator time).
+// b5-3) repeat from b5-1 until the policy determines we are done
+//
+// all collections
+//
+// 6) free the collection set (contains all regions now; empties collection set
+// afterwards)
+// 7) add survivors to this collection set
+//
+// ----- gc ends
+//
+// goto 1)
+//
+// Examples of how the collection set might look over time:
+//
+// Legend:
+// S = survivor, E = eden, O = old.
+// |xxxx| = increment (with increment markers), containing four regions
+//
+// |SSSS| ... after step 0), with four survivor regions
+// |SSSSEE| ... at step 1), after retiring two eden regions
+// |SSSSEEEE| ... after step 1), after retiring four eden regions
+// |EEEEEEEE| ... after step 2)
+//
+// iff this is a young-only collection
+//
+// EEEEEEEE|| ... after step a3), after initial evacuation phase
+// || ... after step 6)
+// |SS| ... after step 7), with two survivor regions
+//
+// iff this is a mixed collection
+//
+// |EEEEEEEEOOOO| ... after step b3), added four regions to be
+// evacuated in the "initial evacuation" phase
+// EEEEEEEEOOOO|| ... after step b4), incremental part is empty
+// after evacuation
+// EEEEEEEEOOOO|OO| ... after step b5.1), added two regions to be
+// evacuated in the first round of the
+// "optional evacuation" phase
+// EEEEEEEEOOOOOO|O| ... after step b5.1), added one region to be
+// evacuated in the second round of the
+// "optional evacuation" phase
+// EEEEEEEEOOOOOOO|| ... after step b5), the complete collection set.
+//                               ||      ... after step 6)
+// |SSS| ... after step 7), with three survivor regions
+//
class G1CollectionSet {
G1CollectedHeap* _g1h;
G1Policy* _policy;
- // All old gen collection set candidate regions for the current mixed gc phase.
+ // All old gen collection set candidate regions for the current mixed phase.
G1CollectionSetCandidates* _candidates;
uint _eden_region_length;
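
The lifecycle comment above describes the collection set as one growing array in which _inc_part_start marks where the current increment begins; update_incremental_marker() moves that mark before each optional round so only the newly added regions are evacuated. A minimal standalone model of that bookkeeping (illustrative names, not HotSpot code):

    // Minimal model (not HotSpot code) of the increment markers: the collection set
    // is one growing array, and inc_part_start remembers where the current increment
    // begins, so each round only iterates the regions added since the last marker.
    #include <cstdio>
    #include <initializer_list>
    #include <vector>

    struct CollectionSetModel {
      std::vector<unsigned> regions;   // region indices, in insertion order
      size_t inc_part_start = 0;       // start of the current increment

      void update_incremental_marker() { inc_part_start = regions.size(); }
      void add_region(unsigned idx)    { regions.push_back(idx); }

      void iterate_increment() const {
        for (size_t i = inc_part_start; i < regions.size(); i++) {
          std::printf("  evacuate region %u\n", regions[i]);
        }
      }
    };

    int main() {
      CollectionSetModel cset;
      // Initial collection set: young regions plus some old regions (steps 2/b3).
      for (unsigned r : {1u, 2u, 3u, 10u, 11u}) cset.add_region(r);
      std::printf("initial evacuation:\n");
      cset.iterate_increment();

      // One optional round (step b5): mark a new increment, add a few optional regions.
      cset.update_incremental_marker();
      for (unsigned r : {12u, 13u}) cset.add_region(r);
      std::printf("optional evacuation round:\n");
      cset.iterate_increment();
      return 0;
    }
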
@@ -51,7 +141,7 @@ class G1CollectionSet {
// The actual collection set as a set of region indices.
// All entries in _collection_set_regions below _collection_set_cur_length are
- // assumed to be valid entries.
+ // assumed to be part of the collection set.
// We assume that at any time there is at most only one writer and (one or more)
// concurrent readers. This means we are good with using storestore and loadload
// barriers on the writer and reader respectively only.
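
The storestore/loadload pairing described above is the usual single-writer publication idiom: the writer fills a slot before publishing the new length, and readers order their load of the length before touching the slots. A standalone C++ sketch using release/acquire atomics as the portable analogue of OrderAccess::storestore()/loadload() (illustrative only):

    // Illustrative sketch (not HotSpot code) of single-writer publication: fill the
    // slot, then publish the new length with release ordering; readers load the
    // length with acquire ordering before reading the published slots.
    #include <atomic>
    #include <cstdio>

    static const size_t kMax = 16;
    static unsigned regions[kMax];
    static std::atomic<size_t> cur_length(0);

    void writer_add(unsigned region_idx) {                       // single writer
      size_t len = cur_length.load(std::memory_order_relaxed);
      regions[len] = region_idx;                                 // fill the slot first...
      cur_length.store(len + 1, std::memory_order_release);      // ...then publish it
    }

    void reader_iterate() {                                      // any number of readers
      size_t len = cur_length.load(std::memory_order_acquire);
      for (size_t i = 0; i < len; i++) {
        std::printf("region %u\n", regions[i]);                  // safe: published before len
      }
    }

    int main() {
      writer_add(5);
      writer_add(7);
      reader_iterate();
      return 0;
    }
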
@@ -59,31 +149,33 @@ class G1CollectionSet {
volatile size_t _collection_set_cur_length;
size_t _collection_set_max_length;
- // When doing mixed collections we can add old regions to the collection, which
- // can be collected if there is enough time. We call these optional regions and
- // the pointer to these regions are stored in the array below.
- HeapRegion** _optional_regions;
- uint _optional_region_length;
- uint _optional_region_max_length;
+ // When doing mixed collections we can add old regions to the collection set, which
+ // will be collected only if there is enough time. We call these optional regions.
+  // This member records the current number of such regions; they correspond to the
+  // first remaining entries in the collection set candidates.
+ uint _num_optional_regions;
// The number of bytes in the collection set before the pause. Set from
// the incrementally built collection set at the start of an evacuation
- // pause, and incremented in finalize_old_part() when adding old regions
- // (if any) to the collection set.
+ // pause, and updated as more regions are added to the collection set.
size_t _bytes_used_before;
+ // The number of cards in the remembered set in the collection set. Set from
+ // the incrementally built collection set at the start of an evacuation
+ // pause, and updated as more regions are added to the collection set.
size_t _recorded_rs_lengths;
- // The associated information that is maintained while the incremental
- // collection set is being built with young regions. Used to populate
- // the recorded info for the evacuation pause.
-
enum CSetBuildType {
Active, // We are actively building the collection set
Inactive // We are not actively building the collection set
};
CSetBuildType _inc_build_state;
+ size_t _inc_part_start;
+
+ // The associated information that is maintained while the incremental
+ // collection set is being built with *young* regions. Used to populate
+ // the recorded info for the evacuation pause.
// The number of bytes in the incrementally built collection set.
// Used to set _collection_set_bytes_used_before at the start of
@@ -113,22 +205,44 @@ class G1CollectionSet {
// See the comment for _inc_recorded_rs_lengths_diffs.
double _inc_predicted_elapsed_time_ms_diffs;
+ void set_recorded_rs_lengths(size_t rs_lengths);
+
G1CollectorState* collector_state();
G1GCPhaseTimes* phase_times();
void verify_young_cset_indices() const NOT_DEBUG_RETURN;
- void add_as_optional(HeapRegion* hr);
- void add_as_old(HeapRegion* hr);
- bool optional_is_full();
+ double predict_region_elapsed_time_ms(HeapRegion* hr);
+
+ // Update the incremental collection set information when adding a region.
+ void add_young_region_common(HeapRegion* hr);
+
+ // Add old region "hr" to the collection set.
+ void add_old_region(HeapRegion* hr);
+ void free_optional_regions();
+
+ // Add old region "hr" to optional collection set.
+ void add_optional_region(HeapRegion* hr);
+
+ void move_candidates_to_collection_set(uint num_regions);
+
+ // Finalize the young part of the initial collection set. Relabel survivor regions
+ // as Eden and calculate a prediction on how long the evacuation of all young regions
+ // will take.
+ double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
+ // Perform any final calculations on the incremental collection set fields before we
+ // can use them.
+ void finalize_incremental_building();
+
+ // Select the old regions of the initial collection set and determine how many optional
+ // regions we might be able to evacuate in this pause.
+ void finalize_old_part(double time_remaining_ms);
public:
G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
~G1CollectionSet();
// Initializes the collection set giving the maximum possible length of the collection set.
void initialize(uint max_region_length);
- void initialize_optional(uint max_length);
- void free_optional_regions();
void clear_candidates();
@@ -141,8 +255,6 @@ public:
void init_region_lengths(uint eden_cset_region_length,
uint survivor_cset_region_length);
- void set_recorded_rs_lengths(size_t rs_lengths);
-
uint region_length() const { return young_region_length() +
old_region_length(); }
uint young_region_length() const { return eden_region_length() +
@@ -151,32 +263,29 @@ public:
uint eden_region_length() const { return _eden_region_length; }
uint survivor_region_length() const { return _survivor_region_length; }
uint old_region_length() const { return _old_region_length; }
- uint optional_region_length() const { return _optional_region_length; }
+ uint optional_region_length() const { return _num_optional_regions; }
+
+ // Reset the contents of the collection set.
+ void clear();
// Incremental collection set support
// Initialize incremental collection set info.
void start_incremental_building();
+ // Start a new collection set increment.
+ void update_incremental_marker() { _inc_build_state = Active; _inc_part_start = _collection_set_cur_length; }
+ // Stop adding regions to the current collection set increment.
+ void stop_incremental_building() { _inc_build_state = Inactive; }
- // Perform any final calculations on the incremental collection set fields
- // before we can use them.
- void finalize_incremental_building();
+ // Iterate over the current collection set increment applying the given HeapRegionClosure
+ // from a starting position determined by the given worker id.
+ void iterate_incremental_part_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const;
- // Reset the contents of the collection set.
- void clear();
-
- // Iterate over the collection set, applying the given HeapRegionClosure on all of them.
- // If may_be_aborted is true, iteration may be aborted using the return value of the
- // called closure method.
+ // Iterate over the entire collection set (all increments calculated so far), applying
+ // the given HeapRegionClosure on all of them.
void iterate(HeapRegionClosure* cl) const;
- // Iterate over the collection set, applying the given HeapRegionClosure on all of them,
- // trying to optimally spread out starting position of total_workers workers given the
- // caller's worker_id.
- void iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const;
-
- // Stop adding regions to the incremental collection set.
- void stop_incremental_building() { _inc_build_state = Inactive; }
+ void iterate_optional(HeapRegionClosure* cl) const;
size_t recorded_rs_lengths() { return _recorded_rs_lengths; }
@@ -188,16 +297,14 @@ public:
_bytes_used_before = 0;
}
- // Choose a new collection set. Marks the chosen regions as being
- // "in_collection_set".
- double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
- void finalize_old_part(double time_remaining_ms);
-
- // Add old region "hr" to the collection set.
- void add_old_region(HeapRegion* hr);
-
- // Add old region "hr" to optional collection set.
- void add_optional_region(HeapRegion* hr);
+  // Finalize the initial collection set consisting of all young regions and
+  // potentially a few old gen regions.
+ void finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor);
+ // Finalize the next collection set from the set of available optional old gen regions.
+ bool finalize_optional_for_evacuation(double remaining_pause_time);
+ // Abandon (clean up) optional collection set regions that were not evacuated in this
+ // pause.
+ void abandon_optional_collection_set(G1ParScanThreadStateSet* pss);
// Update information about hr in the aggregated information for
// the incrementally built collection set.
@@ -214,73 +321,6 @@ public:
void print(outputStream* st);
#endif // !PRODUCT
-
- double predict_region_elapsed_time_ms(HeapRegion* hr);
-
- void clear_optional_region(const HeapRegion* hr);
-
- HeapRegion* optional_region_at(uint i) const {
- assert(_optional_regions != NULL, "Not yet initialized");
- assert(i < _optional_region_length, "index %u out of bounds (%u)", i, _optional_region_length);
- return _optional_regions[i];
- }
-
- HeapRegion* remove_last_optional_region() {
- assert(_optional_regions != NULL, "Not yet initialized");
- assert(_optional_region_length != 0, "No region to remove");
- _optional_region_length--;
- HeapRegion* removed = _optional_regions[_optional_region_length];
- _optional_regions[_optional_region_length] = NULL;
- return removed;
- }
-
-private:
- // Update the incremental collection set information when adding a region.
- void add_young_region_common(HeapRegion* hr);
-};
-
-// Helper class to manage the optional regions in a Mixed collection.
-class G1OptionalCSet : public StackObj {
-private:
- G1CollectionSet* _cset;
- G1ParScanThreadStateSet* _pset;
- uint _current_index;
- uint _current_limit;
- bool _prepare_failed;
- bool _evacuation_failed;
-
- void prepare_to_evacuate_optional_region(HeapRegion* hr);
-
-public:
- static const uint InvalidCSetIndex = UINT_MAX;
-
- G1OptionalCSet(G1CollectionSet* cset, G1ParScanThreadStateSet* pset) :
- _cset(cset),
- _pset(pset),
- _current_index(0),
- _current_limit(0),
- _prepare_failed(false),
- _evacuation_failed(false) { }
- // The destructor returns regions to the collection set candidates set and
- // frees the optional structure in the collection set.
- ~G1OptionalCSet();
-
- uint current_index() { return _current_index; }
- uint current_limit() { return _current_limit; }
-
- uint size();
- bool is_empty();
-
- HeapRegion* region_at(uint index);
-
- // Prepare a set of regions for optional evacuation.
- void prepare_evacuation(double time_left_ms);
- bool prepare_failed();
-
- // Complete the evacuation of the previously prepared
- // regions by updating their state and check for failures.
- void complete_evacuation();
- bool evacuation_failed();
};
#endif // SHARE_GC_G1_G1COLLECTIONSET_HPP
diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp
index 937bb4f8d3f..310cb7bc066 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp
@@ -27,26 +27,12 @@
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/heapRegion.inline.hpp"
-HeapRegion* G1CollectionSetCandidates::pop_front() {
- assert(_front_idx < _num_regions, "pre-condition");
- HeapRegion* hr = _regions[_front_idx];
- assert(hr != NULL, "pre-condition");
- _regions[_front_idx] = NULL;
- assert(hr->reclaimable_bytes() <= _remaining_reclaimable_bytes,
- "Remaining reclaimable bytes inconsistent "
- "from region: " SIZE_FORMAT " remaining: " SIZE_FORMAT,
- hr->reclaimable_bytes(), _remaining_reclaimable_bytes);
- _remaining_reclaimable_bytes -= hr->reclaimable_bytes();
- _front_idx++;
- return hr;
-}
-
-void G1CollectionSetCandidates::push_front(HeapRegion* hr) {
- assert(hr != NULL, "Can't put back a NULL region");
- assert(_front_idx >= 1, "Too many regions have been put back.");
- _front_idx--;
- _regions[_front_idx] = hr;
- _remaining_reclaimable_bytes += hr->reclaimable_bytes();
+void G1CollectionSetCandidates::remove(uint num_regions) {
+ assert(num_regions <= num_remaining(), "Trying to remove more regions (%u) than available (%u)", num_regions, num_remaining());
+ for (uint i = 0; i < num_regions; i++) {
+ _remaining_reclaimable_bytes -= at(_front_idx)->reclaimable_bytes();
+ _front_idx++;
+ }
}
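
G1CollectionSetCandidates now exposes a simple cursor: callers inspect entries from cur_idx() via at(), decide how many to take, and advance the cursor for all of them with remove(n), instead of popping and pushing regions one by one. A small standalone model of that usage (illustrative values; the candidate ordering is assumed to come from the collection set chooser):

    // Small standalone model (not HotSpot code) of the candidate-list cursor API.
    #include <cassert>
    #include <cstdio>
    #include <utility>
    #include <vector>

    class CandidatesModel {
      std::vector<unsigned> _regions;   // candidate region indices, in chooser order
      unsigned _front_idx = 0;

    public:
      explicit CandidatesModel(std::vector<unsigned> regions) : _regions(std::move(regions)) { }

      unsigned cur_idx() const { return _front_idx; }
      unsigned num_remaining() const { return static_cast<unsigned>(_regions.size()) - _front_idx; }
      unsigned at(unsigned idx) const { return _regions[idx]; }

      void remove(unsigned num_regions) {
        assert(num_regions <= num_remaining());
        _front_idx += num_regions;      // consumed prefix; entries are never nulled out
      }
    };

    int main() {
      CandidatesModel candidates({30, 31, 32, 33, 34});

      // Take the first two candidates into the collection set, as
      // move_candidates_to_collection_set() does.
      unsigned start = candidates.cur_idx();
      for (unsigned i = 0; i < 2; i++) {
        std::printf("add old region %u to collection set\n", candidates.at(start + i));
      }
      candidates.remove(2);
      std::printf("%u candidate(s) remaining\n", candidates.num_remaining());
      return 0;
    }
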
void G1CollectionSetCandidates::iterate(HeapRegionClosure* cl) {
@@ -62,13 +48,8 @@ void G1CollectionSetCandidates::iterate(HeapRegionClosure* cl) {
#ifndef PRODUCT
void G1CollectionSetCandidates::verify() const {
guarantee(_front_idx <= _num_regions, "Index: %u Num_regions: %u", _front_idx, _num_regions);
- uint idx = 0;
+ uint idx = _front_idx;
size_t sum_of_reclaimable_bytes = 0;
- while (idx < _front_idx) {
- guarantee(_regions[idx] == NULL, "All entries before _front_idx %u should be NULL, but %u is not",
- _front_idx, idx);
- idx++;
- }
HeapRegion *prev = NULL;
for (; idx < _num_regions; idx++) {
HeapRegion *cur = _regions[idx];
diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp
index 7eb73a156be..ce358d122aa 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp
@@ -63,22 +63,18 @@ public:
// Returns the total number of collection set candidate old regions added.
uint num_regions() { return _num_regions; }
- // Return the candidate region at the cursor position to be considered for collection without
- // removing it.
- HeapRegion* peek_front() {
+ uint cur_idx() const { return _front_idx; }
+
+ HeapRegion* at(uint idx) const {
HeapRegion* res = NULL;
- if (_front_idx < _num_regions) {
- res = _regions[_front_idx];
- assert(res != NULL, "Unexpected NULL HeapRegion at index %u", _front_idx);
+ if (idx < _num_regions) {
+ res = _regions[idx];
+ assert(res != NULL, "Unexpected NULL HeapRegion at index %u", idx);
}
return res;
}
- // Remove the given region from the candidates set and move the cursor to the next one.
- HeapRegion* pop_front();
-
- // Add the given HeapRegion to the front of the collection set candidate set again.
- void push_front(HeapRegion* hr);
+ void remove(uint num_regions);
// Iterate over all remaining collection set candidate regions.
void iterate(HeapRegionClosure* cl);
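
Note: the candidate list is now consumed through a cursor (cur_idx()/at()) and trimmed with remove(n) once regions have actually been added to a collection set, instead of popping and pushing individual regions. A minimal standalone sketch of this cursor pattern (illustrative types only, not HotSpot code):

    #include <cassert>
    #include <vector>

    // Minimal stand-in for the cursor-based candidate list: callers read
    // entries via at(cur_idx() + i) and only call remove(n) after n regions
    // have really been committed to the collection set.
    struct CandidateList {
      std::vector<int> regions;   // stand-in for HeapRegion*
      unsigned front = 0;

      unsigned cur_idx() const { return front; }
      unsigned num_remaining() const { return (unsigned)regions.size() - front; }
      int at(unsigned idx) const { return idx < regions.size() ? regions[idx] : -1; }

      void remove(unsigned n) {
        assert(n <= num_remaining());
        front += n;               // entries are never put back, only skipped over
      }
    };
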
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index 68dbb29980d..71b01a4b298 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -1940,9 +1940,10 @@ public:
guarantee(oopDesc::is_oop(task_entry.obj()),
"Non-oop " PTR_FORMAT ", phase: %s, info: %d",
p2i(task_entry.obj()), _phase, _info);
- guarantee(!_g1h->is_in_cset(task_entry.obj()),
- "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
- p2i(task_entry.obj()), _phase, _info);
+ HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
+ guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
+ "obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
+ p2i(task_entry.obj()), _phase, _info, r->hrm_index());
}
};
@@ -1979,11 +1980,11 @@ void G1ConcurrentMark::verify_no_collection_set_oops() {
HeapWord* task_finger = task->finger();
if (task_finger != NULL && task_finger < _heap.end()) {
// See above note on the global finger verification.
- HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
- guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
- !task_hr->in_collection_set(),
+ HeapRegion* r = _g1h->heap_region_containing(task_finger);
+ guarantee(r == NULL || task_finger == r->bottom() ||
+ !r->in_collection_set() || !r->has_index_in_opt_cset(),
"task finger: " PTR_FORMAT " region: " HR_FORMAT,
- p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
+ p2i(task_finger), HR_FORMAT_PARAMS(r));
}
}
}
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp
index 4fa51a77c17..8d0dd42d33c 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp
@@ -127,7 +127,7 @@ double G1ConcurrentMarkThread::mmu_sleep_time(G1Policy* g1_policy, bool remark)
}
void G1ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
- if (g1_policy->adaptive_young_list_length()) {
+ if (g1_policy->use_adaptive_young_list_length()) {
jlong sleep_time_ms = mmu_sleep_time(g1_policy, remark);
if (!_cm->has_aborted() && sleep_time_ms > 0) {
os::sleep(this, sleep_time_ms, false);
diff --git a/src/hotspot/share/gc/g1/g1EdenRegions.hpp b/src/hotspot/share/gc/g1/g1EdenRegions.hpp
index 2f7cf8bb411..9bef18a8338 100644
--- a/src/hotspot/share/gc/g1/g1EdenRegions.hpp
+++ b/src/hotspot/share/gc/g1/g1EdenRegions.hpp
@@ -31,19 +31,28 @@
class G1EdenRegions {
private:
- int _length;
+ int _length;
+ // Sum of used bytes from all retired eden regions.
+ // I.e. updated when mutator regions are retired.
+ volatile size_t _used_bytes;
public:
- G1EdenRegions() : _length(0) {}
+ G1EdenRegions() : _length(0), _used_bytes(0) { }
void add(HeapRegion* hr) {
assert(!hr->is_eden(), "should not already be set");
_length++;
}
- void clear() { _length = 0; }
+ void clear() { _length = 0; _used_bytes = 0; }
uint length() const { return _length; }
+
+ size_t used_bytes() const { return _used_bytes; }
+
+ void add_used_bytes(size_t used_bytes) {
+ _used_bytes += used_bytes;
+ }
};
#endif // SHARE_GC_G1_G1EDENREGIONS_HPP
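
Note: the eden used-bytes counter is only ever accumulated via add_used_bytes() and reset in clear(); the call is expected to come from the mutator-region retirement path, which is outside this hunk, so the retire_region() helper below is purely illustrative:

    #include <cstddef>

    // Toy model: a retired mutator region reports its used bytes into the
    // eden accounting. retire_region() is an illustrative stand-in; the real
    // call site lives in the allocation/retirement code, not in this hunk.
    struct EdenAccounting {
      int length = 0;
      size_t used_bytes = 0;
      void add() { length++; }
      void add_used_bytes(size_t bytes) { used_bytes += bytes; }
      void clear() { length = 0; used_bytes = 0; }
    };

    static void retire_region(EdenAccounting& eden, size_t region_used_bytes) {
      eden.add_used_bytes(region_used_bytes);  // accumulated, later read by monitoring
    }
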
diff --git a/src/hotspot/share/gc/g1/g1EvacFailure.cpp b/src/hotspot/share/gc/g1/g1EvacFailure.cpp
index b9a6275130d..57448aba5c3 100644
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp
@@ -228,6 +228,8 @@ public:
if (_hrclaimer->claim_region(hr->hrm_index())) {
if (hr->evacuation_failed()) {
+ hr->clear_index_in_opt_cset();
+
bool during_initial_mark = _g1h->collector_state()->in_initial_mark_gc();
bool during_conc_mark = _g1h->collector_state()->mark_or_rebuild_in_progress();
@@ -257,5 +259,5 @@ G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask() :
void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
RemoveSelfForwardPtrHRClosure rsfp_cl(worker_id, &_hrclaimer);
- _g1h->collection_set_iterate_from(&rsfp_cl, worker_id);
+ _g1h->collection_set_iterate_increment_from(&rsfp_cl, worker_id);
}
diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
index fb62003a8d9..8d45ed53427 100644
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
@@ -76,10 +76,12 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
}
_gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms):");
_gc_par_phases[OptScanRS] = new WorkerDataArray<double>(max_gc_threads, "Optional Scan RS (ms):");
- _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms):");
+ _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scan (ms):");
+ _gc_par_phases[OptCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Optional Code Root Scan (ms):");
_gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms):");
_gc_par_phases[OptObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Optional Object Copy (ms):");
_gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms):");
+ _gc_par_phases[OptTermination] = new WorkerDataArray<double>(max_gc_threads, "Optional Termination (ms):");
_gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms):");
_gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms):");
_gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms):");
@@ -91,14 +93,16 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
_scan_rs_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
_gc_par_phases[ScanRS]->link_thread_work_items(_scan_rs_skipped_cards, ScanRSSkippedCards);
- _opt_cset_scanned_cards = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Cards:");
- _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_scanned_cards, OptCSetScannedCards);
- _opt_cset_claimed_cards = new WorkerDataArray<size_t>(max_gc_threads, "Claimed Cards:");
- _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_claimed_cards, OptCSetClaimedCards);
- _opt_cset_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
- _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_skipped_cards, OptCSetSkippedCards);
- _opt_cset_used_memory = new WorkerDataArray<size_t>(max_gc_threads, "Used Memory:");
- _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_used_memory, OptCSetUsedMemory);
+ _opt_scan_rs_scanned_cards = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Cards:");
+ _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_scanned_cards, ScanRSScannedCards);
+ _opt_scan_rs_claimed_cards = new WorkerDataArray<size_t>(max_gc_threads, "Claimed Cards:");
+ _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_claimed_cards, ScanRSClaimedCards);
+ _opt_scan_rs_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
+ _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_skipped_cards, ScanRSSkippedCards);
+ _opt_scan_rs_scanned_opt_refs = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Refs:");
+ _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_scanned_opt_refs, ScanRSScannedOptRefs);
+ _opt_scan_rs_used_memory = new WorkerDataArray<size_t>(max_gc_threads, "Used Memory:");
+ _gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_used_memory, ScanRSUsedMemory);
_update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers:");
_gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_processed_buffers, UpdateRSProcessedBuffers);
@@ -112,9 +116,17 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
_obj_copy_lab_undo_waste = new WorkerDataArray<size_t>(max_gc_threads, "LAB Undo Waste");
_gc_par_phases[ObjCopy]->link_thread_work_items(_obj_copy_lab_undo_waste, ObjCopyLABUndoWaste);
+ _opt_obj_copy_lab_waste = new WorkerDataArray<size_t>(max_gc_threads, "LAB Waste");
+ _gc_par_phases[OptObjCopy]->link_thread_work_items(_opt_obj_copy_lab_waste, ObjCopyLABWaste);
+ _opt_obj_copy_lab_undo_waste = new WorkerDataArray<size_t>(max_gc_threads, "LAB Undo Waste");
+ _gc_par_phases[OptObjCopy]->link_thread_work_items(_opt_obj_copy_lab_undo_waste, ObjCopyLABUndoWaste);
+
_termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts:");
_gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);
+ _opt_termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Optional Termination Attempts:");
+ _gc_par_phases[OptTermination]->link_thread_work_items(_opt_termination_attempts);
+
if (UseStringDeduplication) {
_gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms):");
_gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms):");
@@ -134,7 +146,7 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
}
void G1GCPhaseTimes::reset() {
- _cur_collection_par_time_ms = 0.0;
+ _cur_collection_initial_evac_time_ms = 0.0;
_cur_optional_evac_ms = 0.0;
_cur_collection_code_root_fixup_time_ms = 0.0;
_cur_strong_code_root_purge_time_ms = 0.0;
@@ -251,6 +263,10 @@ void G1GCPhaseTimes::record_or_add_time_secs(GCParPhases phase, uint worker_i, d
}
}
+double G1GCPhaseTimes::get_time_secs(GCParPhases phase, uint worker_i) {
+ return _gc_par_phases[phase]->get(worker_i);
+}
+
void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index) {
_gc_par_phases[phase]->set_thread_work_item(worker_i, count, index);
}
@@ -259,6 +275,10 @@ void G1GCPhaseTimes::record_or_add_thread_work_item(GCParPhases phase, uint work
_gc_par_phases[phase]->set_or_add_thread_work_item(worker_i, count, index);
}
+size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_i, uint index) {
+ return _gc_par_phases[phase]->get_thread_work_item(worker_i, index);
+}
+
// return the average time for a phase in milliseconds
double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
return _gc_par_phases[phase]->average() * 1000.0;
@@ -374,12 +394,14 @@ double G1GCPhaseTimes::print_evacuate_optional_collection_set() const {
info_time("Evacuate Optional Collection Set", sum_ms);
debug_phase(_gc_par_phases[OptScanRS]);
debug_phase(_gc_par_phases[OptObjCopy]);
+ debug_phase(_gc_par_phases[OptCodeRoots]);
+ debug_phase(_gc_par_phases[OptTermination]);
}
return sum_ms;
}
double G1GCPhaseTimes::print_evacuate_collection_set() const {
- const double sum_ms = _cur_collection_par_time_ms;
+ const double sum_ms = _cur_collection_initial_evac_time_ms;
info_time("Evacuate Collection Set", sum_ms);
@@ -517,9 +539,11 @@ const char* G1GCPhaseTimes::phase_name(GCParPhases phase) {
"ScanRS",
"OptScanRS",
"CodeRoots",
+ "OptCodeRoots",
"ObjCopy",
"OptObjCopy",
"Termination",
+ "OptTermination",
"Other",
"GCWorkerTotal",
"GCWorkerEnd",
diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
index 488a30e94ec..39f4ed9a330 100644
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
@@ -67,9 +67,11 @@ class G1GCPhaseTimes : public CHeapObj {
ScanRS,
OptScanRS,
CodeRoots,
+ OptCodeRoots,
ObjCopy,
OptObjCopy,
Termination,
+ OptTermination,
Other,
GCWorkerTotal,
GCWorkerEnd,
@@ -87,7 +89,9 @@ class G1GCPhaseTimes : public CHeapObj {
enum GCScanRSWorkItems {
ScanRSScannedCards,
ScanRSClaimedCards,
- ScanRSSkippedCards
+ ScanRSSkippedCards,
+ ScanRSScannedOptRefs,
+ ScanRSUsedMemory
};
enum GCUpdateRSWorkItems {
@@ -101,13 +105,6 @@ class G1GCPhaseTimes : public CHeapObj {
ObjCopyLABUndoWaste
};
- enum GCOptCSetWorkItems {
- OptCSetScannedCards,
- OptCSetClaimedCards,
- OptCSetSkippedCards,
- OptCSetUsedMemory
- };
-
private:
// Markers for grouping the phases in the GCPhases enum above
static const int GCMainParPhasesLast = GCWorkerEnd;
@@ -122,19 +119,25 @@ class G1GCPhaseTimes : public CHeapObj {
WorkerDataArray<size_t>* _scan_rs_claimed_cards;
WorkerDataArray<size_t>* _scan_rs_skipped_cards;
+ WorkerDataArray<size_t>* _opt_scan_rs_scanned_cards;
+ WorkerDataArray<size_t>* _opt_scan_rs_claimed_cards;
+ WorkerDataArray<size_t>* _opt_scan_rs_skipped_cards;
+ WorkerDataArray<size_t>* _opt_scan_rs_scanned_opt_refs;
+ WorkerDataArray<size_t>* _opt_scan_rs_used_memory;
+
WorkerDataArray<size_t>* _obj_copy_lab_waste;
WorkerDataArray<size_t>* _obj_copy_lab_undo_waste;
- WorkerDataArray<size_t>* _opt_cset_scanned_cards;
- WorkerDataArray<size_t>* _opt_cset_claimed_cards;
- WorkerDataArray<size_t>* _opt_cset_skipped_cards;
- WorkerDataArray<size_t>* _opt_cset_used_memory;
+ WorkerDataArray<size_t>* _opt_obj_copy_lab_waste;
+ WorkerDataArray<size_t>* _opt_obj_copy_lab_undo_waste;
WorkerDataArray<size_t>* _termination_attempts;
+ WorkerDataArray<size_t>* _opt_termination_attempts;
+
WorkerDataArray<size_t>* _redirtied_cards;
- double _cur_collection_par_time_ms;
+ double _cur_collection_initial_evac_time_ms;
double _cur_optional_evac_ms;
double _cur_collection_code_root_fixup_time_ms;
double _cur_strong_code_root_purge_time_ms;
@@ -225,10 +228,14 @@ class G1GCPhaseTimes : public CHeapObj {
void record_or_add_time_secs(GCParPhases phase, uint worker_i, double secs);
+ double get_time_secs(GCParPhases phase, uint worker_i);
+
void record_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index = 0);
void record_or_add_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index = 0);
+ size_t get_thread_work_item(GCParPhases phase, uint worker_i, uint index = 0);
+
// return the average time for a phase in milliseconds
double average_time_ms(GCParPhases phase);
@@ -256,16 +263,16 @@ class G1GCPhaseTimes : public CHeapObj {
_cur_expand_heap_time_ms = ms;
}
- void record_par_time(double ms) {
- _cur_collection_par_time_ms = ms;
+ void record_initial_evac_time(double ms) {
+ _cur_collection_initial_evac_time_ms = ms;
}
- void record_optional_evacuation(double ms) {
- _cur_optional_evac_ms = ms;
+ void record_or_add_optional_evac_time(double ms) {
+ _cur_optional_evac_ms += ms;
}
- void record_code_root_fixup_time(double ms) {
- _cur_collection_code_root_fixup_time_ms = ms;
+ void record_or_add_code_root_fixup_time(double ms) {
+ _cur_collection_code_root_fixup_time_ms += ms;
}
void record_strong_code_root_purge_time(double ms) {
@@ -360,7 +367,7 @@ class G1GCPhaseTimes : public CHeapObj {
}
double cur_collection_par_time_ms() {
- return _cur_collection_par_time_ms;
+ return _cur_collection_initial_evac_time_ms;
}
double cur_clear_ct_time_ms() {
diff --git a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp
index 51746b29249..f384ab40e7c 100644
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp
@@ -371,6 +371,7 @@ public:
}
bool do_heap_region(HeapRegion* r) {
+ guarantee(!r->has_index_in_opt_cset(), "Region %u still has opt collection set index %u", r->hrm_index(), r->index_in_opt_cset());
guarantee(!r->is_young() || r->rem_set()->is_complete(), "Remembered set for Young region %u must be complete, is %s", r->hrm_index(), r->rem_set()->get_state_str());
// Humongous and old regions might be of any state, so can't check here.
guarantee(!r->is_free() || !r->rem_set()->is_tracked(), "Remembered set for free region %u must be untracked, is %s", r->hrm_index(), r->rem_set()->get_state_str());
diff --git a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp
index 1db3dc41ad9..375316c3511 100644
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp
@@ -228,23 +228,25 @@ void G1MonitoringSupport::recalculate_sizes() {
MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
// Recalculate all the sizes from scratch.
- uint young_list_length = _g1h->young_regions_count();
+ // This never includes the used bytes of the currently allocating heap region.
+ _overall_used = _g1h->used_unlocked();
+ _eden_space_used = _g1h->eden_regions_used_bytes();
+ _survivor_space_used = _g1h->survivor_regions_used_bytes();
+
+ // _overall_used and _eden_space_used are obtained concurrently so
+ // may be inconsistent with each other. To prevent _old_gen_used from going
+ // negative, use the smaller value in the subtraction.
+ _old_gen_used = _overall_used - MIN2(_overall_used, _eden_space_used + _survivor_space_used);
+
uint survivor_list_length = _g1h->survivor_regions_count();
- assert(young_list_length >= survivor_list_length, "invariant");
- uint eden_list_length = young_list_length - survivor_list_length;
// Max length includes any potential extensions to the young gen
// we'll do when the GC locker is active.
uint young_list_max_length = _g1h->policy()->young_list_max_length();
assert(young_list_max_length >= survivor_list_length, "invariant");
uint eden_list_max_length = young_list_max_length - survivor_list_length;
- _overall_used = _g1h->used_unlocked();
- _eden_space_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
- _survivor_space_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
- _old_gen_used = subtract_up_to_zero(_overall_used, _eden_space_used + _survivor_space_used);
-
// First calculate the committed sizes that can be calculated independently.
- _survivor_space_committed = _survivor_space_used;
+ _survivor_space_committed = survivor_list_length * HeapRegion::GrainBytes;
_old_gen_committed = HeapRegion::align_up_to_region_byte_size(_old_gen_used);
// Next, start with the overall committed size.
@@ -274,11 +276,15 @@ void G1MonitoringSupport::recalculate_sizes() {
// Somewhat defensive: cap the eden used size to make sure it
// never exceeds the committed size.
_eden_space_used = MIN2(_eden_space_used, _eden_space_committed);
- // _survivor_committed and _old_committed are calculated in terms of
- // the corresponding _*_used value, so the next two conditions
- // should hold.
- assert(_survivor_space_used <= _survivor_space_committed, "post-condition");
- assert(_old_gen_used <= _old_gen_committed, "post-condition");
+ // _survivor_space_used is calculated during a safepoint and _survivor_space_committed
+ // is calculated from survivor region count * heap region size.
+ assert(_survivor_space_used <= _survivor_space_committed, "Survivor used bytes(" SIZE_FORMAT
+ ") should be less than or equal to survivor committed(" SIZE_FORMAT ")",
+ _survivor_space_used, _survivor_space_committed);
+ // _old_gen_committed is calculated in terms of _old_gen_used value.
+ assert(_old_gen_used <= _old_gen_committed, "Old gen used bytes(" SIZE_FORMAT
+ ") should be less than or equal to old gen committed(" SIZE_FORMAT ")",
+ _old_gen_used, _old_gen_committed);
}
void G1MonitoringSupport::update_sizes() {
diff --git a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp
index 0b86b66c263..219ac73eac1 100644
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp
@@ -174,21 +174,6 @@ class G1MonitoringSupport : public CHeapObj {
size_t _old_gen_used;
- // It returns x - y if x > y, 0 otherwise.
- // As described in the comment above, some of the inputs to the
- // calculations we have to do are obtained concurrently and hence
- // may be inconsistent with each other. So, this provides a
- // defensive way of performing the subtraction and avoids the value
- // going negative (which would mean a very large result, given that
- // the parameter are size_t).
- static size_t subtract_up_to_zero(size_t x, size_t y) {
- if (x > y) {
- return x - y;
- } else {
- return 0;
- }
- }
-
// Recalculate all the sizes.
void recalculate_sizes();
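
Note: the dedicated subtract_up_to_zero() helper is replaced by clamping the subtrahend with MIN2 at its single call site. A small standalone example of why the clamp matters when the inputs are sampled concurrently and may disagree:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      // Values sampled concurrently may be mutually inconsistent:
      size_t overall_used  = 10u * 1024 * 1024;
      size_t eden_used     =  8u * 1024 * 1024;
      size_t survivor_used =  4u * 1024 * 1024;   // together they exceed overall_used

      // Clamp the subtrahend so old_gen_used cannot wrap around (size_t is unsigned).
      size_t old_gen_used =
          overall_used - std::min(overall_used, eden_used + survivor_used);
      std::printf("old gen used: %zu\n", old_gen_used);   // 0, not a huge wrapped value
      return 0;
    }
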
diff --git a/src/hotspot/share/gc/g1/g1OopStarChunkedList.cpp b/src/hotspot/share/gc/g1/g1OopStarChunkedList.cpp
index 7c9f4c5aa5c..48f69d51db9 100644
--- a/src/hotspot/share/gc/g1/g1OopStarChunkedList.cpp
+++ b/src/hotspot/share/gc/g1/g1OopStarChunkedList.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,11 @@ G1OopStarChunkedList::~G1OopStarChunkedList() {
delete_list(_coops);
}
-void G1OopStarChunkedList::oops_do(OopClosure* obj_cl, OopClosure* root_cl) {
- chunks_do(_roots, root_cl);
- chunks_do(_croots, root_cl);
- chunks_do(_oops, obj_cl);
- chunks_do(_coops, obj_cl);
+size_t G1OopStarChunkedList::oops_do(OopClosure* obj_cl, OopClosure* root_cl) {
+ size_t result = 0;
+ result += chunks_do(_roots, root_cl);
+ result += chunks_do(_croots, root_cl);
+ result += chunks_do(_oops, obj_cl);
+ result += chunks_do(_coops, obj_cl);
+ return result;
}
diff --git a/src/hotspot/share/gc/g1/g1OopStarChunkedList.hpp b/src/hotspot/share/gc/g1/g1OopStarChunkedList.hpp
index 467838f273a..24542dbde22 100644
--- a/src/hotspot/share/gc/g1/g1OopStarChunkedList.hpp
+++ b/src/hotspot/share/gc/g1/g1OopStarChunkedList.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,8 +41,8 @@ class G1OopStarChunkedList : public CHeapObj {
template <typename T> void delete_list(ChunkedList<T*, mtGC>* c);
template <typename T>
- void chunks_do(ChunkedList<T*, mtGC>* head,
- OopClosure* cl);
+ size_t chunks_do(ChunkedList<T*, mtGC>* head,
+ OopClosure* cl);
template <typename T>
inline void push(ChunkedList<T*, mtGC>** field, T* p);
@@ -53,7 +53,7 @@ class G1OopStarChunkedList : public CHeapObj {
size_t used_memory() { return _used_memory; }
- void oops_do(OopClosure* obj_cl, OopClosure* root_cl);
+ size_t oops_do(OopClosure* obj_cl, OopClosure* root_cl);
inline void push_oop(oop* p);
inline void push_oop(narrowOop* p);
diff --git a/src/hotspot/share/gc/g1/g1OopStarChunkedList.inline.hpp b/src/hotspot/share/gc/g1/g1OopStarChunkedList.inline.hpp
index f49274df716..4872495440f 100644
--- a/src/hotspot/share/gc/g1/g1OopStarChunkedList.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1OopStarChunkedList.inline.hpp
@@ -72,13 +72,16 @@ void G1OopStarChunkedList::delete_list(ChunkedList* c) {
}
template <typename T>
-void G1OopStarChunkedList::chunks_do(ChunkedList<T*, mtGC>* head, OopClosure* cl) {
+size_t G1OopStarChunkedList::chunks_do(ChunkedList<T*, mtGC>* head, OopClosure* cl) {
+ size_t result = 0;
for (ChunkedList<T*, mtGC>* c = head; c != NULL; c = c->next_used()) {
+ result += c->size();
for (size_t i = 0; i < c->size(); i++) {
T* p = c->at(i);
cl->do_oop(p);
}
}
+ return result;
}
#endif // SHARE_GC_G1_G1OOPSTARCHUNKEDLIST_INLINE_HPP
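
Note: oops_do()/chunks_do() now report how many oop* slots they visited so the caller can record the count (for example as ScanRSScannedOptRefs). A standalone sketch of that counting walk (plain types, not HotSpot code):

    #include <cstddef>
    #include <vector>

    // Toy model: walk chunked lists of oop* slots and return how many slots
    // were visited, so the caller can feed the count into per-worker statistics.
    static size_t chunks_do(const std::vector<std::vector<int*>>& chunks) {
      size_t visited = 0;
      for (const std::vector<int*>& chunk : chunks) {
        visited += chunk.size();
        for (int* p : chunk) {
          (void)p;  // the closure's do_oop(p) would be applied here
        }
      }
      return visited;
    }
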
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
index a94e11d2b84..c1e657c527e 100644
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
@@ -372,7 +372,7 @@ void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
}
size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
- _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_index, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
+ _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_index, used_memory, G1GCPhaseTimes::ScanRSUsedMemory);
}
}
diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp
index 14729bc840a..e4a964f3a92 100644
--- a/src/hotspot/share/gc/g1/g1Policy.cpp
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp
@@ -105,7 +105,7 @@ void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
assert(Heap_lock->owned_by_self(), "Locking discipline.");
- if (!adaptive_young_list_length()) {
+ if (!use_adaptive_young_list_length()) {
_young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
}
_young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
@@ -195,7 +195,7 @@ void G1Policy::record_new_heap_size(uint new_number_of_regions) {
uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) const {
uint desired_min_length = 0;
- if (adaptive_young_list_length()) {
+ if (use_adaptive_young_list_length()) {
if (_analytics->num_alloc_rate_ms() > 3) {
double now_sec = os::elapsedTime();
double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
@@ -252,7 +252,7 @@ G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_lengt
uint desired_max_length = calculate_young_list_desired_max_length();
uint young_list_target_length = 0;
- if (adaptive_young_list_length()) {
+ if (use_adaptive_young_list_length()) {
if (collector_state()->in_young_only_phase()) {
young_list_target_length =
calculate_young_list_target_length(rs_lengths,
@@ -304,7 +304,7 @@ G1Policy::calculate_young_list_target_length(size_t rs_lengths,
uint base_min_length,
uint desired_min_length,
uint desired_max_length) const {
- assert(adaptive_young_list_length(), "pre-condition");
+ assert(use_adaptive_young_list_length(), "pre-condition");
assert(collector_state()->in_young_only_phase(), "only call this for young GCs");
// In case some edge-condition makes the desired max length too small...
@@ -414,7 +414,7 @@ double G1Policy::predict_survivor_regions_evac_time() const {
}
void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
- guarantee( adaptive_young_list_length(), "should not call this otherwise" );
+ guarantee(use_adaptive_young_list_length(), "should not call this otherwise");
if (rs_lengths > _rs_lengths_prediction) {
// add 10% to avoid having to recalculate often
@@ -430,7 +430,7 @@ void G1Policy::update_rs_lengths_prediction() {
}
void G1Policy::update_rs_lengths_prediction(size_t prediction) {
- if (collector_state()->in_young_only_phase() && adaptive_young_list_length()) {
+ if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) {
_rs_lengths_prediction = prediction;
}
}
@@ -659,7 +659,11 @@ void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_sc
double cost_per_entry_ms = 0.0;
if (cards_scanned > 10) {
- cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
+ double avg_time_scan_rs = average_time_ms(G1GCPhaseTimes::ScanRS);
+ if (this_pause_was_young_only) {
+ avg_time_scan_rs += average_time_ms(G1GCPhaseTimes::OptScanRS);
+ }
+ cost_per_entry_ms = avg_time_scan_rs / cards_scanned;
_analytics->report_cost_per_entry_ms(cost_per_entry_ms, this_pause_was_young_only);
}
@@ -694,7 +698,7 @@ void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_sc
double cost_per_byte_ms = 0.0;
if (copied_bytes > 0) {
- cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
+ cost_per_byte_ms = (average_time_ms(G1GCPhaseTimes::ObjCopy) + average_time_ms(G1GCPhaseTimes::OptObjCopy)) / (double) copied_bytes;
_analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress());
}
@@ -906,8 +910,8 @@ bool G1Policy::can_expand_young_list() const {
return young_list_length < young_list_max_length;
}
-bool G1Policy::adaptive_young_list_length() const {
- return _young_gen_sizer->adaptive_young_list_length();
+bool G1Policy::use_adaptive_young_list_length() const {
+ return _young_gen_sizer->use_adaptive_young_list_length();
}
size_t G1Policy::desired_survivor_size(uint max_regions) const {
@@ -1188,11 +1192,135 @@ uint G1Policy::calc_max_old_cset_length() const {
return (uint) result;
}
-uint G1Policy::finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
- double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms, survivor);
- _collection_set->finalize_old_part(time_remaining_ms);
+void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
+ double time_remaining_ms,
+ uint& num_initial_regions,
+ uint& num_optional_regions) {
+ assert(candidates != NULL, "Must be");
- return _collection_set->region_length();
+ num_initial_regions = 0;
+ num_optional_regions = 0;
+ uint num_expensive_regions = 0;
+
+ double predicted_initial_time_ms = 0.0;
+ double predicted_optional_time_ms = 0.0;
+
+ double optional_threshold_ms = time_remaining_ms * optional_prediction_fraction();
+
+ const uint min_old_cset_length = calc_min_old_cset_length();
+ const uint max_old_cset_length = MAX2(min_old_cset_length, calc_max_old_cset_length());
+ const uint max_optional_regions = max_old_cset_length - min_old_cset_length;
+ bool check_time_remaining = use_adaptive_young_list_length();
+
+ uint candidate_idx = candidates->cur_idx();
+
+ log_debug(gc, ergo, cset)("Start adding old regions to collection set. Min %u regions, max %u regions, "
+ "time remaining %1.2fms, optional threshold %1.2fms",
+ min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms);
+
+ HeapRegion* hr = candidates->at(candidate_idx);
+ while (hr != NULL) {
+ if (num_initial_regions + num_optional_regions >= max_old_cset_length) {
+ // Added maximum number of old regions to the CSet.
+ log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Maximum number of regions). "
+ "Initial %u regions, optional %u regions",
+ num_initial_regions, num_optional_regions);
+ break;
+ }
+
+ // Stop adding regions if the remaining reclaimable space is
+ // not above G1HeapWastePercent.
+ size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
+ double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
+ double threshold = (double) G1HeapWastePercent;
+ if (reclaimable_percent <= threshold) {
+ // We've added enough old regions that the amount of uncollected
+ // reclaimable space is at or below the waste threshold. Stop
+ // adding old regions to the CSet.
+ log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Reclaimable percentage below threshold). "
+ "Reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%",
+ byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes),
+ reclaimable_percent, G1HeapWastePercent);
+ break;
+ }
+
+ double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
+ time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
+ // Add regions to old set until we reach the minimum amount
+ if (num_initial_regions < min_old_cset_length) {
+ predicted_initial_time_ms += predicted_time_ms;
+ num_initial_regions++;
+ // Record the number of regions added with no time remaining
+ if (time_remaining_ms == 0.0) {
+ num_expensive_regions++;
+ }
+ } else if (!check_time_remaining) {
+ // In the non-auto-tuning case, we'll finish adding regions
+ // to the CSet if we reach the minimum.
+ log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Region amount reached min).");
+ break;
+ } else {
+ // Keep adding regions to old set until we reach the optional threshold
+ if (time_remaining_ms > optional_threshold_ms) {
+ predicted_initial_time_ms += predicted_time_ms;
+ num_initial_regions++;
+ } else if (time_remaining_ms > 0) {
+ // Keep adding optional regions until time is up.
+ assert(num_optional_regions < max_optional_regions, "Should not be possible.");
+ predicted_optional_time_ms += predicted_time_ms;
+ num_optional_regions++;
+ } else {
+ log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Predicted time too high).");
+ break;
+ }
+ }
+ hr = candidates->at(++candidate_idx);
+ }
+ if (hr == NULL) {
+ log_debug(gc, ergo, cset)("Old candidate collection set empty.");
+ }
+
+ if (num_expensive_regions > 0) {
+ log_debug(gc, ergo, cset)("Added %u initial old regions to collection set although the predicted time was too high.",
+ num_expensive_regions);
+ }
+
+ log_debug(gc, ergo, cset)("Finish choosing collection set old regions. Initial: %u, optional: %u, "
+ "predicted old time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2f",
+ num_initial_regions, num_optional_regions,
+ predicted_initial_time_ms, predicted_optional_time_ms, time_remaining_ms);
+}
+
+void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
+ uint const max_optional_regions,
+ double time_remaining_ms,
+ uint& num_optional_regions) {
+ assert(_g1h->collector_state()->in_mixed_phase(), "Should only be called in mixed phase");
+
+ num_optional_regions = 0;
+ double prediction_ms = 0;
+ uint candidate_idx = candidates->cur_idx();
+
+ HeapRegion* r = candidates->at(candidate_idx);
+ while (num_optional_regions < max_optional_regions) {
+ assert(r != NULL, "Region must exist");
+ prediction_ms += predict_region_elapsed_time_ms(r, false);
+
+ if (prediction_ms > time_remaining_ms) {
+ log_debug(gc, ergo, cset)("Prediction %.3fms for region %u does not fit remaining time: %.3fms.",
+ prediction_ms, r->hrm_index(), time_remaining_ms);
+ break;
+ }
+ // This region will be included in the next optional evacuation.
+
+ num_optional_regions++;
+ r = candidates->at(++candidate_idx);
+ }
+
+ log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. Predicted time: %.3fms",
+ num_optional_regions, max_optional_regions, prediction_ms);
}
void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
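
Note: calculate_optional_collection_set_regions() keeps taking candidates while the cumulative predicted time still fits the fixed remaining budget. A simplified standalone version of that selection loop:

    #include <vector>

    // Simplified version of the optional-region selection: take the longest
    // prefix of candidates whose cumulative predicted evacuation time still
    // fits into the fixed time budget.
    static unsigned select_optional(const std::vector<double>& predicted_ms,
                                    unsigned max_regions,
                                    double time_remaining_ms) {
      unsigned selected = 0;
      double prediction_ms = 0.0;
      while (selected < max_regions && selected < predicted_ms.size()) {
        prediction_ms += predicted_ms[selected];
        if (prediction_ms > time_remaining_ms) {
          break;                       // this region no longer fits the budget
        }
        selected++;
      }
      return selected;
    }
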
diff --git a/src/hotspot/share/gc/g1/g1Policy.hpp b/src/hotspot/share/gc/g1/g1Policy.hpp
index 771def9b4ee..c05d9ebd0c2 100644
--- a/src/hotspot/share/gc/g1/g1Policy.hpp
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp
@@ -44,6 +44,7 @@
class HeapRegion;
class G1CollectionSet;
+class G1CollectionSetCandidates;
class G1CollectionSetChooser;
class G1IHOPControl;
class G1Analytics;
@@ -344,7 +345,21 @@ public:
bool next_gc_should_be_mixed(const char* true_action_str,
const char* false_action_str) const;
- uint finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor);
+ // Calculate and return the number of initial and optional old gen regions from
+ // the given collection set candidates and the remaining time.
+ void calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
+ double time_remaining_ms,
+ uint& num_initial_regions,
+ uint& num_optional_regions);
+
+ // Calculate the number of optional regions to take from the given collection set
+ // candidates, given the remaining time and the maximum number of such regions.
+ // The result is returned in num_optional_regions.
+ void calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
+ uint const max_optional_regions,
+ double time_remaining_ms,
+ uint& num_optional_regions);
+
private:
// Set the state to start a concurrent marking cycle and clear
// _initiate_conc_mark_if_possible because it has now been
@@ -384,7 +399,7 @@ public:
return _young_list_max_length;
}
- bool adaptive_young_list_length() const;
+ bool use_adaptive_young_list_length() const;
void transfer_survivors_to_cset(const G1SurvivorRegions* survivors);
@@ -403,11 +418,13 @@ private:
AgeTable _survivors_age_table;
size_t desired_survivor_size(uint max_regions) const;
-public:
+
// Fraction used when predicting how many optional regions to include in
// the CSet. This fraction of the available time is used for optional regions,
// the rest is used to add old regions to the normal CSet.
double optional_prediction_fraction() { return 0.2; }
+
+public:
// Fraction used when evacuating the optional regions. This fraction of the
// remaining time is used to choose what regions to include in the evacuation.
double optional_evacuation_fraction() { return 0.75; }
diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp
index 01862e30088..19a54da8eb4 100644
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp
@@ -316,6 +316,8 @@ G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state
_scan_state(scan_state),
_phase(phase),
_worker_i(worker_i),
+ _opt_refs_scanned(0),
+ _opt_refs_memory_used(0),
_cards_scanned(0),
_cards_claimed(0),
_cards_skipped(0),
@@ -338,6 +340,19 @@ void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card)
_cards_scanned++;
}
+void G1ScanRSForRegionClosure::scan_opt_rem_set_roots(HeapRegion* r) {
+ EventGCPhaseParallel event;
+
+ G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r);
+
+ G1ScanObjsDuringScanRSClosure scan_cl(_g1h, _pss);
+ G1ScanRSForOptionalClosure cl(&scan_cl);
+ _opt_refs_scanned += opt_rem_set_list->oops_do(&cl, _pss->closures()->raw_strong_oops());
+ _opt_refs_memory_used += opt_rem_set_list->used_memory();
+
+ event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(_phase));
+}
+
void G1ScanRSForRegionClosure::scan_rem_set_roots(HeapRegion* r) {
EventGCPhaseParallel event;
uint const region_idx = r->hrm_index();
@@ -414,11 +429,16 @@ void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
}
bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
- assert(r->in_collection_set(),
- "Should only be called on elements of the collection set but region %u is not.",
- r->hrm_index());
+ assert(r->in_collection_set(), "Region %u is not in the collection set.", r->hrm_index());
uint const region_idx = r->hrm_index();
+ // The individual references for the optional remembered set are per-worker, so we
+ // always need to scan them.
+ if (r->has_index_in_opt_cset()) {
+ G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time);
+ scan_opt_rem_set_roots(r);
+ }
+
// Do an early out if we know we are complete.
if (_scan_state->iter_is_complete(region_idx)) {
return false;
@@ -437,22 +457,33 @@ bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
return false;
}
-void G1RemSet::scan_rem_set(G1ParScanThreadState* pss, uint worker_i) {
+void G1RemSet::scan_rem_set(G1ParScanThreadState* pss,
+ uint worker_i,
+ G1GCPhaseTimes::GCParPhases scan_phase,
+ G1GCPhaseTimes::GCParPhases objcopy_phase,
+ G1GCPhaseTimes::GCParPhases coderoots_phase) {
+ assert(pss->trim_ticks().value() == 0, "Queues must have been trimmed before entering.");
+
G1ScanObjsDuringScanRSClosure scan_cl(_g1h, pss);
- G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, G1GCPhaseTimes::ScanRS, worker_i);
- _g1h->collection_set_iterate_from(&cl, worker_i);
+ G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, scan_phase, worker_i);
+ _g1h->collection_set_iterate_increment_from(&cl, worker_i);
G1GCPhaseTimes* p = _g1p->phase_times();
- p->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, cl.rem_set_root_scan_time().seconds());
- p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, cl.rem_set_trim_partially_time().seconds());
+ p->record_or_add_time_secs(objcopy_phase, worker_i, cl.rem_set_trim_partially_time().seconds());
- p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
- p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
- p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);
+ p->record_or_add_time_secs(scan_phase, worker_i, cl.rem_set_root_scan_time().seconds());
+ p->record_or_add_thread_work_item(scan_phase, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
+ p->record_or_add_thread_work_item(scan_phase, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
+ p->record_or_add_thread_work_item(scan_phase, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);
+ // At this time we only record some metrics for the optional remembered set.
+ if (scan_phase == G1GCPhaseTimes::OptScanRS) {
+ p->record_or_add_thread_work_item(scan_phase, worker_i, cl.opt_refs_scanned(), G1GCPhaseTimes::ScanRSScannedOptRefs);
+ p->record_or_add_thread_work_item(scan_phase, worker_i, cl.opt_refs_memory_used(), G1GCPhaseTimes::ScanRSUsedMemory);
+ }
- p->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time().seconds());
- p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, cl.strong_code_root_trim_partially_time().seconds());
+ p->record_or_add_time_secs(coderoots_phase, worker_i, cl.strong_code_root_scan_time().seconds());
+ p->add_time_secs(objcopy_phase, worker_i, cl.strong_code_root_trim_partially_time().seconds());
}
// Closure used for updating rem sets. Only called during an evacuation pause.
@@ -514,11 +545,6 @@ void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
}
}
-void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss, uint worker_i) {
- update_rem_set(pss, worker_i);
- scan_rem_set(pss, worker_i);;
-}
-
void G1RemSet::prepare_for_oops_into_collection_set_do() {
G1BarrierSet::dirty_card_queue_set().concatenate_logs();
_scan_state->reset();
diff --git a/src/hotspot/share/gc/g1/g1RemSet.hpp b/src/hotspot/share/gc/g1/g1RemSet.hpp
index b64a00c613d..1e614901109 100644
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp
@@ -60,14 +60,6 @@ private:
G1RemSetSummary _prev_period_summary;
- // Scan all remembered sets of the collection set for references into the collection
- // set.
- void scan_rem_set(G1ParScanThreadState* pss, uint worker_i);
-
- // Flush remaining refinement buffers for cross-region references to either evacuate references
- // into the collection set or update the remembered set.
- void update_rem_set(G1ParScanThreadState* pss, uint worker_i);
-
G1CollectedHeap* _g1h;
size_t _num_conc_refined_cards; // Number of cards refined concurrently to the mutator.
@@ -93,12 +85,19 @@ public:
G1HotCardCache* hot_card_cache);
~G1RemSet();
- // Process all oops in the collection set from the cards in the refinement buffers and
- // remembered sets using pss.
- //
+ // Scan all remembered sets of the collection set for references into the collection
+ // set.
// Further applies heap_region_codeblobs on the oops of the unmarked nmethods on the strong code
// roots list for each region in the collection set.
- void oops_into_collection_set_do(G1ParScanThreadState* pss, uint worker_i);
+ void scan_rem_set(G1ParScanThreadState* pss,
+ uint worker_i,
+ G1GCPhaseTimes::GCParPhases scan_phase,
+ G1GCPhaseTimes::GCParPhases objcopy_phase,
+ G1GCPhaseTimes::GCParPhases coderoots_phase);
+
+ // Flush remaining refinement buffers for cross-region references to either evacuate references
+ // into the collection set or update the remembered set.
+ void update_rem_set(G1ParScanThreadState* pss, uint worker_i);
// Prepare for and cleanup after an oops_into_collection_set_do
// call. Must call each of these once before and after (in sequential
@@ -144,6 +143,9 @@ class G1ScanRSForRegionClosure : public HeapRegionClosure {
uint _worker_i;
+ size_t _opt_refs_scanned;
+ size_t _opt_refs_memory_used;
+
size_t _cards_scanned;
size_t _cards_claimed;
size_t _cards_skipped;
@@ -157,6 +159,7 @@ class G1ScanRSForRegionClosure : public HeapRegionClosure {
void claim_card(size_t card_index, const uint region_idx_for_card);
void scan_card(MemRegion mr, uint region_idx_for_card);
+ void scan_opt_rem_set_roots(HeapRegion* r);
void scan_rem_set_roots(HeapRegion* r);
void scan_strong_code_roots(HeapRegion* r);
public:
@@ -177,6 +180,9 @@ public:
size_t cards_scanned() const { return _cards_scanned; }
size_t cards_claimed() const { return _cards_claimed; }
size_t cards_skipped() const { return _cards_skipped; }
+
+ size_t opt_refs_scanned() const { return _opt_refs_scanned; }
+ size_t opt_refs_memory_used() const { return _opt_refs_memory_used; }
};
#endif // SHARE_GC_G1_G1REMSET_HPP
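
Note: scan_rem_set() now receives the phase identifiers as arguments so the same scanning code can be attributed either to the initial or to the optional evacuation. The call sites are outside this hunk, so the following is only a sketch of the expected usage:

    // Sketch of how the phase arguments are expected to be threaded through
    // (the actual call sites live in the evacuation code, not in this hunk):
    //
    //   // initial collection set
    //   rem_set->scan_rem_set(pss, worker_id,
    //                         G1GCPhaseTimes::ScanRS,
    //                         G1GCPhaseTimes::ObjCopy,
    //                         G1GCPhaseTimes::CodeRoots);
    //
    //   // each optional evacuation round
    //   rem_set->scan_rem_set(pss, worker_id,
    //                         G1GCPhaseTimes::OptScanRS,
    //                         G1GCPhaseTimes::OptObjCopy,
    //                         G1GCPhaseTimes::OptCodeRoots);
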
diff --git a/src/hotspot/share/gc/g1/g1SurvivorRegions.cpp b/src/hotspot/share/gc/g1/g1SurvivorRegions.cpp
index ec49aea984d..771daa20f6a 100644
--- a/src/hotspot/share/gc/g1/g1SurvivorRegions.cpp
+++ b/src/hotspot/share/gc/g1/g1SurvivorRegions.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,9 @@
#include "utilities/growableArray.hpp"
#include "utilities/debug.hpp"
-G1SurvivorRegions::G1SurvivorRegions() : _regions(new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(8, true, mtGC)) {}
+G1SurvivorRegions::G1SurvivorRegions() :
+ _regions(new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(8, true, mtGC)),
+ _used_bytes(0) {}
void G1SurvivorRegions::add(HeapRegion* hr) {
assert(hr->is_survivor(), "should be flagged as survivor region");
@@ -51,5 +53,9 @@ void G1SurvivorRegions::convert_to_eden() {
void G1SurvivorRegions::clear() {
_regions->clear();
+ _used_bytes = 0;
}
+void G1SurvivorRegions::add_used_bytes(size_t used_bytes) {
+ _used_bytes += used_bytes;
+}
diff --git a/src/hotspot/share/gc/g1/g1SurvivorRegions.hpp b/src/hotspot/share/gc/g1/g1SurvivorRegions.hpp
index ed6d57a56de..12a8292175f 100644
--- a/src/hotspot/share/gc/g1/g1SurvivorRegions.hpp
+++ b/src/hotspot/share/gc/g1/g1SurvivorRegions.hpp
@@ -34,6 +34,7 @@ class HeapRegion;
class G1SurvivorRegions {
private:
GrowableArray<HeapRegion*>* _regions;
+ volatile size_t _used_bytes;
public:
G1SurvivorRegions();
@@ -49,6 +50,11 @@ public:
const GrowableArray<HeapRegion*>* regions() const {
return _regions;
}
+
+ // Used bytes of all survivor regions.
+ size_t used_bytes() const { return _used_bytes; }
+
+ void add_used_bytes(size_t used_bytes);
};
#endif // SHARE_GC_G1_G1SURVIVORREGIONS_HPP
diff --git a/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp b/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp
index 209d8e2966f..a42f8836447 100644
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp
@@ -30,14 +30,14 @@
#include "logging/log.hpp"
G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
- _adaptive_size(true), _min_desired_young_length(0), _max_desired_young_length(0) {
+ _use_adaptive_sizing(true), _min_desired_young_length(0), _max_desired_young_length(0) {
if (FLAG_IS_CMDLINE(NewRatio)) {
if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
} else {
_sizer_kind = SizerNewRatio;
- _adaptive_size = false;
+ _use_adaptive_sizing = false;
return;
}
}
@@ -59,7 +59,7 @@ G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
1U);
_sizer_kind = SizerMaxAndNewSize;
- _adaptive_size = _min_desired_young_length != _max_desired_young_length;
+ _use_adaptive_sizing = _min_desired_young_length != _max_desired_young_length;
} else {
_sizer_kind = SizerNewSizeOnly;
}
diff --git a/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp b/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp
index a08b0576f81..a236d786755 100644
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp
@@ -77,7 +77,7 @@ private:
// False when using a fixed young generation size due to command-line options,
// true otherwise.
- bool _adaptive_size;
+ bool _use_adaptive_sizing;
uint calculate_default_min_length(uint new_number_of_heap_regions);
uint calculate_default_max_length(uint new_number_of_heap_regions);
@@ -104,8 +104,8 @@ public:
return _max_desired_young_length;
}
- bool adaptive_young_list_length() const {
- return _adaptive_size;
+ bool use_adaptive_young_list_length() const {
+ return _use_adaptive_sizing;
}
static G1YoungGenSizer* create_gen_sizer(G1CollectorPolicy* policy);
diff --git a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp
index 4cf3e6f5cb8..7a8407c7b7c 100644
--- a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp
+++ b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp
@@ -165,7 +165,7 @@ void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1Policy* policy = g1h->policy();
- if (policy->adaptive_young_list_length()) {
+ if (policy->use_adaptive_young_list_length()) {
G1YoungRemSetSamplingClosure cl(&sts);
G1CollectionSet* g1cs = g1h->collection_set();
diff --git a/src/hotspot/share/gc/g1/heapRegion.cpp b/src/hotspot/share/gc/g1/heapRegion.cpp
index b2565987434..6d2ec601085 100644
--- a/src/hotspot/share/gc/g1/heapRegion.cpp
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp
@@ -117,6 +117,7 @@ void HeapRegion::hr_clear(bool keep_remset, bool clear_space, bool locked) {
"Should not clear heap region %u in the collection set", hrm_index());
set_young_index_in_cset(-1);
+ clear_index_in_opt_cset();
uninstall_surv_rate_group();
set_free();
reset_pre_dummy_top();
@@ -241,7 +242,7 @@ HeapRegion::HeapRegion(uint hrm_index,
_containing_set(NULL),
#endif
_prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
- _index_in_opt_cset(G1OptionalCSet::InvalidCSetIndex), _young_index_in_cset(-1),
+ _index_in_opt_cset(InvalidCSetIndex), _young_index_in_cset(-1),
_surv_rate_group(NULL), _age_index(-1),
_prev_top_at_mark_start(NULL), _next_top_at_mark_start(NULL),
_recorded_rs_length(0), _predicted_elapsed_time_ms(0)
diff --git a/src/hotspot/share/gc/g1/heapRegion.hpp b/src/hotspot/share/gc/g1/heapRegion.hpp
index 7c69bc4b54e..f99cf19d3d4 100644
--- a/src/hotspot/share/gc/g1/heapRegion.hpp
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp
@@ -250,6 +250,8 @@ class HeapRegion: public G1ContiguousSpace {
// The calculated GC efficiency of the region.
double _gc_efficiency;
+ static const uint InvalidCSetIndex = UINT_MAX;
+
// The index in the optional regions array, if this region
// is considered optional during a mixed collection.
uint _index_in_opt_cset;
@@ -549,8 +551,13 @@ class HeapRegion: public G1ContiguousSpace {
void calc_gc_efficiency(void);
double gc_efficiency() const { return _gc_efficiency;}
- uint index_in_opt_cset() const { return _index_in_opt_cset; }
+ uint index_in_opt_cset() const {
+ assert(has_index_in_opt_cset(), "Opt cset index not set.");
+ return _index_in_opt_cset;
+ }
+ bool has_index_in_opt_cset() const { return _index_in_opt_cset != InvalidCSetIndex; }
void set_index_in_opt_cset(uint index) { _index_in_opt_cset = index; }
+ void clear_index_in_opt_cset() { _index_in_opt_cset = InvalidCSetIndex; }
int young_index_in_cset() const { return _young_index_in_cset; }
void set_young_index_in_cset(int index) {
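
Note: the optional collection set index uses UINT_MAX (InvalidCSetIndex) as a "not set" sentinel, with an asserting getter and an explicit clear. A standalone sketch of the same sentinel pattern:

    #include <cassert>
    #include <climits>

    // Toy model of the sentinel pattern: UINT_MAX means "not in the optional
    // collection set", and the getter asserts that the index is actually set.
    struct OptCSetIndex {
      static const unsigned Invalid = UINT_MAX;
      unsigned value = Invalid;

      bool is_set() const { return value != Invalid; }
      unsigned get() const { assert(is_set()); return value; }
      void set(unsigned idx) { value = idx; }
      void clear() { value = Invalid; }
    };
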
diff --git a/src/hotspot/share/gc/g1/vmStructs_g1.hpp b/src/hotspot/share/gc/g1/vmStructs_g1.hpp
index 03e95109cb8..7afe19aa9e0 100644
--- a/src/hotspot/share/gc/g1/vmStructs_g1.hpp
+++ b/src/hotspot/share/gc/g1/vmStructs_g1.hpp
@@ -52,8 +52,8 @@
nonstatic_field(HeapRegionManager, _regions, G1HeapRegionTable) \
nonstatic_field(HeapRegionManager, _num_committed, uint) \
\
- nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
- nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager*) \
+ volatile_nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
+ nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager*) \
nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \
nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _archive_set, HeapRegionSetBase) \
diff --git a/src/hotspot/share/gc/shared/workerDataArray.hpp b/src/hotspot/share/gc/shared/workerDataArray.hpp
index 955c805d3e4..f21d0d3f059 100644
--- a/src/hotspot/share/gc/shared/workerDataArray.hpp
+++ b/src/hotspot/share/gc/shared/workerDataArray.hpp
@@ -34,7 +34,7 @@ template <class T>
class WorkerDataArray : public CHeapObj<mtGC> {
friend class WDAPrinter;
public:
- static const uint MaxThreadWorkItems = 4;
+ static const uint MaxThreadWorkItems = 5;
private:
T* _data;
uint _length;
@@ -50,6 +50,7 @@ private:
void set_thread_work_item(uint worker_i, size_t value, uint index = 0);
void add_thread_work_item(uint worker_i, size_t value, uint index = 0);
void set_or_add_thread_work_item(uint worker_i, size_t value, uint index = 0);
+ size_t get_thread_work_item(uint worker_i, uint index = 0);
WorkerDataArray<size_t>* thread_work_items(uint index = 0) const {
assert(index < MaxThreadWorkItems, "Tried to access thread work item %u max %u", index, MaxThreadWorkItems);
diff --git a/src/hotspot/share/gc/shared/workerDataArray.inline.hpp b/src/hotspot/share/gc/shared/workerDataArray.inline.hpp
index 92a57e325b2..c0876e2ad09 100644
--- a/src/hotspot/share/gc/shared/workerDataArray.inline.hpp
+++ b/src/hotspot/share/gc/shared/workerDataArray.inline.hpp
@@ -91,6 +91,13 @@ void WorkerDataArray::set_or_add_thread_work_item(uint worker_i, size_t value
}
}
+template <typename T>
+size_t WorkerDataArray<T>::get_thread_work_item(uint worker_i, uint index) {
+ assert(index < MaxThreadWorkItems, "Tried to access thread work item %u (max %u)", index, MaxThreadWorkItems);
+ assert(_thread_work_items[index] != NULL, "No sub count");
+ return _thread_work_items[index]->get(worker_i);
+}
+
template <typename T>
void WorkerDataArray<T>::add(uint worker_i, T value) {
assert(worker_i < _length, "Worker %d is greater than max: %d", worker_i, _length);
diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
index 647a2407d67..ed4ee0534c3 100644
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
@@ -46,9 +46,9 @@ void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
bs->gen_pre_barrier_stub(ce, this);
}
-void ShenandoahWriteBarrierStub::emit_code(LIR_Assembler* ce) {
+void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
- bs->gen_write_barrier_stub(ce, this);
+ bs->gen_load_reference_barrier_stub(ce, this);
}
void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
@@ -105,40 +105,16 @@ void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info,
__ branch_destination(slow->continuation());
}
-LIR_Opr ShenandoahBarrierSetC1::read_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
- if (UseShenandoahGC && ShenandoahReadBarrier) {
- return read_barrier_impl(gen, obj, info, need_null_check);
+LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
+ if (ShenandoahLoadRefBarrier) {
+ return load_reference_barrier_impl(gen, obj, info, need_null_check);
} else {
return obj;
}
}
-LIR_Opr ShenandoahBarrierSetC1::read_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
- assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier), "Should be enabled");
- LabelObj* done = new LabelObj();
- LIR_Opr result = gen->new_register(T_OBJECT);
- __ move(obj, result);
- if (need_null_check) {
- __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
- __ branch(lir_cond_equal, T_LONG, done->label());
- }
- LIR_Address* brooks_ptr_address = gen->generate_address(result, ShenandoahBrooksPointer::byte_offset(), T_ADDRESS);
- __ load(brooks_ptr_address, result, info ? new CodeEmitInfo(info) : NULL, lir_patch_none);
-
- __ branch_destination(done->label());
- return result;
-}
-
-LIR_Opr ShenandoahBarrierSetC1::write_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
- if (UseShenandoahGC && ShenandoahWriteBarrier) {
- return write_barrier_impl(gen, obj, info, need_null_check);
- } else {
- return obj;
- }
-}
-
-LIR_Opr ShenandoahBarrierSetC1::write_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
- assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
+LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
+ assert(ShenandoahLoadRefBarrier, "Should be enabled");
obj = ensure_in_register(gen, obj);
assert(obj->is_register(), "must be a register at this point");
@@ -168,7 +144,7 @@ LIR_Opr ShenandoahBarrierSetC1::write_barrier_impl(LIRGenerator* gen, LIR_Opr ob
}
__ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
- CodeStub* slow = new ShenandoahWriteBarrierStub(obj, result, info ? new CodeEmitInfo(info) : NULL, need_null_check);
+ CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, result, info ? new CodeEmitInfo(info) : NULL, need_null_check);
__ branch(lir_cond_notEqual, T_INT, slow);
__ branch_destination(slow->continuation());
@@ -189,58 +165,13 @@ LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr ob
}
LIR_Opr ShenandoahBarrierSetC1::storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) {
- bool need_null_check = (decorators & IS_NOT_NULL) == 0;
if (ShenandoahStoreValEnqueueBarrier) {
- obj = write_barrier_impl(gen, obj, info, need_null_check);
+ obj = ensure_in_register(gen, obj);
pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj);
}
- if (ShenandoahStoreValReadBarrier) {
- obj = read_barrier_impl(gen, obj, info, true /*need_null_check*/);
- }
return obj;
}
-LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
- DecoratorSet decorators = access.decorators();
- bool is_array = (decorators & IS_ARRAY) != 0;
- bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
-
- bool is_write = (decorators & ACCESS_WRITE) != 0;
- bool needs_null_check = (decorators & IS_NOT_NULL) == 0;
-
- LIR_Opr base = access.base().item().result();
- LIR_Opr offset = access.offset().opr();
- LIRGenerator* gen = access.gen();
-
- if (is_write) {
- base = write_barrier(gen, base, access.access_emit_info(), needs_null_check);
- } else {
- base = read_barrier(gen, base, access.access_emit_info(), needs_null_check);
- }
-
- LIR_Opr addr_opr;
- if (is_array) {
- addr_opr = LIR_OprFact::address(gen->emit_array_address(base, offset, access.type()));
- } else if (needs_patching) {
- // we need to patch the offset in the instruction so don't allow
- // generate_address to try to be smart about emitting the -1.
- // Otherwise the patching code won't know how to find the
- // instruction to patch.
- addr_opr = LIR_OprFact::address(new LIR_Address(base, PATCHED_ADDR, access.type()));
- } else {
- addr_opr = LIR_OprFact::address(gen->generate_address(base, offset, 0, 0, access.type()));
- }
-
- if (resolve_in_register) {
- LIR_Opr resolved_addr = gen->new_pointer_register();
- __ leal(addr_opr, resolved_addr);
- resolved_addr = LIR_OprFact::address(new LIR_Address(resolved_addr, access.type()));
- return resolved_addr;
- } else {
- return addr_opr;
- }
-}
-
void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
if (access.is_oop()) {
if (ShenandoahSATBBarrier) {
@@ -252,15 +183,28 @@ void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value)
}
void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
- BarrierSetC1::load_at_resolved(access, result);
+ if (!access.is_oop()) {
+ BarrierSetC1::load_at_resolved(access, result);
+ return;
+ }
+
+ LIRGenerator *gen = access.gen();
+
+ if (ShenandoahLoadRefBarrier) {
+ LIR_Opr tmp = gen->new_register(T_OBJECT);
+ BarrierSetC1::load_at_resolved(access, tmp);
+ tmp = load_reference_barrier(access.gen(), tmp, access.access_emit_info(), true);
+ __ move(tmp, result);
+ } else {
+ BarrierSetC1::load_at_resolved(access, result);
+ }
if (ShenandoahKeepAliveBarrier) {
DecoratorSet decorators = access.decorators();
bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
- LIRGenerator *gen = access.gen();
- if (access.is_oop() && (is_weak || is_phantom || is_anonymous)) {
+ if (is_weak || is_phantom || is_anonymous) {
// Register the value in the referent field with the pre-barrier
LabelObj *Lcont_anonymous;
if (is_anonymous) {
@@ -276,19 +220,6 @@ void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result)
}
}
-LIR_Opr ShenandoahBarrierSetC1::atomic_add_at_resolved(LIRAccess& access, LIRItem& value) {
- return BarrierSetC1::atomic_add_at_resolved(access, value);
-}
-
-LIR_Opr ShenandoahBarrierSetC1::resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj) {
- bool is_write = decorators & ACCESS_WRITE;
- if (is_write) {
- return write_barrier(gen, obj, NULL, (decorators & IS_NOT_NULL) == 0);
- } else {
- return read_barrier(gen, obj, NULL, (decorators & IS_NOT_NULL) == 0);
- }
-}
-
class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
virtual OopMapSet* generate_code(StubAssembler* sasm) {
ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp
index 73e673466ae..dde6910a80d 100644
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp
@@ -85,7 +85,7 @@ public:
#endif // PRODUCT
};
-class ShenandoahWriteBarrierStub: public CodeStub {
+class ShenandoahLoadReferenceBarrierStub: public CodeStub {
friend class ShenandoahBarrierSetC1;
private:
LIR_Opr _obj;
@@ -94,7 +94,7 @@ private:
bool _needs_null_check;
public:
- ShenandoahWriteBarrierStub(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info, bool needs_null_check) :
+ ShenandoahLoadReferenceBarrierStub(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info, bool needs_null_check) :
_obj(obj), _result(result), _info(info), _needs_null_check(needs_null_check)
{
assert(_obj->is_register(), "should be register");
@@ -113,7 +113,7 @@ public:
visitor->do_temp(_result);
}
#ifndef PRODUCT
- virtual void print_name(outputStream* out) const { out->print("ShenandoahWritePreBarrierStub"); }
+ virtual void print_name(outputStream* out) const { out->print("ShenandoahLoadReferenceBarrierStub"); }
#endif // PRODUCT
};
@@ -181,12 +181,10 @@ private:
void pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val);
- LIR_Opr read_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
- LIR_Opr write_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
+ LIR_Opr load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
LIR_Opr storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators);
- LIR_Opr read_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
- LIR_Opr write_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
+ LIR_Opr load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
LIR_Opr ensure_in_register(LIRGenerator* gen, LIR_Opr obj);
@@ -194,7 +192,6 @@ public:
CodeBlob* pre_barrier_c1_runtime_code_blob() { return _pre_barrier_c1_runtime_code_blob; }
protected:
- virtual LIR_Opr resolve_address(LIRAccess& access, bool resolve_in_register);
virtual void store_at_resolved(LIRAccess& access, LIR_Opr value);
virtual void load_at_resolved(LIRAccess& access, LIR_Opr result);
@@ -202,10 +199,8 @@ protected:
virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
- virtual LIR_Opr atomic_add_at_resolved(LIRAccess& access, LIRItem& value);
public:
- virtual LIR_Opr resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj);
virtual void generate_c1_runtime_stubs(BufferBlob* buffer_blob);
};
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
index 5d27bec4aaf..63756c5dbec 100644
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
@@ -43,121 +43,56 @@ ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
}
ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
- : _shenandoah_barriers(new (comp_arena) GrowableArray<ShenandoahWriteBarrierNode*>(comp_arena, 8, 0, NULL)) {
+ : _enqueue_barriers(new (comp_arena) GrowableArray<ShenandoahEnqueueBarrierNode*>(comp_arena, 8, 0, NULL)),
+ _load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, NULL)) {
}
-int ShenandoahBarrierSetC2State::shenandoah_barriers_count() const {
- return _shenandoah_barriers->length();
+int ShenandoahBarrierSetC2State::enqueue_barriers_count() const {
+ return _enqueue_barriers->length();
}
-ShenandoahWriteBarrierNode* ShenandoahBarrierSetC2State::shenandoah_barrier(int idx) const {
- return _shenandoah_barriers->at(idx);
+ShenandoahEnqueueBarrierNode* ShenandoahBarrierSetC2State::enqueue_barrier(int idx) const {
+ return _enqueue_barriers->at(idx);
}
-void ShenandoahBarrierSetC2State::add_shenandoah_barrier(ShenandoahWriteBarrierNode * n) {
- assert(!_shenandoah_barriers->contains(n), "duplicate entry in barrier list");
- _shenandoah_barriers->append(n);
+void ShenandoahBarrierSetC2State::add_enqueue_barrier(ShenandoahEnqueueBarrierNode * n) {
+ assert(!_enqueue_barriers->contains(n), "duplicate entry in barrier list");
+ _enqueue_barriers->append(n);
}
-void ShenandoahBarrierSetC2State::remove_shenandoah_barrier(ShenandoahWriteBarrierNode * n) {
- if (_shenandoah_barriers->contains(n)) {
- _shenandoah_barriers->remove(n);
+void ShenandoahBarrierSetC2State::remove_enqueue_barrier(ShenandoahEnqueueBarrierNode * n) {
+ if (_enqueue_barriers->contains(n)) {
+ _enqueue_barriers->remove(n);
}
}
-#define __ kit->
+int ShenandoahBarrierSetC2State::load_reference_barriers_count() const {
+ return _load_reference_barriers->length();
+}
-Node* ShenandoahBarrierSetC2::shenandoah_read_barrier(GraphKit* kit, Node* obj) const {
- if (ShenandoahReadBarrier) {
- obj = shenandoah_read_barrier_impl(kit, obj, false, true, true);
+ShenandoahLoadReferenceBarrierNode* ShenandoahBarrierSetC2State::load_reference_barrier(int idx) const {
+ return _load_reference_barriers->at(idx);
+}
+
+void ShenandoahBarrierSetC2State::add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n) {
+ assert(!_load_reference_barriers->contains(n), "duplicate entry in barrier list");
+ _load_reference_barriers->append(n);
+}
+
+void ShenandoahBarrierSetC2State::remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n) {
+ if (_load_reference_barriers->contains(n)) {
+ _load_reference_barriers->remove(n);
}
- return obj;
}
Node* ShenandoahBarrierSetC2::shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const {
if (ShenandoahStoreValEnqueueBarrier) {
- obj = shenandoah_write_barrier(kit, obj);
obj = shenandoah_enqueue_barrier(kit, obj);
}
- if (ShenandoahStoreValReadBarrier) {
- obj = shenandoah_read_barrier_impl(kit, obj, true, false, false);
- }
return obj;
}
-Node* ShenandoahBarrierSetC2::shenandoah_read_barrier_impl(GraphKit* kit, Node* obj, bool use_ctrl, bool use_mem, bool allow_fromspace) const {
- const Type* obj_type = obj->bottom_type();
- if (obj_type->higher_equal(TypePtr::NULL_PTR)) {
- return obj;
- }
- const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
- Node* mem = use_mem ? __ memory(adr_type) : __ immutable_memory();
-
- if (! ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, mem, allow_fromspace)) {
- // We know it is null, no barrier needed.
- return obj;
- }
-
- if (obj_type->meet(TypePtr::NULL_PTR) == obj_type->remove_speculative()) {
-
- // We don't know if it's null or not. Need null-check.
- enum { _not_null_path = 1, _null_path, PATH_LIMIT };
- RegionNode* region = new RegionNode(PATH_LIMIT);
- Node* phi = new PhiNode(region, obj_type);
- Node* null_ctrl = __ top();
- Node* not_null_obj = __ null_check_oop(obj, &null_ctrl);
-
- region->init_req(_null_path, null_ctrl);
- phi ->init_req(_null_path, __ zerocon(T_OBJECT));
-
- Node* ctrl = use_ctrl ? __ control() : NULL;
- ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, not_null_obj, allow_fromspace);
- Node* n = __ gvn().transform(rb);
-
- region->init_req(_not_null_path, __ control());
- phi ->init_req(_not_null_path, n);
-
- __ set_control(__ gvn().transform(region));
- __ record_for_igvn(region);
- return __ gvn().transform(phi);
-
- } else {
- // We know it is not null. Simple barrier is sufficient.
- Node* ctrl = use_ctrl ? __ control() : NULL;
- ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, obj, allow_fromspace);
- Node* n = __ gvn().transform(rb);
- __ record_for_igvn(n);
- return n;
- }
-}
-
-Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_helper(GraphKit* kit, Node* obj, const TypePtr* adr_type) const {
- ShenandoahWriteBarrierNode* wb = new ShenandoahWriteBarrierNode(kit->C, kit->control(), kit->memory(adr_type), obj);
- Node* n = __ gvn().transform(wb);
- if (n == wb) { // New barrier needs memory projection.
- Node* proj = __ gvn().transform(new ShenandoahWBMemProjNode(n));
- __ set_memory(proj, adr_type);
- }
- return n;
-}
-
-Node* ShenandoahBarrierSetC2::shenandoah_write_barrier(GraphKit* kit, Node* obj) const {
- if (ShenandoahWriteBarrier) {
- obj = shenandoah_write_barrier_impl(kit, obj);
- }
- return obj;
-}
-
-Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_impl(GraphKit* kit, Node* obj) const {
- if (! ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, NULL, true)) {
- return obj;
- }
- const Type* obj_type = obj->bottom_type();
- const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
- Node* n = shenandoah_write_barrier_helper(kit, obj, adr_type);
- __ record_for_igvn(n);
- return n;
-}
+#define __ kit->
bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
BasicType bt, uint adr_idx) const {
@@ -304,7 +239,7 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
- assert(ShenandoahWriteBarrierNode::is_gc_state_load(ld), "Should match the shape");
+ assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");
// if (!marking)
__ if_then(marking, BoolTest::ne, zero, unlikely); {
@@ -361,7 +296,7 @@ bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {
bool ShenandoahBarrierSetC2::is_shenandoah_wb_call(Node* call) {
return call->is_CallLeaf() &&
- call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT);
+ call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT);
}
bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform *phase, Node* n) {
@@ -549,88 +484,6 @@ const TypeFunc* ShenandoahBarrierSetC2::shenandoah_write_barrier_Type() {
return TypeFunc::make(domain, range);
}
-void ShenandoahBarrierSetC2::resolve_address(C2Access& access) const {
- const TypePtr* adr_type = access.addr().type();
-
- if ((access.decorators() & IN_NATIVE) == 0 && (adr_type->isa_instptr() || adr_type->isa_aryptr())) {
- int off = adr_type->is_ptr()->offset();
- int base_off = adr_type->isa_instptr() ? instanceOopDesc::base_offset_in_bytes() :
- arrayOopDesc::base_offset_in_bytes(adr_type->is_aryptr()->elem()->array_element_basic_type());
- assert(off != Type::OffsetTop, "unexpected offset");
- if (off == Type::OffsetBot || off >= base_off) {
- DecoratorSet decorators = access.decorators();
- bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
- GraphKit* kit = NULL;
- if (access.is_parse_access()) {
- C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
- kit = parse_access.kit();
- }
- Node* adr = access.addr().node();
- assert(adr->is_AddP(), "unexpected address shape");
- Node* base = adr->in(AddPNode::Base);
-
- if (is_write) {
- if (kit != NULL) {
- base = shenandoah_write_barrier(kit, base);
- } else {
- assert(access.is_opt_access(), "either parse or opt access");
- assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for clone");
- }
- } else {
- if (adr_type->isa_instptr()) {
- Compile* C = access.gvn().C;
- ciField* field = C->alias_type(adr_type)->field();
-
- // Insert read barrier for Shenandoah.
- if (field != NULL &&
- ((ShenandoahOptimizeStaticFinals && field->is_static() && field->is_final()) ||
- (ShenandoahOptimizeInstanceFinals && !field->is_static() && field->is_final()) ||
- (ShenandoahOptimizeStableFinals && field->is_stable()))) {
- // Skip the barrier for special fields
- } else {
- if (kit != NULL) {
- base = shenandoah_read_barrier(kit, base);
- } else {
- assert(access.is_opt_access(), "either parse or opt access");
- assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for arraycopy");
- }
- }
- } else {
- if (kit != NULL) {
- base = shenandoah_read_barrier(kit, base);
- } else {
- assert(access.is_opt_access(), "either parse or opt access");
- assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for arraycopy");
- }
- }
- }
- if (base != adr->in(AddPNode::Base)) {
- assert(kit != NULL, "no barrier should have been added");
-
- Node* address = adr->in(AddPNode::Address);
-
- if (address->is_AddP()) {
- assert(address->in(AddPNode::Base) == adr->in(AddPNode::Base), "unexpected address shape");
- assert(!address->in(AddPNode::Address)->is_AddP(), "unexpected address shape");
- assert(address->in(AddPNode::Address) == adr->in(AddPNode::Base), "unexpected address shape");
- address = address->clone();
- address->set_req(AddPNode::Base, base);
- address->set_req(AddPNode::Address, base);
- address = kit->gvn().transform(address);
- } else {
- assert(address == adr->in(AddPNode::Base), "unexpected address shape");
- address = base;
- }
- adr = adr->clone();
- adr->set_req(AddPNode::Base, base);
- adr->set_req(AddPNode::Address, address);
- adr = kit->gvn().transform(adr);
- access.addr().set_node(adr);
- }
- }
- }
-}
-
Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
DecoratorSet decorators = access.decorators();
@@ -662,44 +515,8 @@ Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue&
PhaseGVN& gvn = opt_access.gvn();
MergeMemNode* mm = opt_access.mem();
- if (ShenandoahStoreValReadBarrier) {
- RegionNode* region = new RegionNode(3);
- const Type* v_t = gvn.type(val.node());
- Node* phi = new PhiNode(region, v_t->isa_oopptr() ? v_t->is_oopptr()->cast_to_nonconst() : v_t);
- Node* cmp = gvn.transform(new CmpPNode(val.node(), gvn.zerocon(T_OBJECT)));
- Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::ne));
- IfNode* iff = new IfNode(opt_access.ctl(), bol, PROB_LIKELY_MAG(3), COUNT_UNKNOWN);
-
- gvn.transform(iff);
- if (gvn.is_IterGVN()) {
- gvn.is_IterGVN()->_worklist.push(iff);
- } else {
- gvn.record_for_igvn(iff);
- }
-
- Node* null_true = gvn.transform(new IfFalseNode(iff));
- Node* null_false = gvn.transform(new IfTrueNode(iff));
- region->init_req(1, null_true);
- region->init_req(2, null_false);
- phi->init_req(1, gvn.zerocon(T_OBJECT));
- Node* cast = new CastPPNode(val.node(), gvn.type(val.node())->join_speculative(TypePtr::NOTNULL));
- cast->set_req(0, null_false);
- cast = gvn.transform(cast);
- Node* rb = gvn.transform(new ShenandoahReadBarrierNode(null_false, gvn.C->immutable_memory(), cast, false));
- phi->init_req(2, rb);
- opt_access.set_ctl(gvn.transform(region));
- val.set_node(gvn.transform(phi));
- }
if (ShenandoahStoreValEnqueueBarrier) {
- const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(gvn.type(val.node()));
- int alias = gvn.C->get_alias_index(adr_type);
- Node* wb = new ShenandoahWriteBarrierNode(gvn.C, opt_access.ctl(), mm->memory_at(alias), val.node());
- Node* wb_transformed = gvn.transform(wb);
- Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(wb_transformed));
- if (wb_transformed == wb) {
- Node* proj = gvn.transform(new ShenandoahWBMemProjNode(wb));
- mm->set_memory_at(alias, proj);
- }
+ Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(val.node()));
val.set_node(enqueue);
}
}
@@ -724,6 +541,17 @@ Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val
Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
Node* load = BarrierSetC2::load_at_resolved(access, val_type);
+ if (access.is_oop()) {
+ if (ShenandoahLoadRefBarrier) {
+ load = new ShenandoahLoadReferenceBarrierNode(NULL, load);
+ if (access.is_parse_access()) {
+ load = static_cast<C2ParseAccess&>(access).kit()->gvn().transform(load);
+ } else {
+ load = static_cast<C2OptAccess&>(access).gvn().transform(load);
+ }
+ }
+ }
+
// If we are reading the value of the referent field of a Reference
// object (either by using Unsafe directly or through reflection)
// then, if SATB is enabled, we need to record the referent in an
@@ -797,9 +625,10 @@ Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess
#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
- return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
+ load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
}
#endif
+ load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, load_store));
return load_store;
}
return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
@@ -867,6 +696,7 @@ Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& acces
}
Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
if (access.is_oop()) {
+ result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, result));
shenandoah_write_barrier_pre(kit, false /* do_load */,
NULL, NULL, max_juint, NULL, NULL,
result /* pre_val */, T_OBJECT);
@@ -876,19 +706,9 @@ Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& acces
void ShenandoahBarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
assert(!src->is_AddP(), "unexpected input");
- src = shenandoah_read_barrier(kit, src);
BarrierSetC2::clone(kit, src, dst, size, is_array);
}
-Node* ShenandoahBarrierSetC2::resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const {
- bool is_write = decorators & ACCESS_WRITE;
- if (is_write) {
- return shenandoah_write_barrier(kit, n);
- } else {
- return shenandoah_read_barrier(kit, n);
- }
-}
-
Node* ShenandoahBarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
Node*& i_o, Node*& needgc_ctrl,
Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
@@ -915,6 +735,7 @@ Node* ShenandoahBarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl,
// Support for GC barriers emitted during parsing
bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
+ if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) return true;
if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
return false;
}
@@ -929,26 +750,30 @@ bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
}
Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
- return ShenandoahBarrierNode::skip_through_barrier(c);
+ if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
+ return c->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
+ }
+ if (c->Opcode() == Op_ShenandoahEnqueueBarrier) {
+ c = c->in(1);
+ }
+ return c;
}
bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
- return !ShenandoahWriteBarrierNode::expand(C, igvn);
+ return !ShenandoahBarrierC2Support::expand(C, igvn);
}
bool ShenandoahBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const {
if (mode == LoopOptsShenandoahExpand) {
assert(UseShenandoahGC, "only for shenandoah");
- ShenandoahWriteBarrierNode::pin_and_expand(phase);
+ ShenandoahBarrierC2Support::pin_and_expand(phase);
return true;
} else if (mode == LoopOptsShenandoahPostExpand) {
assert(UseShenandoahGC, "only for shenandoah");
visited.Clear();
- ShenandoahWriteBarrierNode::optimize_after_expansion(visited, nstack, worklist, phase);
+ ShenandoahBarrierC2Support::optimize_after_expansion(visited, nstack, worklist, phase);
return true;
}
- GrowableArray<MemoryGraphFixer*> memory_graph_fixers;
- ShenandoahWriteBarrierNode::optimize_before_expansion(phase, memory_graph_fixers, false);
return false;
}
@@ -957,7 +782,6 @@ bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_couple
if (!is_oop) {
return false;
}
-
if (tightly_coupled_alloc) {
if (phase == Optimization) {
return false;
@@ -985,7 +809,7 @@ bool ShenandoahBarrierSetC2::clone_needs_postbarrier(ArrayCopyNode *ac, PhaseIte
}
} else {
return true;
- }
+ }
} else if (src_type->isa_aryptr()) {
BasicType src_elem = src_type->klass()->as_array_klass()->element_type()->basic_type();
if (src_elem == T_OBJECT || src_elem == T_ARRAY) {
@@ -1038,14 +862,20 @@ void ShenandoahBarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node*
// Support for macro expanded GC barriers
void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
- if (node->Opcode() == Op_ShenandoahWriteBarrier) {
- state()->add_shenandoah_barrier((ShenandoahWriteBarrierNode*) node);
+ if (node->Opcode() == Op_ShenandoahEnqueueBarrier) {
+ state()->add_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node);
+ }
+ if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
+ state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
}
}
void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
- if (node->Opcode() == Op_ShenandoahWriteBarrier) {
- state()->remove_shenandoah_barrier((ShenandoahWriteBarrierNode*) node);
+ if (node->Opcode() == Op_ShenandoahEnqueueBarrier) {
+ state()->remove_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node);
+ }
+ if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
+ state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
}
}
@@ -1091,19 +921,18 @@ void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &use
}
}
}
- for (int i = state()->shenandoah_barriers_count()-1; i >= 0; i--) {
- ShenandoahWriteBarrierNode* n = state()->shenandoah_barrier(i);
+ for (int i = state()->enqueue_barriers_count() - 1; i >= 0; i--) {
+ ShenandoahEnqueueBarrierNode* n = state()->enqueue_barrier(i);
if (!useful.member(n)) {
- state()->remove_shenandoah_barrier(n);
+ state()->remove_enqueue_barrier(n);
+ }
+ }
+ for (int i = state()->load_reference_barriers_count() - 1; i >= 0; i--) {
+ ShenandoahLoadReferenceBarrierNode* n = state()->load_reference_barrier(i);
+ if (!useful.member(n)) {
+ state()->remove_load_reference_barrier(n);
}
}
-
-}
-
-bool ShenandoahBarrierSetC2::has_special_unique_user(const Node* node) const {
- assert(node->outcnt() == 1, "match only for unique out");
- Node* n = node->unique_out();
- return node->Opcode() == Op_ShenandoahWriteBarrier && n->Opcode() == Op_ShenandoahWBMemProj;
}
void ShenandoahBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {}
@@ -1123,7 +952,7 @@ bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const {
#ifdef ASSERT
void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
if (ShenandoahVerifyOptoBarriers && phase == BarrierSetC2::BeforeExpand) {
- ShenandoahBarrierNode::verify(Compile::current()->root());
+ ShenandoahBarrierC2Support::verify(Compile::current()->root());
} else if (phase == BarrierSetC2::BeforeCodeGen) {
// Verify G1 pre-barriers
const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());
@@ -1229,7 +1058,7 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
}
} else if (can_reshape &&
n->Opcode() == Op_If &&
- ShenandoahWriteBarrierNode::is_heap_stable_test(n) &&
+ ShenandoahBarrierC2Support::is_heap_stable_test(n) &&
n->in(0) != NULL) {
Node* dom = n->in(0);
Node* prev_dom = n;
@@ -1237,7 +1066,7 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
int dist = 16;
// Search up the dominator tree for another heap stable test
while (dom->Opcode() != op || // Not same opcode?
- !ShenandoahWriteBarrierNode::is_heap_stable_test(dom) || // Not same input 1?
+ !ShenandoahBarrierC2Support::is_heap_stable_test(dom) || // Not same input 1?
prev_dom->in(0) != dom) { // One path of test does not dominate?
if (dist < 0) return NULL;
@@ -1258,46 +1087,6 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
return NULL;
}
-Node* ShenandoahBarrierSetC2::identity_node(PhaseGVN* phase, Node* n) const {
- if (n->is_Load()) {
- Node *mem = n->in(MemNode::Memory);
- Node *value = n->as_Load()->can_see_stored_value(mem, phase);
- if (value) {
- PhaseIterGVN *igvn = phase->is_IterGVN();
- if (igvn != NULL &&
- value->is_Phi() &&
- value->req() > 2 &&
- value->in(1) != NULL &&
- value->in(1)->is_ShenandoahBarrier()) {
- if (igvn->_worklist.member(value) ||
- igvn->_worklist.member(value->in(0)) ||
- (value->in(0)->in(1) != NULL &&
- value->in(0)->in(1)->is_IfProj() &&
- (igvn->_worklist.member(value->in(0)->in(1)) ||
- (value->in(0)->in(1)->in(0) != NULL &&
- igvn->_worklist.member(value->in(0)->in(1)->in(0)))))) {
- igvn->_worklist.push(n);
- return n;
- }
- }
- // (This works even when value is a Con, but LoadNode::Value
- // usually runs first, producing the singleton type of the Con.)
- Node *value_no_barrier = step_over_gc_barrier(value->Opcode() == Op_EncodeP ? value->in(1) : value);
- if (value->Opcode() == Op_EncodeP) {
- if (value_no_barrier != value->in(1)) {
- Node *encode = value->clone();
- encode->set_req(1, value_no_barrier);
- encode = phase->transform(encode);
- return encode;
- }
- } else {
- return value_no_barrier;
- }
- }
- }
- return n;
-}
-
bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* u = n->fast_out(i);
@@ -1308,20 +1097,6 @@ bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
return n->outcnt() > 0;
}
-bool ShenandoahBarrierSetC2::flatten_gc_alias_type(const TypePtr*& adr_type) const {
- int offset = adr_type->offset();
- if (offset == ShenandoahBrooksPointer::byte_offset()) {
- if (adr_type->isa_aryptr()) {
- adr_type = TypeAryPtr::make(adr_type->ptr(), adr_type->isa_aryptr()->ary(), adr_type->isa_aryptr()->klass(), false, offset);
- } else if (adr_type->isa_instptr()) {
- adr_type = TypeInstPtr::make(adr_type->ptr(), ciEnv::current()->Object_klass(), false, NULL, offset);
- }
- return true;
- } else {
- return false;
- }
-}
-
bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
switch (opcode) {
case Op_CallLeaf:
@@ -1356,9 +1131,7 @@ bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, ui
}
#endif
return true;
- case Op_ShenandoahReadBarrier:
- return true;
- case Op_ShenandoahWriteBarrier:
+ case Op_ShenandoahLoadReferenceBarrier:
assert(false, "should have been expanded already");
return true;
default:
@@ -1366,17 +1139,6 @@ bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, ui
}
}
-#ifdef ASSERT
-bool ShenandoahBarrierSetC2::verify_gc_alias_type(const TypePtr* adr_type, int offset) const {
- if (offset == ShenandoahBrooksPointer::byte_offset() &&
- (adr_type->base() == Type::AryPtr || adr_type->base() == Type::OopPtr)) {
- return true;
- } else {
- return false;
- }
-}
-#endif
-
bool ShenandoahBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
switch (opcode) {
case Op_ShenandoahCompareAndExchangeP:
@@ -1412,15 +1174,12 @@ bool ShenandoahBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph
}
return false;
}
- case Op_ShenandoahReadBarrier:
- case Op_ShenandoahWriteBarrier:
- // Barriers 'pass through' its arguments. I.e. what goes in, comes out.
- // It doesn't escape.
- conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahBarrierNode::ValueIn), delayed_worklist);
- break;
case Op_ShenandoahEnqueueBarrier:
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
break;
+ case Op_ShenandoahLoadReferenceBarrier:
+ conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist);
+ return true;
default:
// Nothing
break;
@@ -1441,15 +1200,12 @@ bool ShenandoahBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph,
case Op_ShenandoahWeakCompareAndSwapP:
case Op_ShenandoahWeakCompareAndSwapN:
return conn_graph->add_final_edges_unsafe_access(n, opcode);
- case Op_ShenandoahReadBarrier:
- case Op_ShenandoahWriteBarrier:
- // Barriers 'pass through' its arguments. I.e. what goes in, comes out.
- // It doesn't escape.
- conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahBarrierNode::ValueIn), NULL);
- return true;
case Op_ShenandoahEnqueueBarrier:
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
return true;
+ case Op_ShenandoahLoadReferenceBarrier:
+ conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), NULL);
+ return true;
default:
// Nothing
break;
@@ -1464,21 +1220,7 @@ bool ShenandoahBarrierSetC2::escape_has_out_with_unsafe_object(Node* n) const {
}
bool ShenandoahBarrierSetC2::escape_is_barrier_node(Node* n) const {
- return n->is_ShenandoahBarrier();
-}
-
-bool ShenandoahBarrierSetC2::matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const {
- switch (opcode) {
- case Op_ShenandoahReadBarrier:
- if (n->in(ShenandoahBarrierNode::ValueIn)->is_DecodeNarrowPtr()) {
- matcher->set_shared(n->in(ShenandoahBarrierNode::ValueIn)->in(1));
- }
- matcher->set_shared(n);
- return true;
- default:
- break;
- }
- return false;
+ return n->Opcode() == Op_ShenandoahLoadReferenceBarrier;
}
bool ShenandoahBarrierSetC2::matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const {
@@ -1510,62 +1252,3 @@ bool ShenandoahBarrierSetC2::matcher_is_store_load_barrier(Node* x, uint xop) co
xop == Op_ShenandoahCompareAndSwapN ||
xop == Op_ShenandoahCompareAndSwapP;
}
-
-void ShenandoahBarrierSetC2::igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const {
- if (use->is_ShenandoahBarrier()) {
- for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
- Node* u = use->fast_out(i2);
- Node* cmp = use->find_out_with(Op_CmpP);
- if (u->Opcode() == Op_CmpP) {
- igvn->_worklist.push(cmp);
- }
- }
- }
-}
-
-void ShenandoahBarrierSetC2::ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const {
- if (use->is_ShenandoahBarrier()) {
- for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
- Node* p = use->fast_out(i2);
- if (p->Opcode() == Op_AddP) {
- for (DUIterator_Fast i3max, i3 = p->fast_outs(i3max); i3 < i3max; i3++) {
- Node* q = p->fast_out(i3);
- if (q->is_Load()) {
- if(q->bottom_type() != ccp->type(q)) {
- worklist.push(q);
- }
- }
- }
- }
- }
- }
-}
-
-Node* ShenandoahBarrierSetC2::split_if_pre(PhaseIdealLoop* phase, Node* n) const {
- if (n->Opcode() == Op_ShenandoahReadBarrier) {
- ((ShenandoahReadBarrierNode*)n)->try_move(phase);
- } else if (n->Opcode() == Op_ShenandoahWriteBarrier) {
- return ((ShenandoahWriteBarrierNode*)n)->try_split_thru_phi(phase);
- }
-
- return NULL;
-}
-
-bool ShenandoahBarrierSetC2::build_loop_late_post(PhaseIdealLoop* phase, Node* n) const {
- return ShenandoahBarrierNode::build_loop_late_post(phase, n);
-}
-
-bool ShenandoahBarrierSetC2::sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const {
- if (n->is_ShenandoahBarrier()) {
- return x->as_ShenandoahBarrier()->sink_node(phase, x_ctrl, n_ctrl);
- }
- if (n->is_MergeMem()) {
- // PhaseIdealLoop::split_if_with_blocks_post() would:
- // _igvn._worklist.yank(x);
- // which sometimes causes chains of MergeMem which some of
- // shenandoah specific code doesn't support
- phase->register_new_node(x, x_ctrl);
- return true;
- }
- return false;
-}
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp
index b2ed04dc43c..aee5687c1aa 100644
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp
@@ -30,14 +30,21 @@
class ShenandoahBarrierSetC2State : public ResourceObj {
private:
- GrowableArray<ShenandoahWriteBarrierNode*>* _shenandoah_barriers;
+ GrowableArray<ShenandoahEnqueueBarrierNode*>* _enqueue_barriers;
+ GrowableArray<ShenandoahLoadReferenceBarrierNode*>* _load_reference_barriers;
public:
ShenandoahBarrierSetC2State(Arena* comp_arena);
- int shenandoah_barriers_count() const;
- ShenandoahWriteBarrierNode* shenandoah_barrier(int idx) const;
- void add_shenandoah_barrier(ShenandoahWriteBarrierNode * n);
- void remove_shenandoah_barrier(ShenandoahWriteBarrierNode * n);
+
+ int enqueue_barriers_count() const;
+ ShenandoahEnqueueBarrierNode* enqueue_barrier(int idx) const;
+ void add_enqueue_barrier(ShenandoahEnqueueBarrierNode* n);
+ void remove_enqueue_barrier(ShenandoahEnqueueBarrierNode * n);
+
+ int load_reference_barriers_count() const;
+ ShenandoahLoadReferenceBarrierNode* load_reference_barrier(int idx) const;
+ void add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n);
+ void remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n);
};
class ShenandoahBarrierSetC2 : public BarrierSetC2 {
@@ -66,12 +73,7 @@ private:
BasicType bt) const;
Node* shenandoah_enqueue_barrier(GraphKit* kit, Node* val) const;
- Node* shenandoah_read_barrier(GraphKit* kit, Node* obj) const;
Node* shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const;
- Node* shenandoah_write_barrier(GraphKit* kit, Node* obj) const;
- Node* shenandoah_read_barrier_impl(GraphKit* kit, Node* obj, bool use_ctrl, bool use_mem, bool allow_fromspace) const;
- Node* shenandoah_write_barrier_impl(GraphKit* kit, Node* obj) const;
- Node* shenandoah_write_barrier_helper(GraphKit* kit, Node* obj, const TypePtr* adr_type) const;
void insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
Node* pre_val, bool need_mem_bar) const;
@@ -79,7 +81,6 @@ private:
static bool clone_needs_postbarrier(ArrayCopyNode *ac, PhaseIterGVN& igvn);
protected:
- virtual void resolve_address(C2Access& access) const;
virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const;
virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
@@ -102,12 +103,11 @@ public:
static const TypeFunc* write_ref_field_pre_entry_Type();
static const TypeFunc* shenandoah_clone_barrier_Type();
static const TypeFunc* shenandoah_write_barrier_Type();
+ virtual bool has_load_barriers() const { return true; }
// This is the entry-point for the backend to perform accesses through the Access API.
virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const;
- virtual Node* resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const;
-
virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
Node*& i_o, Node*& needgc_ctrl,
Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
@@ -144,13 +144,7 @@ public:
virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const;
#endif
- virtual bool flatten_gc_alias_type(const TypePtr*& adr_type) const;
-#ifdef ASSERT
- virtual bool verify_gc_alias_type(const TypePtr* adr_type, int offset) const;
-#endif
-
virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const;
- virtual Node* identity_node(PhaseGVN* phase, Node* n) const;
virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const;
virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const;
@@ -158,17 +152,8 @@ public:
virtual bool escape_has_out_with_unsafe_object(Node* n) const;
virtual bool escape_is_barrier_node(Node* n) const;
- virtual bool matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const;
virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const;
virtual bool matcher_is_store_load_barrier(Node* x, uint xop) const;
-
- virtual void igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const;
- virtual void ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const;
-
- virtual bool has_special_unique_user(const Node* node) const;
- virtual Node* split_if_pre(PhaseIdealLoop* phase, Node* n) const;
- virtual bool build_loop_late_post(PhaseIdealLoop* phase, Node* n) const;
- virtual bool sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const;
};
#endif // SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
index 709209859bf..65d0035ef72 100644
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
@@ -41,383 +41,28 @@
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
-Node* ShenandoahBarrierNode::skip_through_barrier(Node* n) {
- if (n == NULL) {
- return NULL;
- }
- if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
- n = n->in(1);
- }
-
- if (n->is_ShenandoahBarrier()) {
- return n->in(ValueIn);
- } else if (n->is_Phi() &&
- n->req() == 3 &&
- n->in(1) != NULL &&
- n->in(1)->is_ShenandoahBarrier() &&
- n->in(2) != NULL &&
- n->in(2)->bottom_type() == TypePtr::NULL_PTR &&
- n->in(0) != NULL &&
- n->in(0)->in(1) != NULL &&
- n->in(0)->in(1)->is_IfProj() &&
- n->in(0)->in(2) != NULL &&
- n->in(0)->in(2)->is_IfProj() &&
- n->in(0)->in(1)->in(0) != NULL &&
- n->in(0)->in(1)->in(0) == n->in(0)->in(2)->in(0) &&
- n->in(1)->in(ValueIn)->Opcode() == Op_CastPP) {
- Node* iff = n->in(0)->in(1)->in(0);
- Node* res = n->in(1)->in(ValueIn)->in(1);
- if (iff->is_If() &&
- iff->in(1) != NULL &&
- iff->in(1)->is_Bool() &&
- iff->in(1)->as_Bool()->_test._test == BoolTest::ne &&
- iff->in(1)->in(1) != NULL &&
- iff->in(1)->in(1)->Opcode() == Op_CmpP &&
- iff->in(1)->in(1)->in(1) != NULL &&
- iff->in(1)->in(1)->in(1) == res &&
- iff->in(1)->in(1)->in(2) != NULL &&
- iff->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
- return res;
- }
- }
- return n;
-}
-
-bool ShenandoahBarrierNode::needs_barrier(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace) {
- Unique_Node_List visited;
- return needs_barrier_impl(phase, orig, n, rb_mem, allow_fromspace, visited);
-}
-
-bool ShenandoahBarrierNode::needs_barrier_impl(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace, Unique_Node_List &visited) {
- if (visited.member(n)) {
- return false; // Been there.
- }
- visited.push(n);
-
- if (n->is_Allocate()) {
- return false;
- }
-
- if (n->is_Call()) {
- return true;
- }
-
- const Type* type = phase->type(n);
- if (type == Type::TOP) {
- return false;
- }
- if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
- return false;
- }
- if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
- return false;
- }
-
- if (ShenandoahOptimizeStableFinals) {
- const TypeAryPtr* ary = type->isa_aryptr();
- if (ary && ary->is_stable() && allow_fromspace) {
- return false;
- }
- }
-
- if (n->is_CheckCastPP() || n->is_ConstraintCast() || n->Opcode() == Op_ShenandoahEnqueueBarrier) {
- return needs_barrier_impl(phase, orig, n->in(1), rb_mem, allow_fromspace, visited);
- }
- if (n->is_Parm()) {
- return true;
- }
- if (n->is_Proj()) {
- return needs_barrier_impl(phase, orig, n->in(0), rb_mem, allow_fromspace, visited);
- }
-
- if (n->Opcode() == Op_ShenandoahWBMemProj) {
- return needs_barrier_impl(phase, orig, n->in(ShenandoahWBMemProjNode::WriteBarrier), rb_mem, allow_fromspace, visited);
- }
- if (n->is_Phi()) {
- bool need_barrier = false;
- for (uint i = 1; i < n->req() && ! need_barrier; i++) {
- Node* input = n->in(i);
- if (input == NULL) {
- need_barrier = true; // Phi not complete yet?
- } else if (needs_barrier_impl(phase, orig, input, rb_mem, allow_fromspace, visited)) {
- need_barrier = true;
- }
- }
- return need_barrier;
- }
- if (n->is_CMove()) {
- return needs_barrier_impl(phase, orig, n->in(CMoveNode::IfFalse), rb_mem, allow_fromspace, visited) ||
- needs_barrier_impl(phase, orig, n->in(CMoveNode::IfTrue ), rb_mem, allow_fromspace, visited);
- }
- if (n->Opcode() == Op_CreateEx) {
- return true;
- }
- if (n->Opcode() == Op_ShenandoahWriteBarrier) {
- return false;
- }
- if (n->Opcode() == Op_ShenandoahReadBarrier) {
- if (rb_mem == n->in(Memory)) {
- return false;
- } else {
- return true;
- }
- }
-
- if (n->Opcode() == Op_LoadP ||
- n->Opcode() == Op_LoadN ||
- n->Opcode() == Op_GetAndSetP ||
- n->Opcode() == Op_CompareAndExchangeP ||
- n->Opcode() == Op_ShenandoahCompareAndExchangeP ||
- n->Opcode() == Op_GetAndSetN ||
- n->Opcode() == Op_CompareAndExchangeN ||
- n->Opcode() == Op_ShenandoahCompareAndExchangeN) {
- return true;
- }
- if (n->Opcode() == Op_DecodeN ||
- n->Opcode() == Op_EncodeP) {
- return needs_barrier_impl(phase, orig, n->in(1), rb_mem, allow_fromspace, visited);
- }
-
-#ifdef ASSERT
- tty->print("need barrier on?: "); n->dump();
- ShouldNotReachHere();
-#endif
- return true;
-}
-
-bool ShenandoahReadBarrierNode::dominates_memory_rb_impl(PhaseGVN* phase,
- Node* b1,
- Node* b2,
- Node* current,
- bool linear) {
- ResourceMark rm;
- VectorSet visited(Thread::current()->resource_area());
- Node_Stack phis(0);
-
- for(int i = 0; i < 10; i++) {
- if (current == NULL) {
- return false;
- } else if (visited.test_set(current->_idx) || current->is_top() || current == b1) {
- current = NULL;
- while (phis.is_nonempty() && current == NULL) {
- uint idx = phis.index();
- Node* phi = phis.node();
- if (idx >= phi->req()) {
- phis.pop();
- } else {
- current = phi->in(idx);
- phis.set_index(idx+1);
- }
- }
- if (current == NULL) {
- return true;
- }
- } else if (current == phase->C->immutable_memory()) {
- return false;
- } else if (current->isa_Phi()) {
- if (!linear) {
+bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
+ ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
+ if ((state->enqueue_barriers_count() +
+ state->load_reference_barriers_count()) > 0) {
+ bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
+ C->clear_major_progress();
+ PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
+ if (C->failing()) return false;
+ PhaseIdealLoop::verify(igvn);
+ DEBUG_ONLY(verify_raw_mem(C->root());)
+ if (attempt_more_loopopts) {
+ C->set_major_progress();
+ if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
return false;
}
- phis.push(current, 2);
- current = current->in(1);
- } else if (current->Opcode() == Op_ShenandoahWriteBarrier) {
- const Type* in_type = current->bottom_type();
- const Type* this_type = b2->bottom_type();
- if (is_independent(in_type, this_type)) {
- current = current->in(Memory);
- } else {
- return false;
- }
- } else if (current->Opcode() == Op_ShenandoahWBMemProj) {
- current = current->in(ShenandoahWBMemProjNode::WriteBarrier);
- } else if (current->is_Proj()) {
- current = current->in(0);
- } else if (current->is_Call()) {
- return false; // TODO: Maybe improve by looking at the call's memory effects?
- } else if (current->is_MemBar()) {
- return false; // TODO: Do we need to stop at *any* membar?
- } else if (current->is_MergeMem()) {
- const TypePtr* adr_type = brooks_pointer_type(phase->type(b2));
- uint alias_idx = phase->C->get_alias_index(adr_type);
- current = current->as_MergeMem()->memory_at(alias_idx);
- } else {
-#ifdef ASSERT
- current->dump();
-#endif
- ShouldNotReachHere();
- return false;
- }
- }
- return false;
-}
-
-bool ShenandoahReadBarrierNode::is_independent(Node* mem) {
- if (mem->is_Phi() || mem->is_Proj() || mem->is_MergeMem()) {
- return true;
- } else if (mem->Opcode() == Op_ShenandoahWBMemProj) {
- return true;
- } else if (mem->Opcode() == Op_ShenandoahWriteBarrier) {
- const Type* mem_type = mem->bottom_type();
- const Type* this_type = bottom_type();
- if (is_independent(mem_type, this_type)) {
- return true;
- } else {
- return false;
- }
- } else if (mem->is_Call() || mem->is_MemBar()) {
- return false;
- }
-#ifdef ASSERT
- mem->dump();
-#endif
- ShouldNotReachHere();
- return true;
-}
-
-bool ShenandoahReadBarrierNode::dominates_memory_rb(PhaseGVN* phase, Node* b1, Node* b2, bool linear) {
- return dominates_memory_rb_impl(phase, b1->in(Memory), b2, b2->in(Memory), linear);
-}
-
-bool ShenandoahReadBarrierNode::is_independent(const Type* in_type, const Type* this_type) {
- assert(in_type->isa_oopptr(), "expect oop ptr");
- assert(this_type->isa_oopptr(), "expect oop ptr");
-
- ciKlass* in_kls = in_type->is_oopptr()->klass();
- ciKlass* this_kls = this_type->is_oopptr()->klass();
- if (in_kls != NULL && this_kls != NULL &&
- in_kls->is_loaded() && this_kls->is_loaded() &&
- (!in_kls->is_subclass_of(this_kls)) &&
- (!this_kls->is_subclass_of(in_kls))) {
- return true;
- }
- return false;
-}
-
-Node* ShenandoahReadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
- if (! can_reshape) {
- return NULL;
- }
-
- if (in(Memory) == phase->C->immutable_memory()) return NULL;
-
- // If memory input is a MergeMem, take the appropriate slice out of it.
- Node* mem_in = in(Memory);
- if (mem_in->isa_MergeMem()) {
- const TypePtr* adr_type = brooks_pointer_type(bottom_type());
- uint alias_idx = phase->C->get_alias_index(adr_type);
- mem_in = mem_in->as_MergeMem()->memory_at(alias_idx);
- set_req(Memory, mem_in);
- return this;
- }
-
- Node* input = in(Memory);
- if (input->Opcode() == Op_ShenandoahWBMemProj) {
- ResourceMark rm;
- VectorSet seen(Thread::current()->resource_area());
- Node* n = in(Memory);
- while (n->Opcode() == Op_ShenandoahWBMemProj &&
- n->in(ShenandoahWBMemProjNode::WriteBarrier) != NULL &&
- n->in(ShenandoahWBMemProjNode::WriteBarrier)->Opcode() == Op_ShenandoahWriteBarrier &&
- n->in(ShenandoahWBMemProjNode::WriteBarrier)->in(Memory) != NULL) {
- if (seen.test_set(n->_idx)) {
- return NULL; // loop
- }
- n = n->in(ShenandoahWBMemProjNode::WriteBarrier)->in(Memory);
- }
-
- Node* wb = input->in(ShenandoahWBMemProjNode::WriteBarrier);
- const Type* in_type = phase->type(wb);
- // is_top() test not sufficient here: we can come here after CCP
- // in a dead branch of the graph that has not yet been removed.
- if (in_type == Type::TOP) return NULL; // Dead path.
- assert(wb->Opcode() == Op_ShenandoahWriteBarrier, "expect write barrier");
- if (is_independent(in_type, _type)) {
- phase->igvn_rehash_node_delayed(wb);
- set_req(Memory, wb->in(Memory));
- if (can_reshape && input->outcnt() == 0) {
- phase->is_IterGVN()->_worklist.push(input);
- }
- return this;
- }
- }
- return NULL;
-}
-
-ShenandoahWriteBarrierNode::ShenandoahWriteBarrierNode(Compile* C, Node* ctrl, Node* mem, Node* obj)
- : ShenandoahBarrierNode(ctrl, mem, obj, false) {
- assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
- ShenandoahBarrierSetC2::bsc2()->state()->add_shenandoah_barrier(this);
-}
-
-Node* ShenandoahWriteBarrierNode::Identity(PhaseGVN* phase) {
- assert(in(0) != NULL, "should have control");
- PhaseIterGVN* igvn = phase->is_IterGVN();
- Node* mem_in = in(Memory);
- Node* mem_proj = NULL;
-
- if (igvn != NULL) {
- mem_proj = find_out_with(Op_ShenandoahWBMemProj);
- if (mem_in == mem_proj) {
- return this;
- }
- }
-
- Node* replacement = Identity_impl(phase);
- if (igvn != NULL) {
- if (replacement != NULL && replacement != this && mem_proj != NULL) {
- igvn->replace_node(mem_proj, mem_in);
- }
- }
- return replacement;
-}
-
-Node* ShenandoahWriteBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
- assert(in(0) != NULL, "should have control");
- if (!can_reshape) {
- return NULL;
- }
-
- Node* mem_in = in(Memory);
-
- if (mem_in->isa_MergeMem()) {
- const TypePtr* adr_type = brooks_pointer_type(bottom_type());
- uint alias_idx = phase->C->get_alias_index(adr_type);
- mem_in = mem_in->as_MergeMem()->memory_at(alias_idx);
- set_req(Memory, mem_in);
- return this;
- }
-
- Node* val = in(ValueIn);
- if (val->is_ShenandoahBarrier()) {
- set_req(ValueIn, val->in(ValueIn));
- return this;
- }
-
- return NULL;
-}
-
-bool ShenandoahWriteBarrierNode::expand(Compile* C, PhaseIterGVN& igvn) {
- if (UseShenandoahGC) {
- if (ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count() > 0 || (!ShenandoahWriteBarrier && ShenandoahStoreValEnqueueBarrier)) {
- bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
C->clear_major_progress();
- PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
- if (C->failing()) return false;
- PhaseIdealLoop::verify(igvn);
- DEBUG_ONLY(ShenandoahBarrierNode::verify_raw_mem(C->root());)
- if (attempt_more_loopopts) {
- C->set_major_progress();
- if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
- return false;
- }
- C->clear_major_progress();
- }
}
}
return true;
}
-bool ShenandoahWriteBarrierNode::is_heap_state_test(Node* iff, int mask) {
+bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) {
if (!UseShenandoahGC) {
return false;
}
@@ -450,11 +95,11 @@ bool ShenandoahWriteBarrierNode::is_heap_state_test(Node* iff, int mask) {
return is_gc_state_load(in1);
}
-bool ShenandoahWriteBarrierNode::is_heap_stable_test(Node* iff) {
+bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
}
-bool ShenandoahWriteBarrierNode::is_gc_state_load(Node *n) {
+bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
if (!UseShenandoahGC) {
return false;
}
@@ -476,7 +121,7 @@ bool ShenandoahWriteBarrierNode::is_gc_state_load(Node *n) {
return true;
}
-bool ShenandoahWriteBarrierNode::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
+bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
assert(phase->is_dominator(stop, start), "bad inputs");
ResourceMark rm;
Unique_Node_List wq;
@@ -500,7 +145,7 @@ bool ShenandoahWriteBarrierNode::has_safepoint_between(Node* start, Node* stop,
return false;
}
-bool ShenandoahWriteBarrierNode::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) {
+bool ShenandoahBarrierC2Support::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) {
assert(is_gc_state_load(n), "inconsistent");
Node* addp = n->in(MemNode::Address);
Node* dominator = NULL;
@@ -525,193 +170,8 @@ bool ShenandoahWriteBarrierNode::try_common_gc_state_load(Node *n, PhaseIdealLoo
return true;
}
-bool ShenandoahBarrierNode::dominates_memory_impl(PhaseGVN* phase,
- Node* b1,
- Node* b2,
- Node* current,
- bool linear) {
- ResourceMark rm;
- VectorSet visited(Thread::current()->resource_area());
- Node_Stack phis(0);
-
- for(int i = 0; i < 10; i++) {
- if (current == NULL) {
- return false;
- } else if (visited.test_set(current->_idx) || current->is_top() || current == b1) {
- current = NULL;
- while (phis.is_nonempty() && current == NULL) {
- uint idx = phis.index();
- Node* phi = phis.node();
- if (idx >= phi->req()) {
- phis.pop();
- } else {
- current = phi->in(idx);
- phis.set_index(idx+1);
- }
- }
- if (current == NULL) {
- return true;
- }
- } else if (current == b2) {
- return false;
- } else if (current == phase->C->immutable_memory()) {
- return false;
- } else if (current->isa_Phi()) {
- if (!linear) {
- return false;
- }
- phis.push(current, 2);
- current = current->in(1);
- } else if (current->Opcode() == Op_ShenandoahWriteBarrier) {
- current = current->in(Memory);
- } else if (current->Opcode() == Op_ShenandoahWBMemProj) {
- current = current->in(ShenandoahWBMemProjNode::WriteBarrier);
- } else if (current->is_Proj()) {
- current = current->in(0);
- } else if (current->is_Call()) {
- current = current->in(TypeFunc::Memory);
- } else if (current->is_MemBar()) {
- current = current->in(TypeFunc::Memory);
- } else if (current->is_MergeMem()) {
- const TypePtr* adr_type = brooks_pointer_type(phase->type(b2));
- uint alias_idx = phase->C->get_alias_index(adr_type);
- current = current->as_MergeMem()->memory_at(alias_idx);
- } else {
#ifdef ASSERT
- current->dump();
-#endif
- ShouldNotReachHere();
- return false;
- }
- }
- return false;
-}
-
-/**
- * Determines if b1 dominates b2 through memory inputs. It returns true if:
- * - b1 can be reached by following each branch in b2's memory input (through phis, etc)
- * - or we get back to b2 (i.e. through a loop) without seeing b1
- * In all other cases, (in particular, if we reach immutable_memory without having seen b1)
- * we return false.
- */
-bool ShenandoahBarrierNode::dominates_memory(PhaseGVN* phase, Node* b1, Node* b2, bool linear) {
- return dominates_memory_impl(phase, b1, b2, b2->in(Memory), linear);
-}
-
-Node* ShenandoahBarrierNode::Identity_impl(PhaseGVN* phase) {
- Node* n = in(ValueIn);
-
- Node* rb_mem = Opcode() == Op_ShenandoahReadBarrier ? in(Memory) : NULL;
- if (! needs_barrier(phase, this, n, rb_mem, _allow_fromspace)) {
- return n;
- }
-
- // Try to find a write barrier sibling with identical inputs that we can fold into.
- for (DUIterator i = n->outs(); n->has_out(i); i++) {
- Node* sibling = n->out(i);
- if (sibling == this) {
- continue;
- }
- if (sibling->Opcode() != Op_ShenandoahWriteBarrier) {
- continue;
- }
-
- assert(sibling->in(ValueIn) == in(ValueIn), "sanity");
- assert(sibling->Opcode() == Op_ShenandoahWriteBarrier, "sanity");
-
- if (dominates_memory(phase, sibling, this, phase->is_IterGVN() == NULL)) {
- return sibling;
- }
- }
- return this;
-}
-
-#ifndef PRODUCT
-void ShenandoahBarrierNode::dump_spec(outputStream *st) const {
- const TypePtr* adr = adr_type();
- if (adr == NULL) {
- return;
- }
- st->print(" @");
- adr->dump_on(st);
- st->print(" (");
- Compile::current()->alias_type(adr)->adr_type()->dump_on(st);
- st->print(") ");
-}
-#endif
-
-Node* ShenandoahReadBarrierNode::Identity(PhaseGVN* phase) {
- Node* id = Identity_impl(phase);
-
- if (id == this && phase->is_IterGVN()) {
- Node* n = in(ValueIn);
- // No success in super call. Try to combine identical read barriers.
- for (DUIterator i = n->outs(); n->has_out(i); i++) {
- Node* sibling = n->out(i);
- if (sibling == this || sibling->Opcode() != Op_ShenandoahReadBarrier) {
- continue;
- }
- assert(sibling->in(ValueIn) == in(ValueIn), "sanity");
- if (phase->is_IterGVN()->hash_find(sibling) &&
- sibling->bottom_type() == bottom_type() &&
- sibling->in(Control) == in(Control) &&
- dominates_memory_rb(phase, sibling, this, phase->is_IterGVN() == NULL)) {
- return sibling;
- }
- }
- }
- return id;
-}
-
-const Type* ShenandoahBarrierNode::Value(PhaseGVN* phase) const {
- // Either input is TOP ==> the result is TOP
- const Type *t1 = phase->type(in(Memory));
- if (t1 == Type::TOP) return Type::TOP;
- const Type *t2 = phase->type(in(ValueIn));
- if( t2 == Type::TOP ) return Type::TOP;
-
- if (t2 == TypePtr::NULL_PTR) {
- return _type;
- }
-
- const Type* type = t2->is_oopptr()->cast_to_nonconst();
- return type;
-}
-
-uint ShenandoahBarrierNode::hash() const {
- return TypeNode::hash() + _allow_fromspace;
-}
-
-bool ShenandoahBarrierNode::cmp(const Node& n) const {
- return _allow_fromspace == ((ShenandoahBarrierNode&) n)._allow_fromspace
- && TypeNode::cmp(n);
-}
-
-uint ShenandoahBarrierNode::size_of() const {
- return sizeof(*this);
-}
-
-Node* ShenandoahWBMemProjNode::Identity(PhaseGVN* phase) {
- Node* wb = in(WriteBarrier);
- if (wb->is_top()) return phase->C->top(); // Dead path.
-
- assert(wb->Opcode() == Op_ShenandoahWriteBarrier, "expect write barrier");
- PhaseIterGVN* igvn = phase->is_IterGVN();
- // We can't do the below unless the graph is fully constructed.
- if (igvn == NULL) {
- return this;
- }
-
- // If the mem projection has no barrier users, it's not needed anymore.
- if (wb->outcnt() == 1) {
- return wb->in(ShenandoahBarrierNode::Memory);
- }
-
- return this;
-}
-
-#ifdef ASSERT
-bool ShenandoahBarrierNode::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
+bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
assert(phis.size() == 0, "");
while (true) {
@@ -732,12 +192,24 @@ bool ShenandoahBarrierNode::verify_helper(Node* in, Node_Stack& phis, VectorSet&
in = in->in(AddPNode::Address);
continue;
} else if (in->is_Con()) {
- if (trace) {tty->print("Found constant"); in->dump();}
- } else if (in->is_ShenandoahBarrier()) {
+ if (trace) {
+ tty->print("Found constant");
+ in->dump();
+ }
+ } else if (in->Opcode() == Op_Parm) {
+ if (trace) {
+ tty->print("Found argument");
+ }
+ } else if (in->Opcode() == Op_CreateEx) {
+ if (trace) {
+ tty->print("Found create-exception");
+ }
+ } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
+ if (trace) {
+ tty->print("Found raw LoadP (OSR argument?)");
+ }
+ } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
if (t == ShenandoahOopStore) {
- if (in->Opcode() != Op_ShenandoahWriteBarrier) {
- return false;
- }
uint i = 0;
for (; i < phis.size(); i++) {
Node* n = phis.node_at(i);
@@ -748,8 +220,6 @@ bool ShenandoahBarrierNode::verify_helper(Node* in, Node_Stack& phis, VectorSet&
if (i == phis.size()) {
return false;
}
- } else if (t == ShenandoahStore && in->Opcode() != Op_ShenandoahWriteBarrier) {
- return false;
}
barriers_used.push(in);
if (trace) {tty->print("Found barrier"); in->dump();}
@@ -763,7 +233,14 @@ bool ShenandoahBarrierNode::verify_helper(Node* in, Node_Stack& phis, VectorSet&
in = in->in(1);
continue;
} else if (in->is_Proj() && in->in(0)->is_Allocate()) {
- if (trace) {tty->print("Found alloc"); in->in(0)->dump();}
+ if (trace) {
+ tty->print("Found alloc");
+ in->in(0)->dump();
+ }
+ } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
+ if (trace) {
+ tty->print("Found Java call");
+ }
} else if (in->is_Phi()) {
if (!visited.test_set(in->_idx)) {
if (trace) {tty->print("Pushed phi:"); in->dump();}
@@ -809,7 +286,7 @@ bool ShenandoahBarrierNode::verify_helper(Node* in, Node_Stack& phis, VectorSet&
return true;
}
-void ShenandoahBarrierNode::report_verify_failure(const char *msg, Node *n1, Node *n2) {
+void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
if (n1 != NULL) {
n1->dump(+10);
}
@@ -819,7 +296,7 @@ void ShenandoahBarrierNode::report_verify_failure(const char *msg, Node *n1, Nod
fatal("%s", msg);
}
-void ShenandoahBarrierNode::verify(RootNode* root) {
+void ShenandoahBarrierC2Support::verify(RootNode* root) {
ResourceMark rm;
Unique_Node_List wq;
   GrowableArray<Node*> barriers;
@@ -871,7 +348,7 @@ void ShenandoahBarrierNode::verify(RootNode* root) {
}
}
- if (verify && !ShenandoahBarrierNode::verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
+ if (verify && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
report_verify_failure("Shenandoah verification: Load should have barriers", n);
}
}
@@ -899,11 +376,11 @@ void ShenandoahBarrierNode::verify(RootNode* root) {
}
}
- if (verify && !ShenandoahBarrierNode::verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
+ if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
report_verify_failure("Shenandoah verification: Store should have barriers", n);
}
}
- if (!ShenandoahBarrierNode::verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
+ if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
}
} else if (n->Opcode() == Op_CmpP) {
@@ -926,26 +403,26 @@ void ShenandoahBarrierNode::verify(RootNode* root) {
} else {
assert(in2->bottom_type()->isa_oopptr(), "");
- if (!ShenandoahBarrierNode::verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
- !ShenandoahBarrierNode::verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
+ if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
+ !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
}
}
if (verify_no_useless_barrier &&
mark_inputs &&
- (!ShenandoahBarrierNode::verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
- !ShenandoahBarrierNode::verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
+ (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
+ !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
phis.clear();
visited.Reset();
}
}
} else if (n->is_LoadStore()) {
if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
- !ShenandoahBarrierNode::verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
+ !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
}
- if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !ShenandoahBarrierNode::verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
+ if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
}
} else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
@@ -1041,13 +518,13 @@ void ShenandoahBarrierNode::verify(RootNode* root) {
}
}
}
- if (!ShenandoahBarrierNode::verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
- !ShenandoahBarrierNode::verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
+ if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
+ !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
}
} else if (strlen(call->_name) > 5 &&
!strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
- if (!ShenandoahBarrierNode::verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
+ if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
report_verify_failure("Shenandoah verification: _fill should have barriers", n);
}
} else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
@@ -1067,7 +544,7 @@ void ShenandoahBarrierNode::verify(RootNode* root) {
if (pos == -1) {
break;
}
- if (!ShenandoahBarrierNode::verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
+ if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
}
}
@@ -1090,15 +567,8 @@ void ShenandoahBarrierNode::verify(RootNode* root) {
}
}
}
- } else if (n->is_ShenandoahBarrier()) {
- assert(!barriers.contains(n), "");
- assert(n->Opcode() != Op_ShenandoahWriteBarrier || n->find_out_with(Op_ShenandoahWBMemProj) != NULL, "bad shenandoah write barrier");
- assert(n->Opcode() != Op_ShenandoahWriteBarrier || n->outcnt() > 1, "bad shenandoah write barrier");
- barriers.push(n);
- } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
+ } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
// skip
- } else if (n->Opcode() == Op_ShenandoahWBMemProj) {
- assert(n->in(0) == NULL && n->in(ShenandoahWBMemProjNode::WriteBarrier)->Opcode() == Op_ShenandoahWriteBarrier, "strange ShenandoahWBMemProj");
} else if (n->is_AddP()
|| n->is_Phi()
|| n->is_ConstraintCast()
@@ -1165,7 +635,7 @@ void ShenandoahBarrierNode::verify(RootNode* root) {
if (pos == -1) {
break;
}
- if (!ShenandoahBarrierNode::verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
+ if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
}
}
@@ -1193,7 +663,7 @@ void ShenandoahBarrierNode::verify(RootNode* root) {
SafePointNode* sfpt = n->as_SafePoint();
if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
- if (!ShenandoahBarrierNode::verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
+ if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
phis.clear();
visited.Reset();
}
@@ -1227,9 +697,8 @@ void ShenandoahBarrierNode::verify(RootNode* root) {
n->Opcode() == Op_SCMemProj ||
n->Opcode() == Op_EncodeP ||
n->Opcode() == Op_DecodeN ||
- n->Opcode() == Op_ShenandoahWriteBarrier ||
- n->Opcode() == Op_ShenandoahWBMemProj ||
- n->Opcode() == Op_ShenandoahEnqueueBarrier)) {
+ n->Opcode() == Op_ShenandoahEnqueueBarrier ||
+ n->Opcode() == Op_ShenandoahLoadReferenceBarrier)) {
if (m->bottom_type()->make_oopptr() && m->bottom_type()->make_oopptr()->meet(TypePtr::NULL_PTR) == m->bottom_type()) {
report_verify_failure("Shenandoah verification: null input", n, m);
}
@@ -1251,7 +720,7 @@ void ShenandoahBarrierNode::verify(RootNode* root) {
}
#endif
-bool ShenandoahBarrierNode::is_dominator_same_ctrl(Node*c, Node* d, Node* n, PhaseIdealLoop* phase) {
+bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
// That both nodes have the same control is not sufficient to prove
// domination, verify that there's no path from d to n
ResourceMark rm;
@@ -1275,7 +744,7 @@ bool ShenandoahBarrierNode::is_dominator_same_ctrl(Node*c, Node* d, Node* n, Pha
return true;
}
-bool ShenandoahBarrierNode::is_dominator(Node *d_c, Node *n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
+bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
if (d_c != n_c) {
return phase->is_dominator(d_c, n_c);
}
@@ -1290,15 +759,11 @@ Node* next_mem(Node* mem, int alias) {
res = mem->in(TypeFunc::Memory);
} else if (mem->is_Phi()) {
res = mem->in(1);
- } else if (mem->is_ShenandoahBarrier()) {
- res = mem->in(ShenandoahBarrierNode::Memory);
} else if (mem->is_MergeMem()) {
res = mem->as_MergeMem()->memory_at(alias);
} else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
assert(alias = Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
res = mem->in(MemNode::Memory);
- } else if (mem->Opcode() == Op_ShenandoahWBMemProj) {
- res = mem->in(ShenandoahWBMemProjNode::WriteBarrier);
} else {
#ifdef ASSERT
mem->dump();
@@ -1308,7 +773,7 @@ Node* next_mem(Node* mem, int alias) {
return res;
}
-Node* ShenandoahBarrierNode::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
+Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
Node* iffproj = NULL;
while (c != dom) {
Node* next = phase->idom(c);
@@ -1373,270 +838,7 @@ Node* ShenandoahBarrierNode::no_branches(Node* c, Node* dom, bool allow_one_proj
return iffproj;
}
-bool ShenandoahBarrierNode::build_loop_late_post(PhaseIdealLoop* phase, Node* n) {
- if (n->Opcode() == Op_ShenandoahReadBarrier ||
- n->Opcode() == Op_ShenandoahWriteBarrier ||
- n->Opcode() == Op_ShenandoahWBMemProj) {
-
- phase->build_loop_late_post_work(n, false);
-
- if (n->Opcode() == Op_ShenandoahWriteBarrier) {
- // The write barrier and its memory proj must have the same
- // control otherwise some loop opts could put nodes (Phis) between
- // them
- Node* proj = n->find_out_with(Op_ShenandoahWBMemProj);
- if (proj != NULL) {
- phase->set_ctrl_and_loop(proj, phase->get_ctrl(n));
- }
- }
- return true;
- }
- return false;
-}
-
-bool ShenandoahBarrierNode::sink_node(PhaseIdealLoop* phase, Node* ctrl, Node* n_ctrl) {
- ctrl = phase->find_non_split_ctrl(ctrl);
- assert(phase->dom_depth(n_ctrl) <= phase->dom_depth(ctrl), "n is later than its clone");
- set_req(0, ctrl);
- phase->register_new_node(this, ctrl);
- return true;
-}
-
-#ifdef ASSERT
-void ShenandoahWriteBarrierNode::memory_dominates_all_paths_helper(Node* c, Node* rep_ctrl, Unique_Node_List& controls, PhaseIdealLoop* phase) {
- const bool trace = false;
- if (trace) { tty->print("X control is"); c->dump(); }
-
- uint start = controls.size();
- controls.push(c);
- for (uint i = start; i < controls.size(); i++) {
- Node *n = controls.at(i);
-
- if (trace) { tty->print("X from"); n->dump(); }
-
- if (n == rep_ctrl) {
- continue;
- }
-
- if (n->is_Proj()) {
- Node* n_dom = n->in(0);
- IdealLoopTree* n_dom_loop = phase->get_loop(n_dom);
- if (n->is_IfProj() && n_dom->outcnt() == 2) {
- n_dom_loop = phase->get_loop(n_dom->as_If()->proj_out(n->as_Proj()->_con == 0 ? 1 : 0));
- }
- if (n_dom_loop != phase->ltree_root()) {
- Node* tail = n_dom_loop->tail();
- if (tail->is_Region()) {
- for (uint j = 1; j < tail->req(); j++) {
- if (phase->is_dominator(n_dom, tail->in(j)) && !phase->is_dominator(n, tail->in(j))) {
- assert(phase->is_dominator(rep_ctrl, tail->in(j)), "why are we here?");
- // entering loop from below, mark backedge
- if (trace) { tty->print("X pushing backedge"); tail->in(j)->dump(); }
- controls.push(tail->in(j));
- //assert(n->in(0) == n_dom, "strange flow control");
- }
- }
- } else if (phase->get_loop(n) != n_dom_loop && phase->is_dominator(n_dom, tail)) {
- // entering loop from below, mark backedge
- if (trace) { tty->print("X pushing backedge"); tail->dump(); }
- controls.push(tail);
- //assert(n->in(0) == n_dom, "strange flow control");
- }
- }
- }
-
- if (n->is_Loop()) {
- Node* c = n->in(LoopNode::EntryControl);
- if (trace) { tty->print("X pushing"); c->dump(); }
- controls.push(c);
- } else if (n->is_Region()) {
- for (uint i = 1; i < n->req(); i++) {
- Node* c = n->in(i);
- if (trace) { tty->print("X pushing"); c->dump(); }
- controls.push(c);
- }
- } else {
- Node* c = n->in(0);
- if (trace) { tty->print("X pushing"); c->dump(); }
- controls.push(c);
- }
- }
-}
-
-bool ShenandoahWriteBarrierNode::memory_dominates_all_paths(Node* mem, Node* rep_ctrl, int alias, PhaseIdealLoop* phase) {
- const bool trace = false;
- if (trace) {
- tty->print("XXX mem is"); mem->dump();
- tty->print("XXX rep ctrl is"); rep_ctrl->dump();
- tty->print_cr("XXX alias is %d", alias);
- }
- ResourceMark rm;
- Unique_Node_List wq;
- Unique_Node_List controls;
- wq.push(mem);
- for (uint next = 0; next < wq.size(); next++) {
- Node *nn = wq.at(next);
- if (trace) { tty->print("XX from mem"); nn->dump(); }
- assert(nn->bottom_type() == Type::MEMORY, "memory only");
-
- if (nn->is_Phi()) {
- Node* r = nn->in(0);
- for (DUIterator_Fast jmax, j = r->fast_outs(jmax); j < jmax; j++) {
- Node* u = r->fast_out(j);
- if (u->is_Phi() && u->bottom_type() == Type::MEMORY && u != nn &&
- (u->adr_type() == TypePtr::BOTTOM || phase->C->get_alias_index(u->adr_type()) == alias)) {
- if (trace) { tty->print("XX Next mem (other phi)"); u->dump(); }
- wq.push(u);
- }
- }
- }
-
- for (DUIterator_Fast imax, i = nn->fast_outs(imax); i < imax; i++) {
- Node* use = nn->fast_out(i);
-
- if (trace) { tty->print("XX use %p", use->adr_type()); use->dump(); }
- if (use->is_CFG() && use->in(TypeFunc::Memory) == nn) {
- Node* c = use->in(0);
- if (phase->is_dominator(rep_ctrl, c)) {
- memory_dominates_all_paths_helper(c, rep_ctrl, controls, phase);
- } else if (use->is_CallStaticJava() && use->as_CallStaticJava()->uncommon_trap_request() != 0 && c->is_Region()) {
- Node* region = c;
- if (trace) { tty->print("XX unc region"); region->dump(); }
- for (uint j = 1; j < region->req(); j++) {
- if (phase->is_dominator(rep_ctrl, region->in(j))) {
- if (trace) { tty->print("XX unc follows"); region->in(j)->dump(); }
- memory_dominates_all_paths_helper(region->in(j), rep_ctrl, controls, phase);
- }
- }
- }
- //continue;
- } else if (use->is_Phi()) {
- assert(use->bottom_type() == Type::MEMORY, "bad phi");
- if ((use->adr_type() == TypePtr::BOTTOM) ||
- phase->C->get_alias_index(use->adr_type()) == alias) {
- for (uint j = 1; j < use->req(); j++) {
- if (use->in(j) == nn) {
- Node* c = use->in(0)->in(j);
- if (phase->is_dominator(rep_ctrl, c)) {
- memory_dominates_all_paths_helper(c, rep_ctrl, controls, phase);
- }
- }
- }
- }
- // continue;
- }
-
- if (use->is_MergeMem()) {
- if (use->as_MergeMem()->memory_at(alias) == nn) {
- if (trace) { tty->print("XX Next mem"); use->dump(); }
- // follow the memory edges
- wq.push(use);
- }
- } else if (use->is_Phi()) {
- assert(use->bottom_type() == Type::MEMORY, "bad phi");
- if ((use->adr_type() == TypePtr::BOTTOM) ||
- phase->C->get_alias_index(use->adr_type()) == alias) {
- if (trace) { tty->print("XX Next mem"); use->dump(); }
- // follow the memory edges
- wq.push(use);
- }
- } else if (use->bottom_type() == Type::MEMORY &&
- (use->adr_type() == TypePtr::BOTTOM || phase->C->get_alias_index(use->adr_type()) == alias)) {
- if (trace) { tty->print("XX Next mem"); use->dump(); }
- // follow the memory edges
- wq.push(use);
- } else if ((use->is_SafePoint() || use->is_MemBar()) &&
- (use->adr_type() == TypePtr::BOTTOM || phase->C->get_alias_index(use->adr_type()) == alias)) {
- for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
- Node* u = use->fast_out(j);
- if (u->bottom_type() == Type::MEMORY) {
- if (trace) { tty->print("XX Next mem"); u->dump(); }
- // follow the memory edges
- wq.push(u);
- }
- }
- } else if (use->Opcode() == Op_ShenandoahWriteBarrier && phase->C->get_alias_index(use->adr_type()) == alias) {
- Node* m = use->find_out_with(Op_ShenandoahWBMemProj);
- if (m != NULL) {
- if (trace) { tty->print("XX Next mem"); m->dump(); }
- // follow the memory edges
- wq.push(m);
- }
- }
- }
- }
-
- if (controls.size() == 0) {
- return false;
- }
-
- for (uint i = 0; i < controls.size(); i++) {
- Node *n = controls.at(i);
-
- if (trace) { tty->print("X checking"); n->dump(); }
-
- if (n->unique_ctrl_out() != NULL) {
- continue;
- }
-
- if (n->Opcode() == Op_NeverBranch) {
- Node* taken = n->as_Multi()->proj_out(0);
- if (!controls.member(taken)) {
- if (trace) { tty->print("X not seen"); taken->dump(); }
- return false;
- }
- continue;
- }
-
- for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
- Node* u = n->fast_out(j);
-
- if (u->is_CFG()) {
- if (!controls.member(u)) {
- if (u->is_Proj() && u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
- if (trace) { tty->print("X not seen but unc"); u->dump(); }
- } else {
- Node* c = u;
- do {
- c = c->unique_ctrl_out();
- } while (c != NULL && c->is_Region());
- if (c != NULL && c->Opcode() == Op_Halt) {
- if (trace) { tty->print("X not seen but halt"); c->dump(); }
- } else {
- if (trace) { tty->print("X not seen"); u->dump(); }
- return false;
- }
- }
- } else {
- if (trace) { tty->print("X seen"); u->dump(); }
- }
- }
- }
- }
- return true;
-}
-#endif
-
-Node* ShenandoahBarrierNode::dom_mem(Node* mem, Node*& mem_ctrl, Node* n, Node* rep_ctrl, int alias, PhaseIdealLoop* phase) {
- ResourceMark rm;
- VectorSet wq(Thread::current()->resource_area());
- wq.set(mem->_idx);
- mem_ctrl = phase->get_ctrl(mem);
- while (!is_dominator(mem_ctrl, rep_ctrl, mem, n, phase)) {
- mem = next_mem(mem, alias);
- if (wq.test_set(mem->_idx)) {
- return NULL; // hit an unexpected loop
- }
- mem_ctrl = phase->ctrl_or_self(mem);
- }
- if (mem->is_MergeMem()) {
- mem = mem->as_MergeMem()->memory_at(alias);
- mem_ctrl = phase->ctrl_or_self(mem);
- }
- return mem;
-}
-
-Node* ShenandoahBarrierNode::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
+Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
ResourceMark rm;
VectorSet wq(Thread::current()->resource_area());
wq.set(mem->_idx);
@@ -1655,650 +857,7 @@ Node* ShenandoahBarrierNode::dom_mem(Node* mem, Node* ctrl, int alias, Node*& me
return mem;
}
-static void disconnect_barrier_mem(Node* wb, PhaseIterGVN& igvn) {
- Node* mem_in = wb->in(ShenandoahBarrierNode::Memory);
- Node* proj = wb->find_out_with(Op_ShenandoahWBMemProj);
-
- for (DUIterator_Last imin, i = proj->last_outs(imin); i >= imin; ) {
- Node* u = proj->last_out(i);
- igvn.rehash_node_delayed(u);
- int nb = u->replace_edge(proj, mem_in);
- assert(nb > 0, "no replacement?");
- i -= nb;
- }
-}
-
-Node* ShenandoahWriteBarrierNode::move_above_predicates(LoopNode* cl, Node* val_ctrl, PhaseIdealLoop* phase) {
- Node* entry = cl->skip_strip_mined(-1)->in(LoopNode::EntryControl);
- Node* above_pred = phase->skip_all_loop_predicates(entry);
- Node* ctrl = entry;
- while (ctrl != above_pred) {
- Node* next = ctrl->in(0);
- if (!phase->is_dominator(val_ctrl, next)) {
- break;
- }
- ctrl = next;
- }
- return ctrl;
-}
-
-static MemoryGraphFixer* find_fixer(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, int alias) {
- for (int i = 0; i < memory_graph_fixers.length(); i++) {
- if (memory_graph_fixers.at(i)->alias() == alias) {
- return memory_graph_fixers.at(i);
- }
- }
- return NULL;
-}
-
-static MemoryGraphFixer* create_fixer(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, int alias, PhaseIdealLoop* phase, bool include_lsm) {
- assert(find_fixer(memory_graph_fixers, alias) == NULL, "none should exist yet");
- MemoryGraphFixer* fixer = new MemoryGraphFixer(alias, include_lsm, phase);
- memory_graph_fixers.push(fixer);
- return fixer;
-}
-
-void ShenandoahWriteBarrierNode::try_move_before_loop_helper(LoopNode* cl, Node* val_ctrl, GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses) {
- assert(cl->is_Loop(), "bad control");
- Node* ctrl = move_above_predicates(cl, val_ctrl, phase);
- Node* mem_ctrl = NULL;
- int alias = phase->C->get_alias_index(adr_type());
-
- MemoryGraphFixer* fixer = find_fixer(memory_graph_fixers, alias);
- if (fixer == NULL) {
- fixer = create_fixer(memory_graph_fixers, alias, phase, include_lsm);
- }
-
- Node* proj = find_out_with(Op_ShenandoahWBMemProj);
-
- fixer->remove(proj);
- Node* mem = fixer->find_mem(ctrl, NULL);
-
- assert(!ShenandoahVerifyOptoBarriers || memory_dominates_all_paths(mem, ctrl, alias, phase), "can't fix the memory graph");
-
- phase->set_ctrl_and_loop(this, ctrl);
- phase->igvn().replace_input_of(this, Control, ctrl);
-
- disconnect_barrier_mem(this, phase->igvn());
-
- phase->igvn().replace_input_of(this, Memory, mem);
- phase->set_ctrl_and_loop(proj, ctrl);
-
- fixer->fix_mem(ctrl, ctrl, mem, mem, proj, uses);
- assert(proj->outcnt() > 0, "disconnected write barrier");
-}
-
-LoopNode* ShenandoahWriteBarrierNode::try_move_before_pre_loop(Node* c, Node* val_ctrl, PhaseIdealLoop* phase) {
- // A write barrier between a pre and main loop can get in the way of
- // vectorization. Move it above the pre loop if possible
- CountedLoopNode* cl = NULL;
- if (c->is_IfFalse() &&
- c->in(0)->is_CountedLoopEnd()) {
- cl = c->in(0)->as_CountedLoopEnd()->loopnode();
- } else if (c->is_IfProj() &&
- c->in(0)->is_If() &&
- c->in(0)->in(0)->is_IfFalse() &&
- c->in(0)->in(0)->in(0)->is_CountedLoopEnd()) {
- cl = c->in(0)->in(0)->in(0)->as_CountedLoopEnd()->loopnode();
- }
- if (cl != NULL &&
- cl->is_pre_loop() &&
- val_ctrl != cl &&
- phase->is_dominator(val_ctrl, cl)) {
- return cl;
- }
- return NULL;
-}
-
-void ShenandoahWriteBarrierNode::try_move_before_loop(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses) {
- Node *n_ctrl = phase->get_ctrl(this);
- IdealLoopTree *n_loop = phase->get_loop(n_ctrl);
- Node* val = in(ValueIn);
- Node* val_ctrl = phase->get_ctrl(val);
- if (n_loop != phase->ltree_root() && !n_loop->_irreducible) {
- IdealLoopTree *val_loop = phase->get_loop(val_ctrl);
- Node* mem = in(Memory);
- IdealLoopTree *mem_loop = phase->get_loop(phase->get_ctrl(mem));
- if (!n_loop->is_member(val_loop) &&
- n_loop->is_member(mem_loop)) {
- Node* n_loop_head = n_loop->_head;
-
- if (n_loop_head->is_Loop()) {
- LoopNode* loop = n_loop_head->as_Loop();
- if (n_loop_head->is_CountedLoop() && n_loop_head->as_CountedLoop()->is_main_loop()) {
- LoopNode* res = try_move_before_pre_loop(n_loop_head->in(LoopNode::EntryControl), val_ctrl, phase);
- if (res != NULL) {
- loop = res;
- }
- }
-
- try_move_before_loop_helper(loop, val_ctrl, memory_graph_fixers, phase, include_lsm, uses);
- }
- }
- }
- LoopNode* ctrl = try_move_before_pre_loop(in(0), val_ctrl, phase);
- if (ctrl != NULL) {
- try_move_before_loop_helper(ctrl, val_ctrl, memory_graph_fixers, phase, include_lsm, uses);
- }
-}
-
-Node* ShenandoahWriteBarrierNode::would_subsume(ShenandoahBarrierNode* other, PhaseIdealLoop* phase) {
- Node* val = in(ValueIn);
- Node* val_ctrl = phase->get_ctrl(val);
- Node* other_mem = other->in(Memory);
- Node* other_ctrl = phase->get_ctrl(other);
- Node* this_ctrl = phase->get_ctrl(this);
- IdealLoopTree* this_loop = phase->get_loop(this_ctrl);
- IdealLoopTree* other_loop = phase->get_loop(other_ctrl);
-
- Node* ctrl = phase->dom_lca(other_ctrl, this_ctrl);
-
- if (ctrl->is_Proj() &&
- ctrl->in(0)->is_Call() &&
- ctrl->unique_ctrl_out() != NULL &&
- ctrl->unique_ctrl_out()->Opcode() == Op_Catch &&
- !phase->is_dominator(val_ctrl, ctrl->in(0)->in(0))) {
- return NULL;
- }
-
- IdealLoopTree* loop = phase->get_loop(ctrl);
-
- // We don't want to move a write barrier in a loop
- // If the LCA is in a inner loop, try a control out of loop if possible
- while (!loop->is_member(this_loop) && (other->Opcode() != Op_ShenandoahWriteBarrier || !loop->is_member(other_loop))) {
- ctrl = phase->idom(ctrl);
- if (ctrl->is_MultiBranch()) {
- ctrl = ctrl->in(0);
- }
- if (ctrl != val_ctrl && phase->is_dominator(ctrl, val_ctrl)) {
- return NULL;
- }
- loop = phase->get_loop(ctrl);
- }
-
- if (ShenandoahDontIncreaseWBFreq) {
- Node* this_iffproj = no_branches(this_ctrl, ctrl, true, phase);
- if (other->Opcode() == Op_ShenandoahWriteBarrier) {
- Node* other_iffproj = no_branches(other_ctrl, ctrl, true, phase);
- if (other_iffproj == NULL || this_iffproj == NULL) {
- return ctrl;
- } else if (other_iffproj != NodeSentinel && this_iffproj != NodeSentinel &&
- other_iffproj->in(0) == this_iffproj->in(0)) {
- return ctrl;
- }
- } else if (this_iffproj == NULL) {
- return ctrl;
- }
- return NULL;
- }
-
- return ctrl;
-}
-
-void ShenandoahWriteBarrierNode::optimize_before_expansion(PhaseIdealLoop* phase, GrowableArray<MemoryGraphFixer*> memory_graph_fixers, bool include_lsm) {
- bool progress = false;
- Unique_Node_List uses;
- do {
- progress = false;
- for (int i = 0; i < ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i++) {
- ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i);
-
- wb->try_move_before_loop(memory_graph_fixers, phase, include_lsm, uses);
-
- Node* val = wb->in(ValueIn);
-
- for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
- Node* u = val->fast_out(j);
- if (u != wb && u->is_ShenandoahBarrier()) {
- Node* rep_ctrl = wb->would_subsume(u->as_ShenandoahBarrier(), phase);
-
- if (rep_ctrl != NULL) {
- Node* other = u;
- Node* val_ctrl = phase->get_ctrl(val);
- if (rep_ctrl->is_Proj() &&
- rep_ctrl->in(0)->is_Call() &&
- rep_ctrl->unique_ctrl_out() != NULL &&
- rep_ctrl->unique_ctrl_out()->Opcode() == Op_Catch) {
- rep_ctrl = rep_ctrl->in(0)->in(0);
-
- assert(phase->is_dominator(val_ctrl, rep_ctrl), "bad control");
- } else {
- LoopNode* c = ShenandoahWriteBarrierNode::try_move_before_pre_loop(rep_ctrl, val_ctrl, phase);
- if (c != NULL) {
- rep_ctrl = ShenandoahWriteBarrierNode::move_above_predicates(c, val_ctrl, phase);
- } else {
- while (rep_ctrl->is_IfProj()) {
- CallStaticJavaNode* unc = rep_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
- if (unc != NULL) {
- int req = unc->uncommon_trap_request();
- Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
- if ((trap_reason == Deoptimization::Reason_loop_limit_check ||
- trap_reason == Deoptimization::Reason_predicate ||
- trap_reason == Deoptimization::Reason_profile_predicate) &&
- phase->is_dominator(val_ctrl, rep_ctrl->in(0)->in(0))) {
- rep_ctrl = rep_ctrl->in(0)->in(0);
- continue;
- }
- }
- break;
- }
- }
- }
-
- Node* wb_ctrl = phase->get_ctrl(wb);
- Node* other_ctrl = phase->get_ctrl(other);
- int alias = phase->C->get_alias_index(wb->adr_type());
- MemoryGraphFixer* fixer = find_fixer(memory_graph_fixers, alias);;
- if (!is_dominator(wb_ctrl, other_ctrl, wb, other, phase)) {
- if (fixer == NULL) {
- fixer = create_fixer(memory_graph_fixers, alias, phase, include_lsm);
- }
- Node* mem = fixer->find_mem(rep_ctrl, phase->get_ctrl(other) == rep_ctrl ? other : NULL);
-
- if (mem->has_out_with(Op_Lock) || mem->has_out_with(Op_Unlock)) {
- continue;
- }
-
- Node* wb_proj = wb->find_out_with(Op_ShenandoahWBMemProj);
- fixer->remove(wb_proj);
- Node* mem_for_ctrl = fixer->find_mem(rep_ctrl, NULL);
-
- if (wb->in(Memory) != mem) {
- disconnect_barrier_mem(wb, phase->igvn());
- phase->igvn().replace_input_of(wb, Memory, mem);
- }
- if (rep_ctrl != wb_ctrl) {
- phase->set_ctrl_and_loop(wb, rep_ctrl);
- phase->igvn().replace_input_of(wb, Control, rep_ctrl);
- phase->set_ctrl_and_loop(wb_proj, rep_ctrl);
- progress = true;
- }
-
- fixer->fix_mem(rep_ctrl, rep_ctrl, mem, mem_for_ctrl, wb_proj, uses);
-
- assert(!ShenandoahVerifyOptoBarriers || ShenandoahWriteBarrierNode::memory_dominates_all_paths(mem, rep_ctrl, alias, phase), "can't fix the memory graph");
- }
-
- if (other->Opcode() == Op_ShenandoahWriteBarrier) {
- Node* other_proj = other->find_out_with(Op_ShenandoahWBMemProj);
- if (fixer != NULL) {
- fixer->remove(other_proj);
- }
- phase->igvn().replace_node(other_proj, other->in(Memory));
- }
- phase->igvn().replace_node(other, wb);
- --j; --jmax;
- }
- }
- }
- }
- } while(progress);
-}
-
-// Some code duplication with PhaseIdealLoop::split_if_with_blocks_pre()
-Node* ShenandoahWriteBarrierNode::try_split_thru_phi(PhaseIdealLoop* phase) {
- Node *ctrl = phase->get_ctrl(this);
- if (ctrl == NULL) {
- return this;
- }
- Node *blk = phase->has_local_phi_input(this);
- if (blk == NULL) {
- return this;
- }
-
- if (in(0) != blk) {
- return this;
- }
-
- int policy = blk->req() >> 2;
-
- if (blk->is_CountedLoop()) {
- IdealLoopTree *lp = phase->get_loop(blk);
- if (lp && lp->_rce_candidate) {
- return this;
- }
- }
-
- if (phase->C->live_nodes() > 35000) {
- return this;
- }
-
- uint unique = phase->C->unique();
- Node *phi = phase->split_thru_phi(this, blk, policy);
- if (phi == NULL) {
- return this;
- }
-
- Node* mem_phi = new PhiNode(blk, Type::MEMORY, phase->C->alias_type(adr_type())->adr_type());
- for (uint i = 1; i < blk->req(); i++) {
- Node* n = phi->in(i);
- if (n->Opcode() == Op_ShenandoahWriteBarrier &&
- n->_idx >= unique) {
- Node* proj = new ShenandoahWBMemProjNode(n);
- phase->register_new_node(proj, phase->get_ctrl(n));
- mem_phi->init_req(i, proj);
- } else {
- Node* mem = in(ShenandoahBarrierNode::Memory);
- if (mem->is_Phi() && mem->in(0) == blk) {
- mem = mem->in(i);
- }
- mem_phi->init_req(i, mem);
- }
- }
- phase->register_new_node(mem_phi, blk);
-
-
- Node* proj = find_out_with(Op_ShenandoahWBMemProj);
- phase->igvn().replace_node(proj, mem_phi);
- phase->igvn().replace_node(this, phi);
-
- return phi;
-}
-
-void ShenandoahReadBarrierNode::try_move(PhaseIdealLoop* phase) {
- Node *n_ctrl = phase->get_ctrl(this);
- if (n_ctrl == NULL) {
- return;
- }
- Node* mem = in(MemNode::Memory);
- int alias = phase->C->get_alias_index(adr_type());
- const bool trace = false;
-
-#ifdef ASSERT
- if (trace) { tty->print("Trying to move mem of"); dump(); }
-#endif
-
- Node* new_mem = mem;
-
- ResourceMark rm;
- VectorSet seen(Thread::current()->resource_area());
- Node_List phis;
-
- for (;;) {
-#ifdef ASSERT
- if (trace) { tty->print("Looking for dominator from"); mem->dump(); }
-#endif
- if (mem->is_Proj() && mem->in(0)->is_Start()) {
- if (new_mem != in(MemNode::Memory)) {
-#ifdef ASSERT
- if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); }
-#endif
- phase->igvn().replace_input_of(this, MemNode::Memory, new_mem);
- }
- return;
- }
-
- Node* candidate = mem;
- do {
- if (!is_independent(mem)) {
- if (trace) { tty->print_cr("Not independent"); }
- if (new_mem != in(MemNode::Memory)) {
-#ifdef ASSERT
- if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); }
-#endif
- phase->igvn().replace_input_of(this, MemNode::Memory, new_mem);
- }
- return;
- }
- if (seen.test_set(mem->_idx)) {
- if (trace) { tty->print_cr("Already seen"); }
- ShouldNotReachHere();
- // Strange graph
- if (new_mem != in(MemNode::Memory)) {
-#ifdef ASSERT
- if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); }
-#endif
- phase->igvn().replace_input_of(this, MemNode::Memory, new_mem);
- }
- return;
- }
- if (mem->is_Phi()) {
- phis.push(mem);
- }
- mem = next_mem(mem, alias);
- if (mem->bottom_type() == Type::MEMORY) {
- candidate = mem;
- }
- assert(is_dominator(phase->ctrl_or_self(mem), n_ctrl, mem, this, phase) == phase->is_dominator(phase->ctrl_or_self(mem), n_ctrl), "strange dominator");
-#ifdef ASSERT
- if (trace) { tty->print("Next mem is"); mem->dump(); }
-#endif
- } while (mem->bottom_type() != Type::MEMORY || !phase->is_dominator(phase->ctrl_or_self(mem), n_ctrl));
-
- assert(mem->bottom_type() == Type::MEMORY, "bad mem");
-
- bool not_dom = false;
- for (uint i = 0; i < phis.size() && !not_dom; i++) {
- Node* nn = phis.at(i);
-
-#ifdef ASSERT
- if (trace) { tty->print("Looking from phi"); nn->dump(); }
-#endif
- assert(nn->is_Phi(), "phis only");
- for (uint j = 2; j < nn->req() && !not_dom; j++) {
- Node* m = nn->in(j);
-#ifdef ASSERT
- if (trace) { tty->print("Input %d is", j); m->dump(); }
-#endif
- while (m != mem && !seen.test_set(m->_idx)) {
- if (is_dominator(phase->ctrl_or_self(m), phase->ctrl_or_self(mem), m, mem, phase)) {
- not_dom = true;
- // Scheduling anomaly
-#ifdef ASSERT
- if (trace) { tty->print("Giving up"); m->dump(); }
-#endif
- break;
- }
- if (!is_independent(m)) {
- if (trace) { tty->print_cr("Not independent"); }
- if (new_mem != in(MemNode::Memory)) {
-#ifdef ASSERT
- if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); }
-#endif
- phase->igvn().replace_input_of(this, MemNode::Memory, new_mem);
- }
- return;
- }
- if (m->is_Phi()) {
- phis.push(m);
- }
- m = next_mem(m, alias);
-#ifdef ASSERT
- if (trace) { tty->print("Next mem is"); m->dump(); }
-#endif
- }
- }
- }
- if (!not_dom) {
- new_mem = mem;
- phis.clear();
- } else {
- seen.Clear();
- }
- }
-}
-
-CallStaticJavaNode* ShenandoahWriteBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
- Node* val = in(ValueIn);
-
- const Type* val_t = igvn.type(val);
-
- if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
- val->Opcode() == Op_CastPP &&
- val->in(0) != NULL &&
- val->in(0)->Opcode() == Op_IfTrue &&
- val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
- val->in(0)->in(0)->is_If() &&
- val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
- val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
- val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
- val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
- val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
- assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
- CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
- return unc;
- }
- return NULL;
-}
-
-void ShenandoahWriteBarrierNode::pin_and_expand_move_barrier(PhaseIdealLoop* phase, GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, Unique_Node_List& uses) {
- Node* unc = pin_and_expand_null_check(phase->igvn());
- Node* val = in(ValueIn);
-
- if (unc != NULL) {
- Node* ctrl = phase->get_ctrl(this);
- Node* unc_ctrl = val->in(0);
-
- // Don't move write barrier in a loop
- IdealLoopTree* loop = phase->get_loop(ctrl);
- IdealLoopTree* unc_loop = phase->get_loop(unc_ctrl);
-
- if (!unc_loop->is_member(loop)) {
- return;
- }
-
- Node* branch = no_branches(ctrl, unc_ctrl, false, phase);
- assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
- if (branch == NodeSentinel) {
- return;
- }
-
- RegionNode* r = new RegionNode(3);
- IfNode* iff = unc_ctrl->in(0)->as_If();
-
- Node* ctrl_use = unc_ctrl->unique_ctrl_out();
- Node* unc_ctrl_clone = unc_ctrl->clone();
- phase->register_control(unc_ctrl_clone, loop, iff);
- Node* c = unc_ctrl_clone;
- Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
- r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
-
- phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
- phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
- phase->lazy_replace(c, unc_ctrl);
- c = NULL;;
- phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
- phase->set_ctrl(val, unc_ctrl_clone);
-
- IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
- fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
- Node* iff_proj = iff->proj_out(0);
- r->init_req(2, iff_proj);
- phase->register_control(r, phase->ltree_root(), iff);
-
- Node* new_bol = new_iff->in(1)->clone();
- Node* new_cmp = new_bol->in(1)->clone();
- assert(new_cmp->Opcode() == Op_CmpP, "broken");
- assert(new_cmp->in(1) == val->in(1), "broken");
- new_bol->set_req(1, new_cmp);
- new_cmp->set_req(1, this);
- phase->register_new_node(new_bol, new_iff->in(0));
- phase->register_new_node(new_cmp, new_iff->in(0));
- phase->igvn().replace_input_of(new_iff, 1, new_bol);
- phase->igvn().replace_input_of(new_cast, 1, this);
-
- for (DUIterator_Fast imax, i = this->fast_outs(imax); i < imax; i++) {
- Node* u = this->fast_out(i);
- if (u == new_cast || u->Opcode() == Op_ShenandoahWBMemProj || u == new_cmp) {
- continue;
- }
- phase->igvn().rehash_node_delayed(u);
- int nb = u->replace_edge(this, new_cast);
- assert(nb > 0, "no update?");
- --i; imax -= nb;
- }
-
- for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
- Node* u = val->fast_out(i);
- if (u == this) {
- continue;
- }
- phase->igvn().rehash_node_delayed(u);
- int nb = u->replace_edge(val, new_cast);
- assert(nb > 0, "no update?");
- --i; imax -= nb;
- }
-
- Node* new_ctrl = unc_ctrl_clone;
-
- int alias = phase->C->get_alias_index(adr_type());
- MemoryGraphFixer* fixer = find_fixer(memory_graph_fixers, alias);
- if (fixer == NULL) {
- fixer = create_fixer(memory_graph_fixers, alias, phase, true);
- }
-
- Node* proj = find_out_with(Op_ShenandoahWBMemProj);
- fixer->remove(proj);
- Node* mem = fixer->find_mem(new_ctrl, NULL);
-
- if (in(Memory) != mem) {
- disconnect_barrier_mem(this, phase->igvn());
- phase->igvn().replace_input_of(this, Memory, mem);
- }
-
- phase->set_ctrl_and_loop(this, new_ctrl);
- phase->igvn().replace_input_of(this, Control, new_ctrl);
- phase->set_ctrl_and_loop(proj, new_ctrl);
-
- fixer->fix_mem(new_ctrl, new_ctrl, mem, mem, proj, uses);
- }
-}
-
-void ShenandoahWriteBarrierNode::pin_and_expand_helper(PhaseIdealLoop* phase) {
- Node* val = in(ValueIn);
- CallStaticJavaNode* unc = pin_and_expand_null_check(phase->igvn());
- Node* rep = this;
- Node* ctrl = phase->get_ctrl(this);
- if (unc != NULL && val->in(0) == ctrl) {
- Node* unc_ctrl = val->in(0);
- IfNode* other_iff = unc_ctrl->unique_ctrl_out()->as_If();
- ProjNode* other_unc_ctrl = other_iff->proj_out(1);
- Node* cast = NULL;
- for (DUIterator_Fast imax, i = other_unc_ctrl->fast_outs(imax); i < imax && cast == NULL; i++) {
- Node* u = other_unc_ctrl->fast_out(i);
- if (u->Opcode() == Op_CastPP && u->in(1) == this) {
- cast = u;
- }
- }
- assert(other_unc_ctrl->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) == unc, "broken");
- rep = cast;
- }
-
- // Replace all uses of barrier's input that are dominated by ctrl
- // with the value returned by the barrier: no need to keep both
- // live.
- for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
- Node* u = val->fast_out(i);
- if (u != this) {
- if (u->is_Phi()) {
- int nb = 0;
- for (uint j = 1; j < u->req(); j++) {
- if (u->in(j) == val) {
- Node* c = u->in(0)->in(j);
- if (phase->is_dominator(ctrl, c)) {
- phase->igvn().replace_input_of(u, j, rep);
- nb++;
- }
- }
- }
- if (nb > 0) {
- imax -= nb;
- --i;
- }
- } else {
- Node* c = phase->ctrl_or_self(u);
- if (is_dominator(ctrl, c, this, u, phase)) {
- phase->igvn().rehash_node_delayed(u);
- int nb = u->replace_edge(val, rep);
- assert(nb > 0, "no update?");
- --i, imax -= nb;
- }
- }
- }
- }
-}
-
-Node* ShenandoahWriteBarrierNode::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
+Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
Node* mem = NULL;
Node* c = ctrl;
do {
@@ -2355,7 +914,7 @@ Node* ShenandoahWriteBarrierNode::find_bottom_mem(Node* ctrl, PhaseIdealLoop* ph
return mem;
}
-void ShenandoahWriteBarrierNode::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
+void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* u = n->fast_out(i);
if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
@@ -2375,7 +934,7 @@ static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNod
inner->clear_strip_mined();
}
-void ShenandoahWriteBarrierNode::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
+void ShenandoahBarrierC2Support::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
PhaseIdealLoop* phase) {
IdealLoopTree* loop = phase->get_loop(ctrl);
Node* thread = new ThreadLocalNode();
@@ -2407,7 +966,7 @@ void ShenandoahWriteBarrierNode::test_heap_stable(Node*& ctrl, Node* raw_mem, No
assert(is_heap_stable_test(heap_stable_iff), "Should match the shape");
}
-void ShenandoahWriteBarrierNode::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
+void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
const Type* val_t = phase->igvn().type(val);
if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
IdealLoopTree* loop = phase->get_loop(ctrl);
@@ -2424,7 +983,7 @@ void ShenandoahWriteBarrierNode::test_null(Node*& ctrl, Node* val, Node*& null_c
}
}
-Node* ShenandoahWriteBarrierNode::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
+Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
IdealLoopTree *loop = phase->get_loop(c);
Node* iff = unc_ctrl->in(0);
assert(iff->is_If(), "broken");
@@ -2445,7 +1004,7 @@ Node* ShenandoahWriteBarrierNode::clone_null_check(Node*& c, Node* val, Node* un
return val;
}
-void ShenandoahWriteBarrierNode::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
+void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
Unique_Node_List& uses, PhaseIdealLoop* phase) {
IfNode* iff = unc_ctrl->in(0)->as_If();
Node* proj = iff->proj_out(0);
@@ -2494,7 +1053,7 @@ void ShenandoahWriteBarrierNode::fix_null_check(Node* unc, Node* unc_ctrl, Node*
assert(nb == 1, "only use expected");
}
-void ShenandoahWriteBarrierNode::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
+void ShenandoahBarrierC2Support::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
IdealLoopTree *loop = phase->get_loop(ctrl);
Node* raw_rbtrue = new CastP2XNode(ctrl, val);
phase->register_new_node(raw_rbtrue, ctrl);
@@ -2523,23 +1082,18 @@ void ShenandoahWriteBarrierNode::in_cset_fast_test(Node*& ctrl, Node*& not_cset_
phase->register_control(ctrl, loop, in_cset_fast_test_iff);
}
-void ShenandoahWriteBarrierNode::call_wb_stub(Node*& ctrl, Node*& val, Node*& result_mem,
- Node* raw_mem, Node* wb_mem,
- int alias,
- PhaseIdealLoop* phase) {
+void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node*& result_mem, Node* raw_mem, PhaseIdealLoop* phase) {
IdealLoopTree*loop = phase->get_loop(ctrl);
const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr()->cast_to_nonconst();
// The slow path stub consumes and produces raw memory in addition
// to the existing memory edges
Node* base = find_bottom_mem(ctrl, phase);
-
MergeMemNode* mm = MergeMemNode::make(base);
- mm->set_memory_at(alias, wb_mem);
mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
phase->register_new_node(mm, ctrl);
- Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_write_barrier_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT), "shenandoah_write_barrier", TypeRawPtr::BOTTOM);
+ Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_write_barrier_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT), "shenandoah_write_barrier", TypeRawPtr::BOTTOM);
call->init_req(TypeFunc::Control, ctrl);
call->init_req(TypeFunc::I_O, phase->C->top());
call->init_req(TypeFunc::Memory, mm);
@@ -2557,7 +1111,7 @@ void ShenandoahWriteBarrierNode::call_wb_stub(Node*& ctrl, Node*& val, Node*& re
phase->register_new_node(val, ctrl);
}
-void ShenandoahWriteBarrierNode::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
+void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
Node* ctrl = phase->get_ctrl(barrier);
Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
@@ -2610,26 +1164,32 @@ void ShenandoahWriteBarrierNode::fix_ctrl(Node* barrier, Node* region, const Mem
}
}
-void ShenandoahWriteBarrierNode::pin_and_expand(PhaseIdealLoop* phase) {
- Node_List enqueue_barriers;
- if (ShenandoahStoreValEnqueueBarrier) {
- Unique_Node_List wq;
- wq.push(phase->C->root());
- for (uint i = 0; i < wq.size(); i++) {
- Node* n = wq.at(i);
- if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
- enqueue_barriers.push(n);
- }
- for (uint i = 0; i < n->req(); i++) {
- Node* in = n->in(i);
- if (in != NULL) {
- wq.push(in);
- }
- }
+static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
+ Node* region = NULL;
+ while (c != ctrl) {
+ if (c->is_Region()) {
+ region = c;
+ }
+ c = phase->idom(c);
+ }
+ assert(region != NULL, "");
+ Node* phi = new PhiNode(region, n->bottom_type());
+ for (uint j = 1; j < region->req(); j++) {
+ Node* in = region->in(j);
+ if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
+ phi->init_req(j, n);
+ } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
+ phi->init_req(j, n_clone);
+ } else {
+ phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
}
}
+ phase->register_new_node(phi, region);
+ return phi;
+}
- const bool trace = false;
+void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
+ ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
// Collect raw memory state at CFG points in the entire graph and
// record it in memory_nodes. Optimize the raw memory graph in the
@@ -2637,34 +1197,9 @@ void ShenandoahWriteBarrierNode::pin_and_expand(PhaseIdealLoop* phase) {
// simpler.
   GrowableArray<MemoryGraphFixer*> memory_graph_fixers;
- // Let's try to common write barriers again
- optimize_before_expansion(phase, memory_graph_fixers, true);
-
Unique_Node_List uses;
- for (int i = 0; i < ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i++) {
- ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i);
- Node* ctrl = phase->get_ctrl(wb);
-
- Node* val = wb->in(ValueIn);
- if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
- assert(is_dominator(phase->get_ctrl(val), ctrl->in(0)->in(0), val, ctrl->in(0), phase), "can't move");
- phase->set_ctrl(wb, ctrl->in(0)->in(0));
- } else if (ctrl->is_CallRuntime()) {
- assert(is_dominator(phase->get_ctrl(val), ctrl->in(0), val, ctrl, phase), "can't move");
- phase->set_ctrl(wb, ctrl->in(0));
- }
-
- assert(wb->Opcode() == Op_ShenandoahWriteBarrier, "only for write barriers");
- // Look for a null check that dominates this barrier and move the
- // barrier right after the null check to enable implicit null
- // checks
- wb->pin_and_expand_move_barrier(phase, memory_graph_fixers, uses);
-
- wb->pin_and_expand_helper(phase);
- }
-
- for (uint i = 0; i < enqueue_barriers.size(); i++) {
- Node* barrier = enqueue_barriers.at(i);
+ for (int i = 0; i < state->enqueue_barriers_count(); i++) {
+ Node* barrier = state->enqueue_barrier(i);
Node* ctrl = phase->get_ctrl(barrier);
IdealLoopTree* loop = phase->get_loop(ctrl);
if (loop->_head->is_OuterStripMinedLoop()) {
@@ -2676,24 +1211,386 @@ void ShenandoahWriteBarrierNode::pin_and_expand(PhaseIdealLoop* phase) {
}
}
- for (int i = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i > 0; i--) {
- int cnt = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count();
- ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i-1);
- Node* ctrl = phase->get_ctrl(wb);
- IdealLoopTree* loop = phase->get_loop(ctrl);
- if (loop->_head->is_OuterStripMinedLoop()) {
- // Expanding a barrier here will break loop strip mining
- // verification. Transform the loop so the loop nest doesn't
- // appear as strip mined.
- OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
- hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
+ Node_Stack stack(0);
+ Node_List clones;
+ for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
+ ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
+ if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
+ continue;
+ }
+
+ Node* ctrl = phase->get_ctrl(lrb);
+ Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
+
+ CallStaticJavaNode* unc = NULL;
+ Node* unc_ctrl = NULL;
+ Node* uncasted_val = val;
+
+ for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
+ Node* u = lrb->fast_out(i);
+ if (u->Opcode() == Op_CastPP &&
+ u->in(0) != NULL &&
+ phase->is_dominator(u->in(0), ctrl)) {
+ const Type* u_t = phase->igvn().type(u);
+
+ if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
+ u->in(0)->Opcode() == Op_IfTrue &&
+ u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
+ u->in(0)->in(0)->is_If() &&
+ u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
+ u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
+ u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
+ u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
+ u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
+ IdealLoopTree* loop = phase->get_loop(ctrl);
+ IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
+
+ if (!unc_loop->is_member(loop)) {
+ continue;
+ }
+
+ Node* branch = no_branches(ctrl, u->in(0), false, phase);
+ assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
+ if (branch == NodeSentinel) {
+ continue;
+ }
+
+ phase->igvn().replace_input_of(u, 1, val);
+ phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u);
+ phase->set_ctrl(u, u->in(0));
+ phase->set_ctrl(lrb, u->in(0));
+ unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
+ unc_ctrl = u->in(0);
+ val = u;
+
+ for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
+ Node* u = val->fast_out(j);
+ if (u == lrb) continue;
+ phase->igvn().rehash_node_delayed(u);
+ int nb = u->replace_edge(val, lrb);
+ --j; jmax -= nb;
+ }
+
+ RegionNode* r = new RegionNode(3);
+ IfNode* iff = unc_ctrl->in(0)->as_If();
+
+ Node* ctrl_use = unc_ctrl->unique_ctrl_out();
+ Node* unc_ctrl_clone = unc_ctrl->clone();
+ phase->register_control(unc_ctrl_clone, loop, iff);
+ Node* c = unc_ctrl_clone;
+ Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
+ r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
+
+ phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
+ phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
+ phase->lazy_replace(c, unc_ctrl);
+ c = NULL;;
+ phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
+ phase->set_ctrl(val, unc_ctrl_clone);
+
+ IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
+ fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
+ Node* iff_proj = iff->proj_out(0);
+ r->init_req(2, iff_proj);
+ phase->register_control(r, phase->ltree_root(), iff);
+
+ Node* new_bol = new_iff->in(1)->clone();
+ Node* new_cmp = new_bol->in(1)->clone();
+ assert(new_cmp->Opcode() == Op_CmpP, "broken");
+ assert(new_cmp->in(1) == val->in(1), "broken");
+ new_bol->set_req(1, new_cmp);
+ new_cmp->set_req(1, lrb);
+ phase->register_new_node(new_bol, new_iff->in(0));
+ phase->register_new_node(new_cmp, new_iff->in(0));
+ phase->igvn().replace_input_of(new_iff, 1, new_bol);
+ phase->igvn().replace_input_of(new_cast, 1, lrb);
+
+ for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
+ Node* u = lrb->fast_out(i);
+ if (u == new_cast || u == new_cmp) {
+ continue;
+ }
+ phase->igvn().rehash_node_delayed(u);
+ int nb = u->replace_edge(lrb, new_cast);
+ assert(nb > 0, "no update?");
+ --i; imax -= nb;
+ }
+
+ for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
+ Node* u = val->fast_out(i);
+ if (u == lrb) {
+ continue;
+ }
+ phase->igvn().rehash_node_delayed(u);
+ int nb = u->replace_edge(val, new_cast);
+ assert(nb > 0, "no update?");
+ --i; imax -= nb;
+ }
+
+ ctrl = unc_ctrl_clone;
+ phase->set_ctrl_and_loop(lrb, ctrl);
+ break;
+ }
+ }
+ }
+ if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
+ CallNode* call = ctrl->in(0)->as_CallJava();
+ CallProjections projs;
+ call->extract_projections(&projs, false, false);
+
+ Node* lrb_clone = lrb->clone();
+ phase->register_new_node(lrb_clone, projs.catchall_catchproj);
+ phase->set_ctrl(lrb, projs.fallthrough_catchproj);
+
+ stack.push(lrb, 0);
+ clones.push(lrb_clone);
+
+ do {
+ assert(stack.size() == clones.size(), "");
+ Node* n = stack.node();
+#ifdef ASSERT
+ if (n->is_Load()) {
+ Node* mem = n->in(MemNode::Memory);
+ for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
+ Node* u = mem->fast_out(j);
+ assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
+ }
+ }
+#endif
+ uint idx = stack.index();
+ Node* n_clone = clones.at(clones.size()-1);
+ if (idx < n->outcnt()) {
+ Node* u = n->raw_out(idx);
+ Node* c = phase->ctrl_or_self(u);
+ if (c == ctrl) {
+ stack.set_index(idx+1);
+ assert(!u->is_CFG(), "");
+ stack.push(u, 0);
+ Node* u_clone = u->clone();
+ int nb = u_clone->replace_edge(n, n_clone);
+ assert(nb > 0, "should have replaced some uses");
+ phase->register_new_node(u_clone, projs.catchall_catchproj);
+ clones.push(u_clone);
+ phase->set_ctrl(u, projs.fallthrough_catchproj);
+ } else {
+ bool replaced = false;
+ if (u->is_Phi()) {
+ for (uint k = 1; k < u->req(); k++) {
+ if (u->in(k) == n) {
+ if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
+ phase->igvn().replace_input_of(u, k, n_clone);
+ replaced = true;
+ } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
+ phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
+ replaced = true;
+ }
+ }
+ }
+ } else {
+ if (phase->is_dominator(projs.catchall_catchproj, c)) {
+ phase->igvn().rehash_node_delayed(u);
+ int nb = u->replace_edge(n, n_clone);
+ assert(nb > 0, "should have replaced some uses");
+ replaced = true;
+ } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
+ phase->igvn().rehash_node_delayed(u);
+ int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
+ assert(nb > 0, "should have replaced some uses");
+ replaced = true;
+ }
+ }
+ if (!replaced) {
+ stack.set_index(idx+1);
+ }
+ }
+ } else {
+ // assert(n_clone->outcnt() > 0, "");
+ // assert(n->outcnt() > 0, "");
+ stack.pop();
+ clones.pop();
+ }
+ } while (stack.size() > 0);
+ assert(stack.size() == 0 && clones.size() == 0, "");
+ ctrl = projs.fallthrough_catchproj;
}
}
+ // Expand load-reference-barriers
MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
Unique_Node_List uses_to_ignore;
- for (uint i = 0; i < enqueue_barriers.size(); i++) {
- Node* barrier = enqueue_barriers.at(i);
+ for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
+ ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
+ if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
+ phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
+ continue;
+ }
+ uint last = phase->C->unique();
+ Node* ctrl = phase->get_ctrl(lrb);
+ Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
+
+ Node* orig_ctrl = ctrl;
+
+ Node* raw_mem = fixer.find_mem(ctrl, lrb);
+ Node* init_raw_mem = raw_mem;
+ Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
+ // int alias = phase->C->get_alias_index(lrb->adr_type());
+
+ IdealLoopTree *loop = phase->get_loop(ctrl);
+ CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn());
+ Node* unc_ctrl = NULL;
+ if (unc != NULL) {
+ if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) {
+ unc = NULL;
+ } else {
+ unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control);
+ }
+ }
+
+ Node* uncasted_val = val;
+ if (unc != NULL) {
+ uncasted_val = val->in(1);
+ }
+
+ Node* heap_stable_ctrl = NULL;
+ Node* null_ctrl = NULL;
+
+ assert(val->bottom_type()->make_oopptr(), "need oop");
+ assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
+
+ enum { _heap_stable = 1, _not_cset, _not_equal, _evac_path, _null_path, PATH_LIMIT };
+ Node* region = new RegionNode(PATH_LIMIT);
+ Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
+ Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
+
+ // Stable path.
+ test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
+ IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
+
+ // Heap stable case
+ region->init_req(_heap_stable, heap_stable_ctrl);
+ val_phi->init_req(_heap_stable, uncasted_val);
+ raw_mem_phi->init_req(_heap_stable, raw_mem);
+
+ Node* reg2_ctrl = NULL;
+ // Null case
+ test_null(ctrl, val, null_ctrl, phase);
+ if (null_ctrl != NULL) {
+ reg2_ctrl = null_ctrl->in(0);
+ region->init_req(_null_path, null_ctrl);
+ val_phi->init_req(_null_path, uncasted_val);
+ raw_mem_phi->init_req(_null_path, raw_mem);
+ } else {
+ region->del_req(_null_path);
+ val_phi->del_req(_null_path);
+ raw_mem_phi->del_req(_null_path);
+ }
+
+ // Test for in-cset.
+ // Wires !in_cset(obj) to slot 2 of region and phis
+ Node* not_cset_ctrl = NULL;
+ in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
+ if (not_cset_ctrl != NULL) {
+ if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
+ region->init_req(_not_cset, not_cset_ctrl);
+ val_phi->init_req(_not_cset, uncasted_val);
+ raw_mem_phi->init_req(_not_cset, raw_mem);
+ }
+
+ // Resolve object when orig-value is in cset.
+ // Make the unconditional resolve for fwdptr.
+ Node* new_val = uncasted_val;
+ if (unc_ctrl != NULL) {
+ // Clone the null check in this branch to allow implicit null check
+ new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
+ fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
+
+ IfNode* iff = unc_ctrl->in(0)->as_If();
+ phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
+ }
+ Node* addr = new AddPNode(new_val, uncasted_val, phase->igvn().MakeConX(ShenandoahBrooksPointer::byte_offset()));
+ phase->register_new_node(addr, ctrl);
+ assert(val->bottom_type()->isa_oopptr(), "what else?");
+ const TypePtr* obj_type = val->bottom_type()->is_oopptr();
+ const TypePtr* adr_type = TypeRawPtr::BOTTOM;
+ Node* fwd = new LoadPNode(ctrl, raw_mem, addr, adr_type, obj_type, MemNode::unordered);
+ phase->register_new_node(fwd, ctrl);
+
+ // Only branch to LRB stub if object is not forwarded; otherwise reply with fwd ptr
+ Node* cmp = new CmpPNode(fwd, new_val);
+ phase->register_new_node(cmp, ctrl);
+ Node* bol = new BoolNode(cmp, BoolTest::eq);
+ phase->register_new_node(bol, ctrl);
+
+ IfNode* iff = new IfNode(ctrl, bol, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
+ if (reg2_ctrl == NULL) reg2_ctrl = iff;
+ phase->register_control(iff, loop, ctrl);
+ Node* if_not_eq = new IfFalseNode(iff);
+ phase->register_control(if_not_eq, loop, iff);
+ Node* if_eq = new IfTrueNode(iff);
+ phase->register_control(if_eq, loop, iff);
+
+ // Wire up not-equal path in slot 3.
+ region->init_req(_not_equal, if_not_eq);
+ val_phi->init_req(_not_equal, fwd);
+ raw_mem_phi->init_req(_not_equal, raw_mem);
+
+ // Call the LRB stub and wire up that path in slot 4
+ Node* result_mem = NULL;
+ ctrl = if_eq;
+ call_lrb_stub(ctrl, fwd, result_mem, raw_mem, phase);
+ region->init_req(_evac_path, ctrl);
+ val_phi->init_req(_evac_path, fwd);
+ raw_mem_phi->init_req(_evac_path, result_mem);
+
+ phase->register_control(region, loop, heap_stable_iff);
+ Node* out_val = val_phi;
+ phase->register_new_node(val_phi, region);
+ phase->register_new_node(raw_mem_phi, region);
+
+ fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
+
+ ctrl = orig_ctrl;
+
+ if (unc != NULL) {
+ for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
+ Node* u = val->fast_out(i);
+ Node* c = phase->ctrl_or_self(u);
+ if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
+ phase->igvn().rehash_node_delayed(u);
+ int nb = u->replace_edge(val, out_val);
+ --i, imax -= nb;
+ }
+ }
+ if (val->outcnt() == 0) {
+ phase->igvn()._worklist.push(val);
+ }
+ }
+ phase->igvn().replace_node(lrb, out_val);
+
+ follow_barrier_uses(out_val, ctrl, uses, phase);
+
+ for(uint next = 0; next < uses.size(); next++ ) {
+ Node *n = uses.at(next);
+ assert(phase->get_ctrl(n) == ctrl, "bad control");
+ assert(n != init_raw_mem, "should leave input raw mem above the barrier");
+ phase->set_ctrl(n, region);
+ follow_barrier_uses(n, ctrl, uses, phase);
+ }
+
+ // The slow path call produces memory: hook the raw memory phi
+ // from the expanded load reference barrier with the rest of the graph
+ // which may require adding memory phis at every post dominated
+ // region and at enclosing loop heads. Use the memory state
+ // collected in memory_nodes to fix the memory graph. Update that
+ // memory state as we go.
+ fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
+ }
+ // Done expanding load-reference-barriers.
+ assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
+
+ for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) {
+ Node* barrier = state->enqueue_barrier(i);
Node* pre_val = barrier->in(1);
if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
@@ -2840,212 +1737,11 @@ void ShenandoahWriteBarrierNode::pin_and_expand(PhaseIdealLoop* phase) {
phase->igvn().replace_node(barrier, pre_val);
}
+ assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
- for (int i = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i > 0; i--) {
- int cnt = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count();
- ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i-1);
-
- uint last = phase->C->unique();
- Node* ctrl = phase->get_ctrl(wb);
- Node* orig_ctrl = ctrl;
-
- Node* raw_mem = fixer.find_mem(ctrl, wb);
- Node* init_raw_mem = raw_mem;
- Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
- int alias = phase->C->get_alias_index(wb->adr_type());
- Node* wb_mem = wb->in(Memory);
- Node* init_wb_mem = wb_mem;
-
- Node* val = wb->in(ValueIn);
- Node* wbproj = wb->find_out_with(Op_ShenandoahWBMemProj);
- IdealLoopTree *loop = phase->get_loop(ctrl);
-
- assert(val->Opcode() != Op_ShenandoahWriteBarrier, "No chain of write barriers");
-
- CallStaticJavaNode* unc = wb->pin_and_expand_null_check(phase->igvn());
- Node* unc_ctrl = NULL;
- if (unc != NULL) {
- if (val->in(0) != ctrl) {
- unc = NULL;
- } else {
- unc_ctrl = val->in(0);
- }
- }
-
- Node* uncasted_val = val;
- if (unc != NULL) {
- uncasted_val = val->in(1);
- }
-
- Node* heap_stable_ctrl = NULL;
- Node* null_ctrl = NULL;
-
- assert(val->bottom_type()->make_oopptr(), "need oop");
- assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
-
- enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
- Node* region = new RegionNode(PATH_LIMIT);
- Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
- Node* mem_phi = PhiNode::make(region, wb_mem, Type::MEMORY, phase->C->alias_type(wb->adr_type())->adr_type());
- Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
-
- enum { _not_cset = 1, _not_equal, _evac_path, _null_path, PATH_LIMIT2 };
- Node* region2 = new RegionNode(PATH_LIMIT2);
- Node* val_phi2 = new PhiNode(region2, uncasted_val->bottom_type()->is_oopptr());
- Node* mem_phi2 = PhiNode::make(region2, wb_mem, Type::MEMORY, phase->C->alias_type(wb->adr_type())->adr_type());
- Node* raw_mem_phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
-
- // Stable path.
- test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
- IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
-
- // Heap stable case
- region->init_req(_heap_stable, heap_stable_ctrl);
- val_phi->init_req(_heap_stable, uncasted_val);
- mem_phi->init_req(_heap_stable, wb_mem);
- raw_mem_phi->init_req(_heap_stable, raw_mem);
-
- Node* reg2_ctrl = NULL;
- // Null case
- test_null(ctrl, val, null_ctrl, phase);
- if (null_ctrl != NULL) {
- reg2_ctrl = null_ctrl->in(0);
- region2->init_req(_null_path, null_ctrl);
- val_phi2->init_req(_null_path, uncasted_val);
- mem_phi2->init_req(_null_path, wb_mem);
- raw_mem_phi2->init_req(_null_path, raw_mem);
- } else {
- region2->del_req(_null_path);
- val_phi2->del_req(_null_path);
- mem_phi2->del_req(_null_path);
- raw_mem_phi2->del_req(_null_path);
- }
-
- // Test for in-cset.
- // Wires !in_cset(obj) to slot 2 of region and phis
- Node* not_cset_ctrl = NULL;
- in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
- if (not_cset_ctrl != NULL) {
- if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
- region2->init_req(_not_cset, not_cset_ctrl);
- val_phi2->init_req(_not_cset, uncasted_val);
- mem_phi2->init_req(_not_cset, wb_mem);
- raw_mem_phi2->init_req(_not_cset, raw_mem);
- }
-
- // Resolve object when orig-value is in cset.
- // Make the unconditional resolve for fwdptr, not the read barrier.
- Node* new_val = uncasted_val;
- if (unc_ctrl != NULL) {
- // Clone the null check in this branch to allow implicit null check
- new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
- fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
-
- IfNode* iff = unc_ctrl->in(0)->as_If();
- phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
- }
- Node* addr = new AddPNode(new_val, uncasted_val, phase->igvn().MakeConX(ShenandoahBrooksPointer::byte_offset()));
- phase->register_new_node(addr, ctrl);
- assert(val->bottom_type()->isa_oopptr(), "what else?");
- const TypePtr* obj_type = val->bottom_type()->is_oopptr();
- const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
- Node* fwd = new LoadPNode(ctrl, wb_mem, addr, adr_type, obj_type, MemNode::unordered);
- phase->register_new_node(fwd, ctrl);
-
- // Only branch to WB stub if object is not forwarded; otherwise reply with fwd ptr
- Node* cmp = new CmpPNode(fwd, new_val);
- phase->register_new_node(cmp, ctrl);
- Node* bol = new BoolNode(cmp, BoolTest::eq);
- phase->register_new_node(bol, ctrl);
-
- IfNode* iff = new IfNode(ctrl, bol, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
- if (reg2_ctrl == NULL) reg2_ctrl = iff;
- phase->register_control(iff, loop, ctrl);
- Node* if_not_eq = new IfFalseNode(iff);
- phase->register_control(if_not_eq, loop, iff);
- Node* if_eq = new IfTrueNode(iff);
- phase->register_control(if_eq, loop, iff);
-
- // Wire up not-equal-path in slots 3.
- region2->init_req(_not_equal, if_not_eq);
- val_phi2->init_req(_not_equal, fwd);
- mem_phi2->init_req(_not_equal, wb_mem);
- raw_mem_phi2->init_req(_not_equal, raw_mem);
-
- // Call wb-stub and wire up that path in slots 4
- Node* result_mem = NULL;
- ctrl = if_eq;
- call_wb_stub(ctrl, new_val, result_mem,
- raw_mem, wb_mem,
- alias, phase);
- region2->init_req(_evac_path, ctrl);
- val_phi2->init_req(_evac_path, new_val);
- mem_phi2->init_req(_evac_path, result_mem);
- raw_mem_phi2->init_req(_evac_path, result_mem);
-
- phase->register_control(region2, loop, reg2_ctrl);
- phase->register_new_node(val_phi2, region2);
- phase->register_new_node(mem_phi2, region2);
- phase->register_new_node(raw_mem_phi2, region2);
-
- region->init_req(_heap_unstable, region2);
- val_phi->init_req(_heap_unstable, val_phi2);
- mem_phi->init_req(_heap_unstable, mem_phi2);
- raw_mem_phi->init_req(_heap_unstable, raw_mem_phi2);
-
- phase->register_control(region, loop, heap_stable_iff);
- Node* out_val = val_phi;
- phase->register_new_node(val_phi, region);
- phase->register_new_node(mem_phi, region);
- phase->register_new_node(raw_mem_phi, region);
-
- fix_ctrl(wb, region, fixer, uses, uses_to_ignore, last, phase);
-
- ctrl = orig_ctrl;
-
- phase->igvn().replace_input_of(wbproj, ShenandoahWBMemProjNode::WriteBarrier, phase->C->top());
- phase->igvn().replace_node(wbproj, mem_phi);
- if (unc != NULL) {
- for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
- Node* u = val->fast_out(i);
- Node* c = phase->ctrl_or_self(u);
- if (u != wb && (c != ctrl || is_dominator_same_ctrl(c, wb, u, phase))) {
- phase->igvn().rehash_node_delayed(u);
- int nb = u->replace_edge(val, out_val);
- --i, imax -= nb;
- }
- }
- if (val->outcnt() == 0) {
- phase->igvn()._worklist.push(val);
- }
- }
- phase->igvn().replace_node(wb, out_val);
-
- follow_barrier_uses(mem_phi, ctrl, uses, phase);
- follow_barrier_uses(out_val, ctrl, uses, phase);
-
- for(uint next = 0; next < uses.size(); next++ ) {
- Node *n = uses.at(next);
- assert(phase->get_ctrl(n) == ctrl, "bad control");
- assert(n != init_raw_mem, "should leave input raw mem above the barrier");
- phase->set_ctrl(n, region);
- follow_barrier_uses(n, ctrl, uses, phase);
- }
-
- // The slow path call produces memory: hook the raw memory phi
- // from the expanded write barrier with the rest of the graph
- // which may require adding memory phis at every post dominated
- // region and at enclosing loop heads. Use the memory state
- // collected in memory_nodes to fix the memory graph. Update that
- // memory state as we go.
- fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
- assert(ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count() == cnt - 1, "not replaced");
- }
-
- assert(ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count() == 0, "all write barrier nodes should have been replaced");
}
-void ShenandoahWriteBarrierNode::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
+void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
IdealLoopTree *loop = phase->get_loop(iff);
Node* loop_head = loop->_head;
Node* entry_c = loop_head->in(LoopNode::EntryControl);
@@ -3078,7 +1774,7 @@ void ShenandoahWriteBarrierNode::move_heap_stable_test_out_of_loop(IfNode* iff,
}
}
-bool ShenandoahWriteBarrierNode::identical_backtoback_ifs(Node *n, PhaseIdealLoop* phase) {
+bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
if (!n->is_If() || n->is_CountedLoopEnd()) {
return false;
}
@@ -3113,7 +1809,7 @@ bool ShenandoahWriteBarrierNode::identical_backtoback_ifs(Node *n, PhaseIdealLoo
return true;
}
-void ShenandoahWriteBarrierNode::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
+void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
assert(is_heap_stable_test(n), "no other tests");
if (identical_backtoback_ifs(n, phase)) {
Node* n_ctrl = n->in(0);
@@ -3149,7 +1845,7 @@ void ShenandoahWriteBarrierNode::merge_back_to_back_tests(Node* n, PhaseIdealLoo
}
}
-IfNode* ShenandoahWriteBarrierNode::find_unswitching_candidate(const IdealLoopTree *loop, PhaseIdealLoop* phase) {
+IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
// Find first invariant test that doesn't exit the loop
LoopNode *head = loop->_head->as_Loop();
IfNode* unswitch_iff = NULL;
@@ -3194,10 +1890,9 @@ IfNode* ShenandoahWriteBarrierNode::find_unswitching_candidate(const IdealLoopTr
}
-void ShenandoahWriteBarrierNode::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
+void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
Node_List heap_stable_tests;
Node_List gc_state_loads;
-
stack.push(phase->C->start(), 0);
do {
Node* n = stack.node();
@@ -3274,7 +1969,7 @@ void ShenandoahWriteBarrierNode::optimize_after_expansion(VectorSet &visited, No
}
#ifdef ASSERT
-void ShenandoahBarrierNode::verify_raw_mem(RootNode* root) {
+void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
const bool trace = false;
ResourceMark rm;
Unique_Node_List nodes;
@@ -3372,6 +2067,10 @@ void ShenandoahBarrierNode::verify_raw_mem(RootNode* root) {
}
#endif
+ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
+ ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
+}
+
const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
if (in(1) == NULL || in(1)->is_top()) {
return Type::TOP;
@@ -3531,6 +2230,26 @@ void MemoryGraphFixer::collect_memory_nodes() {
Node* call = in->in(0)->in(0);
assert(call->is_Call(), "");
mem = call->in(TypeFunc::Memory);
+ } else if (in->Opcode() == Op_NeverBranch) {
+ ResourceMark rm;
+ Unique_Node_List wq;
+ wq.push(in);
+ wq.push(in->as_Multi()->proj_out(0));
+ for (uint j = 1; j < wq.size(); j++) {
+ Node* c = wq.at(j);
+ assert(!c->is_Root(), "shouldn't leave loop");
+ if (c->is_SafePoint()) {
+ assert(mem == NULL, "only one safepoint");
+ mem = c->in(TypeFunc::Memory);
+ }
+ for (DUIterator_Fast kmax, k = c->fast_outs(kmax); k < kmax; k++) {
+ Node* u = c->fast_out(k);
+ if (u->is_CFG()) {
+ wq.push(u);
+ }
+ }
+ }
+ assert(mem != NULL, "should have found safepoint");
}
}
} else {
@@ -3569,12 +2288,6 @@ void MemoryGraphFixer::collect_memory_nodes() {
assert(_alias == Compile::AliasIdxRaw, "");
stack.push(mem, mem->req());
mem = mem->in(MemNode::Memory);
- } else if (mem->Opcode() == Op_ShenandoahWriteBarrier) {
- assert(_alias != Compile::AliasIdxRaw, "");
- mem = mem->in(ShenandoahBarrierNode::Memory);
- } else if (mem->Opcode() == Op_ShenandoahWBMemProj) {
- stack.push(mem, mem->req());
- mem = mem->in(ShenandoahWBMemProjNode::WriteBarrier);
} else {
#ifdef ASSERT
mem->dump();
@@ -3628,7 +2341,7 @@ void MemoryGraphFixer::collect_memory_nodes() {
while (progress) {
progress = false;
iteration++;
- assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop(), "");
+ assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
IdealLoopTree* last_updated_ilt = NULL;
for (int i = rpo_list.size() - 1; i >= 0; i--) {
@@ -3796,7 +2509,7 @@ Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
mem = _memory_nodes[c->_idx];
}
if (n != NULL && mem_is_valid(mem, c)) {
- while (!ShenandoahWriteBarrierNode::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
+ while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
mem = next_mem(mem, _alias);
}
if (mem->is_MergeMem()) {
@@ -3842,12 +2555,6 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
} else if (old->Opcode() == Op_SCMemProj) {
assert(_alias == Compile::AliasIdxRaw, "");
old = old->in(0);
- } else if (old->Opcode() == Op_ShenandoahWBMemProj) {
- assert(_alias != Compile::AliasIdxRaw, "");
- old = old->in(ShenandoahWBMemProjNode::WriteBarrier);
- } else if (old->Opcode() == Op_ShenandoahWriteBarrier) {
- assert(_alias != Compile::AliasIdxRaw, "");
- old = old->in(ShenandoahBarrierNode::Memory);
} else {
ShouldNotReachHere();
}
@@ -3857,7 +2564,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
_memory_nodes.map(ctrl->_idx, mem);
_memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
}
- uint input = prev->Opcode() == Op_ShenandoahWriteBarrier ? (uint)ShenandoahBarrierNode::Memory : (uint)MemNode::Memory;
+ uint input = (uint)MemNode::Memory;
_phase->igvn().replace_input_of(prev, input, new_mem);
} else {
uses.clear();
@@ -3925,19 +2632,14 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
} else {
DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
for (;;) {
- assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj() || m->Opcode() == Op_ShenandoahWriteBarrier || m->Opcode() == Op_ShenandoahWBMemProj, "");
+ assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
Node* next = NULL;
if (m->is_Proj()) {
next = m->in(0);
- } else if (m->Opcode() == Op_ShenandoahWBMemProj) {
- next = m->in(ShenandoahWBMemProjNode::WriteBarrier);
- } else if (m->is_Mem() || m->is_LoadStore()) {
+ } else {
+ assert(m->is_Mem() || m->is_LoadStore(), "");
assert(_alias == Compile::AliasIdxRaw, "");
next = m->in(MemNode::Memory);
- } else {
- assert(_alias != Compile::AliasIdxRaw, "");
- assert (m->Opcode() == Op_ShenandoahWriteBarrier, "");
- next = m->in(ShenandoahBarrierNode::Memory);
}
if (_phase->get_ctrl(next) != u) {
break;
@@ -3954,8 +2656,8 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
}
DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
- assert(m->is_Mem() || m->is_LoadStore() || m->Opcode() == Op_ShenandoahWriteBarrier, "");
- uint input = (m->is_Mem() || m->is_LoadStore()) ? (uint)MemNode::Memory : (uint)ShenandoahBarrierNode::Memory;
+ assert(m->is_Mem() || m->is_LoadStore(), "");
+ uint input = (uint)MemNode::Memory;
_phase->igvn().replace_input_of(m, input, phi);
push = false;
}
@@ -4181,20 +2883,7 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
Node* u = mem->out(i);
if (u != replacement && u->_idx < last) {
- if (u->is_ShenandoahBarrier() && _alias != Compile::AliasIdxRaw) {
- if (_phase->C->get_alias_index(u->adr_type()) == _alias && ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
- _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
- assert(u->find_edge(mem) == -1, "only one edge");
- --i;
- }
- } else if (u->is_Mem()) {
- if (_phase->C->get_alias_index(u->adr_type()) == _alias && ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
- assert(_alias == Compile::AliasIdxRaw , "only raw memory can lead to a memory operation");
- _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
- assert(u->find_edge(mem) == -1, "only one edge");
- --i;
- }
- } else if (u->is_MergeMem()) {
+ if (u->is_MergeMem()) {
MergeMemNode* u_mm = u->as_MergeMem();
if (u_mm->memory_at(_alias) == mem) {
MergeMemNode* newmm = NULL;
@@ -4222,7 +2911,7 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
}
}
} else {
- if (rep_ctrl != uu && ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
+ if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
if (newmm == NULL) {
newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
}
@@ -4263,10 +2952,11 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
u->Opcode() == Op_Rethrow ||
u->Opcode() == Op_Return ||
u->Opcode() == Op_SafePoint ||
+ u->Opcode() == Op_StoreLConditional ||
(u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
(u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
u->Opcode() == Op_CallLeaf, "");
- if (ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
+ if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
if (mm == NULL) {
mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
}
@@ -4274,7 +2964,7 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
--i;
}
} else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
- if (ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
+ if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
_phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
--i;
}
@@ -4283,11 +2973,324 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
}
}
-void MemoryGraphFixer::remove(Node* n) {
- assert(n->Opcode() == Op_ShenandoahWBMemProj, "");
- Node* c = _phase->get_ctrl(n);
- Node* mem = find_mem(c, NULL);
- if (mem == n) {
- _memory_nodes.map(c->_idx, mem->in(ShenandoahWBMemProjNode::WriteBarrier)->in(ShenandoahBarrierNode::Memory));
- }
+ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj)
+: Node(ctrl, obj) {
+ ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
+}
+
+const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
+ if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
+ return Type::TOP;
+ }
+ const Type* t = in(ValueIn)->bottom_type();
+ if (t == TypePtr::NULL_PTR) {
+ return t;
+ }
+ return t->is_oopptr();
+}
+
+const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
+ // Either input is TOP ==> the result is TOP
+ const Type *t2 = phase->type(in(ValueIn));
+ if( t2 == Type::TOP ) return Type::TOP;
+
+ if (t2 == TypePtr::NULL_PTR) {
+ return t2;
+ }
+
+ const Type* type = t2->is_oopptr()/*->cast_to_nonconst()*/;
+ return type;
+}
+
+Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
+ Node* value = in(ValueIn);
+ if (!needs_barrier(phase, value)) {
+ return value;
+ }
+ return this;
+}
+
+bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
+ Unique_Node_List visited;
+ return needs_barrier_impl(phase, n, visited);
+}
+
+bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
+ if (n == NULL) return false;
+ if (visited.member(n)) {
+ return false; // Been there.
+ }
+ visited.push(n);
+
+ if (n->is_Allocate()) {
+ // tty->print_cr("optimize barrier on alloc");
+ return false;
+ }
+ if (n->is_Call()) {
+ // tty->print_cr("optimize barrier on call");
+ return false;
+ }
+
+ const Type* type = phase->type(n);
+ if (type == Type::TOP) {
+ return false;
+ }
+ if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
+ // tty->print_cr("optimize barrier on null");
+ return false;
+ }
+ if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
+ // tty->print_cr("optimize barrier on constant");
+ return false;
+ }
+
+ switch (n->Opcode()) {
+ case Op_AddP:
+ return true; // TODO: Can refine?
+ case Op_LoadP:
+ case Op_ShenandoahCompareAndExchangeN:
+ case Op_ShenandoahCompareAndExchangeP:
+ case Op_CompareAndExchangeN:
+ case Op_CompareAndExchangeP:
+ case Op_GetAndSetN:
+ case Op_GetAndSetP:
+ return true;
+ case Op_Phi: {
+ for (uint i = 1; i < n->req(); i++) {
+ if (needs_barrier_impl(phase, n->in(i), visited)) return true;
+ }
+ return false;
+ }
+ case Op_CheckCastPP:
+ case Op_CastPP:
+ return needs_barrier_impl(phase, n->in(1), visited);
+ case Op_Proj:
+ return needs_barrier_impl(phase, n->in(0), visited);
+ case Op_ShenandoahLoadReferenceBarrier:
+ // tty->print_cr("optimize barrier on barrier");
+ return false;
+ case Op_Parm:
+ // tty->print_cr("optimize barrier on input arg");
+ return false;
+ case Op_DecodeN:
+ case Op_EncodeP:
+ return needs_barrier_impl(phase, n->in(1), visited);
+ case Op_LoadN:
+ return true;
+ case Op_CMoveP:
+ return needs_barrier_impl(phase, n->in(2), visited) ||
+ needs_barrier_impl(phase, n->in(3), visited);
+ case Op_ShenandoahEnqueueBarrier:
+ return needs_barrier_impl(phase, n->in(1), visited);
+ default:
+ break;
+ }
+#ifdef ASSERT
+ tty->print("need barrier on?: ");
+ tty->print_cr("ins:");
+ n->dump(2);
+ tty->print_cr("outs:");
+ n->dump(-2);
+ ShouldNotReachHere();
+#endif
+ return true;
+}
+
+ShenandoahLoadReferenceBarrierNode::Strength ShenandoahLoadReferenceBarrierNode::get_barrier_strength() {
+ Unique_Node_List visited;
+ Node_Stack stack(0);
+ stack.push(this, 0);
+ Strength strength = NONE;
+ while (strength != STRONG && stack.size() > 0) {
+ Node* n = stack.node();
+ if (visited.member(n)) {
+ stack.pop();
+ continue;
+ }
+ visited.push(n);
+ bool visit_users = false;
+ switch (n->Opcode()) {
+ case Op_StoreN:
+ case Op_StoreP: {
+ strength = STRONG;
+ break;
+ }
+ case Op_CmpP: {
+ if (!n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) &&
+ !n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
+ strength = STRONG;
+ }
+ break;
+ }
+ case Op_CallStaticJava: {
+ strength = STRONG;
+ break;
+ }
+ case Op_CallDynamicJava:
+ case Op_CallLeaf:
+ case Op_CallLeafNoFP:
+ case Op_CompareAndSwapL:
+ case Op_CompareAndSwapI:
+ case Op_CompareAndSwapB:
+ case Op_CompareAndSwapS:
+ case Op_CompareAndSwapN:
+ case Op_CompareAndSwapP:
+ case Op_ShenandoahCompareAndSwapN:
+ case Op_ShenandoahCompareAndSwapP:
+ case Op_ShenandoahWeakCompareAndSwapN:
+ case Op_ShenandoahWeakCompareAndSwapP:
+ case Op_ShenandoahCompareAndExchangeN:
+ case Op_ShenandoahCompareAndExchangeP:
+ case Op_CompareAndExchangeL:
+ case Op_CompareAndExchangeI:
+ case Op_CompareAndExchangeB:
+ case Op_CompareAndExchangeS:
+ case Op_WeakCompareAndSwapL:
+ case Op_WeakCompareAndSwapI:
+ case Op_WeakCompareAndSwapB:
+ case Op_WeakCompareAndSwapS:
+ case Op_GetAndSetL:
+ case Op_GetAndSetI:
+ case Op_GetAndSetB:
+ case Op_GetAndSetS:
+ case Op_GetAndSetP:
+ case Op_GetAndSetN:
+ case Op_GetAndAddL:
+ case Op_GetAndAddI:
+ case Op_GetAndAddB:
+ case Op_GetAndAddS:
+ case Op_ShenandoahEnqueueBarrier:
+ case Op_FastLock:
+ case Op_FastUnlock:
+ case Op_Rethrow:
+ case Op_Return:
+ case Op_StoreB:
+ case Op_StoreC:
+ case Op_StoreD:
+ case Op_StoreF:
+ case Op_StoreL:
+ case Op_StoreLConditional:
+ case Op_StoreI:
+ case Op_StoreVector:
+ case Op_StrInflatedCopy:
+ case Op_StrCompressedCopy:
+ case Op_EncodeP:
+ case Op_CastP2X:
+ case Op_SafePoint:
+ case Op_EncodeISOArray:
+ strength = STRONG;
+ break;
+ case Op_LoadB:
+ case Op_LoadUB:
+ case Op_LoadUS:
+ case Op_LoadD:
+ case Op_LoadF:
+ case Op_LoadL:
+ case Op_LoadI:
+ case Op_LoadS:
+ case Op_LoadN:
+ case Op_LoadP:
+ case Op_LoadVector: {
+ const TypePtr* adr_type = n->adr_type();
+ int alias_idx = Compile::current()->get_alias_index(adr_type);
+ Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx);
+ ciField* field = alias_type->field();
+ bool is_static = field != NULL && field->is_static();
+ bool is_final = field != NULL && field->is_final();
+ bool is_stable = field != NULL && field->is_stable();
+ if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
+ // Leave strength as is.
+ } else if (ShenandoahOptimizeInstanceFinals && !is_static && is_final) {
+ // Leave strength as is.
+ } else if (ShenandoahOptimizeStableFinals && (is_stable || (adr_type->isa_aryptr() && adr_type->isa_aryptr()->is_stable()))) {
+ // Leave strength as is.
+ } else {
+ strength = WEAK;
+ }
+ break;
+ }
+ case Op_AryEq: {
+ Node* n1 = n->in(2);
+ Node* n2 = n->in(3);
+ if (!ShenandoahOptimizeStableFinals ||
+ !n1->bottom_type()->isa_aryptr() || !n1->bottom_type()->isa_aryptr()->is_stable() ||
+ !n2->bottom_type()->isa_aryptr() || !n2->bottom_type()->isa_aryptr()->is_stable()) {
+ strength = WEAK;
+ }
+ break;
+ }
+ case Op_StrEquals:
+ case Op_StrComp:
+ case Op_StrIndexOf:
+ case Op_StrIndexOfChar:
+ if (!ShenandoahOptimizeStableFinals) {
+ strength = WEAK;
+ }
+ break;
+ case Op_Conv2B:
+ case Op_LoadRange:
+ case Op_LoadKlass:
+ case Op_LoadNKlass:
+ // NONE, i.e. leave current strength as is
+ break;
+ case Op_AddP:
+ case Op_CheckCastPP:
+ case Op_CastPP:
+ case Op_CMoveP:
+ case Op_Phi:
+ case Op_ShenandoahLoadReferenceBarrier:
+ visit_users = true;
+ break;
+ default: {
+#ifdef ASSERT
+ tty->print_cr("Unknown node in get_barrier_strength:");
+ n->dump(1);
+ ShouldNotReachHere();
+#else
+ strength = STRONG;
+#endif
+ }
+ }
+#ifdef ASSERT
+/*
+ if (strength == STRONG) {
+ tty->print("strengthening node: ");
+ n->dump();
+ }
+ */
+#endif
+ stack.pop();
+ if (visit_users) {
+ for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+ Node* user = n->fast_out(i);
+ if (user != NULL) {
+ stack.push(user, 0);
+ }
+ }
+ }
+ }
+ return strength;
+}
+
+CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
+ Node* val = in(ValueIn);
+
+ const Type* val_t = igvn.type(val);
+
+ if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
+ val->Opcode() == Op_CastPP &&
+ val->in(0) != NULL &&
+ val->in(0)->Opcode() == Op_IfTrue &&
+ val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
+ val->in(0)->in(0)->is_If() &&
+ val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
+ val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
+ val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
+ val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
+ val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
+ assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
+ CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
+ return unc;
+ }
+ return NULL;
}
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.hpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.hpp
index 02caf091f20..bfbe7e2b5d6 100644
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.hpp
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.hpp
@@ -36,10 +36,8 @@
class PhaseGVN;
class MemoryGraphFixer;
-class ShenandoahBarrierNode : public TypeNode {
+class ShenandoahBarrierC2Support : public AllStatic {
private:
- bool _allow_fromspace;
-
#ifdef ASSERT
enum verify_type {
ShenandoahLoad,
@@ -50,204 +48,49 @@ private:
};
static bool verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used);
-#endif
-
-public:
- enum { Control,
- Memory,
- ValueIn
- };
-
- ShenandoahBarrierNode(Node* ctrl, Node* mem, Node* obj, bool allow_fromspace)
- : TypeNode(obj->bottom_type()->isa_oopptr() ? obj->bottom_type()->is_oopptr()->cast_to_nonconst() : obj->bottom_type(), 3),
- _allow_fromspace(allow_fromspace) {
-
- init_req(Control, ctrl);
- init_req(Memory, mem);
- init_req(ValueIn, obj);
-
- init_class_id(Class_ShenandoahBarrier);
- }
-
- static Node* skip_through_barrier(Node* n);
-
- static const TypeOopPtr* brooks_pointer_type(const Type* t) {
- return t->is_oopptr()->cast_to_nonconst()->add_offset(ShenandoahBrooksPointer::byte_offset())->is_oopptr();
- }
-
- virtual const TypePtr* adr_type() const {
- if (bottom_type() == Type::TOP) {
- return NULL;
- }
- //const TypePtr* adr_type = in(MemNode::Address)->bottom_type()->is_ptr();
- const TypePtr* adr_type = brooks_pointer_type(bottom_type());
- assert(adr_type->offset() == ShenandoahBrooksPointer::byte_offset(), "sane offset");
- assert(Compile::current()->alias_type(adr_type)->is_rewritable(), "brooks ptr must be rewritable");
- return adr_type;
- }
-
- virtual uint ideal_reg() const { return Op_RegP; }
- virtual uint match_edge(uint idx) const {
- return idx >= ValueIn;
- }
-
- Node* Identity_impl(PhaseGVN* phase);
-
- virtual const Type* Value(PhaseGVN* phase) const;
- virtual bool depends_only_on_test() const {
- return true;
- };
-
- static bool needs_barrier(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace);
-
-#ifdef ASSERT
static void report_verify_failure(const char* msg, Node* n1 = NULL, Node* n2 = NULL);
- static void verify(RootNode* root);
static void verify_raw_mem(RootNode* root);
#endif
-#ifndef PRODUCT
- virtual void dump_spec(outputStream *st) const;
-#endif
-
- // protected:
- static Node* dom_mem(Node* mem, Node*& mem_ctrl, Node* n, Node* rep_ctrl, int alias, PhaseIdealLoop* phase);
static Node* dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase);
- static bool is_dominator(Node *d_c, Node *n_c, Node* d, Node* n, PhaseIdealLoop* phase);
- static bool is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase);
static Node* no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase);
- static bool build_loop_late_post(PhaseIdealLoop* phase, Node* n);
- bool sink_node(PhaseIdealLoop* phase, Node* ctrl, Node* n_ctrl);
-
-protected:
- uint hash() const;
- bool cmp(const Node& n) const;
- uint size_of() const;
-
-private:
- static bool needs_barrier_impl(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace, Unique_Node_List &visited);
-
- static bool dominates_memory(PhaseGVN* phase, Node* b1, Node* b2, bool linear);
- static bool dominates_memory_impl(PhaseGVN* phase, Node* b1, Node* b2, Node* current, bool linear);
-};
-
-class ShenandoahReadBarrierNode : public ShenandoahBarrierNode {
-public:
- ShenandoahReadBarrierNode(Node* ctrl, Node* mem, Node* obj)
- : ShenandoahBarrierNode(ctrl, mem, obj, true) {
- assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier ||
- ShenandoahWriteBarrier || ShenandoahAcmpBarrier),
- "should be enabled");
- }
- ShenandoahReadBarrierNode(Node* ctrl, Node* mem, Node* obj, bool allow_fromspace)
- : ShenandoahBarrierNode(ctrl, mem, obj, allow_fromspace) {
- assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier ||
- ShenandoahWriteBarrier || ShenandoahAcmpBarrier),
- "should be enabled");
- }
-
- virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
- virtual Node* Identity(PhaseGVN* phase);
- virtual int Opcode() const;
-
- bool is_independent(Node* mem);
-
- void try_move(PhaseIdealLoop* phase);
-
-private:
- static bool is_independent(const Type* in_type, const Type* this_type);
- static bool dominates_memory_rb(PhaseGVN* phase, Node* b1, Node* b2, bool linear);
- static bool dominates_memory_rb_impl(PhaseGVN* phase, Node* b1, Node* b2, Node* current, bool linear);
-};
-
-class ShenandoahWriteBarrierNode : public ShenandoahBarrierNode {
-public:
- ShenandoahWriteBarrierNode(Compile* C, Node* ctrl, Node* mem, Node* obj);
-
- virtual int Opcode() const;
- virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
- virtual Node* Identity(PhaseGVN* phase);
- virtual bool depends_only_on_test() const { return false; }
-
- static bool expand(Compile* C, PhaseIterGVN& igvn);
- static bool is_gc_state_load(Node *n);
static bool is_heap_state_test(Node* iff, int mask);
- static bool is_heap_stable_test(Node* iff);
static bool try_common_gc_state_load(Node *n, PhaseIdealLoop *phase);
static bool has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase);
-
- static LoopNode* try_move_before_pre_loop(Node* c, Node* val_ctrl, PhaseIdealLoop* phase);
- static Node* move_above_predicates(LoopNode* cl, Node* val_ctrl, PhaseIdealLoop* phase);
-#ifdef ASSERT
- static bool memory_dominates_all_paths(Node* mem, Node* rep_ctrl, int alias, PhaseIdealLoop* phase);
- static void memory_dominates_all_paths_helper(Node* c, Node* rep_ctrl, Unique_Node_List& controls, PhaseIdealLoop* phase);
-#endif
- void try_move_before_loop(GrowableArray& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses);
- void try_move_before_loop_helper(LoopNode* cl, Node* val_ctrl, GrowableArray& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses);
- static void pin_and_expand(PhaseIdealLoop* phase);
- CallStaticJavaNode* pin_and_expand_null_check(PhaseIterGVN& igvn);
- void pin_and_expand_move_barrier(PhaseIdealLoop* phase, GrowableArray& memory_graph_fixers, Unique_Node_List& uses);
- void pin_and_expand_helper(PhaseIdealLoop* phase);
static Node* find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase);
static void follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase);
static void test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase);
-
static void test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
PhaseIdealLoop* phase);
- static void call_wb_stub(Node*& ctrl, Node*& val, Node*& result_mem,
- Node* raw_mem, Node* wb_mem, int alias,
- PhaseIdealLoop* phase);
+ static void call_lrb_stub(Node*& ctrl, Node*& val, Node*& result_mem, Node* raw_mem, PhaseIdealLoop* phase);
static Node* clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase);
static void fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl, Unique_Node_List& uses,
PhaseIdealLoop* phase);
static void in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase);
static void move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase);
-
- static void optimize_after_expansion(VectorSet &visited, Node_Stack &nstack, Node_List &old_new, PhaseIdealLoop* phase);
static void merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase);
static bool identical_backtoback_ifs(Node *n, PhaseIdealLoop* phase);
static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase);
-
- static void optimize_before_expansion(PhaseIdealLoop* phase, GrowableArray memory_graph_fixers, bool include_lsm);
- Node* would_subsume(ShenandoahBarrierNode* other, PhaseIdealLoop* phase);
static IfNode* find_unswitching_candidate(const IdealLoopTree *loop, PhaseIdealLoop* phase);
- Node* try_split_thru_phi(PhaseIdealLoop* phase);
-};
-
-class ShenandoahWBMemProjNode : public Node {
public:
- enum { Control,
- WriteBarrier };
+ static bool is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase);
+ static bool is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase);
- ShenandoahWBMemProjNode(Node *src) : Node(NULL, src) {
- assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
- assert(src->Opcode() == Op_ShenandoahWriteBarrier || src->is_Mach(), "epxect wb");
- }
- virtual Node* Identity(PhaseGVN* phase);
+ static bool is_gc_state_load(Node* n);
+ static bool is_heap_stable_test(Node* iff);
- virtual int Opcode() const;
- virtual bool is_CFG() const { return false; }
- virtual const Type *bottom_type() const {return Type::MEMORY;}
- virtual const TypePtr *adr_type() const {
- Node* wb = in(WriteBarrier);
- if (wb == NULL || wb->is_top()) return NULL; // node is dead
- assert(wb->Opcode() == Op_ShenandoahWriteBarrier || (wb->is_Mach() && wb->as_Mach()->ideal_Opcode() == Op_ShenandoahWriteBarrier) || wb->is_Phi(), "expect wb");
- return ShenandoahBarrierNode::brooks_pointer_type(wb->bottom_type());
- }
+ static bool expand(Compile* C, PhaseIterGVN& igvn);
+ static void pin_and_expand(PhaseIdealLoop* phase);
+ static void optimize_after_expansion(VectorSet& visited, Node_Stack& nstack, Node_List& old_new, PhaseIdealLoop* phase);
- virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
- virtual const Type *Value(PhaseGVN* phase ) const {
- return bottom_type();
- }
-#ifndef PRODUCT
- virtual void dump_spec(outputStream *st) const {};
+#ifdef ASSERT
+ static void verify(RootNode* root);
#endif
};
class ShenandoahEnqueueBarrierNode : public Node {
public:
- ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
- }
+ ShenandoahEnqueueBarrierNode(Node* val);
const Type *bottom_type() const;
const Type* Value(PhaseGVN* phase) const;
@@ -289,7 +132,6 @@ public:
Node* find_mem(Node* ctrl, Node* n) const;
void fix_mem(Node* ctrl, Node* region, Node* mem, Node* mem_for_ctrl, Node* mem_phi, Unique_Node_List& uses);
int alias() const { return _alias; }
- void remove(Node* n);
};
class ShenandoahCompareAndSwapPNode : public CompareAndSwapPNode {
@@ -382,4 +224,41 @@ public:
virtual int Opcode() const;
};
+class ShenandoahLoadReferenceBarrierNode : public Node {
+public:
+ enum {
+ Control,
+ ValueIn
+ };
+
+ enum Strength {
+ NONE, WEAK, STRONG, NA
+ };
+
+ ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* val);
+
+ virtual int Opcode() const;
+ virtual const Type* bottom_type() const;
+ virtual const Type* Value(PhaseGVN* phase) const;
+ virtual const class TypePtr *adr_type() const { return TypeOopPtr::BOTTOM; }
+ virtual uint match_edge(uint idx) const {
+ return idx >= ValueIn;
+ }
+ virtual uint ideal_reg() const { return Op_RegP; }
+
+ virtual Node* Identity(PhaseGVN* phase);
+
+ uint size_of() const {
+ return sizeof(*this);
+ }
+
+ Strength get_barrier_strength();
+ CallStaticJavaNode* pin_and_expand_null_check(PhaseIterGVN& igvn);
+
+private:
+ bool needs_barrier(PhaseGVN* phase, Node* n);
+ bool needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited);
+};
+
+
#endif // SHARE_GC_SHENANDOAH_C2_SHENANDOAHSUPPORT_HPP
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
index 3d8d3f39f7b..a020b87856f 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
@@ -41,13 +41,10 @@ ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics() :
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);
// Final configuration checks
+ SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
}
@@ -75,7 +72,7 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand
// we hit max_cset. When max_cset is hit, we terminate the cset selection. Note that in this scheme,
// ShenandoahGarbageThreshold is the soft threshold which would be ignored until min_garbage is hit.
- size_t capacity = ShenandoahHeap::heap()->capacity();
+ size_t capacity = ShenandoahHeap::heap()->max_capacity();
size_t free_target = ShenandoahMinFreeThreshold * capacity / 100;
size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
size_t max_cset = (size_t)(1.0 * ShenandoahEvacReserve * capacity / 100 / ShenandoahEvacWaste);
@@ -126,12 +123,12 @@ void ShenandoahAdaptiveHeuristics::record_phase_time(ShenandoahPhaseTimings::Pha
bool ShenandoahAdaptiveHeuristics::should_start_normal_gc() const {
ShenandoahHeap* heap = ShenandoahHeap::heap();
- size_t capacity = heap->capacity();
+ size_t capacity = heap->max_capacity();
size_t available = heap->free_set()->available();
// Check if we are falling below the worst limit, time to trigger the GC, regardless of
// anything else.
- size_t min_threshold = ShenandoahMinFreeThreshold * heap->capacity() / 100;
+ size_t min_threshold = ShenandoahMinFreeThreshold * heap->max_capacity() / 100;
if (available < min_threshold) {
log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
available / M, min_threshold / M);
@@ -141,7 +138,7 @@ bool ShenandoahAdaptiveHeuristics::should_start_normal_gc() const {
// Check if are need to learn a bit about the application
const size_t max_learn = ShenandoahLearningSteps;
if (_gc_times_learned < max_learn) {
- size_t init_threshold = ShenandoahInitFreeThreshold * heap->capacity() / 100;
+ size_t init_threshold = ShenandoahInitFreeThreshold * heap->max_capacity() / 100;
if (available < init_threshold) {
log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
_gc_times_learned + 1, max_learn, available / M, init_threshold / M);
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp
index 21b1a725293..f9ffa109b44 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp
@@ -47,13 +47,10 @@ ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics() : ShenandoahHeu
}
// Final configuration checks
+ SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
}
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp
index 7447c3200e6..9752069587e 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp
@@ -42,13 +42,10 @@ ShenandoahCompactHeuristics::ShenandoahCompactHeuristics() : ShenandoahHeuristic
SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahGarbageThreshold, 10);
// Final configuration checks
+ SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
}
@@ -56,8 +53,8 @@ bool ShenandoahCompactHeuristics::should_start_normal_gc() const {
ShenandoahHeap* heap = ShenandoahHeap::heap();
size_t available = heap->free_set()->available();
- size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
- size_t min_threshold = ShenandoahMinFreeThreshold * heap->capacity() / 100;
+ size_t threshold_bytes_allocated = heap->max_capacity() * ShenandoahAllocationThreshold / 100;
+ size_t min_threshold = ShenandoahMinFreeThreshold * heap->max_capacity() / 100;
if (available < min_threshold) {
log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp
index 6bb325185a5..779e4bd1455 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp
@@ -43,14 +43,11 @@ ShenandoahPassiveHeuristics::ShenandoahPassiveHeuristics() : ShenandoahHeuristic
}
// Disable known barriers by default.
+ SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahLoadRefBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahSATBBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahKeepAliveBarrier);
- SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahWriteBarrier);
- SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahReadBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValEnqueueBarrier);
- SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValReadBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCASBarrier);
- SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahAcmpBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCloneBarrier);
// Final configuration checks
@@ -84,7 +81,7 @@ void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(Shenando
// Do not select too large CSet that would overflow the available free space.
// Take at least the entire evacuation reserve, and be free to overflow to free space.
- size_t capacity = ShenandoahHeap::heap()->capacity();
+ size_t capacity = ShenandoahHeap::heap()->max_capacity();
size_t available = MAX2(ShenandoahEvacReserve * capacity / 100, actual_free);
size_t max_cset = (size_t)(available / ShenandoahEvacWaste);
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp
index db6aa6fcd6a..dcd80535748 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp
@@ -40,13 +40,10 @@ ShenandoahStaticHeuristics::ShenandoahStaticHeuristics() : ShenandoahHeuristics(
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);
// Final configuration checks
+ SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
}
@@ -55,7 +52,7 @@ ShenandoahStaticHeuristics::~ShenandoahStaticHeuristics() {}
bool ShenandoahStaticHeuristics::should_start_normal_gc() const {
ShenandoahHeap* heap = ShenandoahHeap::heap();
- size_t capacity = heap->capacity();
+ size_t capacity = heap->max_capacity();
size_t available = heap->free_set()->available();
size_t threshold_available = (capacity * ShenandoahFreeThreshold) / 100;
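
As a side note, the trigger checks in these heuristics are plain percentage arithmetic against max_capacity(). A minimal standalone sketch with made-up numbers (the 10% figures below are illustrative stand-ins, not values taken from this patch):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t max_capacity = 4096 * M;               // heap->max_capacity(), assumed 4 GB
      const size_t min_free_pct = 10;                     // stand-in for ShenandoahMinFreeThreshold
      const size_t alloc_pct    = 10;                     // stand-in for ShenandoahAllocationThreshold

      size_t min_threshold             = min_free_pct * max_capacity / 100;
      size_t threshold_bytes_allocated = max_capacity * alloc_pct / 100;

      size_t available = 300 * M;                         // pretend free-set measurement
      std::printf("min threshold: %zuM, allocation threshold: %zuM\n",
                  min_threshold / M, threshold_bytes_allocated / M);
      std::printf("trigger GC (free below min)? %s\n",
                  available < min_threshold ? "yes" : "no");
      return 0;
    }
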
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp
index c57002c0fda..03a63873707 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp
@@ -37,27 +37,17 @@ ShenandoahTraversalHeuristics::ShenandoahTraversalHeuristics() : ShenandoahHeuri
_last_cset_select(0)
{
FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false);
- FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier, false);
FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, true);
FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier, false);
FLAG_SET_DEFAULT(ShenandoahAllowMixedAllocs, false);
- SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahRefProcFrequency, 1);
-
- // Adjust class unloading settings only if globally enabled.
- if (ClassUnloadingWithConcurrentMark) {
- SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUnloadClassesFrequency, 1);
- }
-
SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent);
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);
// Final configuration checks
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
+ SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValEnqueueBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
}
@@ -127,7 +117,7 @@ void ShenandoahTraversalHeuristics::choose_collection_set(ShenandoahCollectionSe
// The significant complication is that liveness data was collected at the previous cycle, and only
// for those regions that were allocated before previous cycle started.
- size_t capacity = heap->capacity();
+ size_t capacity = heap->max_capacity();
size_t actual_free = heap->free_set()->available();
size_t free_target = ShenandoahMinFreeThreshold * capacity / 100;
size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
@@ -216,12 +206,12 @@ bool ShenandoahTraversalHeuristics::should_start_traversal_gc() {
ShenandoahHeap* heap = ShenandoahHeap::heap();
assert(!heap->has_forwarded_objects(), "no forwarded objects here");
- size_t capacity = heap->capacity();
+ size_t capacity = heap->max_capacity();
size_t available = heap->free_set()->available();
// Check if we are falling below the worst limit, time to trigger the GC, regardless of
// anything else.
- size_t min_threshold = ShenandoahMinFreeThreshold * heap->capacity() / 100;
+ size_t min_threshold = ShenandoahMinFreeThreshold * heap->max_capacity() / 100;
if (available < min_threshold) {
log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
available / M, min_threshold / M);
@@ -231,7 +221,7 @@ bool ShenandoahTraversalHeuristics::should_start_traversal_gc() {
// Check if we need to learn a bit about the application
const size_t max_learn = ShenandoahLearningSteps;
if (_gc_times_learned < max_learn) {
- size_t init_threshold = ShenandoahInitFreeThreshold * heap->capacity() / 100;
+ size_t init_threshold = ShenandoahInitFreeThreshold * heap->max_capacity() / 100;
if (available < init_threshold) {
log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
_gc_times_learned + 1, max_learn, available / M, init_threshold / M);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp
index caefc0605e5..42de71263c5 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp
@@ -46,12 +46,8 @@ void ShenandoahArguments::initialize() {
FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false);
FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier, false);
- FLAG_SET_DEFAULT(ShenandoahWriteBarrier, false);
- FLAG_SET_DEFAULT(ShenandoahReadBarrier, false);
FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, false);
- FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier, false);
FLAG_SET_DEFAULT(ShenandoahCASBarrier, false);
- FLAG_SET_DEFAULT(ShenandoahAcmpBarrier, false);
FLAG_SET_DEFAULT(ShenandoahCloneBarrier, false);
#endif
@@ -111,12 +107,8 @@ void ShenandoahArguments::initialize() {
if (ShenandoahVerifyOptoBarriers &&
(!FLAG_IS_DEFAULT(ShenandoahSATBBarrier) ||
!FLAG_IS_DEFAULT(ShenandoahKeepAliveBarrier) ||
- !FLAG_IS_DEFAULT(ShenandoahWriteBarrier) ||
- !FLAG_IS_DEFAULT(ShenandoahReadBarrier) ||
!FLAG_IS_DEFAULT(ShenandoahStoreValEnqueueBarrier) ||
- !FLAG_IS_DEFAULT(ShenandoahStoreValReadBarrier) ||
!FLAG_IS_DEFAULT(ShenandoahCASBarrier) ||
- !FLAG_IS_DEFAULT(ShenandoahAcmpBarrier) ||
!FLAG_IS_DEFAULT(ShenandoahCloneBarrier)
)) {
warning("Unusual barrier configuration, disabling C2 barrier verification");
@@ -147,6 +139,11 @@ void ShenandoahArguments::initialize() {
FLAG_SET_DEFAULT(ShenandoahUncommit, false);
}
+ if ((InitialHeapSize == MaxHeapSize) && ShenandoahUncommit) {
+ log_info(gc)("Min heap equals to max heap, disabling ShenandoahUncommit");
+ FLAG_SET_DEFAULT(ShenandoahUncommit, false);
+ }
+
// If class unloading is disabled, no unloading for concurrent cycles as well.
// If class unloading is enabled, users should opt-in for unloading during
// concurrent cycles.
@@ -164,13 +161,6 @@ void ShenandoahArguments::initialize() {
FLAG_SET_DEFAULT(UseAOT, false);
}
- // JNI fast get field stuff is not currently supported by Shenandoah.
- // It would introduce another heap memory access for reading the forwarding
- // pointer, which would have to be guarded by the signal handler machinery.
- // See:
- // http://mail.openjdk.java.net/pipermail/hotspot-dev/2018-June/032763.html
- FLAG_SET_DEFAULT(UseFastJNIAccessors, false);
-
// TLAB sizing policy makes resizing decisions before each GC cycle. It averages
// historical data, assigning more recent data the weight according to TLABAllocationWeight.
// Current default is good for generational collectors that run frequent young GCs.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
index 412df539aeb..7ff8dbf6b22 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
@@ -107,11 +107,8 @@ void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) {
}
void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
- assert(UseShenandoahGC, "should be enabled");
- if (count == 0) return;
- if (!ShenandoahCloneBarrier) return;
-
- if (!need_update_refs_barrier()) return;
+ assert(_heap->is_update_refs_in_progress(), "should not be here otherwise");
+ assert(count > 0, "Should have been filtered before");
if (_heap->is_concurrent_traversal_in_progress()) {
ShenandoahEvacOOMScope oom_evac_scope;
@@ -197,9 +194,8 @@ void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
}
void ShenandoahBarrierSet::write_region(MemRegion mr) {
- assert(UseShenandoahGC, "should be enabled");
if (!ShenandoahCloneBarrier) return;
- if (! need_update_refs_barrier()) return;
+ if (!_heap->is_update_refs_in_progress()) return;
// This is called for cloning an object (see jvm.cpp) after the clone
// has been made. We are not interested in any 'previous value' because
@@ -218,31 +214,25 @@ void ShenandoahBarrierSet::write_region(MemRegion mr) {
}
}
-oop ShenandoahBarrierSet::read_barrier(oop src) {
- // Check for forwarded objects, because on Full GC path we might deal with
- // non-trivial fwdptrs that contain Full GC specific metadata. We could check
- // for is_full_gc_in_progress(), but this also covers the case of stable heap,
- // which provides a bit of performance improvement.
- if (ShenandoahReadBarrier && _heap->has_forwarded_objects()) {
- return ShenandoahBarrierSet::resolve_forwarded(src);
+oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
+ if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
+ return load_reference_barrier_impl(obj);
} else {
- return src;
+ return obj;
}
}
-bool ShenandoahBarrierSet::obj_equals(oop obj1, oop obj2) {
- bool eq = oopDesc::equals_raw(obj1, obj2);
- if (! eq && ShenandoahAcmpBarrier) {
- OrderAccess::loadload();
- obj1 = resolve_forwarded(obj1);
- obj2 = resolve_forwarded(obj2);
- eq = oopDesc::equals_raw(obj1, obj2);
+oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
+ if (obj != NULL) {
+ return load_reference_barrier_not_null(obj);
+ } else {
+ return obj;
}
- return eq;
}
-oop ShenandoahBarrierSet::write_barrier_mutator(oop obj) {
- assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
+
+oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj) {
+ assert(ShenandoahLoadRefBarrier, "should be enabled");
assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL), "evac should be in progress");
shenandoah_assert_in_cset(NULL, obj);
@@ -288,8 +278,8 @@ oop ShenandoahBarrierSet::write_barrier_mutator(oop obj) {
return fwd;
}
-oop ShenandoahBarrierSet::write_barrier_impl(oop obj) {
- assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
+oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) {
+ assert(ShenandoahLoadRefBarrier, "should be enabled");
if (!CompressedOops::is_null(obj)) {
bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
oop fwd = resolve_forwarded_not_null(obj);
@@ -311,23 +301,10 @@ oop ShenandoahBarrierSet::write_barrier_impl(oop obj) {
}
}
-oop ShenandoahBarrierSet::write_barrier(oop obj) {
- if (ShenandoahWriteBarrier && _heap->has_forwarded_objects()) {
- return write_barrier_impl(obj);
- } else {
- return obj;
- }
-}
-
-oop ShenandoahBarrierSet::storeval_barrier(oop obj) {
+void ShenandoahBarrierSet::storeval_barrier(oop obj) {
if (ShenandoahStoreValEnqueueBarrier && !CompressedOops::is_null(obj) && _heap->is_concurrent_traversal_in_progress()) {
- obj = write_barrier(obj);
enqueue(obj);
}
- if (ShenandoahStoreValReadBarrier) {
- obj = resolve_forwarded(obj);
- }
- return obj;
}
void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp
index 65ae19bfd3b..d1a08b8c5cc 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp
@@ -87,34 +87,23 @@ public:
virtual void on_thread_attach(Thread* thread);
virtual void on_thread_detach(Thread* thread);
- virtual oop read_barrier(oop src);
-
static inline oop resolve_forwarded_not_null(oop p);
static inline oop resolve_forwarded(oop p);
- virtual oop write_barrier(oop obj);
+ void storeval_barrier(oop obj);
+ void keep_alive_barrier(oop obj);
- oop write_barrier_mutator(oop obj);
-
- virtual oop storeval_barrier(oop obj);
-
- virtual void keep_alive_barrier(oop obj);
-
- bool obj_equals(oop obj1, oop obj2);
-
-#ifdef CHECK_UNHANDLED_OOPS
- bool oop_equals_operator_allowed() { return !ShenandoahVerifyObjectEquals; }
-#endif
+ oop load_reference_barrier(oop obj);
+ oop load_reference_barrier_mutator(oop obj);
+ oop load_reference_barrier_not_null(oop obj);
void enqueue(oop obj);
private:
- inline bool need_update_refs_barrier();
-
template
void write_ref_array_loop(HeapWord* start, size_t count);
- oop write_barrier_impl(oop obj);
+ oop load_reference_barrier_impl(oop obj);
static void keep_alive_if_weak(DecoratorSet decorators, oop value) {
assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
@@ -149,114 +138,31 @@ public:
class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
+ template <typename T>
+ static oop oop_atomic_cmpxchg_in_heap_impl(oop new_value, T* addr, oop compare_value);
+
+ template <typename T>
+ static oop oop_atomic_xchg_in_heap_impl(oop new_value, T* addr);
+
public:
- // Primitive heap accesses. These accessors get resolved when
- // IN_HEAP is set (e.g. when using the HeapAccess API), it is
- // not an oop_* overload, and the barrier strength is AS_NORMAL.
- template <typename T>
- static T load_in_heap(T* addr) {
- ShouldNotReachHere();
- return Raw::template load(addr);
- }
-
- template <typename T>
- static T load_in_heap_at(oop base, ptrdiff_t offset) {
- base = ShenandoahBarrierSet::resolve_forwarded(base);
- return Raw::template load_at(base, offset);
- }
-
- template <typename T>
- static void store_in_heap(T* addr, T value) {
- ShouldNotReachHere();
- Raw::store(addr, value);
- }
-
- template <typename T>
- static void store_in_heap_at(oop base, ptrdiff_t offset, T value) {
- base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
- Raw::store_at(base, offset, value);
- }
-
- template <typename T>
- static T atomic_cmpxchg_in_heap(T new_value, T* addr, T compare_value) {
- ShouldNotReachHere();
- return Raw::atomic_cmpxchg(new_value, addr, compare_value);
- }
-
- template <typename T>
- static T atomic_cmpxchg_in_heap_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
- base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
- return Raw::atomic_cmpxchg_at(new_value, base, offset, compare_value);
- }
-
- template <typename T>
- static T atomic_xchg_in_heap(T new_value, T* addr) {
- ShouldNotReachHere();
- return Raw::atomic_xchg(new_value, addr);
- }
-
- template <typename T>
- static T atomic_xchg_in_heap_at(T new_value, oop base, ptrdiff_t offset) {
- base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
- return Raw::atomic_xchg_at(new_value, base, offset);
- }
-
- template <typename T>
- static void arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
- arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
- size_t length);
-
// Heap oop accesses. These accessors get resolved when
// IN_HEAP is set (e.g. when using the HeapAccess API), it is
// an oop_* overload, and the barrier strength is AS_NORMAL.
template <typename T>
- static oop oop_load_in_heap(T* addr) {
- // ShouldNotReachHere();
- oop value = Raw::template oop_load(addr);
- keep_alive_if_weak(decorators, value);
- return value;
- }
-
- static oop oop_load_in_heap_at(oop base, ptrdiff_t offset) {
- base = ShenandoahBarrierSet::resolve_forwarded(base);
- oop value = Raw::template oop_load_at(base, offset);
- keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength(base, offset), value);
- return value;
- }
+ static oop oop_load_in_heap(T* addr);
+ static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);
template <typename T>
- static void oop_store_in_heap(T* addr, oop value) {
- const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
- if (keep_alive) {
- ShenandoahBarrierSet::barrier_set()->write_ref_field_pre_work(addr, value);
- }
- Raw::oop_store(addr, value);
- }
-
- static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
- base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
- value = ShenandoahBarrierSet::barrier_set()->storeval_barrier(value);
-
- oop_store_in_heap(AccessInternal::oop_field_addr(base, offset), value);
- }
+ static void oop_store_in_heap(T* addr, oop value);
+ static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value);
template <typename T>
static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
-
- static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
- base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
- new_value = ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
- return oop_atomic_cmpxchg_in_heap(new_value, AccessInternal::oop_field_addr(base, offset), compare_value);
- }
+ static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);
template <typename T>
static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
-
- static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
- base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
- new_value = ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
- return oop_atomic_xchg_in_heap(new_value, AccessInternal::oop_field_addr(base, offset));
- }
+ static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset);
template <typename T>
static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
@@ -268,19 +174,13 @@ public:
// Needed for loads on non-heap weak references
template <typename T>
- static oop oop_load_not_in_heap(T* addr) {
- oop value = Raw::oop_load_not_in_heap(addr);
- keep_alive_if_weak(decorators, value);
- return value;
- }
+ static oop oop_load_not_in_heap(T* addr);
- static oop resolve(oop obj) {
- return ShenandoahBarrierSet::barrier_set()->write_barrier(obj);
- }
+ template <typename T>
+ static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);
- static bool equals(oop o1, oop o2) {
- return ShenandoahBarrierSet::barrier_set()->obj_equals(o1, o2);
- }
+ template <typename T>
+ static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr);
};
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
index c6c0624c1f5..66b361b66b9 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
@@ -32,12 +32,6 @@
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
-bool ShenandoahBarrierSet::need_update_refs_barrier() {
- return _heap->is_update_refs_in_progress() ||
- _heap->is_concurrent_traversal_in_progress() ||
- (_heap->is_concurrent_mark_in_progress() && _heap->has_forwarded_objects());
-}
-
inline oop ShenandoahBarrierSet::resolve_forwarded_not_null(oop p) {
return ShenandoahBrooksPointer::forwardee(p);
}
@@ -52,7 +46,49 @@ inline oop ShenandoahBarrierSet::resolve_forwarded(oop p) {
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap(T* addr) {
+ oop value = Raw::oop_load_in_heap(addr);
+ value = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(value);
+ keep_alive_if_weak(decorators, value);
+ return value;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap_at(oop base, ptrdiff_t offset) {
+ oop value = Raw::oop_load_in_heap_at(base, offset);
+ value = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(value);
+ keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength(base, offset), value);
+ return value;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_not_in_heap(T* addr) {
+ oop value = Raw::oop_load_not_in_heap(addr);
+ value = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(value);
+ keep_alive_if_weak(decorators, value);
+ return value;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_in_heap(T* addr, oop value) {
+ ShenandoahBarrierSet::barrier_set()->storeval_barrier(value);
+ const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
+ if (keep_alive) {
+ ShenandoahBarrierSet::barrier_set()->write_ref_field_pre_work(addr, value);
+ }
+ Raw::oop_store_in_heap(addr, value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
+ oop_store_in_heap(AccessInternal::oop_field_addr(base, offset), value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
oop res;
oop expected = compare_value;
do {
@@ -60,42 +96,79 @@ inline oop ShenandoahBarrierSet::AccessBarrier::oop_ato
res = Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
expected = res;
} while ((! oopDesc::equals_raw(compare_value, expected)) && oopDesc::equals_raw(resolve_forwarded(compare_value), resolve_forwarded(expected)));
- if (oopDesc::equals_raw(expected, compare_value)) {
- const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
- if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(compare_value) &&
- ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {
- ShenandoahBarrierSet::barrier_set()->enqueue(compare_value);
- }
+ if (res != NULL) {
+ return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(res);
+ } else {
+ return res;
}
- return res;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_impl(oop new_value, T* addr, oop compare_value) {
+ ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
+ oop result = oop_atomic_cmpxchg_not_in_heap(new_value, addr, compare_value);
+ const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
+ if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) &&
+ oopDesc::equals_raw(result, compare_value) &&
+ ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {
+ ShenandoahBarrierSet::barrier_set()->enqueue(result);
+ }
+ return result;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
+ oop result = oop_atomic_cmpxchg_in_heap_impl(new_value, addr, compare_value);
+ keep_alive_if_weak(decorators, result);
+ return result;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
+ oop result = oop_atomic_cmpxchg_in_heap_impl(new_value, AccessInternal::oop_field_addr(base, offset), compare_value);
+ keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength(base, offset), result);
+ return result;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
+ oop previous = Raw::oop_atomic_xchg(new_value, addr);
+ if (previous != NULL) {
+ return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(previous);
+ } else {
+ return previous;
+ }
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_impl(oop new_value, T* addr) {
+ ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
+ oop result = oop_atomic_xchg_not_in_heap(new_value, addr);
+ const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
+ if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) &&
+ ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {
+ ShenandoahBarrierSet::barrier_set()->enqueue(result);
+ }
+ return result;
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(oop new_value, T* addr) {
- oop previous = Raw::oop_atomic_xchg(new_value, addr);
- if (ShenandoahSATBBarrier) {
- const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
- if (keep_alive && !CompressedOops::is_null(previous) &&
- ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {
- ShenandoahBarrierSet::barrier_set()->enqueue(previous);
- }
- }
- return previous;
+ oop result = oop_atomic_xchg_in_heap_impl(new_value, addr);
+ keep_alive_if_weak(addr, result);
+ return result;
}
template <DecoratorSet decorators, typename BarrierSetT>
-template <typename T>
-void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
- arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
- size_t length) {
- if (!CompressedOops::is_null(src_obj)) {
- src_obj = arrayOop(ShenandoahBarrierSet::barrier_set()->read_barrier(src_obj));
- }
- if (!CompressedOops::is_null(dst_obj)) {
- dst_obj = arrayOop(ShenandoahBarrierSet::barrier_set()->write_barrier(dst_obj));
- }
- Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw, dst_obj, dst_offset_in_bytes, dst_raw, length);
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
+ oop result = oop_atomic_xchg_in_heap_impl(new_value, AccessInternal::oop_field_addr(base, offset));
+ keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength(base, offset), result);
+ return result;
}
template
@@ -248,8 +321,6 @@ bool ShenandoahBarrierSet::arraycopy_element(T* cur_src, T* cur_dst, Klass* boun
// Clone barrier support
template <DecoratorSet decorators, typename BarrierSetT>
void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(oop src, oop dst, size_t size) {
- src = arrayOop(ShenandoahBarrierSet::barrier_set()->read_barrier(src));
- dst = arrayOop(ShenandoahBarrierSet::barrier_set()->write_barrier(dst));
Raw::clone(src, dst, size);
ShenandoahBarrierSet::barrier_set()->write_region(MemRegion((HeapWord*) dst, size));
}
@@ -260,13 +331,6 @@ bool ShenandoahBarrierSet::AccessBarrier::oop_arraycopy
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
size_t length) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
- if (!CompressedOops::is_null(src_obj)) {
- src_obj = arrayOop(ShenandoahBarrierSet::barrier_set()->read_barrier(src_obj));
- }
- if (!CompressedOops::is_null(dst_obj)) {
- dst_obj = arrayOop(ShenandoahBarrierSet::barrier_set()->write_barrier(dst_obj));
- }
-
bool satb = ShenandoahSATBBarrier && heap->is_concurrent_mark_in_progress();
bool checkcast = HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value;
bool disjoint = HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value;
@@ -274,7 +338,7 @@ bool ShenandoahBarrierSet::AccessBarrier::oop_arraycopy
if (heap->has_forwarded_objects()) {
if (heap->is_concurrent_traversal_in_progress()) {
storeval_mode = WRITE_BARRIER;
- } else if (heap->is_concurrent_mark_in_progress() || heap->is_update_refs_in_progress()) {
+ } else if (heap->is_update_refs_in_progress()) {
storeval_mode = READ_BARRIER;
} else {
assert(heap->is_idle() || heap->is_evacuation_in_progress(), "must not have anything in progress");
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
index 73f980d6142..78d68129421 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
@@ -119,39 +119,6 @@ public:
}
};
-class ShenandoahNMethodOopInitializer : public OopClosure {
-private:
- ShenandoahHeap* const _heap;
-
-public:
- ShenandoahNMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {};
-
-private:
- template <typename T>
- inline void do_oop_work(T* p) {
- T o = RawAccess<>::oop_load(p);
- if (! CompressedOops::is_null(o)) {
- oop obj1 = CompressedOops::decode_not_null(o);
- oop obj2 = ShenandoahBarrierSet::barrier_set()->write_barrier(obj1);
- if (! oopDesc::equals_raw(obj1, obj2)) {
- shenandoah_assert_not_in_cset(NULL, obj2);
- RawAccess::oop_store(p, obj2);
- if (_heap->is_concurrent_traversal_in_progress()) {
- ShenandoahBarrierSet::barrier_set()->enqueue(obj2);
- }
- }
- }
- }
-
-public:
- void do_oop(oop* o) {
- do_oop_work(o);
- }
- void do_oop(narrowOop* o) {
- do_oop_work(o);
- }
-};
-
ShenandoahCodeRoots::PaddedLock ShenandoahCodeRoots::_recorded_nms_lock;
GrowableArray<ShenandoahNMethod*>* ShenandoahCodeRoots::_recorded_nms;
@@ -163,21 +130,13 @@ void ShenandoahCodeRoots::initialize() {
void ShenandoahCodeRoots::add_nmethod(nmethod* nm) {
switch (ShenandoahCodeRootsStyle) {
case 0:
- case 1: {
- ShenandoahNMethodOopInitializer init;
- nm->oops_do(&init);
- nm->fix_oop_relocations();
+ case 1:
break;
- }
case 2: {
ShenandoahNMethodOopDetector detector;
nm->oops_do(&detector);
if (detector.has_oops()) {
- ShenandoahNMethodOopInitializer init;
- nm->oops_do(&init);
- nm->fix_oop_relocations();
-
ShenandoahNMethod* nmr = new ShenandoahNMethod(nm, detector.oops());
nmr->assert_alive_and_correct();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
index 7b13ccb6940..ca071a60056 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
@@ -462,9 +462,11 @@ void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause
void ShenandoahControlThread::service_uncommit(double shrink_before) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
- // Scan through the heap and determine if there is work to do. This avoids taking
- // heap lock if there is no work available, avoids spamming logs with superfluous
- // logging messages, and minimises the amount of work while locks are taken.
+ // Determine if there is work to do. This avoids taking heap lock if there is
+ // no work available, avoids spamming logs with superfluous logging messages,
+ // and minimises the amount of work while locks are taken.
+
+ if (heap->committed() <= heap->min_capacity()) return;
bool has_work = false;
for (size_t i = 0; i < heap->num_regions(); i++) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp
index 83071cf40d8..13831ce6f64 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp
@@ -31,8 +31,8 @@
* Provides safe handling of out-of-memory situations during evacuation.
*
* When a Java thread encounters out-of-memory while evacuating an object in a
- * write-barrier (i.e. it cannot copy the object to to-space), it does not necessarily
- * follow we can return immediately from the WB (and store to from-space).
+ * load-reference-barrier (i.e. it cannot copy the object to to-space), it does not
+ * necessarily follow we can return immediately from the LRB (and store to from-space).
*
* In the very basic case, on such failure we may wait until the evacuation is over,
* and then resolve the forwarded copy, and do the store there. This is possible
@@ -64,17 +64,17 @@
* - failure:
* - if offending value is a valid counter, then try again
* - if offending value is OOM-during-evac special value: loop until
- * counter drops to 0, then exit with read-barrier
+ * counter drops to 0, then exit with resolving the ptr
*
* Upon exit, exiting thread will decrease the counter using atomic dec.
*
* Upon OOM-during-evac, any thread will attempt to CAS OOM-during-evac
* special value into the counter. Depending on result:
- * - success: busy-loop until counter drops to zero, then exit with RB
+ * - success: busy-loop until counter drops to zero, then exit with resolve
* - failure:
* - offender is valid counter update: try again
* - offender is OOM-during-evac: busy loop until counter drops to
- * zero, then exit with RB
+ * zero, then exit with resolve
*/
class ShenandoahEvacOOMHandler {
private:
@@ -94,7 +94,7 @@ public:
*
* When this returns true, it is safe to continue with normal evacuation.
* When this method returns false, evacuation must not be entered, and caller
- * may safely continue with a read-barrier (if Java thread).
+ * may safely continue with a simple resolve (if Java thread).
*/
void enter_evacuation();
@@ -106,7 +106,7 @@ public:
/**
* Signal out-of-memory during evacuation. It will prevent any other threads
* from entering the evacuation path, then wait until all threads have left the
- * evacuation path, and then return. It is then safe to continue with a read-barrier.
+ * evacuation path, and then return. It is then safe to continue with a simple resolve.
*/
void handle_out_of_memory_during_evacuation();
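
The counter-plus-marker protocol described in this comment block can be pictured with a short self-contained sketch. This is an illustration only, not the HotSpot implementation (which packs the flag and the thread counter differently, integrates with safepoints, and clears the OOM state when evacuation ends); all names below are made up for the example:

    #include <atomic>
    #include <cstdint>
    #include <thread>

    // Illustrative sketch: one atomic word whose top bit is the "OOM happened" marker
    // and whose low bits count threads currently inside the evacuation path.
    class EvacOOMProtocol {
      static constexpr uint32_t OOM_MARKER = 0x80000000u;
      std::atomic<uint32_t> _state{0};

    public:
      // Returns true if evacuation may proceed; false means the caller must fall back
      // to resolving the forwarding pointer instead of copying the object.
      bool enter() {
        uint32_t cur = _state.load(std::memory_order_acquire);
        while (true) {
          if (cur & OOM_MARKER) {
            // OOM already signalled: wait until every thread has left, then bail out.
            while (_state.load(std::memory_order_acquire) != OOM_MARKER) {
              std::this_thread::yield();
            }
            return false;
          }
          if (_state.compare_exchange_weak(cur, cur + 1, std::memory_order_acq_rel)) {
            return true;  // joined the evacuation path
          }
          // cur was refreshed by the failed CAS; re-check the marker and retry.
        }
      }

      void exit() {
        _state.fetch_sub(1, std::memory_order_acq_rel);
      }

      // Called (while inside the path) by a thread that failed to evacuate: block new
      // entries, leave the path, and wait for all other threads to drain.
      void signal_oom_and_wait() {
        _state.fetch_or(OOM_MARKER, std::memory_order_acq_rel);
        exit();
        while (_state.load(std::memory_order_acquire) != OOM_MARKER) {
          std::this_thread::yield();
        }
        // The caller now resolves the forwarding pointer instead of storing a new copy.
      }
    };

    int main() {
      EvacOOMProtocol p;
      if (p.enter()) {
        // ... evacuation work; on allocation failure we would call p.signal_oom_and_wait() ...
        p.exit();
      }
      return 0;
    }
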
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
index e059f09b212..ec0b3528314 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
@@ -430,7 +430,7 @@ void ShenandoahFreeSet::rebuild() {
}
// Evac reserve: reserve trailing space for evacuations
- size_t to_reserve = ShenandoahEvacReserve * _heap->capacity() / 100;
+ size_t to_reserve = ShenandoahEvacReserve * _heap->max_capacity() / 100;
size_t reserved = 0;
for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index 588f186c3f6..eb61f3877e3 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -141,6 +141,7 @@ jint ShenandoahHeap::initialize() {
//
size_t init_byte_size = collector_policy()->initial_heap_byte_size();
+ size_t min_byte_size = collector_policy()->min_heap_byte_size();
size_t max_byte_size = collector_policy()->max_heap_byte_size();
size_t heap_alignment = collector_policy()->heap_alignment();
@@ -159,8 +160,13 @@ jint ShenandoahHeap::initialize() {
size_t num_committed_regions = init_byte_size / reg_size_bytes;
num_committed_regions = MIN2(num_committed_regions, _num_regions);
assert(num_committed_regions <= _num_regions, "sanity");
-
_initial_size = num_committed_regions * reg_size_bytes;
+
+ size_t num_min_regions = min_byte_size / reg_size_bytes;
+ num_min_regions = MIN2(num_min_regions, _num_regions);
+ assert(num_min_regions <= _num_regions, "sanity");
+ _minimum_size = num_min_regions * reg_size_bytes;
+
_committed = _initial_size;
size_t heap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
@@ -351,8 +357,11 @@ jint ShenandoahHeap::initialize() {
_control_thread = new ShenandoahControlThread();
- log_info(gc, init)("Initialize Shenandoah heap with initial size " SIZE_FORMAT "%s",
- byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size));
+ log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max",
+ byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size),
+ byte_size_in_proper_unit(_minimum_size), proper_unit_for_byte_size(_minimum_size),
+ byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity())
+ );
log_info(gc, init)("Safepointing mechanism: %s",
SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
@@ -389,10 +398,6 @@ void ShenandoahHeap::initialize_heuristics() {
err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
_heuristics->name()));
}
-
- if (ShenandoahStoreValEnqueueBarrier && ShenandoahStoreValReadBarrier) {
- vm_exit_during_initialization("Cannot use both ShenandoahStoreValEnqueueBarrier and ShenandoahStoreValReadBarrier");
- }
log_info(gc, init)("Shenandoah heuristics: %s",
_heuristics->name());
} else {
@@ -506,7 +511,7 @@ void ShenandoahHeap::reset_mark_bitmap() {
void ShenandoahHeap::print_on(outputStream* st) const {
st->print_cr("Shenandoah Heap");
st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
- capacity() / K, committed() / K, used() / K);
+ max_capacity() / K, committed() / K, used() / K);
st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
@@ -619,13 +624,17 @@ void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
}
size_t ShenandoahHeap::capacity() const {
- return num_regions() * ShenandoahHeapRegion::region_size_bytes();
+ return committed();
}
size_t ShenandoahHeap::max_capacity() const {
return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}
+size_t ShenandoahHeap::min_capacity() const {
+ return _minimum_size;
+}
+
size_t ShenandoahHeap::initial_capacity() const {
return _initial_size;
}
@@ -639,12 +648,22 @@ bool ShenandoahHeap::is_in(const void* p) const {
void ShenandoahHeap::op_uncommit(double shrink_before) {
assert (ShenandoahUncommit, "should be enabled");
+ // Application allocates from the beginning of the heap, and GC allocates at
+ // the end of it. It is more efficient to uncommit from the end, so that applications
+ // could enjoy the near committed regions. GC allocations are much less frequent,
+ // and therefore can accept the committing costs.
+
size_t count = 0;
- for (size_t i = 0; i < num_regions(); i++) {
- ShenandoahHeapRegion* r = get_region(i);
+ for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
+ ShenandoahHeapRegion* r = get_region(i - 1);
if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
ShenandoahHeapLocker locker(lock());
if (r->is_empty_committed()) {
+ // Do not uncommit below minimal capacity
+ if (committed() < min_capacity() + ShenandoahHeapRegion::region_size_bytes()) {
+ break;
+ }
+
r->make_uncommitted();
count++;
}
@@ -653,8 +672,6 @@ void ShenandoahHeap::op_uncommit(double shrink_before) {
}
if (count > 0) {
- log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
- count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
control_thread()->notify_heap_changed();
}
}
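
The reversed iteration and the min-capacity guard added here can be modelled with a tiny stand-alone sketch (made-up numbers, not the HotSpot region machinery): regions are walked from the end of the heap, and uncommitting stops once dropping another region would take the committed footprint below the minimum capacity.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      // Model: 16 committed regions of one unit each; the last six are empty.
      const size_t region_size = 1;
      const size_t min_capacity = 12;            // committed footprint must not drop below this
      size_t committed = 16;
      std::vector<bool> empty_committed(16, false);
      for (size_t i = 10; i < 16; i++) empty_committed[i] = true;

      size_t count = 0;
      for (size_t i = empty_committed.size(); i > 0; i--) {   // walk from the end of the heap
        size_t idx = i - 1;
        if (!empty_committed[idx]) continue;
        if (committed < min_capacity + region_size) break;    // respect minimum capacity
        empty_committed[idx] = false;                         // "uncommit" the region
        committed -= region_size;
        count++;
      }
      // Prints: uncommitted 4 regions, committed now 12
      std::printf("uncommitted %zu regions, committed now %zu\n", count, committed);
      return 0;
    }
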
@@ -791,7 +808,7 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
assert(req.is_gc_alloc(), "Can only accept GC allocs here");
result = allocate_memory_under_lock(req, in_new_region);
// Do not call handle_alloc_failure() here, because we cannot block.
- // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
+ // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
}
if (in_new_region) {
@@ -1105,7 +1122,6 @@ public:
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahEvacOOMScope oom_evac_scope;
ShenandoahEvacuateUpdateRootsClosure cl;
-
MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
_rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
}
@@ -1209,7 +1225,9 @@ void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
workers()->threads_do(tcl);
- _safepoint_workers->threads_do(tcl);
+ if (_safepoint_workers != NULL) {
+ _safepoint_workers->threads_do(tcl);
+ }
if (ShenandoahStringDedup::is_enabled()) {
ShenandoahStringDedup::threads_do(tcl);
}
@@ -1550,6 +1568,10 @@ void ShenandoahHeap::op_final_mark() {
if (ShenandoahPacing) {
pacer()->setup_for_evac();
}
+
+ if (ShenandoahVerify) {
+ verifier()->verify_during_evacuation();
+ }
} else {
if (ShenandoahVerify) {
verifier()->verify_after_concmark();
@@ -1800,13 +1822,13 @@ void ShenandoahHeap::op_degenerated_futile() {
void ShenandoahHeap::stop_concurrent_marking() {
assert(is_concurrent_mark_in_progress(), "How else could we get here?");
+ set_concurrent_mark_in_progress(false);
if (!cancelled_gc()) {
// If we needed to update refs, and concurrent marking has been cancelled,
// we need to finish updating references.
set_has_forwarded_objects(false);
mark_complete_marking_context();
}
- set_concurrent_mark_in_progress(false);
}
void ShenandoahHeap::force_satb_flush_all_threads() {
@@ -1836,12 +1858,16 @@ void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
}
void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
- set_gc_state_mask(MARKING, in_progress);
+ if (has_forwarded_objects()) {
+ set_gc_state_mask(MARKING | UPDATEREFS, in_progress);
+ } else {
+ set_gc_state_mask(MARKING, in_progress);
+ }
ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}
void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
- set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress);
+ set_gc_state_mask(TRAVERSAL | HAS_FORWARDED | UPDATEREFS, in_progress);
ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}
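
The gc-state changes above fold UPDATEREFS into the masks that are set when marking or traversal starts on a heap that still has forwarded objects. A toy sketch of that bitmask bookkeeping (bit positions mirror the GCStateBitPos enum in shenandoahHeap.hpp; the starting state and main() are illustrative):

    #include <cstdint>
    #include <cstdio>

    enum : uint8_t {
      HAS_FORWARDED = 1 << 0,
      MARKING       = 1 << 1,
      EVACUATION    = 1 << 2,
      UPDATEREFS    = 1 << 3,
    };

    static uint8_t gc_state = HAS_FORWARDED;   // e.g. a cancelled cycle left forwarded objects

    static void set_gc_state_mask(uint8_t mask, bool value) {
      gc_state = value ? (gc_state | mask) : (gc_state & ~mask);
    }

    int main() {
      bool has_forwarded = (gc_state & HAS_FORWARDED) != 0;
      // Mirrors set_concurrent_mark_in_progress(true) in the patch above:
      set_gc_state_mask(has_forwarded ? (MARKING | UPDATEREFS) : MARKING, true);
      // Prints: gc_state = 0xb (HAS_FORWARDED | MARKING | UPDATEREFS)
      std::printf("gc_state = 0x%x\n", (unsigned) gc_state);
      return 0;
    }
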
@@ -2062,14 +2088,12 @@ void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
}
oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
- o = ShenandoahBarrierSet::barrier_set()->write_barrier(o);
ShenandoahHeapLocker locker(lock());
heap_region_containing(o)->make_pinned();
return o;
}
void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
- o = ShenandoahBarrierSet::barrier_set()->read_barrier(o);
ShenandoahHeapLocker locker(lock());
heap_region_containing(o)->make_unpinned();
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index 2258448f95d..f11ac8dbf80 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -198,6 +198,7 @@ public:
//
private:
size_t _initial_size;
+ size_t _minimum_size;
DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
volatile size_t _used;
volatile size_t _committed;
@@ -216,6 +217,7 @@ public:
size_t bytes_allocated_since_gc_start();
void reset_bytes_allocated_since_gc_start();
+ size_t min_capacity() const;
size_t max_capacity() const;
size_t initial_capacity() const;
size_t capacity() const;
@@ -270,16 +272,16 @@ public:
//
public:
enum GCStateBitPos {
- // Heap has forwarded objects: need RB, ACMP, CAS barriers.
+ // Heap has forwarded objects: needs LRB barriers.
HAS_FORWARDED_BITPOS = 0,
// Heap is under marking: needs SATB barriers.
MARKING_BITPOS = 1,
- // Heap is under evacuation: needs WB barriers. (Set together with UNSTABLE)
+ // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
EVACUATION_BITPOS = 2,
- // Heap is under updating: needs SVRB/SVWB barriers.
+ // Heap is under updating: needs no additional barriers.
UPDATEREFS_BITPOS = 3,
// Heap is under traversal collection
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp
index b67af972f1e..f71bfcb676b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp
@@ -129,7 +129,7 @@ void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
// Once marking is done, which may have fixed up forwarded objects, we can drop it.
// Coming out of Full GC, we would not have any forwarded objects.
- // This also prevents read barrier from kicking in while adjusting pointers in phase3.
+ // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
heap->set_has_forwarded_objects(false);
heap->set_full_gc_move_in_progress(true);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMonitoringSupport.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMonitoringSupport.cpp
index e12fac7cca4..70b1d6c10d2 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMonitoringSupport.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMonitoringSupport.cpp
@@ -46,12 +46,12 @@ private:
ShenandoahHeap* _heap;
public:
ShenandoahGenerationCounters(ShenandoahHeap* heap) :
- GenerationCounters("Heap", 1, 1, heap->initial_capacity(), heap->max_capacity(), heap->committed()),
+ GenerationCounters("Heap", 1, 1, heap->initial_capacity(), heap->max_capacity(), heap->capacity()),
_heap(heap)
{};
virtual void update_all() {
- _current_size->set_value(_heap->committed());
+ _current_size->set_value(_heap->capacity());
}
};
@@ -94,7 +94,7 @@ void ShenandoahMonitoringSupport::update_counters() {
if (UsePerfData) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
size_t used = heap->used();
- size_t capacity = heap->capacity();
+ size_t capacity = heap->max_capacity();
_heap_counters->update_all();
_space_counters->update_all(capacity, used);
_heap_region_counters->update();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp
index bd649b2f2f0..190eae3ac13 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp
@@ -34,7 +34,7 @@
enum UpdateRefsMode {
NONE, // No reference updating
- RESOLVE, // Only a read-barrier (no reference updating)
+ RESOLVE, // Only a resolve (no reference updating)
SIMPLE, // Reference updating using simple store
CONCURRENT // Reference updating using CAS
};
@@ -184,25 +184,17 @@ public:
virtual bool do_metadata() { return true; }
};
-class ShenandoahUpdateHeapRefsSuperClosure : public BasicOopIterateClosure {
+class ShenandoahUpdateHeapRefsClosure : public BasicOopIterateClosure {
private:
ShenandoahHeap* _heap;
+
+ template <typename T>
+ void do_oop_work(T* p);
+
public:
- ShenandoahUpdateHeapRefsSuperClosure() :
+ ShenandoahUpdateHeapRefsClosure() :
_heap(ShenandoahHeap::heap()) {}
- template <typename T>
- void work(T *p);
-};
-
-class ShenandoahUpdateHeapRefsClosure : public ShenandoahUpdateHeapRefsSuperClosure {
-private:
- template <typename T>
- inline void do_oop_work(T* p) { work(p); }
-
-public:
- ShenandoahUpdateHeapRefsClosure() : ShenandoahUpdateHeapRefsSuperClosure() {}
-
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop(oop* p) { do_oop_work(p); }
};
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp
index e827c86a229..23d5bc17a3b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp
@@ -34,7 +34,7 @@ inline void ShenandoahMarkRefsSuperClosure::work(T *p) {
}
template <typename T>
-inline void ShenandoahUpdateHeapRefsSuperClosure::work(T* p) {
+inline void ShenandoahUpdateHeapRefsClosure::do_oop_work(T* p) {
_heap->maybe_update_with_forwarded(p);
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp
index 0069d2793a1..b796628b405 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp
@@ -153,7 +153,7 @@ void ShenandoahPacer::setup_for_traversal() {
void ShenandoahPacer::setup_for_idle() {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
- size_t initial = _heap->capacity() * ShenandoahPacingIdleSlack / 100;
+ size_t initial = _heap->max_capacity() * ShenandoahPacingIdleSlack / 100;
double tax = 1;
restart_with(initial, tax);
@@ -166,7 +166,7 @@ size_t ShenandoahPacer::update_and_get_progress_history() {
if (_progress == -1) {
// First initialization, report some prior
Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress);
- return (size_t) (_heap->capacity() * 0.1);
+ return (size_t) (_heap->max_capacity() * 0.1);
} else {
// Record history, and reply with historical data
_progress_history->add(_progress);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
index 62e38518681..8b333ee6532 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
@@ -242,13 +242,21 @@ ShenandoahRootEvacuator::ShenandoahRootEvacuator(ShenandoahHeap* heap, uint n_wo
_evacuation_tasks(new SubTasksDone(SHENANDOAH_EVAC_NumElements)),
_srs(n_workers),
_phase(phase),
- _coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator())
+ _coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator()),
+ _par_state_string(StringTable::weak_storage())
+
{
heap->phase_timings()->record_workers_start(_phase);
+ if (ShenandoahStringDedup::is_enabled()) {
+ StringDedup::gc_prologue(false);
+ }
}
ShenandoahRootEvacuator::~ShenandoahRootEvacuator() {
delete _evacuation_tasks;
+ if (ShenandoahStringDedup::is_enabled()) {
+ StringDedup::gc_epilogue();
+ }
ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
}
@@ -270,11 +278,38 @@ void ShenandoahRootEvacuator::process_evacuate_roots(OopClosure* oops,
_coderoots_cset_iterator.possibly_parallel_blobs_do(blobs);
}
- if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_jvmti_oops_do)) {
+ if (ShenandoahStringDedup::is_enabled()) {
ShenandoahForwardedIsAliveClosure is_alive;
+ ShenandoahStringDedup::parallel_oops_do(&is_alive, oops, worker_id);
+ }
+
+ if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_Universe_oops_do)) {
+ ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::UniverseRoots, worker_id);
+ Universe::oops_do(oops);
+ }
+
+ if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_Management_oops_do)) {
+ ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ManagementRoots, worker_id);
+ Management::oops_do(oops);
+ }
+
+ if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_jvmti_oops_do)) {
ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JVMTIRoots, worker_id);
+ JvmtiExport::oops_do(oops);
+ ShenandoahForwardedIsAliveClosure is_alive;
JvmtiExport::weak_oops_do(&is_alive, oops);
}
+
+ if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_SystemDictionary_oops_do)) {
+ ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::SystemDictionaryRoots, worker_id);
+ SystemDictionary::oops_do(oops);
+ }
+
+ if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_ObjectSynchronizer_oops_do)) {
+ ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ObjectSynchronizerRoots, worker_id);
+ ObjectSynchronizer::oops_do(oops);
+ }
+
}
uint ShenandoahRootEvacuator::n_workers() const {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
index 323475b2768..2821e950575 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
@@ -58,7 +58,7 @@ class ShenandoahRootProcessor : public StackObj {
StrongRootsScope _srs;
OopStorage::ParState _par_state_string;
ShenandoahPhaseTimings::Phase _phase;
- ParallelCLDRootIterator _cld_iterator;
+ ParallelCLDRootIterator _cld_iterator;
ShenandoahAllCodeRootsIterator _coderoots_all_iterator;
CodeBlobClosure* _threads_nmethods_cl;
WeakProcessorPhaseTimes _weak_processor_timings;
@@ -120,11 +120,16 @@ class ShenandoahRootEvacuator : public StackObj {
StrongRootsScope _srs;
ShenandoahPhaseTimings::Phase _phase;
ShenandoahCsetCodeRootsIterator _coderoots_cset_iterator;
+ OopStorage::ParState _par_state_string;
enum Shenandoah_evacuate_roots_tasks {
- SHENANDOAH_EVAC_jvmti_oops_do,
- // Leave this one last.
- SHENANDOAH_EVAC_NumElements
+ SHENANDOAH_EVAC_Universe_oops_do,
+ SHENANDOAH_EVAC_ObjectSynchronizer_oops_do,
+ SHENANDOAH_EVAC_Management_oops_do,
+ SHENANDOAH_EVAC_SystemDictionary_oops_do,
+ SHENANDOAH_EVAC_jvmti_oops_do,
+ // Leave this one last.
+ SHENANDOAH_EVAC_NumElements
};
public:
ShenandoahRootEvacuator(ShenandoahHeap* heap, uint n_workers,
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRuntime.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRuntime.cpp
index c011897b251..8fb70834c4d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRuntime.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRuntime.cpp
@@ -55,8 +55,8 @@ JRT_LEAF(void, ShenandoahRuntime::write_ref_field_pre_entry(oopDesc* orig, JavaT
ShenandoahThreadLocalData::satb_mark_queue(thread).enqueue_known_active(orig);
JRT_END
-JRT_LEAF(oopDesc*, ShenandoahRuntime::write_barrier_JRT(oopDesc* src))
- oop result = ShenandoahBarrierSet::barrier_set()->write_barrier_mutator(src);
+JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_JRT(oopDesc* src))
+ oop result = ShenandoahBarrierSet::barrier_set()->load_reference_barrier_mutator(src);
return (oopDesc*) result;
JRT_END
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRuntime.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRuntime.hpp
index 87943ce2047..f142e16f039 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRuntime.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRuntime.hpp
@@ -37,7 +37,7 @@ public:
static void write_ref_array_post_entry(HeapWord* dst, size_t length);
static void write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread);
- static oopDesc* write_barrier_JRT(oopDesc* src);
+ static oopDesc* load_reference_barrier_JRT(oopDesc* src);
static void shenandoah_clone_barrier(oopDesc* obj);
};
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp
index 88a22d3d146..d01c4a605c5 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp
@@ -595,11 +595,10 @@ void ShenandoahTraversalGC::final_traversal_collection() {
}
if (!_heap->cancelled_gc()) {
+ fixup_roots();
if (_heap->unload_classes()) {
_heap->unload_classes_and_cleanup_tables(false);
}
-
- fixup_roots();
}
if (!_heap->cancelled_gc()) {
@@ -769,29 +768,6 @@ public:
void do_oop(oop* p) { do_oop_work(p); }
};
-class ShenandoahTraversalWeakUpdateClosure : public OopClosure {
-private:
- template <typename T>
- inline void do_oop_work(T* p) {
- // Cannot call maybe_update_with_forwarded, because on traversal-degen
- // path the collection set is already dropped. Instead, do the unguarded store.
- // TODO: This can be fixed after degen-traversal stops dropping cset.
- T o = RawAccess<>::oop_load(p);
- if (!CompressedOops::is_null(o)) {
- oop obj = CompressedOops::decode_not_null(o);
- obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
- shenandoah_assert_marked(p, obj);
- RawAccess::oop_store(p, obj);
- }
- }
-
-public:
- ShenandoahTraversalWeakUpdateClosure() {}
-
- void do_oop(narrowOop* p) { do_oop_work(p); }
- void do_oop(oop* p) { do_oop_work(p); }
-};
-
class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
private:
ShenandoahObjToScanQueue* _queue;
@@ -1104,16 +1080,6 @@ void ShenandoahTraversalGC::weak_refs_work_doit() {
&pt);
}
- {
- ShenandoahGCPhase phase(phase_process);
- ShenandoahTerminationTracker termination(ShenandoahPhaseTimings::weakrefs_termination);
-
- // Process leftover weak oops (using parallel version)
- ShenandoahTraversalWeakUpdateClosure cl;
- WeakProcessor::weak_oops_do(workers, &is_alive, &cl, 1);
-
- pt.print_all_references();
-
- assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
- }
+ pt.print_all_references();
+ assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
index 062aee68592..b492773183e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
@@ -628,6 +628,10 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label,
enabled = true;
expected = ShenandoahHeap::HAS_FORWARDED;
break;
+ case _verify_gcstate_evacuation:
+ enabled = true;
+ expected = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION;
+ break;
case _verify_gcstate_stable:
enabled = true;
expected = ShenandoahHeap::STABLE;
@@ -808,6 +812,18 @@ void ShenandoahVerifier::verify_before_evacuation() {
);
}
+void ShenandoahVerifier::verify_during_evacuation() {
+ verify_at_safepoint(
+ "During Evacuation",
+ _verify_forwarded_allow, // some forwarded references are allowed
+ _verify_marked_disable, // walk only roots
+ _verify_cset_disable, // some cset references are not forwarded yet
+ _verify_liveness_disable, // liveness data might be already stale after pre-evacs
+ _verify_regions_disable, // trash regions not yet recycled
+ _verify_gcstate_evacuation // evacuation is in progress
+ );
+}
+
void ShenandoahVerifier::verify_after_evacuation() {
verify_at_safepoint(
"After Evacuation",
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
index a7191eac1ec..4108633e93d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
@@ -135,6 +135,9 @@ public:
// Nothing is in progress, some objects are forwarded
_verify_gcstate_forwarded,
+
+ // Evacuation is in progress, some objects are forwarded
+ _verify_gcstate_evacuation,
} VerifyGCState;
struct VerifyOptions {
@@ -173,6 +176,7 @@ public:
void verify_before_concmark();
void verify_after_concmark();
void verify_before_evacuation();
+ void verify_during_evacuation();
void verify_after_evacuation();
void verify_before_updaterefs();
void verify_after_updaterefs();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
index 533b728dafd..a6c1742efdd 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
@@ -244,7 +244,7 @@
"Time is in microseconds.") \
\
experimental(uintx, ShenandoahEvacAssist, 10, \
- "How many objects to evacuate on WB assist path. " \
+ "How many objects to evacuate on LRB assist path. " \
"Use zero to disable.") \
\
experimental(bool, ShenandoahPacing, true, \
@@ -352,27 +352,18 @@
diagnostic(bool, ShenandoahKeepAliveBarrier, true, \
"Turn on/off keep alive barriers in Shenandoah") \
\
- diagnostic(bool, ShenandoahWriteBarrier, true, \
- "Turn on/off write barriers in Shenandoah") \
- \
- diagnostic(bool, ShenandoahReadBarrier, true, \
- "Turn on/off read barriers in Shenandoah") \
- \
diagnostic(bool, ShenandoahStoreValEnqueueBarrier, false, \
"Turn on/off enqueuing of oops for storeval barriers") \
\
- diagnostic(bool, ShenandoahStoreValReadBarrier, true, \
- "Turn on/off store val read barriers in Shenandoah") \
- \
diagnostic(bool, ShenandoahCASBarrier, true, \
"Turn on/off CAS barriers in Shenandoah") \
\
- diagnostic(bool, ShenandoahAcmpBarrier, true, \
- "Turn on/off acmp barriers in Shenandoah") \
- \
diagnostic(bool, ShenandoahCloneBarrier, true, \
"Turn on/off clone barriers in Shenandoah") \
\
+ diagnostic(bool, ShenandoahLoadRefBarrier, true, \
+ "Turn on/off load-reference barriers in Shenandoah") \
+ \
diagnostic(bool, ShenandoahStoreCheck, false, \
"Emit additional code that checks objects are written to only" \
" in to-space") \
@@ -401,20 +392,13 @@
"Turn it off for maximum compatibility with reflection or JNI " \
"code that manipulates final fields.") \
\
- diagnostic(bool, ShenandoahDecreaseRegisterPressure, false, \
- "Try to reuse after-barrier values to reduce register pressure") \
- \
experimental(bool, ShenandoahCommonGCStateLoads, false, \
"Enable commonming for GC state loads in generated code.") \
\
develop(bool, ShenandoahVerifyOptoBarriers, false, \
"Verify no missing barriers in C2") \
\
- experimental(bool, ShenandoahDontIncreaseWBFreq, true, \
- "Common 2 WriteBarriers or WriteBarrier and a ReadBarrier only " \
- "if the resulting WriteBarrier isn't executed more frequently") \
- \
experimental(bool, ShenandoahLoopOptsAfterExpansion, true, \
- "Attempt more loop opts after write barrier expansion") \
+ "Attempt more loop opts after barrier expansion") \
#endif // SHARE_GC_SHENANDOAH_SHENANDOAH_GLOBALS_HPP
diff --git a/src/hotspot/share/interpreter/linkResolver.cpp b/src/hotspot/share/interpreter/linkResolver.cpp
index 69f90b4a3d7..6fdd056287f 100644
--- a/src/hotspot/share/interpreter/linkResolver.cpp
+++ b/src/hotspot/share/interpreter/linkResolver.cpp
@@ -264,10 +264,6 @@ LinkInfo::LinkInfo(const constantPoolHandle& pool, int index, TRAPS) {
_check_access = true;
}
-char* LinkInfo::method_string() const {
- return Method::name_and_sig_as_C_string(_resolved_klass, _name, _signature);
-}
-
#ifndef PRODUCT
void LinkInfo::print() {
ResourceMark rm;
@@ -593,14 +589,12 @@ void LinkResolver::check_method_accessability(Klass* ref_klass,
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_IllegalAccessError(),
- "class %s tried to access %s%s%smethod %s.%s%s (%s%s%s)",
+ "class %s tried to access %s%s%smethod '%s' (%s%s%s)",
ref_klass->external_name(),
sel_method->is_abstract() ? "abstract " : "",
sel_method->is_protected() ? "protected " : "",
sel_method->is_private() ? "private " : "",
- sel_klass->external_name(),
- sel_method->name()->as_C_string(),
- sel_method->signature()->as_C_string(),
+ sel_method->external_name(),
(same_module) ? ref_klass->joint_in_module_of_loader(sel_klass) : ref_klass->class_in_module_of_loader(),
(same_module) ? "" : "; ",
(same_module) ? "" : sel_klass->class_in_module_of_loader()
@@ -670,12 +664,11 @@ void LinkResolver::check_method_loader_constraints(const LinkInfo& link_info,
assert(target_loader_data != NULL, "resolved method's class has no class loader data");
stringStream ss;
- ss.print("loader constraint violation: when resolving %s"
- " \"%s\" the class loader %s of the current class, %s,"
+ ss.print("loader constraint violation: when resolving %s '", method_type);
+ Method::print_external_name(&ss, link_info.resolved_klass(), link_info.name(), link_info.signature());
+ ss.print("' the class loader %s of the current class, %s,"
" and the class loader %s for the method's defining class, %s, have"
" different Class objects for the type %s used in the signature (%s; %s)",
- method_type,
- link_info.method_string(),
current_loader_data->loader_name_and_id(),
current_class->name()->as_C_string(),
target_loader_data->loader_name_and_id(),
@@ -739,9 +732,11 @@ methodHandle LinkResolver::resolve_method(const LinkInfo& link_info,
// 2. check constant pool tag for called method - must be JVM_CONSTANT_Methodref
if (!link_info.tag().is_invalid() && !link_info.tag().is_method()) {
ResourceMark rm(THREAD);
- char buf[200];
- jio_snprintf(buf, sizeof(buf), "Method %s must be Methodref constant", link_info.method_string());
- THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+ stringStream ss;
+ ss.print("Method '");
+ Method::print_external_name(&ss, link_info.resolved_klass(), link_info.name(), link_info.signature());
+ ss.print("' must be Methodref constant");
+ THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string());
}
// 3. lookup method in resolved klass and its super klasses
@@ -764,11 +759,12 @@ methodHandle LinkResolver::resolve_method(const LinkInfo& link_info,
// 5. method lookup failed
if (resolved_method.is_null()) {
ResourceMark rm(THREAD);
+ stringStream ss;
+ ss.print("'");
+ Method::print_external_name(&ss, resolved_klass, link_info.name(), link_info.signature());
+ ss.print("'");
THROW_MSG_CAUSE_(vmSymbols::java_lang_NoSuchMethodError(),
- Method::name_and_sig_as_C_string(resolved_klass,
- link_info.name(),
- link_info.signature()),
- nested_exception, NULL);
+ ss.as_string(), nested_exception, NULL);
}
// 6. access checks, access checking may be turned off when calling from within the VM.
@@ -840,9 +836,11 @@ methodHandle LinkResolver::resolve_interface_method(const LinkInfo& link_info, B
// check constant pool tag for called method - must be JVM_CONSTANT_InterfaceMethodref
if (!link_info.tag().is_invalid() && !link_info.tag().is_interface_method()) {
ResourceMark rm(THREAD);
- char buf[200];
- jio_snprintf(buf, sizeof(buf), "Method %s must be InterfaceMethodref constant", link_info.method_string());
- THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+ stringStream ss;
+ ss.print("Method '");
+ Method::print_external_name(&ss, link_info.resolved_klass(), link_info.name(), link_info.signature());
+ ss.print("' must be InterfaceMethodref constant");
+ THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string());
}
// lookup method in this interface or its super, java.lang.Object
@@ -857,10 +855,11 @@ methodHandle LinkResolver::resolve_interface_method(const LinkInfo& link_info, B
if (resolved_method.is_null()) {
// no method found
ResourceMark rm(THREAD);
- THROW_MSG_NULL(vmSymbols::java_lang_NoSuchMethodError(),
- Method::name_and_sig_as_C_string(resolved_klass,
- link_info.name(),
- link_info.signature()));
+ stringStream ss;
+ ss.print("'");
+ Method::print_external_name(&ss, resolved_klass, link_info.name(), link_info.signature());
+ ss.print("'");
+ THROW_MSG_NULL(vmSymbols::java_lang_NoSuchMethodError(), ss.as_string());
}
if (link_info.check_access()) {
@@ -881,11 +880,12 @@ methodHandle LinkResolver::resolve_interface_method(const LinkInfo& link_info, B
if (code != Bytecodes::_invokestatic && resolved_method->is_static()) {
ResourceMark rm(THREAD);
- char buf[200];
- jio_snprintf(buf, sizeof(buf), "Expected instance not static method %s",
- Method::name_and_sig_as_C_string(resolved_klass,
- resolved_method->name(), resolved_method->signature()));
- THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+ stringStream ss;
+ ss.print("Expected instance not static method '");
+ Method::print_external_name(&ss, resolved_klass,
+ resolved_method->name(), resolved_method->signature());
+ ss.print("'");
+ THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string());
}
if (log_develop_is_enabled(Trace, itables)) {
@@ -1086,11 +1086,11 @@ methodHandle LinkResolver::linktime_resolve_static_method(const LinkInfo& link_i
// check if static
if (!resolved_method->is_static()) {
ResourceMark rm(THREAD);
- char buf[200];
- jio_snprintf(buf, sizeof(buf), "Expected static method %s", Method::name_and_sig_as_C_string(resolved_klass,
- resolved_method->name(),
- resolved_method->signature()));
- THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+ stringStream ss;
+ ss.print("Expected static method '");
+ resolved_method()->print_external_name(&ss);
+ ss.print("'");
+ THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string());
}
return resolved_method;
}
@@ -1127,14 +1127,16 @@ methodHandle LinkResolver::linktime_resolve_special_method(const LinkInfo& link_
if (resolved_method->name() == vmSymbols::object_initializer_name() &&
resolved_method->method_holder() != resolved_klass) {
ResourceMark rm(THREAD);
+ stringStream ss;
+ ss.print("%s: method '", resolved_klass->external_name());
+ resolved_method->signature()->print_as_signature_external_return_type(&ss);
+ ss.print(" %s(", resolved_method->name()->as_C_string());
+ resolved_method->signature()->print_as_signature_external_parameters(&ss);
+ ss.print(")' not found");
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_NoSuchMethodError(),
- "%s: method %s%s not found",
- resolved_klass->external_name(),
- resolved_method->name()->as_C_string(),
- resolved_method->signature()->as_C_string()
- );
+ "%s", ss.as_string());
return NULL;
}
@@ -1153,27 +1155,23 @@ methodHandle LinkResolver::linktime_resolve_special_method(const LinkInfo& link_
if (!is_reflect &&
!klass_to_check->is_same_or_direct_interface(resolved_klass)) {
ResourceMark rm(THREAD);
- char buf[200];
- jio_snprintf(buf, sizeof(buf),
- "Interface method reference: %s, is in an indirect superinterface of %s",
- Method::name_and_sig_as_C_string(resolved_klass,
- resolved_method->name(),
- resolved_method->signature()),
- current_klass->external_name());
- THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+ stringStream ss;
+ ss.print("Interface method reference: '");
+ resolved_method->print_external_name(&ss);
+ ss.print("', is in an indirect superinterface of %s",
+ current_klass->external_name());
+ THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string());
}
}
// check if not static
if (resolved_method->is_static()) {
ResourceMark rm(THREAD);
- char buf[200];
- jio_snprintf(buf, sizeof(buf),
- "Expecting non-static method %s",
- Method::name_and_sig_as_C_string(resolved_klass,
- resolved_method->name(),
- resolved_method->signature()));
- THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+ stringStream ss;
+ ss.print("Expecting non-static method '");
+ resolved_method->print_external_name(&ss);
+ ss.print("'");
+ THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string());
}
if (log_develop_is_enabled(Trace, itables)) {
@@ -1219,10 +1217,11 @@ void LinkResolver::runtime_resolve_special_method(CallInfo& result,
// check if found
if (sel_method.is_null()) {
ResourceMark rm(THREAD);
- THROW_MSG(vmSymbols::java_lang_AbstractMethodError(),
- Method::name_and_sig_as_C_string(resolved_klass,
- resolved_method->name(),
- resolved_method->signature()));
+ stringStream ss;
+ ss.print("'");
+ resolved_method->print_external_name(&ss);
+ ss.print("'");
+ THROW_MSG(vmSymbols::java_lang_AbstractMethodError(), ss.as_string());
// check loader constraints if found a different method
} else if (sel_method() != resolved_method()) {
check_method_loader_constraints(link_info, sel_method, "method", CHECK);
@@ -1244,8 +1243,8 @@ void LinkResolver::runtime_resolve_special_method(CallInfo& result,
char buf[500];
jio_snprintf(buf, sizeof(buf),
"Receiver class %s must be the current class or a subtype of interface %s",
- receiver_klass->name()->as_C_string(),
- sender->name()->as_C_string());
+ receiver_klass->external_name(),
+ sender->external_name());
THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), buf);
}
}
@@ -1254,20 +1253,21 @@ void LinkResolver::runtime_resolve_special_method(CallInfo& result,
// check if not static
if (sel_method->is_static()) {
ResourceMark rm(THREAD);
- char buf[200];
- jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", Method::name_and_sig_as_C_string(resolved_klass,
- resolved_method->name(),
- resolved_method->signature()));
- THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+ stringStream ss;
+ ss.print("Expecting non-static method '");
+ resolved_method->print_external_name(&ss);
+ ss.print("'");
+ THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string());
}
// check if abstract
if (sel_method->is_abstract()) {
ResourceMark rm(THREAD);
- THROW_MSG(vmSymbols::java_lang_AbstractMethodError(),
- Method::name_and_sig_as_C_string(resolved_klass,
- sel_method->name(),
- sel_method->signature()));
+ stringStream ss;
+ ss.print("'");
+ Method::print_external_name(&ss, resolved_klass, sel_method->name(), sel_method->signature());
+ ss.print("'");
+ THROW_MSG(vmSymbols::java_lang_AbstractMethodError(), ss.as_string());
}
if (log_develop_is_enabled(Trace, itables)) {
@@ -1305,23 +1305,22 @@ methodHandle LinkResolver::linktime_resolve_virtual_method(const LinkInfo& link_
// This is impossible, if resolve_klass is an interface, we've thrown icce in resolve_method
if (resolved_klass->is_interface() && resolved_method->is_private()) {
ResourceMark rm(THREAD);
- char buf[200];
- jio_snprintf(buf, sizeof(buf), "private interface method requires invokespecial, not invokevirtual: method %s, caller-class:%s",
- Method::name_and_sig_as_C_string(resolved_klass,
- resolved_method->name(),
- resolved_method->signature()),
- (current_klass == NULL ? "" : current_klass->internal_name()));
- THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+ stringStream ss;
+ ss.print("private interface method requires invokespecial, not invokevirtual: method '");
+ resolved_method->print_external_name(&ss);
+ ss.print("', caller-class: %s",
+ (current_klass == NULL ? "" : current_klass->internal_name()));
+ THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string());
}
// check if not static
if (resolved_method->is_static()) {
ResourceMark rm(THREAD);
- char buf[200];
- jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", Method::name_and_sig_as_C_string(resolved_klass,
- resolved_method->name(),
- resolved_method->signature()));
- THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+ stringStream ss;
+ ss.print("Expecting non-static method '");
+ resolved_method->print_external_name(&ss);
+ ss.print("'");
+ THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string());
}
if (log_develop_is_enabled(Trace, vtables)) {
@@ -1470,10 +1469,11 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result,
// Throw Illegal Access Error if selected_method is not public.
if (!selected_method->is_public()) {
ResourceMark rm(THREAD);
- THROW_MSG(vmSymbols::java_lang_IllegalAccessError(),
- Method::name_and_sig_as_C_string(recv_klass,
- selected_method->name(),
- selected_method->signature()));
+ stringStream ss;
+ ss.print("'");
+ Method::print_external_name(&ss, recv_klass, selected_method->name(), selected_method->signature());
+ ss.print("'");
+ THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), ss.as_string());
}
// check if abstract
if (check_null_and_abstract && selected_method->is_abstract()) {
@@ -1806,19 +1806,22 @@ void LinkResolver::throw_abstract_method_error(const methodHandle& resolved_meth
}
assert(resolved_method.not_null(), "Sanity");
- ss.print(" resolved method %s%s%s%s of %s %s.",
+ ss.print(" resolved method '%s%s",
resolved_method->is_abstract() ? "abstract " : "",
- resolved_method->is_private() ? "private " : "",
- resolved_method->name()->as_C_string(),
- resolved_method->signature()->as_C_string(),
+ resolved_method->is_private() ? "private " : "");
+ resolved_method->signature()->print_as_signature_external_return_type(&ss);
+ ss.print(" %s(", resolved_method->name()->as_C_string());
+ resolved_method->signature()->print_as_signature_external_parameters(&ss);
+ ss.print(")' of %s %s.",
resolved_klass->external_kind(),
resolved_klass->external_name());
if (selected_method.not_null() && !(resolved_method == selected_method)) {
- ss.print(" Selected method is %s%s%s.",
+ ss.print(" Selected method is '%s%s",
selected_method->is_abstract() ? "abstract " : "",
- selected_method->is_private() ? "private " : "",
- selected_method->name_and_sig_as_C_string());
+ selected_method->is_private() ? "private " : "");
+ selected_method->print_external_name(&ss);
+ ss.print("'.");
}
THROW_MSG(vmSymbols::java_lang_AbstractMethodError(), ss.as_string());
diff --git a/src/hotspot/share/interpreter/linkResolver.hpp b/src/hotspot/share/interpreter/linkResolver.hpp
index e357320e3f9..52ab55a7900 100644
--- a/src/hotspot/share/interpreter/linkResolver.hpp
+++ b/src/hotspot/share/interpreter/linkResolver.hpp
@@ -182,7 +182,6 @@ class LinkInfo : public StackObj {
methodHandle current_method() const { return _current_method; }
constantTag tag() const { return _tag; }
bool check_access() const { return _check_access; }
- char* method_string() const;
void print() PRODUCT_RETURN;
};
diff --git a/src/hotspot/share/memory/allocation.cpp b/src/hotspot/share/memory/allocation.cpp
index 5f4b5f94944..f04ee776d83 100644
--- a/src/hotspot/share/memory/allocation.cpp
+++ b/src/hotspot/share/memory/allocation.cpp
@@ -84,8 +84,14 @@ void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
return Metaspace::allocate(loader_data, word_size, type, THREAD);
}
-bool MetaspaceObj::is_metaspace_object() const {
- return Metaspace::contains((void*)this);
+bool MetaspaceObj::is_valid(const MetaspaceObj* p) {
+ // Weed out obvious bogus values first without traversing metaspace
+ if ((size_t)p < os::min_page_size()) {
+ return false;
+ } else if (!is_aligned((address)p, sizeof(MetaWord))) {
+ return false;
+ }
+ return Metaspace::contains((void*)p);
}
void MetaspaceObj::print_address_on(outputStream* st) const {
diff --git a/src/hotspot/share/memory/allocation.hpp b/src/hotspot/share/memory/allocation.hpp
index 17273c08c7e..08908a3d0fd 100644
--- a/src/hotspot/share/memory/allocation.hpp
+++ b/src/hotspot/share/memory/allocation.hpp
@@ -258,12 +258,19 @@ class MetaspaceObj {
static void* _shared_metaspace_top; // (exclusive) high address
public:
- bool is_metaspace_object() const;
- bool is_shared() const {
+
+ // Returns true if the pointer points to a valid MetaspaceObj. A valid
+ // MetaspaceObj is MetaWord-aligned and contained within either
+ // non-shared or shared metaspace.
+ static bool is_valid(const MetaspaceObj* p);
+
+ static bool is_shared(const MetaspaceObj* p) {
// If no shared metaspace regions are mapped, _shared_metaspace_{base,top} will
// both be NULL and all values of p will be rejected quickly.
- return (((void*)this) < _shared_metaspace_top && ((void*)this) >= _shared_metaspace_base);
+ return (((void*)p) < _shared_metaspace_top && ((void*)p) >= _shared_metaspace_base);
}
+ bool is_shared() const { return MetaspaceObj::is_shared(this); }
+
void print_address_on(outputStream* st) const; // nonvirtual address printing
static void set_shared_metaspace_range(void* base, void* top) {
diff --git a/src/hotspot/share/memory/heapShared.cpp b/src/hotspot/share/memory/heapShared.cpp
index 3ec6830d2e9..546ab452510 100644
--- a/src/hotspot/share/memory/heapShared.cpp
+++ b/src/hotspot/share/memory/heapShared.cpp
@@ -69,6 +69,7 @@ static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
{"java/lang/Short$ShortCache", "archivedCache"},
{"java/lang/Character$CharacterCache", "archivedCache"},
{"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
+ {"sun/util/locale/BaseLocale", "constantBaseLocales"},
};
// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
diff --git a/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp b/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp
index 721eab60a26..7381ea51123 100644
--- a/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp
@@ -92,9 +92,9 @@ void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
assert_lock_strong(MetaspaceExpand_lock);
// Don't use a VirtualSpaceListIterator because this
// list is being changed and a straightforward use of an iterator is not safe.
- VirtualSpaceNode* purged_vsl = NULL;
VirtualSpaceNode* prev_vsl = virtual_space_list();
VirtualSpaceNode* next_vsl = prev_vsl;
+ int num_purged_nodes = 0;
while (next_vsl != NULL) {
VirtualSpaceNode* vsl = next_vsl;
DEBUG_ONLY(vsl->verify(false);)
@@ -118,20 +118,17 @@ void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
dec_reserved_words(vsl->reserved_words());
dec_committed_words(vsl->committed_words());
dec_virtual_space_count();
- purged_vsl = vsl;
delete vsl;
+ num_purged_nodes ++;
} else {
prev_vsl = vsl;
}
}
+
+ // Verify list
#ifdef ASSERT
- if (purged_vsl != NULL) {
- // List should be stable enough to use an iterator here.
- VirtualSpaceListIterator iter(virtual_space_list());
- while (iter.repeat()) {
- VirtualSpaceNode* vsl = iter.get_next();
- assert(vsl != purged_vsl, "Purge of vsl failed");
- }
+ if (num_purged_nodes > 0) {
+ verify(false);
}
#endif
}
@@ -143,11 +140,13 @@ void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
VirtualSpaceNode* VirtualSpaceList::find_enclosing_space(const void* ptr) {
// List should be stable enough to use an iterator here because removing virtual
// space nodes is only allowed at a safepoint.
- VirtualSpaceListIterator iter(virtual_space_list());
- while (iter.repeat()) {
- VirtualSpaceNode* vsn = iter.get_next();
- if (vsn->contains(ptr)) {
- return vsn;
+ if (is_within_envelope((address)ptr)) {
+ VirtualSpaceListIterator iter(virtual_space_list());
+ while (iter.repeat()) {
+ VirtualSpaceNode* vsn = iter.get_next();
+ if (vsn->contains(ptr)) {
+ return vsn;
+ }
}
}
return NULL;
@@ -170,7 +169,9 @@ VirtualSpaceList::VirtualSpaceList(size_t word_size) :
_is_class(false),
_reserved_words(0),
_committed_words(0),
- _virtual_space_count(0) {
+ _virtual_space_count(0),
+ _envelope_lo((address)max_uintx),
+ _envelope_hi(NULL) {
MutexLockerEx cl(MetaspaceExpand_lock,
Mutex::_no_safepoint_check_flag);
create_new_virtual_space(word_size);
@@ -182,12 +183,17 @@ VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
_is_class(true),
_reserved_words(0),
_committed_words(0),
- _virtual_space_count(0) {
+ _virtual_space_count(0),
+ _envelope_lo((address)max_uintx),
+ _envelope_hi(NULL) {
MutexLockerEx cl(MetaspaceExpand_lock,
Mutex::_no_safepoint_check_flag);
VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
bool succeeded = class_entry->initialize();
if (succeeded) {
+ expand_envelope_to_include_node(class_entry);
+ // ensure lock-free iteration sees fully initialized node
+ OrderAccess::storestore();
link_vs(class_entry);
}
}
@@ -224,12 +230,16 @@ bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
} else {
assert(new_entry->reserved_words() == vs_word_size,
"Reserved memory size differs from requested memory size");
+ expand_envelope_to_include_node(new_entry);
// ensure lock-free iteration sees fully initialized node
OrderAccess::storestore();
link_vs(new_entry);
DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
return true;
}
+
+ DEBUG_ONLY(verify(false);)
+
}
void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
@@ -399,5 +409,41 @@ void VirtualSpaceList::print_map(outputStream* st) const {
}
}
+// Given a node, expand range such that it includes the node.
+void VirtualSpaceList::expand_envelope_to_include_node(const VirtualSpaceNode* node) {
+ _envelope_lo = MIN2(_envelope_lo, (address)node->low_boundary());
+ _envelope_hi = MAX2(_envelope_hi, (address)node->high_boundary());
+}
+
+
+#ifdef ASSERT
+void VirtualSpaceList::verify(bool slow) {
+ VirtualSpaceNode* list = virtual_space_list();
+ VirtualSpaceListIterator iter(list);
+ size_t reserved = 0;
+ size_t committed = 0;
+ size_t node_count = 0;
+ while (iter.repeat()) {
+ VirtualSpaceNode* node = iter.get_next();
+ if (slow) {
+ node->verify(true);
+ }
+ // Check that the node resides fully within our envelope.
+ assert((address)node->low_boundary() >= _envelope_lo && (address)node->high_boundary() <= _envelope_hi,
+ "Node " SIZE_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT ") outside envelope [" PTR_FORMAT ", " PTR_FORMAT ").",
+ node_count, p2i(node->low_boundary()), p2i(node->high_boundary()), p2i(_envelope_lo), p2i(_envelope_hi));
+ reserved += node->reserved_words();
+ committed += node->committed_words();
+ node_count ++;
+ }
+ assert(reserved == reserved_words() && committed == committed_words() && node_count == _virtual_space_count,
+ "Mismatch: reserved real: " SIZE_FORMAT " expected: " SIZE_FORMAT
+ ", committed real: " SIZE_FORMAT " expected: " SIZE_FORMAT
+ ", node count real: " SIZE_FORMAT " expected: " SIZE_FORMAT ".",
+ reserved, reserved_words(), committed, committed_words(),
+ node_count, _virtual_space_count);
+}
+#endif // ASSERT
+
} // namespace metaspace
diff --git a/src/hotspot/share/memory/metaspace/virtualSpaceList.hpp b/src/hotspot/share/memory/metaspace/virtualSpaceList.hpp
index 5c55eed4b2c..937d2dcdb31 100644
--- a/src/hotspot/share/memory/metaspace/virtualSpaceList.hpp
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceList.hpp
@@ -58,6 +58,19 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
// Number of virtual spaces
size_t _virtual_space_count;
+ // Optimization: we keep an address range to quickly exclude pointers
+ // which are clearly not pointing into metaspace. This is an optimization for
+ // VirtualSpaceList::contains().
+ address _envelope_lo;
+ address _envelope_hi;
+
+ bool is_within_envelope(address p) const {
+ return p >= _envelope_lo && p < _envelope_hi;
+ }
+
+ // Given a node, expand range such that it includes the node.
+ void expand_envelope_to_include_node(const VirtualSpaceNode* node);
+
~VirtualSpaceList();
VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
@@ -80,6 +93,8 @@ class VirtualSpaceList : public CHeapObj {
// virtual space and add the chunks to the free list.
void retire_current_virtual_space();
+ DEBUG_ONLY(bool contains_node(const VirtualSpaceNode* node) const;)
+
public:
VirtualSpaceList(size_t word_size);
VirtualSpaceList(ReservedSpace rs);
@@ -126,6 +141,8 @@ class VirtualSpaceList : public CHeapObj {
void print_on(outputStream* st, size_t scale) const;
void print_map(outputStream* st) const;
+ DEBUG_ONLY(void verify(bool slow);)
+
class VirtualSpaceListIterator : public StackObj {
VirtualSpaceNode* _virtual_spaces;
public:
diff --git a/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp b/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp
index fcf5dcda395..c8fbdaa431d 100644
--- a/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp
@@ -60,6 +60,8 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
// Convenience functions to access the _virtual_space
char* low() const { return virtual_space()->low(); }
char* high() const { return virtual_space()->high(); }
+ char* low_boundary() const { return virtual_space()->low_boundary(); }
+ char* high_boundary() const { return virtual_space()->high_boundary(); }
// The first Metachunk will be allocated at the bottom of the
// VirtualSpace
diff --git a/src/hotspot/share/oops/constantPool.cpp b/src/hotspot/share/oops/constantPool.cpp
index 4815ed37d18..292576beef3 100644
--- a/src/hotspot/share/oops/constantPool.cpp
+++ b/src/hotspot/share/oops/constantPool.cpp
@@ -1000,14 +1000,17 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp,
if ((callee->is_interface() && m_tag.is_method()) ||
((!callee->is_interface() && m_tag.is_interface_method()))) {
ResourceMark rm(THREAD);
- char buf[400];
- jio_snprintf(buf, sizeof(buf),
- "Inconsistent constant pool data in classfile for class %s. "
- "Method %s%s at index %d is %s and should be %s",
- callee->name()->as_C_string(), name->as_C_string(), signature->as_C_string(), index,
- callee->is_interface() ? "CONSTANT_MethodRef" : "CONSTANT_InterfaceMethodRef",
- callee->is_interface() ? "CONSTANT_InterfaceMethodRef" : "CONSTANT_MethodRef");
- THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+ stringStream ss;
+ ss.print("Inconsistent constant pool data in classfile for class %s. "
+ "Method '", callee->name()->as_C_string());
+ signature->print_as_signature_external_return_type(&ss);
+ ss.print(" %s(", name->as_C_string());
+ signature->print_as_signature_external_parameters(&ss);
+ ss.print(")' at index %d is %s and should be %s",
+ index,
+ callee->is_interface() ? "CONSTANT_MethodRef" : "CONSTANT_InterfaceMethodRef",
+ callee->is_interface() ? "CONSTANT_InterfaceMethodRef" : "CONSTANT_MethodRef");
+ THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string());
}
Klass* klass = this_cp->pool_holder();
diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp
index 5d802f4bf77..741cdf76fb9 100644
--- a/src/hotspot/share/oops/instanceKlass.cpp
+++ b/src/hotspot/share/oops/instanceKlass.cpp
@@ -3104,7 +3104,7 @@ static void print_vtable(intptr_t* start, int len, outputStream* st) {
for (int i = 0; i < len; i++) {
intptr_t e = start[i];
st->print("%d : " INTPTR_FORMAT, i, e);
- if (e != 0 && ((Metadata*)e)->is_metaspace_object()) {
+ if (MetaspaceObj::is_valid((Metadata*)e)) {
st->print(" ");
((Metadata*)e)->print_value_on(st);
}
diff --git a/src/hotspot/share/oops/klassVtable.cpp b/src/hotspot/share/oops/klassVtable.cpp
index d67afb1de86..ba2736e5592 100644
--- a/src/hotspot/share/oops/klassVtable.cpp
+++ b/src/hotspot/share/oops/klassVtable.cpp
@@ -500,11 +500,11 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, const methodHand
if (failed_type_symbol != NULL) {
stringStream ss;
ss.print("loader constraint violation for class %s: when selecting "
- "overriding method %s the class loader %s of the "
+ "overriding method '", klass->external_name());
+ target_method()->print_external_name(&ss);
+ ss.print("' the class loader %s of the "
"selected method's type %s, and the class loader %s for its super "
"type %s have different Class objects for the type %s used in the signature (%s; %s)",
- klass->external_name(),
- target_method()->name_and_sig_as_C_string(),
target_klass->class_loader_data()->loader_name_and_id(),
target_klass->external_name(),
super_klass->class_loader_data()->loader_name_and_id(),
@@ -1227,15 +1227,16 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Insta
if (failed_type_symbol != NULL) {
stringStream ss;
ss.print("loader constraint violation in interface itable"
- " initialization for class %s: when selecting method %s the"
- " class loader %s for super interface %s, and the class"
- " loader %s of the selected method's type, %s have"
+ " initialization for class %s: when selecting method '",
+ _klass->external_name());
+ m->print_external_name(&ss);
+ ss.print("' the class loader %s for super interface %s, and the class"
+ " loader %s of the selected method's %s, %s have"
" different Class objects for the type %s used in the signature (%s; %s)",
- _klass->external_name(),
- m->name_and_sig_as_C_string(),
interf->class_loader_data()->loader_name_and_id(),
interf->external_name(),
target()->method_holder()->class_loader_data()->loader_name_and_id(),
+ target()->method_holder()->external_kind(),
target()->method_holder()->external_name(),
failed_type_symbol->as_klass_external_name(),
interf->class_in_module_of_loader(false, true),
diff --git a/src/hotspot/share/oops/method.cpp b/src/hotspot/share/oops/method.cpp
index 0d1215f87a2..f4f4fca0719 100644
--- a/src/hotspot/share/oops/method.cpp
+++ b/src/hotspot/share/oops/method.cpp
@@ -178,6 +178,27 @@ char* Method::name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol
return buf;
}
+const char* Method::external_name() const {
+ return external_name(constants()->pool_holder(), name(), signature());
+}
+
+void Method::print_external_name(outputStream *os) const {
+ print_external_name(os, constants()->pool_holder(), name(), signature());
+}
+
+const char* Method::external_name(Klass* klass, Symbol* method_name, Symbol* signature) {
+ stringStream ss;
+ print_external_name(&ss, klass, method_name, signature);
+ return ss.as_string();
+}
+
+void Method::print_external_name(outputStream *os, Klass* klass, Symbol* method_name, Symbol* signature) {
+ signature->print_as_signature_external_return_type(os);
+ os->print(" %s.%s(", klass->external_name(), method_name->as_C_string());
+ signature->print_as_signature_external_parameters(os);
+ os->print(")");
+}
+
int Method::fast_exception_handler_bci_for(const methodHandle& mh, Klass* ex_klass, int throw_bci, TRAPS) {
// exception table holds quadruple entries of the form (beg_bci, end_bci, handler_bci, klass_index)
// access exception table
diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp
index 49a892762d2..238a437a6fd 100644
--- a/src/hotspot/share/oops/method.hpp
+++ b/src/hotspot/share/oops/method.hpp
@@ -180,8 +180,8 @@ class Method : public Metadata {
}
// Helper routine: get klass name + "." + method name + signature as
- // C string, for the purpose of providing more useful NoSuchMethodErrors
- // and fatal error handling. The string is allocated in resource
+ // C string, for the purpose of providing more useful
+ // fatal error handling. The string is allocated in resource
// area if a buffer is not provided by the caller.
char* name_and_sig_as_C_string() const;
char* name_and_sig_as_C_string(char* buf, int size) const;
@@ -190,6 +190,18 @@ class Method : public Metadata {
static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size);
+ // Get return type + klass name + "." + method name + (parameter types)
+ // as a C string or print it to an outputStream.
+ // This is to be used to assemble strings passed to Java, so that
+ // the text more resembles Java code. Used in exception messages.
+ // Memory is allocated in the resource area; the caller needs
+ // a ResourceMark.
+ const char* external_name() const;
+ void print_external_name(outputStream *os) const;
+
+ static const char* external_name( Klass* klass, Symbol* method_name, Symbol* signature);
+ static void print_external_name(outputStream *os, Klass* klass, Symbol* method_name, Symbol* signature);
+
Bytecodes::Code java_code_at(int bci) const {
return Bytecodes::java_code_at(this, bcp_from(bci));
}
diff --git a/src/hotspot/share/oops/symbol.cpp b/src/hotspot/share/oops/symbol.cpp
index 76f6d7b2e01..d37a354026f 100644
--- a/src/hotspot/share/oops/symbol.cpp
+++ b/src/hotspot/share/oops/symbol.cpp
@@ -200,6 +200,66 @@ const char* Symbol::as_klass_external_name() const {
return str;
}
+static void print_class(outputStream *os, char *class_str, int len) {
+ for (int i = 0; i < len; ++i) {
+ if (class_str[i] == '/') {
+ os->put('.');
+ } else {
+ os->put(class_str[i]);
+ }
+ }
+}
+
+static void print_array(outputStream *os, char *array_str, int len) {
+ int dimensions = 0;
+ for (int i = 0; i < len; ++i) {
+ if (array_str[i] == '[') {
+ dimensions++;
+ } else if (array_str[i] == 'L') {
+ // Expected format: L<type name>;. Skip 'L' and ';' delimiting the type name.
+ print_class(os, array_str+i+1, len-i-2);
+ break;
+ } else {
+ os->print("%s", type2name(char2type(array_str[i])));
+ }
+ }
+ for (int i = 0; i < dimensions; ++i) {
+ os->print("[]");
+ }
+}
+
+void Symbol::print_as_signature_external_return_type(outputStream *os) {
+ for (SignatureStream ss(this); !ss.is_done(); ss.next()) {
+ if (ss.at_return_type()) {
+ if (ss.is_array()) {
+ print_array(os, (char*)ss.raw_bytes(), (int)ss.raw_length());
+ } else if (ss.is_object()) {
+ // Expected format: L<class name>;. Skip 'L' and ';' delimiting the class name.
+ print_class(os, (char*)ss.raw_bytes()+1, (int)ss.raw_length()-2);
+ } else {
+ os->print("%s", type2name(ss.type()));
+ }
+ }
+ }
+}
+
+void Symbol::print_as_signature_external_parameters(outputStream *os) {
+ bool first = true;
+ for (SignatureStream ss(this); !ss.is_done(); ss.next()) {
+ if (ss.at_return_type()) break;
+ if (!first) { os->print(", "); }
+ if (ss.is_array()) {
+ print_array(os, (char*)ss.raw_bytes(), (int)ss.raw_length());
+ } else if (ss.is_object()) {
+ // Skip 'L' and ';'.
+ print_class(os, (char*)ss.raw_bytes()+1, (int)ss.raw_length()-2);
+ } else {
+ os->print("%s", type2name(ss.type()));
+ }
+ first = false;
+ }
+}
+
// Increment refcount while checking for zero. If the Symbol's refcount becomes zero
// a thread could be concurrently removing the Symbol. This is used during SymbolTable
// lookup to avoid reviving a dead Symbol.
diff --git a/src/hotspot/share/oops/symbol.hpp b/src/hotspot/share/oops/symbol.hpp
index ddaf57b2c10..333da081c31 100644
--- a/src/hotspot/share/oops/symbol.hpp
+++ b/src/hotspot/share/oops/symbol.hpp
@@ -229,6 +229,15 @@ class Symbol : public MetaspaceObj {
const char* as_klass_external_name() const;
const char* as_klass_external_name(char* buf, int size) const;
+ // Treating the symbol as a signature, print the return
+ // type to the outputStream. Prints external names as 'double' or
+ // 'java.lang.Object[][]'.
+ void print_as_signature_external_return_type(outputStream *os);
+ // Treating the symbol as a signature, print the parameter types
+ // seperated by ', ' to the outputStream. Prints external names as
+ // 'double' or 'java.lang.Object[][]'.
+ void print_as_signature_external_parameters(outputStream *os);
+
void metaspace_pointers_do(MetaspaceClosure* it);
MetaspaceObj::Type type() const { return SymbolType; }
diff --git a/src/hotspot/share/opto/classes.hpp b/src/hotspot/share/opto/classes.hpp
index d090889046e..bb6565e7017 100644
--- a/src/hotspot/share/opto/classes.hpp
+++ b/src/hotspot/share/opto/classes.hpp
@@ -281,9 +281,7 @@ shmacro(ShenandoahCompareAndSwapP)
shmacro(ShenandoahWeakCompareAndSwapN)
shmacro(ShenandoahWeakCompareAndSwapP)
shmacro(ShenandoahEnqueueBarrier)
-shmacro(ShenandoahReadBarrier)
-shmacro(ShenandoahWriteBarrier)
-shmacro(ShenandoahWBMemProj)
+shmacro(ShenandoahLoadReferenceBarrier)
macro(SCMemProj)
macro(SqrtD)
macro(SqrtF)
diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp
index 04d041df3d9..98b81a08913 100644
--- a/src/hotspot/share/opto/compile.cpp
+++ b/src/hotspot/share/opto/compile.cpp
@@ -3070,7 +3070,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
Node *m = wq.at(next);
for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
Node* use = m->fast_out(i);
- if (use->is_Mem() || use->is_EncodeNarrowPtr() || use->is_ShenandoahBarrier()) {
+ if (use->is_Mem() || use->is_EncodeNarrowPtr()) {
use->ensure_control_or_add_prec(n->in(0));
} else {
switch(use->Opcode()) {
diff --git a/src/hotspot/share/opto/lcm.cpp b/src/hotspot/share/opto/lcm.cpp
index 9e43a24aac5..50c0c2dd341 100644
--- a/src/hotspot/share/opto/lcm.cpp
+++ b/src/hotspot/share/opto/lcm.cpp
@@ -178,7 +178,6 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
case Op_LoadRange:
case Op_LoadD_unaligned:
case Op_LoadL_unaligned:
- case Op_ShenandoahReadBarrier:
assert(mach->in(2) == val, "should be address");
break;
case Op_StoreB:
diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp
index bd3daf55db6..f8c5cededc1 100644
--- a/src/hotspot/share/opto/library_call.cpp
+++ b/src/hotspot/share/opto/library_call.cpp
@@ -4485,7 +4485,7 @@ JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc
for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
Node* n = mms.memory();
if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
- assert(n->is_Store() || n->Opcode() == Op_ShenandoahWBMemProj, "what else?");
+ assert(n->is_Store(), "what else?");
no_interfering_store = false;
break;
}
@@ -4494,7 +4494,7 @@ JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc
for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
Node* n = mms.memory();
if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
- assert(n->is_Store() || n->Opcode() == Op_ShenandoahWBMemProj, "what else?");
+ assert(n->is_Store(), "what else?");
no_interfering_store = false;
break;
}
diff --git a/src/hotspot/share/opto/loopPredicate.cpp b/src/hotspot/share/opto/loopPredicate.cpp
index 9534ec44a2a..5d2e69f9bd6 100644
--- a/src/hotspot/share/opto/loopPredicate.cpp
+++ b/src/hotspot/share/opto/loopPredicate.cpp
@@ -536,9 +536,6 @@ class Invariance : public StackObj {
if (_lpt->is_invariant(n)) { // known invariant
_invariant.set(n->_idx);
} else if (!n->is_CFG()) {
- if (n->Opcode() == Op_ShenandoahWriteBarrier) {
- return;
- }
Node *n_ctrl = _phase->ctrl_or_self(n);
Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG
if (_phase->is_dominator(n_ctrl, u_ctrl)) {
diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp
index 672dcda6923..4b1df0a872b 100644
--- a/src/hotspot/share/opto/loopnode.cpp
+++ b/src/hotspot/share/opto/loopnode.cpp
@@ -3971,7 +3971,7 @@ Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) {
}
while(worklist.size() != 0 && LCA != early) {
Node* s = worklist.pop();
- if (s->is_Load() || s->is_ShenandoahBarrier() || s->Opcode() == Op_SafePoint ||
+ if (s->is_Load() || s->Opcode() == Op_SafePoint ||
(s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) {
continue;
} else if (s->is_MergeMem()) {
diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp
index 09f13ab58d7..6a7b253f7ad 100644
--- a/src/hotspot/share/opto/loopnode.hpp
+++ b/src/hotspot/share/opto/loopnode.hpp
@@ -38,8 +38,6 @@ class IdealLoopTree;
class LoopNode;
class Node;
class OuterStripMinedLoopEndNode;
-class ShenandoahBarrierNode;
-class ShenandoahWriteBarrierNode;
class PathFrequency;
class PhaseIdealLoop;
class CountedLoopReserveKit;
@@ -638,8 +636,7 @@ class PhaseIdealLoop : public PhaseTransform {
friend class IdealLoopTree;
friend class SuperWord;
friend class CountedLoopReserveKit;
- friend class ShenandoahBarrierNode;
- friend class ShenandoahWriteBarrierNode;
+ friend class ShenandoahBarrierC2Support;
// Pre-computed def-use info
PhaseIterGVN &_igvn;
diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp
index 5b955cb8775..bb70c2cca41 100644
--- a/src/hotspot/share/opto/loopopts.cpp
+++ b/src/hotspot/share/opto/loopopts.cpp
@@ -1082,11 +1082,6 @@ static bool merge_point_safe(Node* region) {
Node* m = n->fast_out(j);
if (m->is_FastLock())
return false;
-#if INCLUDE_SHENANDOAHGC
- if (m->is_ShenandoahBarrier() && m->has_out_with(Op_FastLock)) {
- return false;
- }
-#endif
#ifdef _LP64
if (m->Opcode() == Op_ConvI2L)
return false;
@@ -3210,7 +3205,7 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
// if not pinned and not a load (which maybe anti-dependent on a store)
// and not a CMove (Matcher expects only bool->cmove).
- if (n->in(0) == NULL && !n->is_Load() && !n->is_CMove() && n->Opcode() != Op_ShenandoahWBMemProj) {
+ if (n->in(0) == NULL && !n->is_Load() && !n->is_CMove()) {
cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist );
sink_list.push(n);
peel >>= n->_idx; // delete n from peel set.
diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp
index 2f02b98bda7..29b649bafd2 100644
--- a/src/hotspot/share/opto/node.hpp
+++ b/src/hotspot/share/opto/node.hpp
@@ -142,7 +142,6 @@ class RegionNode;
class RootNode;
class SafePointNode;
class SafePointScalarObjectNode;
-class ShenandoahBarrierNode;
class StartNode;
class State;
class StoreNode;
@@ -676,7 +675,6 @@ public:
DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
- DEFINE_CLASS_ID(ShenandoahBarrier, Type, 7)
DEFINE_CLASS_ID(Proj, Node, 3)
DEFINE_CLASS_ID(CatchProj, Proj, 0)
@@ -875,7 +873,6 @@ public:
DEFINE_CLASS_QUERY(Root)
DEFINE_CLASS_QUERY(SafePoint)
DEFINE_CLASS_QUERY(SafePointScalarObject)
- DEFINE_CLASS_QUERY(ShenandoahBarrier)
DEFINE_CLASS_QUERY(Start)
DEFINE_CLASS_QUERY(Store)
DEFINE_CLASS_QUERY(Sub)
diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp
index 397f137948a..bd1460bea8d 100644
--- a/src/hotspot/share/prims/jni.cpp
+++ b/src/hotspot/share/prims/jni.cpp
@@ -2955,8 +2955,9 @@ static bool register_native(Klass* k, Symbol* name, Symbol* signature, address e
if (method == NULL) {
ResourceMark rm;
stringStream st;
- st.print("Method %s name or signature does not match",
- Method::name_and_sig_as_C_string(k, name, signature));
+ st.print("Method '");
+ Method::print_external_name(&st, k, name, signature);
+ st.print("' name or signature does not match");
THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(), st.as_string(), false);
}
if (!method->is_native()) {
@@ -2965,8 +2966,9 @@ static bool register_native(Klass* k, Symbol* name, Symbol* signature, address e
if (method == NULL) {
ResourceMark rm;
stringStream st;
- st.print("Method %s is not declared as native",
- Method::name_and_sig_as_C_string(k, name, signature));
+ st.print("Method '");
+ Method::print_external_name(&st, k, name, signature);
+ st.print("' is not declared as native");
THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(), st.as_string(), false);
}
}
@@ -4166,7 +4168,7 @@ static jint attach_current_thread(JavaVM *vm, void **penv, void *_args, bool dae
if (attach_failed) {
// Added missing cleanup
- thread->cleanup_failed_attach_current_thread();
+ thread->cleanup_failed_attach_current_thread(daemon);
return JNI_ERR;
}
diff --git a/src/hotspot/share/prims/jvmtiRawMonitor.cpp b/src/hotspot/share/prims/jvmtiRawMonitor.cpp
index fe5200f65be..7f2de976d14 100644
--- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp
+++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -167,7 +167,12 @@ int JvmtiRawMonitor::SimpleExit (Thread * Self) {
RawMonitor_lock->unlock() ;
if (w != NULL) {
guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+ // Once we set TState to TS_RUN the waiting thread can complete
+ // SimpleEnter and 'w' is pointing into random stack space. So we have
+ // to ensure we extract the ParkEvent (which is in type-stable memory)
+ // before we set the state, and then don't access 'w'.
ParkEvent * ev = w->_event ;
+ OrderAccess::loadstore();
w->TState = ObjectWaiter::TS_RUN ;
OrderAccess::fence() ;
ev->unpark() ;
@@ -200,7 +205,7 @@ int JvmtiRawMonitor::SimpleWait (Thread * Self, jlong millis) {
// If thread still resides on the waitset then unlink it.
// Double-checked locking -- the usage is safe in this context
- // as we TState is volatile and the lock-unlock operators are
+ // as TState is volatile and the lock-unlock operators are
// serializing (barrier-equivalent).
if (Node.TState == ObjectWaiter::TS_WAIT) {
diff --git a/src/hotspot/share/prims/nativeLookup.cpp b/src/hotspot/share/prims/nativeLookup.cpp
index 690e23738ea..649a5dc4f03 100644
--- a/src/hotspot/share/prims/nativeLookup.cpp
+++ b/src/hotspot/share/prims/nativeLookup.cpp
@@ -380,8 +380,11 @@ address NativeLookup::lookup_base(const methodHandle& method, bool& in_base_libr
if (entry != NULL) return entry;
// Native function not found, throw UnsatisfiedLinkError
- THROW_MSG_0(vmSymbols::java_lang_UnsatisfiedLinkError(),
- method->name_and_sig_as_C_string());
+ stringStream ss;
+ ss.print("'");
+ method->print_external_name(&ss);
+ ss.print("'");
+ THROW_MSG_0(vmSymbols::java_lang_UnsatisfiedLinkError(), ss.as_string());
}
diff --git a/src/hotspot/share/prims/resolvedMethodTable.cpp b/src/hotspot/share/prims/resolvedMethodTable.cpp
index da3737980ec..38fbcbf2548 100644
--- a/src/hotspot/share/prims/resolvedMethodTable.cpp
+++ b/src/hotspot/share/prims/resolvedMethodTable.cpp
@@ -126,17 +126,14 @@ oop ResolvedMethodTable::add_method(const methodHandle& m, Handle resolved_metho
Method* method = m();
// Check if method has been redefined while taking out ResolvedMethodTable_lock, if so
- // use new method. The old method won't be deallocated because it's passed in as a Handle.
+ // use new method in the ResolvedMethodName. The old method won't be deallocated
+ // yet because it's passed in as a Handle.
if (method->is_old()) {
- // Replace method with redefined version
- InstanceKlass* holder = method->method_holder();
- method = holder->method_with_idnum(method->method_idnum());
- if (method == NULL) {
- // Replace deleted method with NSME.
- method = Universe::throw_no_such_method_error();
- }
+ method = (method->is_deleted()) ? Universe::throw_no_such_method_error() :
+ method->get_new_method();
java_lang_invoke_ResolvedMethodName::set_vmtarget(resolved_method_name(), method);
}
+
// Set flag in class to indicate this InstanceKlass has entries in the table
// to avoid walking table during redefinition if none of the redefined classes
// have any membernames in the table.
diff --git a/src/hotspot/share/prims/stackwalk.cpp b/src/hotspot/share/prims/stackwalk.cpp
index 14882f77bbe..d38ef1f196f 100644
--- a/src/hotspot/share/prims/stackwalk.cpp
+++ b/src/hotspot/share/prims/stackwalk.cpp
@@ -151,8 +151,8 @@ int StackWalk::fill_in_frames(jlong mode, BaseFrameStream& stream,
index == start_index && method->caller_sensitive()) {
ResourceMark rm(THREAD);
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(),
- err_msg("StackWalker::getCallerClass called from @CallerSensitive %s method",
- method->name_and_sig_as_C_string()));
+ err_msg("StackWalker::getCallerClass called from @CallerSensitive '%s' method",
+ method->external_name()));
}
// fill in StackFrameInfo and initialize MemberName
stream.fill_frame(index, frames_array, method, CHECK_0);
diff --git a/src/hotspot/share/prims/unsafe.cpp b/src/hotspot/share/prims/unsafe.cpp
index 4f5404f5ca3..322f2ec95b8 100644
--- a/src/hotspot/share/prims/unsafe.cpp
+++ b/src/hotspot/share/prims/unsafe.cpp
@@ -292,18 +292,6 @@ UNSAFE_ENTRY(jobject, Unsafe_GetUncompressedObject(JNIEnv *env, jobject unsafe,
return JNIHandles::make_local(env, v);
} UNSAFE_END
-UNSAFE_LEAF(jboolean, Unsafe_isBigEndian0(JNIEnv *env, jobject unsafe)) {
-#ifdef VM_LITTLE_ENDIAN
- return false;
-#else
- return true;
-#endif
-} UNSAFE_END
-
-UNSAFE_LEAF(jint, Unsafe_unalignedAccess0(JNIEnv *env, jobject unsafe)) {
- return UseUnalignedAccesses;
-} UNSAFE_END
-
#define DEFINE_GETSETOOP(java_type, Type) \
\
UNSAFE_ENTRY(java_type, Unsafe_Get##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
@@ -446,14 +434,6 @@ UNSAFE_LEAF(void, Unsafe_CopySwapMemory0(JNIEnv *env, jobject unsafe, jobject sr
////// Random queries
-UNSAFE_LEAF(jint, Unsafe_AddressSize0(JNIEnv *env, jobject unsafe)) {
- return sizeof(void*);
-} UNSAFE_END
-
-UNSAFE_LEAF(jint, Unsafe_PageSize()) {
- return os::vm_page_size();
-} UNSAFE_END
-
static jlong find_field_offset(jclass clazz, jstring name, TRAPS) {
assert(clazz != NULL, "clazz must not be NULL");
assert(name != NULL, "name must not be NULL");
@@ -1073,8 +1053,6 @@ static JNINativeMethod jdk_internal_misc_Unsafe_methods[] = {
{CC "ensureClassInitialized0", CC "(" CLS ")V", FN_PTR(Unsafe_EnsureClassInitialized0)},
{CC "arrayBaseOffset0", CC "(" CLS ")I", FN_PTR(Unsafe_ArrayBaseOffset0)},
{CC "arrayIndexScale0", CC "(" CLS ")I", FN_PTR(Unsafe_ArrayIndexScale0)},
- {CC "addressSize0", CC "()I", FN_PTR(Unsafe_AddressSize0)},
- {CC "pageSize", CC "()I", FN_PTR(Unsafe_PageSize)},
{CC "defineClass0", CC "(" DC_Args ")" CLS, FN_PTR(Unsafe_DefineClass0)},
{CC "allocateInstance", CC "(" CLS ")" OBJ, FN_PTR(Unsafe_AllocateInstance)},
@@ -1102,9 +1080,6 @@ static JNINativeMethod jdk_internal_misc_Unsafe_methods[] = {
{CC "loadFence", CC "()V", FN_PTR(Unsafe_LoadFence)},
{CC "storeFence", CC "()V", FN_PTR(Unsafe_StoreFence)},
{CC "fullFence", CC "()V", FN_PTR(Unsafe_FullFence)},
-
- {CC "isBigEndian0", CC "()Z", FN_PTR(Unsafe_isBigEndian0)},
- {CC "unalignedAccess0", CC "()Z", FN_PTR(Unsafe_unalignedAccess0)}
};
#undef CC
diff --git a/src/hotspot/share/runtime/init.cpp b/src/hotspot/share/runtime/init.cpp
index 2149aef1173..229185bf702 100644
--- a/src/hotspot/share/runtime/init.cpp
+++ b/src/hotspot/share/runtime/init.cpp
@@ -122,6 +122,7 @@ jint init_globals() {
accessFlags_init();
templateTable_init();
InterfaceSupport_init();
+ VMRegImpl::set_regName(); // need this before generate_stubs (for printing oop maps).
SharedRuntime::generate_stubs();
universe2_init(); // dependent on codeCache_init and stubRoutines_init1
javaClasses_init();// must happen after vtable initialization, before referenceProcessor_init
@@ -139,7 +140,6 @@ jint init_globals() {
if (!compileBroker_init()) {
return JNI_EINVAL;
}
- VMRegImpl::set_regName();
if (!universe_post_init()) {
return JNI_ERR;
diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp
index 8e2ddff79b0..e45e312acde 100644
--- a/src/hotspot/share/runtime/os.cpp
+++ b/src/hotspot/share/runtime/os.cpp
@@ -1024,8 +1024,9 @@ bool os::is_readable_pointer(const void* p) {
}
bool os::is_readable_range(const void* from, const void* to) {
- for (address p = align_down((address)from, min_page_size()); p < to; p += min_page_size()) {
- if (!is_readable_pointer(p)) {
+ if ((uintptr_t)from >= (uintptr_t)to) return false;
+ for (uintptr_t p = align_down((uintptr_t)from, min_page_size()); p < (uintptr_t)to; p += min_page_size()) {
+ if (!is_readable_pointer((const void*)p)) {
return false;
}
}
diff --git a/src/hotspot/share/runtime/reflection.cpp b/src/hotspot/share/runtime/reflection.cpp
index cbfc3a1466b..244a4d1300f 100644
--- a/src/hotspot/share/runtime/reflection.cpp
+++ b/src/hotspot/share/runtime/reflection.cpp
@@ -1085,11 +1085,12 @@ static oop invoke(InstanceKlass* klass,
if (method->is_abstract()) {
// new default: 6531596
ResourceMark rm(THREAD);
+ stringStream ss;
+ ss.print("'");
+ Method::print_external_name(&ss, target_klass, method->name(), method->signature());
+ ss.print("'");
Handle h_origexception = Exceptions::new_exception(THREAD,
- vmSymbols::java_lang_AbstractMethodError(),
- Method::name_and_sig_as_C_string(target_klass,
- method->name(),
- method->signature()));
+ vmSymbols::java_lang_AbstractMethodError(), ss.as_string());
JavaCallArguments args(h_origexception);
THROW_ARG_0(vmSymbols::java_lang_reflect_InvocationTargetException(),
vmSymbols::throwable_void_signature(),
@@ -1104,10 +1105,13 @@ static oop invoke(InstanceKlass* klass,
// an internal vtable bug. If you ever get this please let Karen know.
if (method.is_null()) {
ResourceMark rm(THREAD);
- THROW_MSG_0(vmSymbols::java_lang_NoSuchMethodError(),
- Method::name_and_sig_as_C_string(klass,
- reflected_method->name(),
- reflected_method->signature()));
+ stringStream ss;
+ ss.print("'");
+ Method::print_external_name(&ss, klass,
+ reflected_method->name(),
+ reflected_method->signature());
+ ss.print("'");
+ THROW_MSG_0(vmSymbols::java_lang_NoSuchMethodError(), ss.as_string());
}
assert(ptypes->is_objArray(), "just checking");
diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp
index 244054e70fc..c8932ea2c9e 100644
--- a/src/hotspot/share/runtime/thread.cpp
+++ b/src/hotspot/share/runtime/thread.cpp
@@ -2021,6 +2021,10 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
_timer_exit_phase1.stop();
_timer_exit_phase2.start();
}
+
+ // Capture daemon status before the thread is marked as terminated.
+ bool daemon = is_daemon(threadObj());
+
// Notify waiters on thread object. This has to be done after exit() is called
// on the thread (if the thread is the last thread in a daemon ThreadGroup the
// group should have the destroyed bit set before waiters are notified).
@@ -2089,7 +2093,7 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
_timer_exit_phase4.start();
}
// Remove from list of active threads list, and notify VM thread if we are the last non-daemon thread
- Threads::remove(this);
+ Threads::remove(this, daemon);
if (log_is_enabled(Debug, os, thread, timer)) {
_timer_exit_phase4.stop();
@@ -2107,7 +2111,7 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
}
}
-void JavaThread::cleanup_failed_attach_current_thread() {
+void JavaThread::cleanup_failed_attach_current_thread(bool is_daemon) {
if (active_handles() != NULL) {
JNIHandleBlock* block = active_handles();
set_active_handles(NULL);
@@ -2129,7 +2133,7 @@ void JavaThread::cleanup_failed_attach_current_thread() {
BarrierSet::barrier_set()->on_thread_detach(this);
- Threads::remove(this);
+ Threads::remove(this, is_daemon);
this->smr_delete();
}
@@ -3624,6 +3628,7 @@ void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) {
initialize_class(vmSymbols::java_lang_Thread(), CHECK);
oop thread_object = create_initial_thread(thread_group, main_thread, CHECK);
main_thread->set_threadObj(thread_object);
+
// Set thread status to running since main thread has
// been started and running.
java_lang_Thread::set_thread_status(thread_object,
@@ -3632,6 +3637,15 @@ void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) {
// The VM creates objects of this class.
initialize_class(vmSymbols::java_lang_Module(), CHECK);
+#ifdef ASSERT
+ InstanceKlass *k = SystemDictionary::UnsafeConstants_klass();
+ assert(k->is_not_initialized(), "UnsafeConstants should not already be initialized");
+#endif
+
+ // initialize the hardware-specific constants needed by Unsafe
+ initialize_class(vmSymbols::jdk_internal_misc_UnsafeConstants(), CHECK);
+ jdk_internal_misc_UnsafeConstants::set_unsafe_constants();
+
// The VM preresolves methods to these classes. Make sure that they get initialized
initialize_class(vmSymbols::java_lang_reflect_Method(), CHECK);
initialize_class(vmSymbols::java_lang_ref_Finalizer(), CHECK);
@@ -4455,7 +4469,7 @@ void Threads::add(JavaThread* p, bool force_daemon) {
Events::log(p, "Thread added: " INTPTR_FORMAT, p2i(p));
}
-void Threads::remove(JavaThread* p) {
+void Threads::remove(JavaThread* p, bool is_daemon) {
// Reclaim the ObjectMonitors from the omInUseList and omFreeList of the moribund thread.
ObjectSynchronizer::omFlush(p);
@@ -4484,11 +4498,8 @@ void Threads::remove(JavaThread* p) {
}
_number_of_threads--;
- oop threadObj = p->threadObj();
- bool daemon = true;
- if (!is_daemon(threadObj)) {
+ if (!is_daemon) {
_number_of_non_daemon_threads--;
- daemon = false;
// Only one thread left, do a notify on the Threads_lock so a thread waiting
// on destroy_vm will wake up.
@@ -4496,7 +4507,7 @@ void Threads::remove(JavaThread* p) {
Threads_lock->notify_all();
}
}
- ThreadService::remove_thread(p, daemon);
+ ThreadService::remove_thread(p, is_daemon);
// Make sure that safepoint code disregard this thread. This is needed since
// the thread might mess around with locks after this point. This can cause it
diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp
index ab0ba1d863a..b977c7df186 100644
--- a/src/hotspot/share/runtime/thread.hpp
+++ b/src/hotspot/share/runtime/thread.hpp
@@ -1242,7 +1242,7 @@ class JavaThread: public Thread {
};
void exit(bool destroy_vm, ExitType exit_type = normal_exit);
- void cleanup_failed_attach_current_thread();
+ void cleanup_failed_attach_current_thread(bool is_daemon);
// Testers
virtual bool is_Java_thread() const { return true; }
@@ -2235,7 +2235,7 @@ class Threads: AllStatic {
// force_daemon is a concession to JNI, where we may need to add a
// thread to the thread list before allocating its thread object
static void add(JavaThread* p, bool force_daemon = false);
- static void remove(JavaThread* p);
+ static void remove(JavaThread* p, bool is_daemon);
static void non_java_threads_do(ThreadClosure* tc);
static void java_threads_do(ThreadClosure* tc);
static void java_threads_and_vm_thread_do(ThreadClosure* tc);
diff --git a/src/hotspot/share/utilities/hashtable.cpp b/src/hotspot/share/utilities/hashtable.cpp
index c59180a1a87..827859c7184 100644
--- a/src/hotspot/share/utilities/hashtable.cpp
+++ b/src/hotspot/share/utilities/hashtable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -129,7 +129,7 @@ static int literal_size(oop obj) {
}
}
-static int literal_size(ClassLoaderWeakHandle v) {
+static int literal_size(WeakHandle<vm_class_loader_data> v) {
return literal_size(v.peek());
}
@@ -244,7 +244,7 @@ template <class T> void print_literal(T l) {
l->print();
}
-static void print_literal(ClassLoaderWeakHandle l) {
+static void print_literal(WeakHandle<vm_class_loader_data> l) {
l.print();
}
@@ -308,15 +308,14 @@ template class Hashtable;
template class Hashtable;
template class Hashtable;
template class Hashtable;
-template class Hashtable<ClassLoaderWeakHandle, mtClass>;
+template class Hashtable<WeakHandle<vm_class_loader_data>, mtClass>;
template class Hashtable;
template class Hashtable;
-template class Hashtable;
template class Hashtable;
template class HashtableEntry;
template class HashtableEntry;
template class HashtableEntry;
-template class HashtableEntry<ClassLoaderWeakHandle, mtClass>;
+template class HashtableEntry<WeakHandle<vm_class_loader_data>, mtClass>;
template class HashtableBucket;
template class BasicHashtableEntry