Serguei Spitsyn 2017-10-18 21:05:09 +00:00
commit f0a6c87e29
213 changed files with 4186 additions and 1877 deletions

View File

@ -450,3 +450,5 @@ e5357aa85dadacc6562175ff74714fecfb4470cf jdk-10+22
22850b3a55240253841b9a425ad60a7fcdb22d47 jdk-10+23
3b201865d5c1f244f555cad58da599c9261286d8 jdk-10+24
8eb5e3ccee560c28ac9b1df2670adac2b3d36fad jdk-10+25
1129253d3bc728a2963ba411ab9dd1adf358fb6b jdk-10+26
b87d7b5d5dedc1185e5929470f945b7378cdb3ad jdk-10+27

View File

@ -39,7 +39,7 @@ setup_url() {
jib_repository="jdk-virtual"
jib_organization="jpg/infra/builddeps"
jib_module="jib"
jib_revision="2.0-SNAPSHOT"
jib_revision="3.0-SNAPSHOT"
jib_ext="jib.sh.gz"
closed_script="${mydir}/../../closed/make/conf/jib-install.conf"
@ -146,4 +146,9 @@ elif [ ! -e "${install_data}" ] || [ "${data_string}" != "$(cat "${install_data}
install_jib
fi
# Provide a reasonable default for the --src-dir parameter if run out of tree
if [ -z "${JIB_SRC_DIR}" ]; then
export JIB_SRC_DIR="${mydir}/../"
fi
${installed_jib_script} "$@"

View File

@ -70,9 +70,14 @@ define SetupBundleFileBody
$$(call SetIfEmpty, $1_UNZIP_DEBUGINFO, false)
$(BUNDLES_OUTPUTDIR)/$$($1_BUNDLE_NAME): $$($1_FILES)
# If any of the files contain a space in the file name, CacheFind
# will have replaced it with ?. Tar does not accept that, so we need to
# switch it back.
$$(foreach d, $$($1_BASE_DIRS), \
$$(eval $$(call ListPathsSafely, \
$1_$$d_RELATIVE_FILES, $$($1_$$d_LIST_FILE))) \
$$(CAT) $$($1_$$d_LIST_FILE) | $$(TR) '?' ' ' > $$($1_$$d_LIST_FILE).tmp \
&& $(MV) $$($1_$$d_LIST_FILE).tmp $$($1_$$d_LIST_FILE) $$(NEWLINE) \
)
$$(call MakeDir, $$(@D))
ifneq ($$($1_SPECIAL_INCLUDES), )

View File

@ -37,9 +37,13 @@ include SetupJavaCompilers.gmk
include TextFileProcessing.gmk
include ZipArchive.gmk
# Hook to include the corresponding custom file, if present.
$(eval $(call IncludeCustomExtension, CompileDemos-pre.gmk))
# Prepare the find cache.
$(eval $(call FillCacheFind, $(wildcard $(TOPDIR)/src/demo \
$(TOPDIR)/src/*/demo)))
DEMO_SRC_DIRS += $(TOPDIR)/src/demo
$(eval $(call FillCacheFind, $(wildcard $(DEMO_SRC_DIRS))))
# Append demo goals to this variable.
TARGETS =
@ -303,7 +307,7 @@ endif
################################################################################
# Hook to include the corresponding custom file, if present.
$(eval $(call IncludeCustomExtension, CompileDemos.gmk))
$(eval $(call IncludeCustomExtension, CompileDemos-post.gmk))
all: $(TARGETS)
images: $(IMAGES_TARGETS)

View File

@ -267,8 +267,9 @@ else # HAS_SPEC=true
$(ECHO) "Re-running configure using default settings"
endif
( cd $(OUTPUTDIR) && PATH="$(ORIGINAL_PATH)" \
CUSTOM_ROOT="$(CUSTOM_ROOT)" \
CUSTOM_CONFIG_DIR="$(CUSTOM_CONFIG_DIR)" \
$(BASH) $(CONFIGURE_CMD) $(CONFIGURE_COMMAND_LINE) )
$(BASH) $(TOPDIR)/configure $(CONFIGURE_COMMAND_LINE) )
##############################################################################
# The main target, for delegating into Main.gmk

View File

@ -70,10 +70,10 @@ ifeq ($(HAS_SPEC),)
$(subst \ ,\#,$(MAKEOVERRIDES))))
# Setup information about available configurations, if any.
ifeq ($(CUSTOM_BUILD_DIR), )
build_dir=$(topdir)/build
ifneq ($(CUSTOM_ROOT), )
build_dir=$(CUSTOM_ROOT)/build
else
build_dir=$(CUSTOM_BUILD_DIR)
build_dir=$(topdir)/build
endif
all_spec_files=$(wildcard $(build_dir)/*/spec.gmk)
# Extract the configuration names from the path
@ -227,7 +227,11 @@ ifeq ($(HAS_SPEC),)
else
# Use spec.gmk files in the build output directory
ifeq ($$(all_spec_files),)
$$(info Error: No configurations found for $$(topdir).)
ifneq ($(CUSTOM_ROOT), )
$$(info Error: No configurations found for $$(CUSTOM_ROOT).)
else
$$(info Error: No configurations found for $$(topdir).)
endif
$$(info Please run 'bash configure' to create a configuration.)
$$(info )
$$(error Cannot continue)

View File

@ -49,22 +49,17 @@ ifeq ($(OPENJDK_TARGET_OS), macosx)
BUNDLE_VENDOR := $(COMPANY_NAME)
endif
JDK_FILE_LIST := $(shell $(FIND) $(JDK_IMAGE_DIR))
JRE_FILE_LIST := $(shell $(FIND) $(JRE_IMAGE_DIR))
$(eval $(call SetupCopyFiles, COPY_JDK_IMAGE, \
SRC := $(JDK_IMAGE_DIR), \
DEST := $(JDK_MACOSX_CONTENTS_DIR)/Home, \
FILES := $(call CacheFind, $(JDK_IMAGE_DIR)), \
))
JDK_TARGET_LIST := $(subst $(JDK_IMAGE_DIR)/,$(JDK_MACOSX_CONTENTS_DIR)/Home/,$(JDK_FILE_LIST))
JRE_TARGET_LIST := $(subst $(JRE_IMAGE_DIR)/,$(JRE_MACOSX_CONTENTS_DIR)/Home/,$(JRE_FILE_LIST))
# Copy empty directories (jre/lib/applet).
$(JDK_MACOSX_CONTENTS_DIR)/Home/%: $(JDK_IMAGE_DIR)/%
$(call LogInfo, Copying $(patsubst $(OUTPUTDIR)/%,%,$@))
$(MKDIR) -p $(@D)
if [ -d "$<" ]; then $(MKDIR) -p $@; else $(CP) -f -R -P '$<' '$@'; fi
$(JRE_MACOSX_CONTENTS_DIR)/Home/%: $(JRE_IMAGE_DIR)/%
$(call LogInfo, Copying $(patsubst $(OUTPUTDIR)/%,%,$@))
$(MKDIR) -p $(@D)
if [ -d "$<" ]; then $(MKDIR) -p $@; else $(CP) -f -R -P '$<' '$@'; fi
$(eval $(call SetupCopyFiles, COPY_JRE_IMAGE, \
SRC := $(JRE_IMAGE_DIR), \
DEST := $(JRE_MACOSX_CONTENTS_DIR)/Home, \
FILES := $(call CacheFind, $(JRE_IMAGE_DIR)), \
))
$(JDK_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib:
$(call LogInfo, Creating link $(patsubst $(OUTPUTDIR)/%,%,$@))
@ -102,11 +97,11 @@ ifeq ($(OPENJDK_TARGET_OS), macosx)
@@VENDOR@@ => $(BUNDLE_VENDOR) , \
))
jdk-bundle: $(JDK_TARGET_LIST) $(JDK_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib \
jdk-bundle: $(COPY_JDK_IMAGE) $(JDK_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib \
$(BUILD_JDK_PLIST)
$(SETFILE) -a B $(dir $(JDK_MACOSX_CONTENTS_DIR))
jre-bundle: $(JRE_TARGET_LIST) $(JRE_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib \
jre-bundle: $(COPY_JRE_IMAGE) $(JRE_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib \
$(BUILD_JRE_PLIST)
$(SETFILE) -a B $(dir $(JRE_MACOSX_CONTENTS_DIR))

View File

@ -351,6 +351,9 @@ define SetupRunJtregTestBody
$1_JTREG_BASIC_OPTIONS += -automatic -keywords:\!ignore -ignore:quiet
# Make it possible to specify the JIB_DATA_DIR for tests using the
# JIB Artifact resolver
$1_JTREG_BASIC_OPTIONS += -e:JIB_DATA_DIR
# Some tests need to find a boot JDK using the JDK8_HOME variable.
$1_JTREG_BASIC_OPTIONS += -e:JDK8_HOME=$$(BOOT_JDK)

View File

@ -766,13 +766,10 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
AC_ARG_WITH(conf-name, [AS_HELP_STRING([--with-conf-name],
[use this as the name of the configuration @<:@generated from important configuration options@:>@])],
[ CONF_NAME=${with_conf_name} ])
AC_ARG_WITH(output-base-dir, [AS_HELP_STRING([--with-output-base-dir],
[override the default output base directory @<:@./build@:>@])],
[ OUTPUT_BASE=${with_output_base_dir} ], [ OUTPUT_BASE="$TOPDIR/build" ] )
# Test from where we are running configure, in or outside of src root.
AC_MSG_CHECKING([where to store configuration])
if test "x$CURDIR" = "x$TOPDIR" || test "x$CURDIR" = "x$TOPDIR/common" \
if test "x$CURDIR" = "x$TOPDIR" || test "x$CURDIR" = "x$CUSTOM_ROOT" \
|| test "x$CURDIR" = "x$TOPDIR/make/autoconf" \
|| test "x$CURDIR" = "x$TOPDIR/make" ; then
# We are running configure from the src root.
@ -783,7 +780,12 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
else
AC_MSG_RESULT([in build directory with custom name])
fi
OUTPUTDIR="${OUTPUT_BASE}/${CONF_NAME}"
if test "x$CUSTOM_ROOT" != x; then
OUTPUTDIR="${CUSTOM_ROOT}/build/${CONF_NAME}"
else
OUTPUTDIR="${TOPDIR}/build/${CONF_NAME}"
fi
$MKDIR -p "$OUTPUTDIR"
if test ! -d "$OUTPUTDIR"; then
AC_MSG_ERROR([Could not create build directory $OUTPUTDIR])

View File

@ -325,6 +325,27 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK],
fi
AC_MSG_CHECKING([if Boot JDK is 32 or 64 bits])
AC_MSG_RESULT([$BOOT_JDK_BITS])
# Try to enable CDS
AC_MSG_CHECKING([for local Boot JDK Class Data Sharing (CDS)])
BOOT_JDK_CDS_ARCHIVE=$CONFIGURESUPPORT_OUTPUTDIR/classes.jsa
ADD_JVM_ARG_IF_OK([-XX:+UnlockDiagnosticVMOptions -XX:-VerifySharedSpaces -XX:SharedArchiveFile=$BOOT_JDK_CDS_ARCHIVE],boot_jdk_cds_args,[$JAVA])
if test "x$boot_jdk_cds_args" != x; then
# Try creating a CDS archive
"$JAVA" $boot_jdk_cds_args -Xshare:dump > /dev/null 2>&1
if test $? -eq 0; then
BOOTJDK_USE_LOCAL_CDS=true
AC_MSG_RESULT([yes, created])
else
# Generation failed, don't use CDS.
BOOTJDK_USE_LOCAL_CDS=false
AC_MSG_RESULT([no, creation failed])
fi
else
BOOTJDK_USE_LOCAL_CDS=false
AC_MSG_RESULT([no, -XX:SharedArchiveFile not supported])
fi
])
AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
@ -346,6 +367,14 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
# Force en-US environment
ADD_JVM_ARG_IF_OK([-Duser.language=en -Duser.country=US],boot_jdk_jvmargs,[$JAVA])
if test "x$BOOTJDK_USE_LOCAL_CDS" = xtrue; then
# Use our own CDS archive
ADD_JVM_ARG_IF_OK([$boot_jdk_cds_args -Xshare:auto],boot_jdk_jvmargs,[$JAVA])
else
# Otherwise optimistically use the system-wide one, if one is present
ADD_JVM_ARG_IF_OK([-Xshare:auto],boot_jdk_jvmargs,[$JAVA])
fi
# Apply user provided options.
ADD_JVM_ARG_IF_OK([$with_boot_jdk_jvmargs],boot_jdk_jvmargs,[$JAVA])
@ -355,7 +384,6 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
JAVA_FLAGS=$boot_jdk_jvmargs
AC_SUBST(JAVA_FLAGS)
AC_MSG_CHECKING([flags for boot jdk java command for big workloads])
# Starting amount of heap memory.

View File

@ -23,19 +23,18 @@
#
if test "x$1" != xCHECKME; then
echo "WARNING: Calling the wrapper script directly is deprecated and unsupported."
echo "Not all features of configure will be available."
echo "ERROR: Calling this wrapper script directly is not supported."
echo "Use the 'configure' script in the top-level directory instead."
TOPDIR=$(cd $(dirname $0)/../.. > /dev/null && pwd)
else
# Now the next argument is the absolute top-level directory path.
# The TOPDIR variable is passed on to configure.ac.
TOPDIR="$2"
# Remove these two arguments to get to the user supplied arguments
shift
shift
exit 1
fi
# The next argument is the absolute top-level directory path.
# The TOPDIR variable is passed on to configure.ac.
TOPDIR="$2"
# Remove these two arguments to get to the user supplied arguments
shift
shift
if test "x$BASH" = x; then
echo "Error: This script must be run using bash." 1>&2
exit 1

View File

@ -1134,7 +1134,6 @@ with_toolchain_path
with_extra_path
with_sdk_name
with_conf_name
with_output_base_dir
with_output_sync
with_default_make_target
enable_headless_only
@ -2043,7 +2042,6 @@ Optional Packages:
--with-sdk-name use the platform SDK of the given name. [macosx]
--with-conf-name use this as the name of the configuration [generated
from important configuration options]
--with-output-base-dir override the default output base directory [./build]
--with-output-sync set make output sync type if supported by make.
[recurse]
--with-default-make-target
@ -5117,7 +5115,7 @@ VS_SDK_PLATFORM_NAME_2013=
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1506397140
DATE_WHEN_GENERATED=1508136203
###############################################################################
#
@ -17554,18 +17552,10 @@ if test "${with_conf_name+set}" = set; then :
fi
# Check whether --with-output-base-dir was given.
if test "${with_output_base_dir+set}" = set; then :
withval=$with_output_base_dir; OUTPUT_BASE=${with_output_base_dir}
else
OUTPUT_BASE="$TOPDIR/build"
fi
# Test from where we are running configure, in or outside of src root.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking where to store configuration" >&5
$as_echo_n "checking where to store configuration... " >&6; }
if test "x$CURDIR" = "x$TOPDIR" || test "x$CURDIR" = "x$TOPDIR/common" \
if test "x$CURDIR" = "x$TOPDIR" || test "x$CURDIR" = "x$CUSTOM_ROOT" \
|| test "x$CURDIR" = "x$TOPDIR/make/autoconf" \
|| test "x$CURDIR" = "x$TOPDIR/make" ; then
# We are running configure from the src root.
@ -17578,7 +17568,12 @@ $as_echo "in default location" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: in build directory with custom name" >&5
$as_echo "in build directory with custom name" >&6; }
fi
OUTPUTDIR="${OUTPUT_BASE}/${CONF_NAME}"
if test "x$CUSTOM_ROOT" != x; then
OUTPUTDIR="${CUSTOM_ROOT}/build/${CONF_NAME}"
else
OUTPUTDIR="${TOPDIR}/build/${CONF_NAME}"
fi
$MKDIR -p "$OUTPUTDIR"
if test ! -d "$OUTPUTDIR"; then
as_fn_error $? "Could not create build directory $OUTPUTDIR" "$LINENO" 5
@ -31483,6 +31478,45 @@ $as_echo_n "checking if Boot JDK is 32 or 64 bits... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $BOOT_JDK_BITS" >&5
$as_echo "$BOOT_JDK_BITS" >&6; }
# Try to enable CDS
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for local Boot JDK Class Data Sharing (CDS)" >&5
$as_echo_n "checking for local Boot JDK Class Data Sharing (CDS)... " >&6; }
BOOT_JDK_CDS_ARCHIVE=$CONFIGURESUPPORT_OUTPUTDIR/classes.jsa
$ECHO "Check if jvm arg is ok: -XX:+UnlockDiagnosticVMOptions -XX:-VerifySharedSpaces -XX:SharedArchiveFile=$BOOT_JDK_CDS_ARCHIVE" >&5
$ECHO "Command: $JAVA -XX:+UnlockDiagnosticVMOptions -XX:-VerifySharedSpaces -XX:SharedArchiveFile=$BOOT_JDK_CDS_ARCHIVE -version" >&5
OUTPUT=`$JAVA -XX:+UnlockDiagnosticVMOptions -XX:-VerifySharedSpaces -XX:SharedArchiveFile=$BOOT_JDK_CDS_ARCHIVE -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | $GREP -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | $GREP " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_cds_args="$boot_jdk_cds_args -XX:+UnlockDiagnosticVMOptions -XX:-VerifySharedSpaces -XX:SharedArchiveFile=$BOOT_JDK_CDS_ARCHIVE"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
if test "x$boot_jdk_cds_args" != x; then
# Try creating a CDS archive
"$JAVA" $boot_jdk_cds_args -Xshare:dump > /dev/null 2>&1
if test $? -eq 0; then
BOOTJDK_USE_LOCAL_CDS=true
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes, created" >&5
$as_echo "yes, created" >&6; }
else
# Generation failed, don't use CDS.
BOOTJDK_USE_LOCAL_CDS=false
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no, creation failed" >&5
$as_echo "no, creation failed" >&6; }
fi
else
BOOTJDK_USE_LOCAL_CDS=false
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no, -XX:SharedArchiveFile not supported" >&5
$as_echo "no, -XX:SharedArchiveFile not supported" >&6; }
fi
# Check whether --with-build-jdk was given.
@ -66232,6 +66266,42 @@ $as_echo_n "checking flags for boot jdk java command ... " >&6; }
fi
if test "x$BOOTJDK_USE_LOCAL_CDS" = xtrue; then
# Use our own CDS archive
$ECHO "Check if jvm arg is ok: $boot_jdk_cds_args -Xshare:auto" >&5
$ECHO "Command: $JAVA $boot_jdk_cds_args -Xshare:auto -version" >&5
OUTPUT=`$JAVA $boot_jdk_cds_args -Xshare:auto -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | $GREP -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | $GREP " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs="$boot_jdk_jvmargs $boot_jdk_cds_args -Xshare:auto"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
else
# Otherwise optimistically use the system-wide one, if one is present
$ECHO "Check if jvm arg is ok: -Xshare:auto" >&5
$ECHO "Command: $JAVA -Xshare:auto -version" >&5
OUTPUT=`$JAVA -Xshare:auto -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | $GREP -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | $GREP " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs="$boot_jdk_jvmargs -Xshare:auto"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
fi
# Apply user provided options.
$ECHO "Check if jvm arg is ok: $with_boot_jdk_jvmargs" >&5
@ -66256,7 +66326,6 @@ $as_echo "$boot_jdk_jvmargs" >&6; }
JAVA_FLAGS=$boot_jdk_jvmargs
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for big workloads" >&5
$as_echo_n "checking flags for boot jdk java command for big workloads... " >&6; }

View File

@ -842,8 +842,6 @@ JRE_SYMBOLS_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(JRE_SYMBOLS_BUNDLE_NAME)
TEST_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(TEST_BUNDLE_NAME)
DOCS_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(DOCS_BUNDLE_NAME)
CONFIGURE_CMD := $(TOPDIR)/configure
# This macro is called to allow inclusion of closed source counterparts.
# Unless overridden in closed sources, it expands to nothing.
# Usage: This function is called in an open makefile, with the following

View File

@ -462,12 +462,23 @@ define NamedParamsMacroTemplate
$(call $(0)Body,$(strip $1))
endef
################################################################################
# Replace question marks with spaces in a string. This macro needs to be called
# on files from CacheFind in case any of them contains a space in its file name,
# since CacheFind replaces spaces with ?.
# Param 1 - String to replace in
DecodeSpace = \
$(subst ?,$(SPACE),$(strip $1))
EncodeSpace = \
$(subst $(SPACE),?,$(strip $1))
################################################################################
# Make directory without forking mkdir if not needed
# 1: List of directories to create
MakeDir = \
$(strip \
$(eval MakeDir_dirs_to_make := $(strip $(foreach d, $1, $(if $(wildcard $d), , $d)))) \
$(eval MakeDir_dirs_to_make := $(strip $(foreach d, $1, $(if $(wildcard $d), , \
"$(call DecodeSpace, $d)")))) \
$(if $(MakeDir_dirs_to_make), $(shell $(MKDIR) -p $(MakeDir_dirs_to_make))) \
)
@ -479,6 +490,7 @@ SetIfEmpty = \
$(if $($(strip $1)),,$(eval $(strip $1) := $2))
################################################################################
# All install-file and related macros automatically call DecodeSpace when needed.
ifeq ($(OPENJDK_TARGET_OS),solaris)
# On Solaris, if the target is a symlink and exists, cp won't overwrite.
@ -487,19 +499,21 @@ ifeq ($(OPENJDK_TARGET_OS),solaris)
# If the source and target parent directories are the same, recursive copy doesn't work
# so we fall back on regular copy, which isn't preserving symlinks.
define install-file
$(MKDIR) -p '$(@D)'
$(RM) '$@'
if [ "$(@D)" != "$(<D)" ]; then \
$(CP) -f -r -P '$<' '$(@D)'; \
if [ "$(@F)" != "$(<F)" ]; then \
$(MV) '$(@D)/$(<F)' '$@'; \
$(call MakeDir, $(@D))
$(RM) '$(call DecodeSpace, $@)'
if [ '$(call DecodeSpace, $(dir $@))' != \
'$(call DecodeSpace, $(dir $(call EncodeSpace, $<)))' ]; then \
$(CP) -f -r -P '$(call DecodeSpace, $<)' '$(call DecodeSpace, $(@D))'; \
if [ '$(call DecodeSpace, $(@F))' != \
'$(call DecodeSpace, $(notdir $(call EncodeSpace, $(<))))' ]; then \
$(MV) '$(call DecodeSpace, $(@D)/$(<F))' '$(call DecodeSpace, $@)'; \
fi; \
else \
if [ -L '$<' ]; then \
if [ -L '$(call DecodeSpace, $<)' ]; then \
$(ECHO) "Source file is a symlink and target is in the same directory: $< $@" ; \
exit 1; \
fi; \
$(CP) -f '$<' '$@'; \
$(CP) -f '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'; \
fi
endef
else ifeq ($(OPENJDK_TARGET_OS),macosx)
@ -512,22 +526,22 @@ else ifeq ($(OPENJDK_TARGET_OS),macosx)
# If copying a soft link to a directory, need to delete the target first to avoid
# weird errors.
define install-file
$(MKDIR) -p '$(@D)'
$(RM) '$@'
$(CP) -fRP '$<' '$@'
if [ -n "`$(XATTR) -l '$@'`" ]; then $(XATTR) -c '$@'; fi
$(call MakeDir, $(@D))
$(RM) '$(call DecodeSpace, $@)'
$(CP) -fRP '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'
if [ -n "`$(XATTR) -l '$(call DecodeSpace, $@)'`" ]; then $(XATTR) -c '$(call DecodeSpace, $@)'; fi
endef
else
define install-file
$(call MakeDir, $(@D))
$(CP) -fP '$<' '$@'
$(CP) -fP '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'
endef
endif
# Variant of install file that does not preserve symlinks
define install-file-nolink
$(call MakeDir, $(@D))
$(CP) -f '$<' '$@'
$(CP) -f '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'
endef
################################################################################
@ -577,14 +591,14 @@ RelativePath = \
# the unix emulation environment.
define link-file-relative
$(call MakeDir, $(@D))
$(RM) $@
$(LN) -s $(call RelativePath, $<, $(@D)) $@
$(RM) '$(call DecodeSpace, $@)'
$(LN) -s '$(call DecodeSpace, $(call RelativePath, $<, $(@D)))' '$(call DecodeSpace, $@)'
endef
define link-file-absolute
$(call MakeDir, $(@D))
$(RM) $@
$(LN) -s $< $@
$(RM) '$(call DecodeSpace, $@)'
$(LN) -s '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'
endef
################################################################################
@ -651,6 +665,13 @@ ifneq ($(DISABLE_CACHE_FIND), true)
# This macro can be called multiple times to add to the cache. Only finds files
# with no filters.
#
# Files containing spaces will get the spaces replaced with ? because GNU Make
# cannot handle lists of files with spaces in them. By using ?, make will match
# the wildcard to a space in many situations, so we don't need to replace back
# to spaces on every use. While not a complete solution, it does allow some uses
# of CacheFind to function with spaces in file names, including for
# SetupCopyFiles.
#
# Needs to be called with $(eval )
#
# Even if the performance benefit is negligible on other platforms, keep the
@ -668,7 +689,8 @@ ifneq ($(DISABLE_CACHE_FIND), true)
ifneq ($$(FIND_CACHE_NEW_DIRS), )
# Remove any trailing slash from dirs in the cache dir list
FIND_CACHE_DIRS += $$(patsubst %/,%, $$(FIND_CACHE_NEW_DIRS))
FIND_CACHE := $$(sort $$(FIND_CACHE) $$(shell $(FIND) $$(FIND_CACHE_NEW_DIRS) \( -type f -o -type l \) $2))
FIND_CACHE := $$(sort $$(FIND_CACHE) $$(shell $(FIND) $$(FIND_CACHE_NEW_DIRS) \
\( -type f -o -type l \) $2 | $(TR) ' ' '?'))
endif
endef
@ -684,7 +706,8 @@ ifneq ($(DISABLE_CACHE_FIND), true)
# Param 2 - (optional) specialization. Normally "-a \( ... \)" expression.
define CacheFind
$(if $(filter-out $(addsuffix /%,- $(FIND_CACHE_DIRS)) $(FIND_CACHE_DIRS),$1), \
$(if $(wildcard $1), $(shell $(FIND) $1 \( -type f -o -type l \) $2)), \
$(if $(wildcard $1), $(shell $(FIND) $1 \( -type f -o -type l \) $2 \
| $(TR) ' ' '?')), \
$(filter $(addsuffix /%,$(patsubst %/,%,$1)) $1,$(FIND_CACHE)))
endef
@ -693,7 +716,7 @@ else
# Param 1 - Dirs to find in
# Param 2 - (optional) specialization. Normally "-a \( ... \)" expression.
define CacheFind
$(shell $(FIND) $1 \( -type f -o -type l \) $2)
$(shell $(FIND) $1 \( -type f -o -type l \) $2 | $(TR) ' ' '?')
endef
endif
@ -707,7 +730,7 @@ define AddFileToCopy
# 4 : Macro to call for copy operation
# 5 : Action text to log
$2: $1
$$(call LogInfo, $(strip $5) $$(patsubst $(OUTPUTDIR)/%,%,$$@))
$$(call LogInfo, $(strip $5) $$(patsubst $(OUTPUTDIR)/%,%,$$(call DecodeSpace, $$@)))
$$($$(strip $4))
$3 += $2

View File

@ -58,6 +58,7 @@ JVM_CFLAGS_INCLUDES += \
-I$(JVM_VARIANT_OUTPUTDIR)/gensrc \
-I$(TOPDIR)/src/hotspot/share/precompiled \
-I$(TOPDIR)/src/hotspot/share/prims \
-I$(TOPDIR)/src/java.base/share/native/include \
#
# INCLUDE_SUFFIX_* is only meant for including the proper

View File

@ -25,9 +25,13 @@
include LibCommon.gmk
# Hook to include the corresponding custom file, if present.
$(eval $(call IncludeCustomExtension, lib/Lib-java.base.gmk))
# Prepare the find cache.
$(eval $(call FillCacheFind, $(wildcard $(TOPDIR)/src/java.base/*/native \
$(TOPDIR)/src/*/java.base/*/native)))
LIB_java.base_SRC_DIRS += $(TOPDIR)/src/java.base/*/native
$(eval $(call FillCacheFind, $(wildcard $(LIB_java.base_SRC_DIRS))))
include CoreLibraries.gmk
include NetworkingLibraries.gmk

View File

@ -25,9 +25,13 @@
include LibCommon.gmk
# Hook to include the corresponding custom file, if present.
$(eval $(call IncludeCustomExtension, lib/Lib-java.desktop.gmk))
# Prepare the find cache.
$(eval $(call FillCacheFind, $(wildcard $(TOPDIR)/src/java.desktop/*/native \
$(TOPDIR)/src/*/java.desktop/*/native)))
LIB_java.desktop_SRC_DIRS += $(TOPDIR)/src/java.desktop/*/native
$(eval $(call FillCacheFind, $(wildcard $(LIB_java.desktop_SRC_DIRS))))
include LibosxLibraries.gmk
include PlatformLibraries.gmk

View File

@ -35,7 +35,7 @@ include $(SPEC)
include MakeBase.gmk
include TestFilesCompilation.gmk
$(eval $(call IncludeCustomExtension, hotspot/test/JtregNative.gmk))
$(eval $(call IncludeCustomExtension, test/JtregNativeHotspot.gmk))
################################################################################
# Targets for building the native tests themselves.

View File

@ -35,7 +35,7 @@ include $(SPEC)
include MakeBase.gmk
include TestFilesCompilation.gmk
$(eval $(call IncludeCustomExtension, test/JtregNative.gmk))
$(eval $(call IncludeCustomExtension, test/JtregNativeJdk.gmk))
################################################################################
# Targets for building the native tests themselves.

View File

@ -70,7 +70,7 @@
</toolChain>
</folderInfo>
<sourceEntries>
<entry excluding="cpu/vm/templateTable_x86_32.cpp|cpu/vm/templateInterpreter_x86_32.cpp|cpu/vm/stubRoutines_x86_32.cpp|cpu/vm/stubGenerator_x86_32.cpp|cpu/vm/sharedRuntime_x86_32.cpp|cpu/vm/jniFastGetField_x86_32.cpp|cpu/vm/interpreterRT_x86_32.cpp|cpu/vm/interpreter_x86_32.cpp|cpu/vm/interp_masm_x86_32.cpp|cpu/vm/vtableStubs_x86_32.cpp" flags="VALUE_WORKSPACE_PATH" kind="sourcePath" name=""/>
<entry excluding="cpu/x86/templateTable_x86_32.cpp|cpu/x86/templateInterpreter_x86_32.cpp|cpu/x86/stubRoutines_x86_32.cpp|cpu/x86/stubGenerator_x86_32.cpp|cpu/x86/sharedRuntime_x86_32.cpp|cpu/x86/jniFastGetField_x86_32.cpp|cpu/x86/interpreterRT_x86_32.cpp|cpu/x86/interpreter_x86_32.cpp|cpu/x86/interp_masm_x86_32.cpp|cpu/x86/vtableStubs_x86_32.cpp" flags="VALUE_WORKSPACE_PATH" kind="sourcePath" name=""/>
</sourceEntries>
</configuration>
</storageModule>

View File

@ -256,14 +256,10 @@ class HotSpotProject(mx.NativeProject):
"""
roots = [
'ASSEMBLY_EXCEPTION',
'LICENSE',
'README',
'THIRD_PARTY_README',
'agent',
'make',
'src',
'test'
'cpu',
'os',
'os_cpu',
'share'
]
for jvmVariant in _jdkJvmVariants:
@ -605,6 +601,16 @@ def _get_openjdk_cpu():
def _get_openjdk_os_cpu():
return _get_openjdk_os() + '-' + _get_openjdk_cpu()
def _get_jdk_dir():
suiteParentDir = dirname(_suite.dir)
# suiteParentDir is now something like: /some_prefix/jdk10-hs/open/src
pathComponents = suiteParentDir.split(os.sep)
for i in range(0, len(pathComponents)):
if pathComponents[i] in ["open", "src"]:
del pathComponents[i:]
break
return os.path.join(os.sep, *pathComponents)
def _get_jdk_build_dir(debugLevel=None):
"""
Gets the directory into which the JDK is built. This directory contains
@ -613,7 +619,7 @@ def _get_jdk_build_dir(debugLevel=None):
if debugLevel is None:
debugLevel = _vm.debugLevel
name = '{}-{}-{}-{}'.format(_get_openjdk_os_cpu(), 'normal', _vm.jvmVariant, debugLevel)
return join(dirname(_suite.dir), 'build', name)
return join(_get_jdk_dir(), 'build', name)
_jvmci_bootclasspath_prepends = []

View File

@ -24,9 +24,7 @@ suite = {
"defaultLicense" : "GPLv2-CPE",
# This puts mx/ as a sibling of the JDK build configuration directories
# (e.g., macosx-x86_64-normal-server-release).
"outputRoot" : "../build/mx/hotspot",
"outputRoot" : "../../build/mx/hotspot",
# ------------- Libraries -------------
@ -43,7 +41,7 @@ suite = {
# ------------- JVMCI:Service -------------
"jdk.vm.ci.services" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"javaCompliance" : "9",
"workingSets" : "API,JVMCI",
@ -52,7 +50,7 @@ suite = {
# ------------- JVMCI:API -------------
"jdk.vm.ci.common" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
@ -60,7 +58,7 @@ suite = {
},
"jdk.vm.ci.meta" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
@ -68,7 +66,7 @@ suite = {
},
"jdk.vm.ci.code" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"dependencies" : ["jdk.vm.ci.meta"],
"checkstyle" : "jdk.vm.ci.services",
@ -77,7 +75,7 @@ suite = {
},
"jdk.vm.ci.code.test" : {
"subDir" : "test/compiler/jvmci",
"subDir" : "../../test/hotspot/jtreg/compiler/jvmci",
"sourceDirs" : ["src"],
"dependencies" : [
"mx:JUNIT",
@ -92,7 +90,7 @@ suite = {
},
"jdk.vm.ci.runtime" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"dependencies" : [
"jdk.vm.ci.code",
@ -104,7 +102,7 @@ suite = {
},
"jdk.vm.ci.runtime.test" : {
"subDir" : "test/compiler/jvmci",
"subDir" : "../../test/hotspot/jtreg/compiler/jvmci",
"sourceDirs" : ["src"],
"dependencies" : [
"mx:JUNIT",
@ -119,7 +117,7 @@ suite = {
# ------------- JVMCI:HotSpot -------------
"jdk.vm.ci.aarch64" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"dependencies" : ["jdk.vm.ci.code"],
"checkstyle" : "jdk.vm.ci.services",
@ -128,7 +126,7 @@ suite = {
},
"jdk.vm.ci.amd64" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"dependencies" : ["jdk.vm.ci.code"],
"checkstyle" : "jdk.vm.ci.services",
@ -137,7 +135,7 @@ suite = {
},
"jdk.vm.ci.sparc" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"dependencies" : ["jdk.vm.ci.code"],
"checkstyle" : "jdk.vm.ci.services",
@ -146,7 +144,7 @@ suite = {
},
"jdk.vm.ci.hotspot" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"dependencies" : [
"jdk.vm.ci.common",
@ -163,7 +161,7 @@ suite = {
},
"jdk.vm.ci.hotspot.test" : {
"subDir" : "test/compiler/jvmci",
"subDir" : "../../test/hotspot/jtreg/compiler/jvmci",
"sourceDirs" : ["src"],
"dependencies" : [
"TESTNG",
@ -175,7 +173,7 @@ suite = {
},
"jdk.vm.ci.hotspot.aarch64" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"dependencies" : [
"jdk.vm.ci.aarch64",
@ -187,7 +185,7 @@ suite = {
},
"jdk.vm.ci.hotspot.amd64" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"dependencies" : [
"jdk.vm.ci.amd64",
@ -199,7 +197,7 @@ suite = {
},
"jdk.vm.ci.hotspot.sparc" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"dependencies" : [
"jdk.vm.ci.sparc",
@ -221,12 +219,12 @@ suite = {
# ------------- Distributions -------------
"JVMCI_SERVICES" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"dependencies" : ["jdk.vm.ci.services"],
},
"JVMCI_API" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"dependencies" : [
"jdk.vm.ci.runtime",
"jdk.vm.ci.common",
@ -240,7 +238,7 @@ suite = {
},
"JVMCI_HOTSPOT" : {
"subDir" : "src/jdk.internal.vm.ci/share/classes",
"subDir" : "../jdk.internal.vm.ci/share/classes",
"dependencies" : [
"jdk.vm.ci.hotspot.aarch64",
"jdk.vm.ci.hotspot.amd64",
@ -253,7 +251,7 @@ suite = {
},
"JVMCI_TEST" : {
"subDir" : "test/compiler/jvmci",
"subDir" : "../../test/hotspot/jtreg/compiler/jvmci",
"dependencies" : [
"jdk.vm.ci.runtime.test",
],

View File

@ -863,7 +863,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
//
// markOop displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
// if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// // We stored the monitor address into the object's mark word.
// } else if (THREAD->is_lock_owned((address)displaced_header))
// // Simple recursive case.
@ -901,7 +901,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
BasicLock::displaced_header_offset_in_bytes(), monitor);
// if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// Store stack address of the BasicObjectLock (this is monitor) into object.
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
@ -977,7 +977,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, bool check_for_e
// if ((displaced_header = monitor->displaced_header()) == NULL) {
// // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
// monitor->set_obj(NULL);
// } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
// } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);
// } else {
@ -1010,7 +1010,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, bool check_for_e
cmpdi(CCR0, displaced_header, 0);
beq(CCR0, free_slot); // recursive unlock
// } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
// } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);

View File

@ -149,8 +149,7 @@ void VM_Version::initialize() {
print_features();
}
// PPC64 supports 8-byte compare-exchange operations (see
// Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
// PPC64 supports 8-byte compare-exchange operations (see Atomic::cmpxchg)
// and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
_supports_cx8 = true;

View File

@ -914,7 +914,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
//
// markOop displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
// if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// // We stored the monitor address into the object's mark word.
// } else if (THREAD->is_lock_owned((address)displaced_header))
// // Simple recursive case.
@ -949,7 +949,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
z_stg(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
BasicLock::displaced_header_offset_in_bytes(), monitor);
// if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// Store stack address of the BasicObjectLock (this is monitor) into object.
add2reg(object_mark_addr, oopDesc::mark_offset_in_bytes(), object);
@ -1021,7 +1021,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
// if ((displaced_header = monitor->displaced_header()) == NULL) {
// // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
// monitor->set_obj(NULL);
// } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
// } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);
// } else {
@ -1062,7 +1062,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
BasicLock::displaced_header_offset_in_bytes()));
z_bre(done); // displaced_header == 0 -> goto done
// } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
// } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);

View File

@ -224,7 +224,7 @@ void VM_Version::initialize() {
}
// z/Architecture supports 8-byte compare-exchange operations
// (see Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
// (see Atomic::cmpxchg)
// and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
_supports_cx8 = true;

View File

@ -383,6 +383,7 @@ void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp
//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
#ifdef ASSERT
void frame::adjust_unextended_sp() {
// On x86, sites calling method handle intrinsics and lambda forms are treated
// as any other call site. Therefore, no special action is needed when we are
@ -394,11 +395,12 @@ void frame::adjust_unextended_sp() {
// If the sender PC is a deoptimization point, get the original PC.
if (sender_cm->is_deopt_entry(_pc) ||
sender_cm->is_deopt_mh_entry(_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp));
verify_deopt_original_pc(sender_cm, _unextended_sp);
}
}
}
}
#endif
//------------------------------------------------------------------------------
// frame::update_map_with_saved_link

View File

@ -117,7 +117,7 @@
// original sp we use that convention.
intptr_t* _unextended_sp;
void adjust_unextended_sp();
void adjust_unextended_sp() NOT_DEBUG_RETURN;
intptr_t* ptr_at_addr(int offset) const {
return (intptr_t*) addr_at(offset);
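For readers unfamiliar with the NOT_DEBUG_RETURN declaration above: it lets a member function collapse to an empty inline body in product builds while the debug build supplies a real definition under #ifdef ASSERT. A minimal C++ sketch of the idiom, assuming HotSpot-style macro definitions (the Frame class here is a made-up stand-in):

#ifdef ASSERT
#define NOT_DEBUG_RETURN      /* next token must be ; declaration only */
#else
#define NOT_DEBUG_RETURN {}   /* product build: empty inline body */
#endif

class Frame {
 public:
  void adjust_unextended_sp() NOT_DEBUG_RETURN;  // no-op unless ASSERT

#ifdef ASSERT
void Frame::adjust_unextended_sp() {
  // debug-only verification work, mirroring the #ifdef ASSERT block above
#endif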

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -566,7 +566,7 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
// Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
//
// Arguments :
// c_rarg0: exchange_value
@ -574,8 +574,8 @@ class StubGenerator: public StubCodeGenerator {
//
// Result:
// *dest <- ex, return (orig *dest)
address generate_atomic_xchg_ptr() {
StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
address generate_atomic_xchg_long() {
StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
address start = __ pc();
__ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
@ -4998,7 +4998,7 @@ class StubGenerator: public StubCodeGenerator {
// atomic calls
StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr();
StubRoutines::_atomic_xchg_long_entry = generate_atomic_xchg_long();
StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
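The rename above reflects that the stub implements a 64-bit (jlong) exchange rather than a pointer-sized one. As a point of reference only, not HotSpot's generated stub, the operation's semantics can be sketched with std::atomic:

#include <atomic>
#include <cstdint>

// Atomically store exchange_value into *dest and return the previous
// contents, i.e. what the atomic_xchg_long stub provides.
int64_t atomic_xchg_long(int64_t exchange_value, std::atomic<int64_t>* dest) {
  return dest->exchange(exchange_value);
}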

View File

@ -276,7 +276,7 @@ int CppInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
markOop disp = lockee->mark()->set_unlocked();
monitor->lock()->set_displaced_header(disp);
if (Atomic::cmpxchg_ptr(monitor, lockee->mark_addr(), disp) != disp) {
if (Atomic::cmpxchg((markOop)monitor, lockee->mark_addr(), disp) != disp) {
if (thread->is_lock_owned((address) disp->clear_lock_bits())) {
monitor->lock()->set_displaced_header(NULL);
}
@ -420,7 +420,8 @@ int CppInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
monitor->set_obj(NULL);
if (header != NULL) {
if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
markOop old_header = markOopDesc::encode(lock);
if (rcvr->cas_set_mark(header, old_header) != old_header) {
monitor->set_obj(rcvr); {
HandleMark hm(thread);
CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(thread, monitor));
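The hunk above replaces the old pointer-specific Atomic::cmpxchg_ptr with the templated Atomic::cmpxchg, which requires all three operands to share one type (hence the (markOop)monitor cast). A hedged sketch of that shape, using std::atomic in place of HotSpot's primitive:

#include <atomic>

// Compare-and-swap returning the old value: if *dest == compare_value,
// store exchange_value; either way, return what *dest held beforehand.
template <typename T>
T cmpxchg(T exchange_value, std::atomic<T>* dest, T compare_value) {
  // On failure, compare_exchange_strong writes the observed value into
  // compare_value, so it holds the old contents in both cases.
  dest->compare_exchange_strong(compare_value, exchange_value);
  return compare_value;
}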

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010, 2015 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -253,9 +253,8 @@ class StubGenerator: public StubCodeGenerator {
// atomic calls
StubRoutines::_atomic_xchg_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_xchg_ptr_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_xchg_long_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_cmpxchg_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_cmpxchg_ptr_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_add_entry = ShouldNotCallThisStub();

View File

@ -889,8 +889,12 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
stack_size / K);
}
// Configure libc guard page.
ret = pthread_attr_setguardsize(&attr, os::Aix::default_guard_size(thr_type));
// Save some cycles and a page by disabling OS guard pages where we have our own
// VM guard pages (in java threads). For other threads, keep system default guard
// pages in place.
if (thr_type == java_thread || thr_type == compiler_thread) {
ret = pthread_attr_setguardsize(&attr, 0);
}
pthread_t tid = 0;
if (ret == 0) {
@ -3019,19 +3023,6 @@ bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
return chained;
}
size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
// Creating guard page is very expensive. Java thread has HotSpot
// guard pages, only enable glibc guard page for non-Java threads.
// (Remember: compiler thread is a Java thread, too!)
//
// Aix can have different page sizes for stack (4K) and heap (64K).
// As Hotspot knows only one page size, we assume the stack has
// the same page size as the heap. Returning page_size() here can
// cause 16 guard pages which we want to avoid. Thus we return 4K
// which will be rounded to the real page size by the OS.
return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : 4 * K);
}
struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
if (sigismember(&sigs, sig)) {
return &sigact[sig];
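A minimal sketch of the guard-page policy applied in this hunk, assuming POSIX threads (the ThreadType enum is an illustrative stand-in for os::ThreadType):

#include <pthread.h>

enum ThreadType { java_thread, compiler_thread, os_thread };

int configure_guard_page(pthread_attr_t* attr, ThreadType thr_type) {
  if (thr_type == java_thread || thr_type == compiler_thread) {
    // The VM installs its own guard pages for Java and compiler threads,
    // so a zero-sized libc guard page saves a page and some cycles.
    return pthread_attr_setguardsize(attr, 0);
  }
  return 0;  // other threads keep the system default guard size
}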

View File

@ -139,9 +139,6 @@ class Aix {
// libpthread version string
static void libpthread_init();
// Return default libc guard size for the specified thread type.
static size_t default_guard_size(os::ThreadType thr_type);
// Function returns true if we run on OS/400 (pase), false if we run
// on AIX.
static bool on_pase() {

View File

@ -98,6 +98,11 @@ inline int os::ftruncate(int fd, jlong length) {
inline struct dirent* os::readdir(DIR* dirp, dirent *dbuf)
{
// readdir_r has been deprecated since glibc 2.24.
// See https://sourceware.org/bugzilla/show_bug.cgi?id=19056 for more details.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
dirent* p;
int status;
assert(dirp != NULL, "just checking");
@ -111,6 +116,8 @@ inline struct dirent* os::readdir(DIR* dirp, dirent *dbuf)
return NULL;
} else
return p;
#pragma GCC diagnostic pop
}
inline int os::closedir(DIR *dirp) {
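The pragma pair above is the standard GCC pattern for silencing a single deprecated call without disabling the warning elsewhere. A self-contained sketch (legacy_api is a hypothetical deprecated function):

// Push the diagnostic state, ignore -Wdeprecated-declarations for one
// call, then restore whatever was in effect before.
__attribute__((deprecated)) int legacy_api() { return 0; }

int call_legacy() {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
  int result = legacy_api();  // would emit a deprecation warning otherwise
#pragma GCC diagnostic pop
  return result;
}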

View File

@ -137,7 +137,7 @@ template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
// Note that xchg_ptr doesn't necessarily do an acquire
// Note that xchg doesn't necessarily do an acquire
// (see synchronizer.cpp).
T old_value;
@ -176,7 +176,7 @@ template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
// Note that xchg_ptr doesn't necessarily do an acquire
// Note that xchg doesn't necessarily do an acquire
// (see synchronizer.cpp).
T old_value;

View File

@ -134,7 +134,7 @@ template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
// Note that xchg_ptr doesn't necessarily do an acquire
// Note that xchg doesn't necessarily do an acquire
// (see synchronizer.cpp).
T old_value;
@ -173,7 +173,7 @@ template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
// Note that xchg_ptr doesn't necessarily do an acquire
// Note that xchg doesn't necessarily do an acquire
// (see synchronizer.cpp).
T old_value;

View File

@ -73,7 +73,7 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
}
DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func)
DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_ptr_func)
DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_long_func)
#undef DEFINE_STUB_XCHG

View File

@ -219,7 +219,7 @@ void os::initialize_thread(Thread* thr) {
// Atomics and Stub Functions
typedef jint xchg_func_t (jint, volatile jint*);
typedef intptr_t xchg_ptr_func_t (intptr_t, volatile intptr_t*);
typedef intptr_t xchg_long_func_t (jlong, volatile jlong*);
typedef jint cmpxchg_func_t (jint, volatile jint*, jint);
typedef jbyte cmpxchg_byte_func_t (jbyte, volatile jbyte*, jbyte);
typedef jlong cmpxchg_long_func_t (jlong, volatile jlong*, jlong);
@ -243,12 +243,12 @@ jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
return old_value;
}
intptr_t os::atomic_xchg_ptr_bootstrap(intptr_t exchange_value, volatile intptr_t* dest) {
intptr_t os::atomic_xchg_long_bootstrap(jlong exchange_value, volatile jlong* dest) {
// try to use the stub:
xchg_ptr_func_t* func = CAST_TO_FN_PTR(xchg_ptr_func_t*, StubRoutines::atomic_xchg_ptr_entry());
xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());
if (func != NULL) {
os::atomic_xchg_ptr_func = func;
os::atomic_xchg_long_func = func;
return (*func)(exchange_value, dest);
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
@ -338,7 +338,7 @@ intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* des
}
xchg_func_t* os::atomic_xchg_func = os::atomic_xchg_bootstrap;
xchg_ptr_func_t* os::atomic_xchg_ptr_func = os::atomic_xchg_ptr_bootstrap;
xchg_long_func_t* os::atomic_xchg_long_func = os::atomic_xchg_long_bootstrap;
cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
add_func_t* os::atomic_add_func = os::atomic_add_bootstrap;
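The *_bootstrap functions above follow a common HotSpot idiom: a global function pointer starts out aimed at a slow bootstrap routine, which installs the generated stub on first successful lookup. A hedged sketch of the idiom (lookup_stub is a placeholder for StubRoutines::atomic_xchg_long_entry()):

#include <cstdint>

typedef int64_t xchg_long_func_t(int64_t, volatile int64_t*);

extern xchg_long_func_t* atomic_xchg_long_func;  // defined below

// Placeholder: the real code queries StubRoutines for the generated stub.
inline xchg_long_func_t* lookup_stub() { return nullptr; }

int64_t xchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest) {
  if (xchg_long_func_t* func = lookup_stub()) {
    atomic_xchg_long_func = func;   // install the fast path for later calls
    return (*func)(exchange_value, dest);
  }
  // Only reachable during single-threaded startup, so a plain swap is safe.
  int64_t old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

xchg_long_func_t* atomic_xchg_long_func = xchg_long_bootstrap;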

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
//
#ifdef AMD64
static jint (*atomic_xchg_func) (jint, volatile jint*);
static intptr_t (*atomic_xchg_ptr_func) (intptr_t, volatile intptr_t*);
static intptr_t (*atomic_xchg_long_func) (jlong, volatile jlong*);
static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint);
static jbyte (*atomic_cmpxchg_byte_func) (jbyte, volatile jbyte*, jbyte);
@ -40,7 +40,7 @@
static intptr_t (*atomic_add_ptr_func) (intptr_t, volatile intptr_t*);
static jint atomic_xchg_bootstrap (jint, volatile jint*);
static intptr_t atomic_xchg_ptr_bootstrap (intptr_t, volatile intptr_t*);
static intptr_t atomic_xchg_long_bootstrap (jlong, volatile jlong*);
static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint);
static jbyte atomic_cmpxchg_byte_bootstrap(jbyte, volatile jbyte*, jbyte);

View File

@ -236,11 +236,9 @@ DelayedConstant* DelayedConstant::add(BasicType type,
if (dcon->match(type, cfn))
return dcon;
if (dcon->value_fn == NULL) {
// (cmpxchg not because this is multi-threaded but because I'm paranoid)
if (Atomic::cmpxchg_ptr(CAST_FROM_FN_PTR(void*, cfn), &dcon->value_fn, NULL) == NULL) {
dcon->value_fn = cfn;
dcon->type = type;
return dcon;
}
}
}
// If this assert is hit (in pre-integration testing!) then re-evaluate

View File

@ -1221,11 +1221,6 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
guarantee(nm != NULL, "only nmethods can contain non-perm oops");
if (!nm->on_scavenge_root_list() &&
((mirror.not_null() && mirror()->is_scavengable()) ||
(appendix.not_null() && appendix->is_scavengable()))) {
CodeCache::add_scavenge_root_nmethod(nm);
}
// Since we've patched some oops in the nmethod,
// (re)register it with the heap.

View File

@ -48,13 +48,11 @@ private:
ClassPathEntry* volatile _next;
public:
// Next entry in class path
ClassPathEntry* next() const {
return (ClassPathEntry*) OrderAccess::load_ptr_acquire(&_next);
}
ClassPathEntry* next() const { return OrderAccess::load_acquire(&_next); }
virtual ~ClassPathEntry() {}
void set_next(ClassPathEntry* next) {
// may have unlocked readers, so ensure visibility.
OrderAccess::release_store_ptr(&_next, next);
OrderAccess::release_store(&_next, next);
}
virtual bool is_jrt() = 0;
virtual bool is_jar_file() const = 0;
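The load_acquire/release_store pair above is what lets set_next publish a fully constructed entry to lock-free readers. A compact sketch of the same pattern, using std::atomic as a stand-in for OrderAccess and a cut-down Entry for ClassPathEntry:

#include <atomic>

struct Entry {
  std::atomic<Entry*> _next{nullptr};

  Entry* next() const {
    // Acquire: a reader that observes the pointer also observes the
    // fully initialized Entry it points to.
    return _next.load(std::memory_order_acquire);
  }
  void set_next(Entry* n) {
    // Release: publish only after n's fields have been written.
    _next.store(n, std::memory_order_release);
  }
};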

View File

@ -82,11 +82,6 @@
#include "trace/tracing.hpp"
#endif
// helper function to avoid in-line casts
template <typename T> static T* load_ptr_acquire(T* volatile *p) {
return static_cast<T*>(OrderAccess::load_ptr_acquire(p));
}
ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
@ -152,7 +147,7 @@ ClassLoaderData::ChunkedHandleList::~ChunkedHandleList() {
oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
if (_head == NULL || _head->_size == Chunk::CAPACITY) {
Chunk* next = new Chunk(_head);
OrderAccess::release_store_ptr(&_head, next);
OrderAccess::release_store(&_head, next);
}
oop* handle = &_head->_data[_head->_size];
*handle = o;
@ -169,7 +164,7 @@ inline void ClassLoaderData::ChunkedHandleList::oops_do_chunk(OopClosure* f, Chu
}
void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
Chunk* head = (Chunk*) OrderAccess::load_ptr_acquire(&_head);
Chunk* head = OrderAccess::load_acquire(&_head);
if (head != NULL) {
// Must be careful when reading size of head
oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size));
@ -257,24 +252,24 @@ void ClassLoaderData::Dependencies::oops_do(OopClosure* f) {
}
void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
// Lock-free access requires load_ptr_acquire
for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
// Lock-free access requires load_acquire
for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
klass_closure->do_klass(k);
assert(k != k->next_link(), "no loops!");
}
}
void ClassLoaderData::classes_do(void f(Klass * const)) {
// Lock-free access requires load_ptr_acquire
for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
// Lock-free access requires load_acquire
for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
f(k);
assert(k != k->next_link(), "no loops!");
}
}
void ClassLoaderData::methods_do(void f(Method*)) {
// Lock-free access requires load_ptr_acquire
for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
// Lock-free access requires load_acquire
for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
InstanceKlass::cast(k)->methods_do(f);
}
@ -282,8 +277,8 @@ void ClassLoaderData::methods_do(void f(Method*)) {
}
void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
// Lock-free access requires load_ptr_acquire
for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
// Lock-free access requires load_acquire
for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
// Do not filter ArrayKlass oops here...
if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
klass_closure->do_klass(k);
@ -292,8 +287,8 @@ void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
}
void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
// Lock-free access requires load_ptr_acquire
for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
// Lock-free access requires load_acquire
for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
if (k->is_instance_klass()) {
f(InstanceKlass::cast(k));
}
@ -449,7 +444,7 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
k->set_next_link(old_value);
// Link the new item into the list, making sure the linked class is stable
// since the list can be walked without a lock
OrderAccess::release_store_ptr(&_klasses, k);
OrderAccess::release_store(&_klasses, k);
}
if (publicize && k->class_loader_data() != NULL) {
@ -589,8 +584,8 @@ void ClassLoaderData::unload() {
ModuleEntryTable* ClassLoaderData::modules() {
// Lazily create the module entry table at first request.
// Lock-free access requires load_ptr_acquire.
ModuleEntryTable* modules = load_ptr_acquire(&_modules);
// Lock-free access requires load_acquire.
ModuleEntryTable* modules = OrderAccess::load_acquire(&_modules);
if (modules == NULL) {
MutexLocker m1(Module_lock);
// Check if _modules got allocated while we were waiting for this lock.
@ -600,7 +595,7 @@ ModuleEntryTable* ClassLoaderData::modules() {
{
MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
// Ensure _modules is stable, since it is examined without a lock
OrderAccess::release_store_ptr(&_modules, modules);
OrderAccess::release_store(&_modules, modules);
}
}
}
@ -737,8 +732,8 @@ Metaspace* ClassLoaderData::metaspace_non_null() {
// to create smaller arena for Reflection class loaders also.
// The reason for the delayed allocation is because some class loaders are
// simply for delegating with no metadata of their own.
// Lock-free access requires load_ptr_acquire.
Metaspace* metaspace = load_ptr_acquire(&_metaspace);
// Lock-free access requires load_acquire.
Metaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
if (metaspace == NULL) {
MutexLockerEx ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
// Check if _metaspace got allocated while we were waiting for this lock.
@ -760,7 +755,7 @@ Metaspace* ClassLoaderData::metaspace_non_null() {
metaspace = new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
}
// Ensure _metaspace is stable, since it is examined without a lock
OrderAccess::release_store_ptr(&_metaspace, metaspace);
OrderAccess::release_store(&_metaspace, metaspace);
}
}
return metaspace;
@ -914,8 +909,8 @@ void ClassLoaderData::verify() {
}
bool ClassLoaderData::contains_klass(Klass* klass) {
// Lock-free access requires load_ptr_acquire
for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
// Lock-free access requires load_acquire
for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
if (k == klass) return true;
}
return false;
@ -948,7 +943,7 @@ ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRA
if (!is_anonymous) {
ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader());
// First, Atomically set it
ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL);
ClassLoaderData* old = Atomic::cmpxchg(cld, cld_addr, (ClassLoaderData*)NULL);
if (old != NULL) {
delete cld;
// Returns the data.
@ -963,7 +958,7 @@ ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRA
do {
cld->set_next(next);
ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next);
ClassLoaderData* exchanged = Atomic::cmpxchg(cld, list_head, next);
if (exchanged == next) {
LogTarget(Debug, class, loader, data) lt;
if (lt.is_enabled()) {
@ -1387,7 +1382,7 @@ Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
while (head != NULL) {
Klass* next = next_klass_in_cldg(head);
Klass* old_head = (Klass*)Atomic::cmpxchg_ptr(next, &_next_klass, head);
Klass* old_head = Atomic::cmpxchg(next, &_next_klass, head);
if (old_head == head) {
return head; // Won the CAS.

View File

@ -194,7 +194,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
Chunk(Chunk* c) : _next(c), _size(0) { }
};
Chunk* _head;
Chunk* volatile _head;
void oops_do_chunk(OopClosure* f, Chunk* c, const juint size);

View File

@ -161,10 +161,10 @@ class DictionaryEntry : public HashtableEntry<InstanceKlass*, mtClass> {
void set_pd_set(ProtectionDomainEntry* new_head) { _pd_set = new_head; }
ProtectionDomainEntry* pd_set_acquire() const {
return (ProtectionDomainEntry*)OrderAccess::load_ptr_acquire(&_pd_set);
return OrderAccess::load_acquire(&_pd_set);
}
void release_set_pd_set(ProtectionDomainEntry* new_head) {
OrderAccess::release_store_ptr(&_pd_set, new_head);
OrderAccess::release_store(&_pd_set, new_head);
}
// Tells whether the initiating class' protection domain can access the klass in this entry

View File

@ -69,14 +69,14 @@ static void* volatile _verify_byte_codes_fn = NULL;
static volatile jint _is_new_verify_byte_codes_fn = (jint) true;
static void* verify_byte_codes_fn() {
if (OrderAccess::load_ptr_acquire(&_verify_byte_codes_fn) == NULL) {
if (OrderAccess::load_acquire(&_verify_byte_codes_fn) == NULL) {
void *lib_handle = os::native_java_library();
void *func = os::dll_lookup(lib_handle, "VerifyClassCodesForMajorVersion");
OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
OrderAccess::release_store(&_verify_byte_codes_fn, func);
if (func == NULL) {
_is_new_verify_byte_codes_fn = false;
func = os::dll_lookup(lib_handle, "VerifyClassCodes");
OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
OrderAccess::release_store(&_verify_byte_codes_fn, func);
}
}
return (void*)_verify_byte_codes_fn;
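
The function above lazily resolves a native entry point, falls back to an older symbol name when the new one is absent, and publishes the result with a release store so later callers can read it lock-free. A POSIX sketch of the same caching-with-fallback idea (the race is benign because every thread computes the same value; symbol names taken from the hunk, the helper name is a placeholder):

#include <atomic>
#include <dlfcn.h>

static std::atomic<void*> cached_fn{nullptr};

void* lookup_verifier(void* lib_handle) {
  void* fn = cached_fn.load(std::memory_order_acquire);
  if (fn == nullptr) {
    fn = dlsym(lib_handle, "VerifyClassCodesForMajorVersion");
    if (fn == nullptr) {
      fn = dlsym(lib_handle, "VerifyClassCodes");  // older fallback symbol
    }
    // Benign race: concurrent threads would store the same pointer.
    cached_fn.store(fn, std::memory_order_release);
  }
  return fn;
}
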

View File

@ -683,22 +683,19 @@ void CodeCache::blobs_do(CodeBlobClosure* f) {
if (cb->is_alive()) {
f->do_code_blob(cb);
#ifdef ASSERT
if (cb->is_nmethod())
((nmethod*)cb)->verify_scavenge_root_oops();
if (cb->is_nmethod()) {
Universe::heap()->verify_nmethod((nmethod*)cb);
}
#endif //ASSERT
}
}
}
}
// Walk the list of methods which might contain non-perm oops.
// Walk the list of methods which might contain oops to the java heap.
void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
assert_locked_or_safepoint(CodeCache_lock);
if (UseG1GC) {
return;
}
const bool fix_relocations = f->fix_relocations();
debug_only(mark_scavenge_root_nmethods());
@ -735,13 +732,20 @@ void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
debug_only(verify_perm_nmethods(NULL));
}
void CodeCache::register_scavenge_root_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
if (!nm->on_scavenge_root_list() && nm->detect_scavenge_root_oops()) {
add_scavenge_root_nmethod(nm);
}
}
void CodeCache::verify_scavenge_root_nmethod(nmethod* nm) {
nm->verify_scavenge_root_oops();
}
void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
if (UseG1GC) {
return;
}
nm->set_on_scavenge_root_list();
nm->set_scavenge_root_link(_scavenge_root_nmethods);
set_scavenge_root_nmethods(nm);
@ -754,8 +758,6 @@ void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
assert((prev == NULL && scavenge_root_nmethods() == nm) ||
(prev != NULL && prev->scavenge_root_link() == nm), "precondition");
assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list");
print_trace("unlink_scavenge_root", nm);
if (prev == NULL) {
set_scavenge_root_nmethods(nm->scavenge_root_link());
@ -769,10 +771,6 @@ void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
if (UseG1GC) {
return;
}
print_trace("drop_scavenge_root", nm);
nmethod* prev = NULL;
for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
@ -788,10 +786,6 @@ void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
void CodeCache::prune_scavenge_root_nmethods() {
assert_locked_or_safepoint(CodeCache_lock);
if (UseG1GC) {
return;
}
debug_only(mark_scavenge_root_nmethods());
nmethod* last = NULL;
@ -820,10 +814,6 @@ void CodeCache::prune_scavenge_root_nmethods() {
#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
if (UseG1GC) {
return;
}
// While we are here, verify the integrity of the list.
mark_scavenge_root_nmethods();
for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
@ -833,7 +823,7 @@ void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
verify_perm_nmethods(f);
}
// Temporarily mark nmethods that are claimed to be on the non-perm list.
// Temporarily mark nmethods that are claimed to be on the scavenge list.
void CodeCache::mark_scavenge_root_nmethods() {
NMethodIterator iter;
while(iter.next_alive()) {
@ -854,7 +844,7 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
assert(nm->scavenge_root_not_marked(), "must be already processed");
if (nm->on_scavenge_root_list())
call_f = false; // don't show this one to the client
nm->verify_scavenge_root_oops();
Universe::heap()->verify_nmethod(nm);
if (call_f) f_or_null->do_code_blob(nm);
}
}
@ -1640,4 +1630,3 @@ void CodeCache::log_state(outputStream* st) {
blob_count(), nmethod_count(), adapter_count(),
unallocated_capacity());
}

View File

@ -181,6 +181,10 @@ class CodeCache : AllStatic {
static void scavenge_root_nmethods_do(CodeBlobToOopClosure* f);
static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; }
// register_scavenge_root_nmethod() conditionally adds the nmethod to the list
// if it is not already on the list and has a scavengeable root
static void register_scavenge_root_nmethod(nmethod* nm);
static void verify_scavenge_root_nmethod(nmethod* nm);
static void add_scavenge_root_nmethod(nmethod* nm);
static void drop_scavenge_root_nmethod(nmethod* nm);

View File

@ -288,7 +288,7 @@ public:
// Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
ExceptionCache* exception_cache() const { return _exception_cache; }
void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); }
void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store(&_exception_cache, ec); }
address handler_for_exception_and_pc(Handle exception, address pc);
void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
void clean_exception_cache(BoolObjectClosure* is_alive);

View File

@ -1,115 +0,0 @@
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* This header file defines the data structures sent by the VM
* through the JVMTI CompiledMethodLoad callback function via the
* "void * compile_info" parameter. The memory pointed to by the
* compile_info parameter may not be referenced after returning from
* the CompiledMethodLoad callback. These are VM implementation
* specific data structures that may evolve in future releases. A
* JVMTI agent should interpret a non-NULL compile_info as a pointer
* to a region of memory containing a list of records. In a typical
* usage scenario, a JVMTI agent would cast each record to a
* jvmtiCompiledMethodLoadRecordHeader, a struct that represents
* arbitrary information. This struct contains a kind field to indicate
* the kind of information being passed, and a pointer to the next
* record. If the kind field indicates inlining information, then the
* agent would cast the record to a jvmtiCompiledMethodLoadInlineRecord.
* This record contains an array of PCStackInfo structs, which indicate
* for every pc address what are the methods on the invocation stack.
* The "methods" and "bcis" fields in each PCStackInfo struct specify a
* 1-1 mapping between these inlined methods and their bytecode indices.
* This can be used to derive the proper source lines of the inlined
* methods.
*/
#ifndef _JVMTI_CMLR_H_
#define _JVMTI_CMLR_H_
enum {
JVMTI_CMLR_MAJOR_VERSION_1 = 0x00000001,
JVMTI_CMLR_MINOR_VERSION_0 = 0x00000000,
JVMTI_CMLR_MAJOR_VERSION = 0x00000001,
JVMTI_CMLR_MINOR_VERSION = 0x00000000
/*
* This comment is for the "JDK import from HotSpot" sanity check:
* version: 1.0.0
*/
};
typedef enum {
JVMTI_CMLR_DUMMY = 1,
JVMTI_CMLR_INLINE_INFO = 2
} jvmtiCMLRKind;
/*
* Record that represents arbitrary information passed through JVMTI
* CompiledMethodLoadEvent void pointer.
*/
typedef struct _jvmtiCompiledMethodLoadRecordHeader {
jvmtiCMLRKind kind; /* id for the kind of info passed in the record */
jint majorinfoversion; /* major and minor info version values. Init'ed */
jint minorinfoversion; /* to current version value in jvmtiExport.cpp. */
struct _jvmtiCompiledMethodLoadRecordHeader* next;
} jvmtiCompiledMethodLoadRecordHeader;
/*
* Record that gives information about the methods on the compile-time
* stack at a specific pc address of a compiled method. Each element in
* the methods array maps to same element in the bcis array.
*/
typedef struct _PCStackInfo {
void* pc; /* the pc address for this compiled method */
jint numstackframes; /* number of methods on the stack */
jmethodID* methods; /* array of numstackframes method ids */
jint* bcis; /* array of numstackframes bytecode indices */
} PCStackInfo;
/*
* Record that contains inlining information for each pc address of
* an nmethod.
*/
typedef struct _jvmtiCompiledMethodLoadInlineRecord {
jvmtiCompiledMethodLoadRecordHeader header; /* common header for casting */
jint numpcs; /* number of pc descriptors in this nmethod */
PCStackInfo* pcinfo; /* array of numpcs pc descriptors */
} jvmtiCompiledMethodLoadInlineRecord;
/*
* Dummy record used to test that we can pass records with different
* information through the void pointer provided that they can be cast
* to a jvmtiCompiledMethodLoadRecordHeader.
*/
typedef struct _jvmtiCompiledMethodLoadDummyRecord {
jvmtiCompiledMethodLoadRecordHeader header; /* common header for casting */
char message[50];
} jvmtiCompiledMethodLoadDummyRecord;
#endif
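
As the header comment describes, an agent receiving CompiledMethodLoad walks compile_info as a linked list of records, switching on the kind field and casting inline-info records to jvmtiCompiledMethodLoadInlineRecord. A minimal sketch of that traversal (agent-side; assumes the types above are in scope, error handling omitted):

#include <stdio.h>

static void walk_cmlr(const void* compile_info) {
  const jvmtiCompiledMethodLoadRecordHeader* rec =
      (const jvmtiCompiledMethodLoadRecordHeader*) compile_info;
  for (; rec != NULL; rec = rec->next) {
    if (rec->kind == JVMTI_CMLR_INLINE_INFO) {
      const jvmtiCompiledMethodLoadInlineRecord* inl =
          (const jvmtiCompiledMethodLoadInlineRecord*) rec;
      for (jint i = 0; i < inl->numpcs; i++) {
        const PCStackInfo* pcs = &inl->pcinfo[i];
        // methods[j]/bcis[j] give one (method, bytecode index) pair per
        // frame on the inlining stack at this pc.
        printf("pc=%p frames=%d\n", pcs->pc, (int) pcs->numstackframes);
      }
    }
  }
}
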

View File

@ -411,11 +411,8 @@ void nmethod::init_defaults() {
_oops_do_mark_link = NULL;
_jmethod_id = NULL;
_osr_link = NULL;
if (UseG1GC) {
_unloading_next = NULL;
} else {
_scavenge_root_link = NULL;
}
_unloading_next = NULL;
_scavenge_root_link = NULL;
_scavenge_root_state = 0;
#if INCLUDE_RTM_OPT
_rtm_state = NoRTM;
@ -599,12 +596,9 @@ nmethod::nmethod(
code_buffer->copy_code_and_locs_to(this);
code_buffer->copy_values_to(this);
if (ScavengeRootsInCode) {
if (detect_scavenge_root_oops()) {
CodeCache::add_scavenge_root_nmethod(this);
}
Universe::heap()->register_nmethod(this);
}
debug_only(verify_scavenge_root_oops());
debug_only(Universe::heap()->verify_nmethod(this));
CodeCache::commit(this);
}
@ -754,12 +748,9 @@ nmethod::nmethod(
debug_info->copy_to(this);
dependencies->copy_to(this);
if (ScavengeRootsInCode) {
if (detect_scavenge_root_oops()) {
CodeCache::add_scavenge_root_nmethod(this);
}
Universe::heap()->register_nmethod(this);
}
debug_only(verify_scavenge_root_oops());
debug_only(Universe::heap()->verify_nmethod(this));
CodeCache::commit(this);
@ -1661,20 +1652,16 @@ nmethod* volatile nmethod::_oops_do_mark_nmethods;
// This code must be MP safe, because it is used from parallel GC passes.
bool nmethod::test_set_oops_do_mark() {
assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
nmethod* observed_mark_link = _oops_do_mark_link;
if (observed_mark_link == NULL) {
if (_oops_do_mark_link == NULL) {
// Claim this nmethod for this thread to mark.
observed_mark_link = (nmethod*)
Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
if (observed_mark_link == NULL) {
if (Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_link, (nmethod*)NULL) == NULL) {
// Atomically append this nmethod (now claimed) to the head of the list:
nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
for (;;) {
nmethod* required_mark_nmethods = observed_mark_nmethods;
_oops_do_mark_link = required_mark_nmethods;
observed_mark_nmethods = (nmethod*)
Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
observed_mark_nmethods =
Atomic::cmpxchg(this, &_oops_do_mark_nmethods, required_mark_nmethods);
if (observed_mark_nmethods == required_mark_nmethods)
break;
}
@ -1690,9 +1677,9 @@ bool nmethod::test_set_oops_do_mark() {
void nmethod::oops_do_marking_prologue() {
if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
// We use cmpxchg_ptr instead of regular assignment here because the user
// We use cmpxchg instead of regular assignment here because the user
// may fork a bunch of threads, and we need them all to see the same state.
void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
nmethod* observed = Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, (nmethod*)NULL);
guarantee(observed == NULL, "no races in this sequential code");
}
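
The comment above is about visibility rather than contention: even in logically single-threaded setup code, a plain store might not be promptly observed by worker threads forked afterwards, whereas the cmpxchg both verifies the expected old value and acts as a full fence. A std::atomic rendering of the same check (illustrative names):

#include <atomic>
#include <cassert>

static std::atomic<void*> mark_list{nullptr};
static void* const SENTINEL = (void*) 1;

void marking_prologue() {
  void* expected = nullptr;
  // Full-fence publish that also asserts no one raced us here.
  bool ok = mark_list.compare_exchange_strong(
      expected, SENTINEL, std::memory_order_seq_cst);
  assert(ok && "no races in this sequential code");
  (void) ok;
}
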
@ -1707,8 +1694,8 @@ void nmethod::oops_do_marking_epilogue() {
NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
cur = next;
}
void* required = _oops_do_mark_nmethods;
void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
nmethod* required = _oops_do_mark_nmethods;
nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required);
guarantee(observed == required, "no races in this sequential code");
if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
}
@ -2137,7 +2124,7 @@ void nmethod::verify() {
VerifyOopsClosure voc(this);
oops_do(&voc);
assert(voc.ok(), "embedded oops must be OK");
verify_scavenge_root_oops();
Universe::heap()->verify_nmethod(this);
verify_scopes();
}
@ -2230,10 +2217,6 @@ public:
};
void nmethod::verify_scavenge_root_oops() {
if (UseG1GC) {
return;
}
if (!on_scavenge_root_list()) {
// Actually look inside, to verify the claim that it's clean.
DebugScavengeRoot debug_scavenge_root(this);

View File

@ -0,0 +1,178 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/workgroup.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/stack.inline.hpp"
CMSHeap::CMSHeap(GenCollectorPolicy *policy) : GenCollectedHeap(policy) {
_workers = new WorkGang("GC Thread", ParallelGCThreads,
/* are_GC_task_threads */true,
/* are_ConcurrentGC_threads */false);
_workers->initialize_workers();
}
jint CMSHeap::initialize() {
jint status = GenCollectedHeap::initialize();
if (status != JNI_OK) return status;
// If we are running CMS, create the collector responsible
// for collecting the CMS generations.
assert(collector_policy()->is_concurrent_mark_sweep_policy(), "must be CMS policy");
create_cms_collector();
return JNI_OK;
}
void CMSHeap::check_gen_kinds() {
assert(young_gen()->kind() == Generation::ParNew,
"Wrong youngest generation type");
assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
"Wrong generation kind");
}
CMSHeap* CMSHeap::heap() {
CollectedHeap* heap = Universe::heap();
assert(heap != NULL, "Uninitialized access to CMSHeap::heap()");
assert(heap->kind() == CollectedHeap::CMSHeap, "Not a CMSHeap");
return (CMSHeap*) heap;
}
void CMSHeap::gc_threads_do(ThreadClosure* tc) const {
assert(workers() != NULL, "should have workers here");
workers()->threads_do(tc);
ConcurrentMarkSweepThread::threads_do(tc);
}
void CMSHeap::print_gc_threads_on(outputStream* st) const {
assert(workers() != NULL, "should have workers here");
workers()->print_worker_threads_on(st);
ConcurrentMarkSweepThread::print_all_on(st);
}
void CMSHeap::print_on_error(outputStream* st) const {
GenCollectedHeap::print_on_error(st);
st->cr();
CMSCollector::print_on_error(st);
}
void CMSHeap::create_cms_collector() {
assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
"Unexpected generation kinds");
assert(gen_policy()->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
CMSCollector* collector =
new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(),
rem_set(),
gen_policy()->as_concurrent_mark_sweep_policy());
if (!collector->completed_initialization()) {
vm_shutdown_during_initialization("Could not create CMS collector");
}
}
void CMSHeap::collect(GCCause::Cause cause) {
if (should_do_concurrent_full_gc(cause)) {
// Mostly concurrent full collection.
collect_mostly_concurrent(cause);
} else {
GenCollectedHeap::collect(cause);
}
}
bool CMSHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
switch (cause) {
case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
case GCCause::_java_lang_system_gc:
case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
default: return false;
}
}
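
The switch above is where GCLockerInvokesConcurrent and ExplicitGCInvokesConcurrent take effect: with the latter enabled, System.gc() and the jcmd GC.run diagnostic request a mostly-concurrent CMS cycle instead of a stop-the-world full collection. A typical launch line (application name illustrative):

java -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent -jar app.jar
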
void CMSHeap::collect_mostly_concurrent(GCCause::Cause cause) {
assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
MutexLocker ml(Heap_lock);
// Read the GC counts while holding the Heap_lock
unsigned int full_gc_count_before = total_full_collections();
unsigned int gc_count_before = total_collections();
{
MutexUnlocker mu(Heap_lock);
VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
VMThread::execute(&op);
}
}
void CMSHeap::stop() {
ConcurrentMarkSweepThread::cmst()->stop();
}
void CMSHeap::safepoint_synchronize_begin() {
ConcurrentMarkSweepThread::synchronize(false);
}
void CMSHeap::safepoint_synchronize_end() {
ConcurrentMarkSweepThread::desynchronize(false);
}
void CMSHeap::cms_process_roots(StrongRootsScope* scope,
bool young_gen_as_roots,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* root_closure,
CLDClosure* cld_closure) {
MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
if (!only_strong_roots) {
process_string_table_roots(scope, root_closure);
}
if (young_gen_as_roots &&
!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
root_closure->set_generation(young_gen());
young_gen()->oop_iterate(root_closure);
root_closure->reset_generation();
}
_process_strong_tasks->all_tasks_completed(scope->n_threads());
}
void CMSHeap::gc_prologue(bool full) {
always_do_update_barrier = false;
GenCollectedHeap::gc_prologue(full);
};
void CMSHeap::gc_epilogue(bool full) {
GenCollectedHeap::gc_epilogue(full);
always_do_update_barrier = true;
};

View File

@ -0,0 +1,117 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_CMS_CMSHEAP_HPP
#define SHARE_VM_GC_CMS_CMSHEAP_HPP
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/genCollectedHeap.hpp"
class CLDClosure;
class GenCollectorPolicy;
class OopsInGenClosure;
class outputStream;
class StrongRootsScope;
class ThreadClosure;
class WorkGang;
class CMSHeap : public GenCollectedHeap {
public:
CMSHeap(GenCollectorPolicy *policy);
// Returns JNI_OK on success
virtual jint initialize();
virtual void check_gen_kinds();
// Convenience function to be used in situations where the heap type can be
// asserted to be this type.
static CMSHeap* heap();
virtual Name kind() const {
return CollectedHeap::CMSHeap;
}
virtual const char* name() const {
return "Concurrent Mark Sweep";
}
WorkGang* workers() const { return _workers; }
virtual void print_gc_threads_on(outputStream* st) const;
virtual void gc_threads_do(ThreadClosure* tc) const;
virtual void print_on_error(outputStream* st) const;
// Perform a full collection of the heap; intended for use in implementing
// "System.gc". This implies as full a collection as the CollectedHeap
// supports. Caller does not hold the Heap_lock on entry.
void collect(GCCause::Cause cause);
bool is_in_closed_subset(const void* p) const {
return is_in_reserved(p);
}
bool card_mark_must_follow_store() const {
return true;
}
void stop();
void safepoint_synchronize_begin();
void safepoint_synchronize_end();
// If "young_gen_as_roots" is false, younger generations are
// not scanned as roots; in this case, the caller must be arranging to
// scan the younger generations itself. (For example, a generation might
// explicitly mark reachable objects in younger generations, to avoid
// excess storage retention.)
void cms_process_roots(StrongRootsScope* scope,
bool young_gen_as_roots,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* root_closure,
CLDClosure* cld_closure);
private:
WorkGang* _workers;
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
// Accessor for memory state verification support
NOT_PRODUCT(
virtual size_t skip_header_HeapWords() { return CMSCollector::skip_header_HeapWords(); }
)
// Returns success or failure.
void create_cms_collector();
// In support of ExplicitGCInvokesConcurrent functionality
bool should_do_concurrent_full_gc(GCCause::Cause cause);
void collect_mostly_concurrent(GCCause::Cause cause);
};
#endif // SHARE_VM_GC_CMS_CMSHEAP_HPP

View File

@ -23,13 +23,13 @@
*/
#include "precompiled.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsLockVerifier.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
@ -154,7 +154,7 @@ HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
cp->space->set_compaction_top(compact_top);
cp->space = cp->space->next_compaction_space();
if (cp->space == NULL) {
cp->gen = GenCollectedHeap::heap()->young_gen();
cp->gen = CMSHeap::heap()->young_gen();
assert(cp->gen != NULL, "compaction must succeed");
cp->space = cp->gen->first_compaction_space();
assert(cp->space != NULL, "generation must have a first compaction space");
@ -2298,7 +2298,7 @@ void CompactibleFreeListSpace::verify() const {
// Iterate over all oops in the heap. Uses the _no_header version
// since we are not interested in following the klass pointers.
GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
CMSHeap::heap()->oop_iterate_no_header(&cl);
}
if (VerifyObjectStartArray) {

View File

@ -29,6 +29,7 @@
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
@ -298,14 +299,14 @@ void CMSCollector::ref_processor_init() {
}
AdaptiveSizePolicy* CMSCollector::size_policy() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
return gch->gen_policy()->size_policy();
CMSHeap* heap = CMSHeap::heap();
return heap->gen_policy()->size_policy();
}
void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
const char* gen_name = "old";
GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
GenCollectorPolicy* gcp = CMSHeap::heap()->gen_policy();
// Generation Counters - generation 1, 1 subspace
_gen_counters = new GenerationCounters(gen_name, 1, 1,
gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
@ -354,8 +355,8 @@ void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
size_t cms_free = _cms_gen->cmsSpace()->free();
GenCollectedHeap* gch = GenCollectedHeap::heap();
size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
CMSHeap* heap = CMSHeap::heap();
size_t expected_promotion = MIN2(heap->young_gen()->capacity(),
(size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
if (cms_free > expected_promotion) {
// Start a cms collection if there isn't enough space to promote
@ -595,12 +596,12 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
assert(CGC_lock != NULL, "Where's the CGC_lock?");
// Support for parallelizing young gen rescan
GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
_young_gen = (ParNewGeneration*)gch->young_gen();
if (gch->supports_inline_contig_alloc()) {
_top_addr = gch->top_addr();
_end_addr = gch->end_addr();
CMSHeap* heap = CMSHeap::heap();
assert(heap->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
_young_gen = (ParNewGeneration*)heap->young_gen();
if (heap->supports_inline_contig_alloc()) {
_top_addr = heap->top_addr();
_end_addr = heap->end_addr();
assert(_young_gen != NULL, "no _young_gen");
_eden_chunk_index = 0;
_eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
@ -762,9 +763,9 @@ void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
log.trace(" Maximum free fraction %f", maximum_free_percentage);
log.trace(" Capacity " SIZE_FORMAT, capacity() / 1000);
log.trace(" Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
size_t young_size = gch->young_gen()->capacity();
CMSHeap* heap = CMSHeap::heap();
assert(heap->is_old_gen(this), "The CMS generation should always be the old generation");
size_t young_size = heap->young_gen()->capacity();
log.trace(" Young gen size " SIZE_FORMAT, young_size / 1000);
log.trace(" unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
log.trace(" contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
@ -923,7 +924,7 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
assert_lock_strong(freelistLock());
#ifndef PRODUCT
if (GenCollectedHeap::heap()->promotion_should_fail()) {
if (CMSHeap::heap()->promotion_should_fail()) {
return NULL;
}
#endif // #ifndef PRODUCT
@ -1000,7 +1001,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
oop old, markOop m,
size_t word_sz) {
#ifndef PRODUCT
if (GenCollectedHeap::heap()->promotion_should_fail()) {
if (CMSHeap::heap()->promotion_should_fail()) {
return NULL;
}
#endif // #ifndef PRODUCT
@ -1076,7 +1077,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
NOT_PRODUCT(
Atomic::inc(&_numObjectsPromoted);
Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
Atomic::add(alloc_sz, &_numWordsPromoted);
)
return obj;
@ -1179,10 +1180,10 @@ bool CMSCollector::shouldConcurrentCollect() {
// We start a collection if we believe an incremental collection may fail;
// this is not likely to be productive in practice because it's probably too
// late anyway.
GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(gch->collector_policy()->is_generation_policy(),
CMSHeap* heap = CMSHeap::heap();
assert(heap->collector_policy()->is_generation_policy(),
"You may want to check the correctness of the following");
if (gch->incremental_collection_will_fail(true /* consult_young */)) {
if (heap->incremental_collection_will_fail(true /* consult_young */)) {
log.print("CMSCollector: collect because incremental collection will fail ");
return true;
}
@ -1294,8 +1295,8 @@ void CMSCollector::collect(bool full,
}
void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
unsigned int gc_count = gch->total_full_collections();
CMSHeap* heap = CMSHeap::heap();
unsigned int gc_count = heap->total_full_collections();
if (gc_count == full_gc_count) {
MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
_full_gc_requested = true;
@ -1307,7 +1308,7 @@ void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause ca
}
bool CMSCollector::is_external_interruption() {
GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
GCCause::Cause cause = CMSHeap::heap()->gc_cause();
return GCCause::is_user_requested_gc(cause) ||
GCCause::is_serviceability_requested_gc(cause);
}
@ -1456,8 +1457,8 @@ void CMSCollector::acquire_control_and_collect(bool full,
// Inform cms gen if this was due to partial collection failing.
// The CMS gen may use this fact to determine its expansion policy.
GenCollectedHeap* gch = GenCollectedHeap::heap();
if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
CMSHeap* heap = CMSHeap::heap();
if (heap->incremental_collection_will_fail(false /* don't consult_young */)) {
assert(!_cmsGen->incremental_collection_failed(),
"Should have been noticed, reacted to and cleared");
_cmsGen->set_incremental_collection_failed();
@ -1489,14 +1490,14 @@ void CMSCollector::acquire_control_and_collect(bool full,
// Has the GC time limit been exceeded?
size_t max_eden_size = _young_gen->max_eden_size();
GCCause::Cause gc_cause = gch->gc_cause();
GCCause::Cause gc_cause = heap->gc_cause();
size_policy()->check_gc_overhead_limit(_young_gen->used(),
_young_gen->eden()->used(),
_cmsGen->max_capacity(),
max_eden_size,
full,
gc_cause,
gch->collector_policy());
heap->collector_policy());
// Reset the expansion cause, now that we just completed
// a collection cycle.
@ -1518,21 +1519,21 @@ void CMSCollector::compute_new_size() {
// A work method used by the foreground collector to do
// a mark-sweep-compact.
void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* heap = CMSHeap::heap();
STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
gc_timer->register_gc_start();
SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start());
gch->pre_full_gc_dump(gc_timer);
heap->pre_full_gc_dump(gc_timer);
GCTraceTime(Trace, gc, phases) t("CMS:MSC");
// Temporarily widen the span of the weak reference processing to
// the entire heap.
MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
MemRegion new_span(CMSHeap::heap()->reserved_region());
ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
// Temporarily, clear the "is_alive_non_header" field of the
// reference processor.
@ -1608,7 +1609,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
// No longer a need to do a concurrent collection for Metaspace.
MetaspaceGC::set_should_concurrent_collect(false);
gch->post_full_gc_dump(gc_timer);
heap->post_full_gc_dump(gc_timer);
gc_timer->register_gc_end();
@ -1702,7 +1703,7 @@ void CMSCollector::collect_in_background(GCCause::Cause cause) {
assert(Thread::current()->is_ConcurrentGC_thread(),
"A CMS asynchronous collection is only allowed on a CMS thread.");
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* heap = CMSHeap::heap();
{
bool safepoint_check = Mutex::_no_safepoint_check_flag;
MutexLockerEx hl(Heap_lock, safepoint_check);
@ -1731,8 +1732,8 @@ void CMSCollector::collect_in_background(GCCause::Cause cause) {
_full_gc_requested = false; // acks all outstanding full gc requests
_full_gc_cause = GCCause::_no_gc;
// Signal that we are about to start a collection
gch->increment_total_full_collections(); // ... starting a collection cycle
_collection_count_start = gch->total_full_collections();
heap->increment_total_full_collections(); // ... starting a collection cycle
_collection_count_start = heap->total_full_collections();
}
size_t prev_used = _cmsGen->used();
@ -1925,9 +1926,9 @@ void CMSCollector::register_gc_end() {
}
void CMSCollector::save_heap_summary() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
_last_heap_summary = gch->create_heap_summary();
_last_metaspace_summary = gch->create_metaspace_summary();
CMSHeap* heap = CMSHeap::heap();
_last_heap_summary = heap->create_heap_summary();
_last_metaspace_summary = heap->create_metaspace_summary();
}
void CMSCollector::report_heap_summary(GCWhen::Type when) {
@ -2303,10 +2304,10 @@ bool CMSCollector::verify_after_remark() {
assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
verify_work_stacks_empty();
GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->ensure_parsability(false); // fill TLABs, but no need to retire them
CMSHeap* heap = CMSHeap::heap();
heap->ensure_parsability(false); // fill TLABs, but no need to retire them
// Update the saved marks which may affect the root scans.
gch->save_marks();
heap->save_marks();
if (CMSRemarkVerifyVariant == 1) {
// In this first variant of verification, we complete
@ -2329,19 +2330,19 @@ bool CMSCollector::verify_after_remark() {
void CMSCollector::verify_after_remark_work_1() {
ResourceMark rm;
HandleMark hm;
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* heap = CMSHeap::heap();
// Get a clear set of claim bits for the roots processing to work with.
ClassLoaderDataGraph::clear_claimed_marks();
// Mark from roots one level into CMS
MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
{
StrongRootsScope srs(1);
gch->cms_process_roots(&srs,
heap->cms_process_roots(&srs,
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
@ -2376,7 +2377,7 @@ void CMSCollector::verify_after_remark_work_1() {
log.error("Failed marking verification after remark");
ResourceMark rm;
LogStream ls(log.error());
gch->print_on(&ls);
heap->print_on(&ls);
fatal("CMS: failed marking verification after remark");
}
}
@ -2399,7 +2400,7 @@ class VerifyCLDOopsCLDClosure : public CLDClosure {
void CMSCollector::verify_after_remark_work_2() {
ResourceMark rm;
HandleMark hm;
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* heap = CMSHeap::heap();
// Get a clear set of claim bits for the roots processing to work with.
ClassLoaderDataGraph::clear_claimed_marks();
@ -2409,12 +2410,12 @@ void CMSCollector::verify_after_remark_work_2() {
markBitMap());
CLDToOopClosure cld_closure(&notOlder, true);
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
{
StrongRootsScope srs(1);
gch->cms_process_roots(&srs,
heap->cms_process_roots(&srs,
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
@ -2803,7 +2804,7 @@ class CMSParInitialMarkTask: public CMSParMarkTask {
void CMSCollector::checkpointRootsInitial() {
assert(_collectorState == InitialMarking, "Wrong collector state");
check_correct_thread_executing();
TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
save_heap_summary();
report_heap_summary(GCWhen::BeforeGC);
@ -2844,14 +2845,14 @@ void CMSCollector::checkpointRootsInitialWork() {
HandleMark hm;
MarkRefsIntoClosure notOlder(_span, &_markBitMap);
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* heap = CMSHeap::heap();
verify_work_stacks_empty();
verify_overflow_empty();
gch->ensure_parsability(false); // fill TLABs, but no need to retire them
heap->ensure_parsability(false); // fill TLABs, but no need to retire them
// Update the saved marks which may affect the root scans.
gch->save_marks();
heap->save_marks();
// weak reference processing has not started yet.
ref_processor()->set_enqueuing_is_done(false);
@ -2872,7 +2873,7 @@ void CMSCollector::checkpointRootsInitialWork() {
#endif
if (CMSParallelInitialMarkEnabled) {
// The parallel version.
WorkGang* workers = gch->workers();
WorkGang* workers = heap->workers();
assert(workers != NULL, "Need parallel worker threads.");
uint n_workers = workers->active_workers();
@ -2891,11 +2892,11 @@ void CMSCollector::checkpointRootsInitialWork() {
} else {
// The serial version.
CLDToOopClosure cld_closure(&notOlder, true);
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
StrongRootsScope srs(1);
gch->cms_process_roots(&srs,
heap->cms_process_roots(&srs,
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
@ -3179,7 +3180,7 @@ void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
HeapWord* cur = read;
while (f > read) {
cur = read;
read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
read = Atomic::cmpxchg(f, &_global_finger, cur);
if (cur == read) {
// our cas succeeded
assert(_global_finger >= f, "protocol consistency");
@ -3800,7 +3801,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
bitMapLock());
startTimer();
unsigned int before_count =
GenCollectedHeap::heap()->total_collections();
CMSHeap::heap()->total_collections();
SurvivorSpacePrecleanClosure
sss_cl(this, _span, &_markBitMap, &_markStack,
&pam_cl, before_count, CMSYield);
@ -4103,7 +4104,7 @@ void CMSCollector::checkpointRootsFinal() {
// world is stopped at this checkpoint
assert(SafepointSynchronize::is_at_safepoint(),
"world should be stopped");
TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
verify_work_stacks_empty();
verify_overflow_empty();
@ -4112,16 +4113,16 @@ void CMSCollector::checkpointRootsFinal() {
_young_gen->used() / K, _young_gen->capacity() / K);
{
if (CMSScavengeBeforeRemark) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* heap = CMSHeap::heap();
// Temporarily set flag to false, GCH->do_collection will
// expect it to be false and set to true
FlagSetting fl(gch->_is_gc_active, false);
FlagSetting fl(heap->_is_gc_active, false);
gch->do_collection(true, // full (i.e. force, see below)
false, // !clear_all_soft_refs
0, // size
false, // is_tlab
GenCollectedHeap::YoungGen // type
heap->do_collection(true, // full (i.e. force, see below)
false, // !clear_all_soft_refs
0, // size
false, // is_tlab
GenCollectedHeap::YoungGen // type
);
}
FreelistLocker x(this);
@ -4142,7 +4143,7 @@ void CMSCollector::checkpointRootsFinalWork() {
ResourceMark rm;
HandleMark hm;
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* heap = CMSHeap::heap();
if (should_unload_classes()) {
CodeCache::gc_prologue();
@ -4162,9 +4163,9 @@ void CMSCollector::checkpointRootsFinalWork() {
// or of an indication of whether the scavenge did indeed occur,
// we cannot rely on TLAB's having been filled and must do
// so here just in case a scavenge did not happen.
gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
heap->ensure_parsability(false); // fill TLAB's, but no need to retire them
// Update the saved marks which may affect the root scans.
gch->save_marks();
heap->save_marks();
print_eden_and_survivor_chunk_arrays();
@ -4240,7 +4241,7 @@ void CMSCollector::checkpointRootsFinalWork() {
_markStack._failed_double = 0;
if ((VerifyAfterGC || VerifyDuringGC) &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
verify_after_remark();
}
@ -4262,7 +4263,7 @@ void CMSParInitialMarkTask::work(uint worker_id) {
// ---------- scan from roots --------------
_timer.start();
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* heap = CMSHeap::heap();
ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
// ---------- young gen roots --------------
@ -4278,12 +4279,12 @@ void CMSParInitialMarkTask::work(uint worker_id) {
CLDToOopClosure cld_closure(&par_mri_cl, true);
gch->cms_process_roots(_strong_roots_scope,
false, // yg was scanned above
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
&par_mri_cl,
&cld_closure);
heap->cms_process_roots(_strong_roots_scope,
false, // yg was scanned above
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
&par_mri_cl,
&cld_closure);
assert(_collector->should_unload_classes()
|| (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
@ -4387,7 +4388,7 @@ void CMSParRemarkTask::work(uint worker_id) {
// ---------- rescan from roots --------------
_timer.start();
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* heap = CMSHeap::heap();
ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
_collector->_span, _collector->ref_processor(),
&(_collector->_markBitMap),
@ -4407,12 +4408,12 @@ void CMSParRemarkTask::work(uint worker_id) {
// ---------- remaining roots --------------
_timer.reset();
_timer.start();
gch->cms_process_roots(_strong_roots_scope,
false, // yg was scanned above
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
&par_mrias_cl,
NULL); // The dirty klasses will be handled below
heap->cms_process_roots(_strong_roots_scope,
false, // yg was scanned above
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
&par_mrias_cl,
NULL); // The dirty klasses will be handled below
assert(_collector->should_unload_classes()
|| (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
@ -4839,8 +4840,8 @@ initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
// Parallel version of remark
void CMSCollector::do_remark_parallel() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
WorkGang* workers = gch->workers();
CMSHeap* heap = CMSHeap::heap();
WorkGang* workers = heap->workers();
assert(workers != NULL, "Need parallel worker threads.");
// Choose to use the number of GC workers most recently set
// into "active_workers".
@ -4856,7 +4857,7 @@ void CMSCollector::do_remark_parallel() {
// the younger_gen cards, so we shouldn't call the following else
// the verification code as well as subsequent younger_refs_iterate
// code would get confused. XXX
// gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
// heap->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
// The young gen rescan work will not be done as part of
// process_roots (which currently doesn't know how to
@ -4898,7 +4899,7 @@ void CMSCollector::do_remark_parallel() {
void CMSCollector::do_remark_non_parallel() {
ResourceMark rm;
HandleMark hm;
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* heap = CMSHeap::heap();
ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
MarkRefsIntoAndScanClosure
@ -4939,7 +4940,7 @@ void CMSCollector::do_remark_non_parallel() {
}
}
if (VerifyDuringGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
Universe::verify();
}
@ -4948,15 +4949,15 @@ void CMSCollector::do_remark_non_parallel() {
verify_work_stacks_empty();
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
StrongRootsScope srs(1);
gch->cms_process_roots(&srs,
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&mrias_cl,
NULL); // The dirty klasses will be handled below
heap->cms_process_roots(&srs,
true, // young gen as roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&mrias_cl,
NULL); // The dirty klasses will be handled below
assert(should_unload_classes()
|| (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
@ -5148,8 +5149,8 @@ void CMSRefProcTaskProxy::do_work_steal(int i,
void CMSRefProcTaskExecutor::execute(ProcessTask& task)
{
GenCollectedHeap* gch = GenCollectedHeap::heap();
WorkGang* workers = gch->workers();
CMSHeap* heap = CMSHeap::heap();
WorkGang* workers = heap->workers();
assert(workers != NULL, "Need parallel worker threads.");
CMSRefProcTaskProxy rp_task(task, &_collector,
_collector.ref_processor()->span(),
@ -5161,8 +5162,8 @@ void CMSRefProcTaskExecutor::execute(ProcessTask& task)
void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
{
GenCollectedHeap* gch = GenCollectedHeap::heap();
WorkGang* workers = gch->workers();
CMSHeap* heap = CMSHeap::heap();
WorkGang* workers = heap->workers();
assert(workers != NULL, "Need parallel worker threads.");
CMSRefEnqueueTaskProxy enq_task(task);
workers->run_task(&enq_task);
@ -5195,9 +5196,9 @@ void CMSCollector::refProcessingWork() {
// and a different number of discovered lists may have Ref objects.
// That is OK as long as the Reference lists are balanced (see
// balance_all_queues() and balance_queues()).
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* heap = CMSHeap::heap();
uint active_workers = ParallelGCThreads;
WorkGang* workers = gch->workers();
WorkGang* workers = heap->workers();
if (workers != NULL) {
active_workers = workers->active_workers();
// The expectation is that active_workers will have already
@ -5305,7 +5306,7 @@ void CMSCollector::sweep() {
verify_work_stacks_empty();
verify_overflow_empty();
increment_sweep_count();
TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
_inter_sweep_timer.stop();
_inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
@ -5378,9 +5379,9 @@ void CMSCollector::sweep() {
// this generation. If such a promotion may still fail,
// the flag will be set again when a young collection is
// attempted.
GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->clear_incremental_collection_failed(); // Worth retrying as fresh space may have been freed up
gch->update_full_collections_completed(_collection_count_start);
CMSHeap* heap = CMSHeap::heap();
heap->clear_incremental_collection_failed(); // Worth retrying as fresh space may have been freed up
heap->update_full_collections_completed(_collection_count_start);
}
// FIX ME!!! Looks like this belongs in CFLSpace, with
@ -5415,7 +5416,7 @@ void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generati
bool full) {
// If the young generation has been collected, gather any statistics
// that are of interest at this point.
bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
if (!full && current_is_young) {
// Gather statistics on the young generation collection.
collector()->stats().record_gc0_end(used());
@ -6188,7 +6189,7 @@ size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
do_yield_check();
}
unsigned int after_count =
GenCollectedHeap::heap()->total_collections();
CMSHeap::heap()->total_collections();
bool abort = (_before_count != after_count) ||
_collector->should_abort_preclean();
return abort ? 0 : size;
@ -7852,7 +7853,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
return false;
}
// Grab the entire list; we'll put back a suffix
oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
Thread* tid = Thread::current();
// Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
// set to ParallelGCThreads.
@ -7867,7 +7868,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
return false;
} else if (_overflow_list != BUSY) {
// Try and grab the prefix
prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
}
}
// If the list was found to be empty, or we spun long
@ -7880,7 +7881,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
if (prefix == NULL) {
// Write back the NULL in case we overwrote it with BUSY above
// and it is still the same value.
(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
}
return false;
}
@ -7895,7 +7896,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
// Write back the NULL in lieu of the BUSY we wrote
// above, if it is still the same value.
if (_overflow_list == BUSY) {
(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
}
} else {
// Chop off the suffix and return it to the global list.
@ -7911,7 +7912,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
bool attached = false;
while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
observed_overflow_list =
(oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
if (cur_overflow_list == observed_overflow_list) {
attached = true;
break;
@ -7936,7 +7937,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
}
// ... and try to place spliced list back on overflow_list ...
observed_overflow_list =
(oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
} while (cur_overflow_list != observed_overflow_list);
// ... until we have succeeded in doing so.
}
@ -7957,7 +7958,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
}
#ifndef PRODUCT
assert(_num_par_pushes >= n, "Too many pops?");
Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
Atomic::sub(n, &_num_par_pushes);
#endif
return true;
}
@ -7986,7 +7987,7 @@ void CMSCollector::par_push_on_overflow_list(oop p) {
p->set_mark(NULL);
}
observed_overflow_list =
(oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
} while (cur_overflow_list != observed_overflow_list);
}
#undef BUSY
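
The two routines above maintain a shared overflow list in which a distinguished BUSY value parks the head while one thread detaches the whole chain; competing takers spin until the head is neither BUSY nor the chain they wanted. A compact sketch of the take-all side (std::atomic, illustrative types; it assumes pushers wait while the head is BUSY, whereas the HotSpot code instead special-cases BUSY on the push side):

#include <atomic>

struct Node { Node* next; };

static Node* const BUSY = (Node*) 1;  // sentinel, never dereferenced
static std::atomic<Node*> overflow_list{nullptr};

// Detach the entire list, leaving BUSY so others know a taker is active.
Node* take_all() {
  Node* head = overflow_list.exchange(BUSY, std::memory_order_acq_rel);
  while (head == BUSY) {  // another taker holds the list; spin and retry
    head = overflow_list.exchange(BUSY, std::memory_order_acq_rel);
  }
  // Restore NULL if our BUSY marker is still in place.
  Node* expected = BUSY;
  overflow_list.compare_exchange_strong(expected, nullptr,
                                        std::memory_order_acq_rel);
  return head;  // may be nullptr if the list was empty
}
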

View File

@ -25,13 +25,13 @@
#ifndef SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
#define SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsLockVerifier.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/shared/gcUtil.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
@ -256,7 +256,7 @@ inline bool CMSCollector::should_abort_preclean() const {
// scavenge is done or foreground GC wants to take over collection
return _collectorState == AbortablePreclean &&
(_abort_preclean || _foregroundGCIsActive ||
GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
CMSHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
}
inline size_t CMSCollector::get_eden_used() const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,10 +24,10 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
@ -225,7 +225,7 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
// Wait time in millis or 0 value representing infinite wait for a scavenge
assert(t_millis >= 0, "Wait time for scavenge should be 0 or positive");
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* heap = CMSHeap::heap();
double start_time_secs = os::elapsedTime();
double end_time_secs = start_time_secs + (t_millis / ((double) MILLIUNITS));
@ -233,7 +233,7 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
unsigned int before_count;
{
MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
before_count = gch->total_collections();
before_count = heap->total_collections();
}
unsigned int loop_count = 0;
@ -279,7 +279,7 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
unsigned int after_count;
{
MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
after_count = gch->total_collections();
after_count = heap->total_collections();
}
if(before_count != after_count) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,10 +23,10 @@
*/
#include "precompiled.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/virtualspace.hpp"
@ -394,7 +394,7 @@ get_LNC_array_for_space(Space* sp,
// Do a dirty read here. If we pass the conditional then take the rare
// event lock and do the read again in case some other thread had already
// succeeded and done the resize.
int cur_collection = GenCollectedHeap::heap()->total_collections();
int cur_collection = CMSHeap::heap()->total_collections();
// Updated _last_LNC_resizing_collection[i] must not be visible before
// _lowest_non_clean and friends are visible. Therefore use acquire/release
to guarantee this on non-TSO architectures.

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/parNewGeneration.inline.hpp"
@ -124,7 +125,7 @@ bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) c
void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
assert(old->is_objArray(), "must be obj array");
assert(old->is_forwarded(), "must be forwarded");
assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");
assert(CMSHeap::heap()->is_in_reserved(old), "must be in heap.");
assert(!old_gen()->is_in(old), "must be in young generation.");
objArrayOop obj = objArrayOop(old->forwardee());
@ -205,9 +206,9 @@ bool ParScanThreadState::take_from_overflow_stack() {
for (size_t i = 0; i != num_take_elems; i++) {
oop cur = of_stack->pop();
oop obj_to_push = cur->forwardee();
assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap");
assert(CMSHeap::heap()->is_in_reserved(cur), "Should be in heap");
assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
assert(CMSHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
if (should_be_partially_scanned(obj_to_push, cur)) {
assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
obj_to_push = cur;
@ -590,7 +591,7 @@ ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
{}
void ParNewGenTask::work(uint worker_id) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* heap = CMSHeap::heap();
// Since this is being done in a separate thread, need new resource
// and handle marks.
ResourceMark rm;
@ -602,10 +603,10 @@ void ParNewGenTask::work(uint worker_id) {
par_scan_state.set_young_old_boundary(_young_old_boundary);
CLDScanClosure cld_scan_closure(&par_scan_state.to_space_root_closure(),
gch->rem_set()->cld_rem_set()->accumulate_modified_oops());
heap->rem_set()->cld_rem_set()->accumulate_modified_oops());
par_scan_state.start_strong_roots();
gch->young_process_roots(_strong_roots_scope,
heap->young_process_roots(_strong_roots_scope,
&par_scan_state.to_space_root_closure(),
&par_scan_state.older_gen_closure(),
&cld_scan_closure);
@ -687,7 +688,7 @@ void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
_par_cl->do_oop_nv(p);
if (GenCollectedHeap::heap()->is_in_reserved(p)) {
if (CMSHeap::heap()->is_in_reserved(p)) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
_rs->write_ref_field_gc_par(p, obj);
}
@ -714,7 +715,7 @@ void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
_cl->do_oop_nv(p);
if (GenCollectedHeap::heap()->is_in_reserved(p)) {
if (CMSHeap::heap()->is_in_reserved(p)) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
_rs->write_ref_field_gc_par(p, obj);
}
@ -804,7 +805,7 @@ public:
};
void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* gch = CMSHeap::heap();
WorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
_state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
@ -816,7 +817,7 @@ void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
}
void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* gch = CMSHeap::heap();
WorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
ParNewRefEnqueueTaskProxy enq_task(task);
@ -825,8 +826,8 @@ void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
_state_set.flush();
GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->save_marks();
CMSHeap* heap = CMSHeap::heap();
heap->save_marks();
}
ScanClosureWithParBarrier::
@ -835,10 +836,10 @@ ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
{ }
EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
EvacuateFollowersClosureGeneral(CMSHeap* heap,
OopsInGenClosure* cur,
OopsInGenClosure* older) :
_gch(gch),
_heap(heap),
_scan_cur_or_nonheap(cur), _scan_older(older)
{ }
@ -846,15 +847,15 @@ void EvacuateFollowersClosureGeneral::do_void() {
do {
// Beware: this call will lead to closure applications via virtual
// calls.
_gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
_scan_cur_or_nonheap,
_scan_older);
} while (!_gch->no_allocs_since_save_marks());
_heap->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
_scan_cur_or_nonheap,
_scan_older);
} while (!_heap->no_allocs_since_save_marks());
}
// A Generation that does parallel young-gen collection.
void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
void ParNewGeneration::handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set) {
assert(_promo_failure_scan_stack.is_empty(), "post condition");
_promo_failure_scan_stack.clear(true); // Clear cached segments.
@ -883,7 +884,7 @@ void ParNewGeneration::collect(bool full,
bool is_tlab) {
assert(full || size > 0, "otherwise we don't want to collect");
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* gch = CMSHeap::heap();
_gc_timer->register_gc_start();
@ -1064,7 +1065,7 @@ void ParNewGeneration::collect(bool full,
}
size_t ParNewGeneration::desired_plab_sz() {
return _plab_stats.desired_plab_sz(GenCollectedHeap::heap()->workers()->active_workers());
return _plab_stats.desired_plab_sz(CMSHeap::heap()->workers()->active_workers());
}
static int sum;
@ -1168,7 +1169,7 @@ oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
} else {
// Is in to-space; do copying ourselves.
Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
assert(CMSHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
forward_ptr = old->forward_to_atomic(new_obj);
// Restore the mark word copied above.
new_obj->set_mark(m);
@ -1296,7 +1297,7 @@ void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadSt
from_space_obj->set_klass_to_list_ptr(NULL);
}
observed_overflow_list =
(oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
} while (cur_overflow_list != observed_overflow_list);
}
}
@ -1339,7 +1340,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
if (_overflow_list == NULL) return false;
// Otherwise, there was something there; try claiming the list.
oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
// Trim off a prefix of at most objsFromOverflow items
Thread* tid = Thread::current();
size_t spin_count = ParallelGCThreads;
@ -1353,7 +1354,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
return false;
} else if (_overflow_list != BUSY) {
// try and grab the prefix
prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
}
}
if (prefix == NULL || prefix == BUSY) {
@ -1361,7 +1362,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
if (prefix == NULL) {
// Write back the NULL in case we overwrote it with BUSY above
// and it is still the same value.
(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
(void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
}
return false;
}
@ -1380,7 +1381,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
// Write back the NULL in lieu of the BUSY we wrote
// above and it is still the same value.
if (_overflow_list == BUSY) {
(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
(void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
}
} else {
assert(suffix != BUSY, "Error");
@ -1394,7 +1395,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
bool attached = false;
while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
observed_overflow_list =
(oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
if (cur_overflow_list == observed_overflow_list) {
attached = true;
break;
@ -1420,7 +1421,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
last->set_klass_to_list_ptr(NULL);
}
observed_overflow_list =
(oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
} while (cur_overflow_list != observed_overflow_list);
}
}
@ -1452,7 +1453,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
assert(_num_par_pushes >= n, "Too many pops?");
Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
Atomic::sub(n, &_num_par_pushes);
#endif
return true;
}
@ -1475,3 +1476,9 @@ void ParNewGeneration::ref_processor_init() {
const char* ParNewGeneration::name() const {
return "par new generation";
}
void ParNewGeneration::restore_preserved_marks() {
SharedRestorePreservedMarksTaskExecutor task_executor(CMSHeap::heap()->workers());
_preserved_marks_set.restore(&task_executor);
}
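For illustration only (a standalone sketch, not part of this patch): the overflow-list updates above, now on the typed Atomic::cmpxchg/xchg API, follow the classic lock-free Treiber-stack pattern, minus HotSpot's BUSY-sentinel spinning. Expressed with standard C++ atomics:

#include <atomic>

struct Node { Node* next; };
std::atomic<Node*> overflow_list{nullptr};

void push(Node* n) {
  Node* observed = overflow_list.load();
  do {
    n->next = observed;                    // link to the observed head
  } while (!overflow_list.compare_exchange_weak(observed, n));
}

Node* take_all() {
  return overflow_list.exchange(nullptr);  // claim the whole list at once
}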

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,6 +35,7 @@
#include "memory/padded.hpp"
class ChunkArray;
class CMSHeap;
class ParScanWithoutBarrierClosure;
class ParScanWithBarrierClosure;
class ParRootScanWithoutBarrierClosure;
@ -259,11 +260,11 @@ class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
class EvacuateFollowersClosureGeneral: public VoidClosure {
private:
GenCollectedHeap* _gch;
CMSHeap* _heap;
OopsInGenClosure* _scan_cur_or_nonheap;
OopsInGenClosure* _scan_older;
public:
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
EvacuateFollowersClosureGeneral(CMSHeap* heap,
OopsInGenClosure* cur,
OopsInGenClosure* older);
virtual void do_void();
@ -336,7 +337,7 @@ class ParNewGeneration: public DefNewGeneration {
static oop real_forwardee_slow(oop obj);
static void waste_some_time();
void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set);
void handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set);
protected:
@ -345,6 +346,8 @@ class ParNewGeneration: public DefNewGeneration {
bool survivor_overflow() { return _survivor_overflow; }
void set_survivor_overflow(bool v) { _survivor_overflow = v; }
void restore_preserved_marks();
public:
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size);

View File

@ -25,10 +25,10 @@
#ifndef SHARE_VM_GC_CMS_PAROOPCLOSURES_INLINE_HPP
#define SHARE_VM_GC_CMS_PAROOPCLOSURES_INLINE_HPP
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/parOopClosures.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
@ -72,9 +72,9 @@ template <class T>
inline void ParScanClosure::do_oop_work(T* p,
bool gc_barrier,
bool root_scan) {
assert((!GenCollectedHeap::heap()->is_in_reserved(p) ||
assert((!CMSHeap::heap()->is_in_reserved(p) ||
generation()->is_in_reserved(p))
&& (GenCollectedHeap::heap()->is_young_gen(generation()) || gc_barrier),
&& (CMSHeap::heap()->is_young_gen(generation()) || gc_barrier),
"The gen must be right, and we must be doing the barrier "
"in older generations.");
T heap_oop = oopDesc::load_heap_oop(p);
@ -85,8 +85,8 @@ inline void ParScanClosure::do_oop_work(T* p,
if (_g->to()->is_in_reserved(obj)) {
Log(gc) log;
log.error("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
GenCollectedHeap* gch = GenCollectedHeap::heap();
Space* sp = gch->space_containing(p);
CMSHeap* heap = CMSHeap::heap();
Space* sp = heap->space_containing(p);
oop obj = oop(sp->block_start(p));
assert((HeapWord*)obj < (HeapWord*)p, "Error");
log.error("Object: " PTR_FORMAT, p2i((void *)obj));
@ -96,7 +96,7 @@ inline void ParScanClosure::do_oop_work(T* p,
log.error("-----");
log.error("Heap:");
log.error("-----");
gch->print_on(&ls);
heap->print_on(&ls);
ShouldNotReachHere();
}
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/vmCMSOperations.hpp"
@ -39,19 +40,19 @@
//////////////////////////////////////////////////////////
void VM_CMS_Operation::verify_before_gc() {
if (VerifyBeforeGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm);
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
GenCollectedHeap::heap()->prepare_for_verify();
CMSHeap::heap()->prepare_for_verify();
Universe::verify();
}
}
void VM_CMS_Operation::verify_after_gc() {
if (VerifyAfterGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
HandleMark hm;
FreelistLocker x(_collector);
@ -112,13 +113,13 @@ void VM_CMS_Initial_Mark::doit() {
_collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);
CMSHeap* heap = CMSHeap::heap();
GCCauseSetter gccs(heap, GCCause::_cms_initial_mark);
VM_CMS_Operation::verify_before_gc();
IsGCActiveMark x; // stop-world GC active
_collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());
_collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, heap->gc_cause());
VM_CMS_Operation::verify_after_gc();
@ -140,13 +141,13 @@ void VM_CMS_Final_Remark::doit() {
_collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCCauseSetter gccs(gch, GCCause::_cms_final_remark);
CMSHeap* heap = CMSHeap::heap();
GCCauseSetter gccs(heap, GCCause::_cms_final_remark);
VM_CMS_Operation::verify_before_gc();
IsGCActiveMark x; // stop-world GC active
_collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());
_collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, heap->gc_cause());
VM_CMS_Operation::verify_after_gc();
@ -162,8 +163,8 @@ void VM_GenCollectFullConcurrent::doit() {
assert(Thread::current()->is_VM_thread(), "Should be VM thread");
assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");
GenCollectedHeap* gch = GenCollectedHeap::heap();
if (_gc_count_before == gch->total_collections()) {
CMSHeap* heap = CMSHeap::heap();
if (_gc_count_before == heap->total_collections()) {
// The "full" of do_full_collection call below "forces"
// a collection; the second arg, 0, below ensures that
// only the young gen is collected. XXX In the future,
@ -173,21 +174,21 @@ void VM_GenCollectFullConcurrent::doit() {
// for the future.
assert(SafepointSynchronize::is_at_safepoint(),
"We can only be executing this arm of if at a safepoint");
GCCauseSetter gccs(gch, _gc_cause);
gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
GCCauseSetter gccs(heap, _gc_cause);
heap->do_full_collection(heap->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
} // Else no need for a foreground young gc
assert((_gc_count_before < gch->total_collections()) ||
assert((_gc_count_before < heap->total_collections()) ||
(GCLocker::is_active() /* gc may have been skipped */
&& (_gc_count_before == gch->total_collections())),
&& (_gc_count_before == heap->total_collections())),
"total_collections() should be monotonically increasing");
MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
if (gch->total_full_collections() == _full_gc_count_before) {
assert(_full_gc_count_before <= heap->total_full_collections(), "Error");
if (heap->total_full_collections() == _full_gc_count_before) {
// Nudge the CMS thread to start a concurrent collection.
CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
} else {
assert(_full_gc_count_before < gch->total_full_collections(), "Error");
assert(_full_gc_count_before < heap->total_full_collections(), "Error");
FullGCCount_lock->notify_all(); // Inform the Java thread its work is done
}
}
@ -197,11 +198,11 @@ bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
assert(thr != NULL, "Unexpected tid");
if (!thr->is_Java_thread()) {
assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
GenCollectedHeap* gch = GenCollectedHeap::heap();
if (_gc_count_before != gch->total_collections()) {
CMSHeap* heap = CMSHeap::heap();
if (_gc_count_before != heap->total_collections()) {
// No need to do a young gc, we'll just nudge the CMS thread
// in the doit() method above, to be executed soon.
assert(_gc_count_before < gch->total_collections(),
assert(_gc_count_before < heap->total_collections(),
"total_collections() should be monotonically increasing");
return false; // no need for foreground young gc
}
@ -227,9 +228,9 @@ void VM_GenCollectFullConcurrent::doit_epilogue() {
// count overflows and wraps around. XXX fix me !!!
// e.g. at the rate of 1 full gc per ms, this could
// overflow in about 1000 years.
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSHeap* heap = CMSHeap::heap();
if (_gc_cause != GCCause::_gc_locker &&
gch->total_full_collections_completed() <= _full_gc_count_before) {
heap->total_full_collections_completed() <= _full_gc_count_before) {
// maybe we should change the condition to test _gc_cause ==
// GCCause::_java_lang_system_gc or GCCause::_dcmd_gc_run,
// instead of _gc_cause != GCCause::_gc_locker
@ -245,7 +246,7 @@ void VM_GenCollectFullConcurrent::doit_epilogue() {
MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
// Either a concurrent or a stop-world full gc is sufficient
// witness to our request.
while (gch->total_full_collections_completed() <= _full_gc_count_before) {
while (heap->total_full_collections_completed() <= _full_gc_count_before) {
FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
}
}

View File

@ -280,13 +280,13 @@ void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntr
BufferNode* nd = _cur_par_buffer_node;
while (nd != NULL) {
BufferNode* next = nd->next();
void* actual = Atomic::cmpxchg_ptr(next, &_cur_par_buffer_node, nd);
BufferNode* actual = Atomic::cmpxchg(next, &_cur_par_buffer_node, nd);
if (actual == nd) {
bool b = apply_closure_to_buffer(cl, nd, false);
guarantee(b, "Should not stop early.");
nd = next;
} else {
nd = static_cast<BufferNode*>(actual);
nd = actual;
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -155,19 +155,19 @@ G1CodeRootSet::~G1CodeRootSet() {
}
G1CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
return (G1CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
return OrderAccess::load_acquire(&_table);
}
void G1CodeRootSet::allocate_small_table() {
G1CodeRootSetTable* temp = new G1CodeRootSetTable(SmallSize);
OrderAccess::release_store_ptr(&_table, temp);
OrderAccess::release_store(&_table, temp);
}
void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
for (;;) {
table->_purge_next = _purge_list;
G1CodeRootSetTable* old = (G1CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
G1CodeRootSetTable* old = Atomic::cmpxchg(table, &_purge_list, table->_purge_next);
if (old == table->_purge_next) {
break;
}
@ -191,7 +191,7 @@ void G1CodeRootSet::move_to_large() {
G1CodeRootSetTable::purge_list_append(_table);
OrderAccess::release_store_ptr(&_table, temp);
OrderAccess::release_store(&_table, temp);
}
void G1CodeRootSet::purge() {
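For illustration (standalone sketch, not part of this patch): the load_ptr_acquire/release_store_ptr calls above become the typed load_acquire/release_store, the usual safe-publication pairing. The same idea in standard C++:

#include <atomic>

struct Table { int buckets[16]; };
std::atomic<Table*> g_table{nullptr};

void publish_table() {
  Table* t = new Table();                          // construct fully first
  g_table.store(t, std::memory_order_release);     // then publish
}

Table* load_table() {
  return g_table.load(std::memory_order_acquire);  // pairs with the release
}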

View File

@ -141,13 +141,6 @@ void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_region
reset_from_card_cache(start_idx, num_regions);
}
// Returns true if the reference points to an object that
// can move in an incremental collection.
bool G1CollectedHeap::is_scavengable(const void* p) {
HeapRegion* hr = heap_region_containing(p);
return !hr->is_pinned();
}
// Private methods.
HeapRegion*
@ -1849,6 +1842,14 @@ void G1CollectedHeap::stop() {
}
}
void G1CollectedHeap::safepoint_synchronize_begin() {
SuspendibleThreadSet::synchronize();
}
void G1CollectedHeap::safepoint_synchronize_end() {
SuspendibleThreadSet::desynchronize();
}
size_t G1CollectedHeap::conservative_max_heap_alignment() {
return HeapRegion::max_region_size();
}
@ -3458,10 +3459,10 @@ private:
// Variables used to claim nmethods.
CompiledMethod* _first_nmethod;
volatile CompiledMethod* _claimed_nmethod;
CompiledMethod* volatile _claimed_nmethod;
// The list of nmethods that need to be processed by the second pass.
volatile CompiledMethod* _postponed_list;
CompiledMethod* volatile _postponed_list;
volatile uint _num_entered_barrier;
public:
@ -3480,7 +3481,7 @@ private:
if(iter.next_alive()) {
_first_nmethod = iter.method();
}
_claimed_nmethod = (volatile CompiledMethod*)_first_nmethod;
_claimed_nmethod = _first_nmethod;
}
~G1CodeCacheUnloadingTask() {
@ -3496,9 +3497,9 @@ private:
void add_to_postponed_list(CompiledMethod* nm) {
CompiledMethod* old;
do {
old = (CompiledMethod*)_postponed_list;
old = _postponed_list;
nm->set_unloading_next(old);
} while ((CompiledMethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
} while (Atomic::cmpxchg(nm, &_postponed_list, old) != old);
}
void clean_nmethod(CompiledMethod* nm) {
@ -3527,7 +3528,7 @@ private:
do {
*num_claimed_nmethods = 0;
first = (CompiledMethod*)_claimed_nmethod;
first = _claimed_nmethod;
last = CompiledMethodIterator(first);
if (first != NULL) {
@ -3541,7 +3542,7 @@ private:
}
}
} while ((CompiledMethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
} while (Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first);
}
CompiledMethod* claim_postponed_nmethod() {
@ -3549,14 +3550,14 @@ private:
CompiledMethod* next;
do {
claim = (CompiledMethod*)_postponed_list;
claim = _postponed_list;
if (claim == NULL) {
return NULL;
}
next = claim->unloading_next();
} while ((CompiledMethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
} while (Atomic::cmpxchg(next, &_postponed_list, claim) != claim);
return claim;
}
@ -5323,17 +5324,20 @@ public:
void do_oop(narrowOop* p) { do_oop_work(p); }
};
void G1CollectedHeap::register_nmethod(nmethod* nm) {
CollectedHeap::register_nmethod(nm);
// Returns true if the reference points to an object that
// can move in an incremental collection.
bool G1CollectedHeap::is_scavengable(oop obj) {
HeapRegion* hr = heap_region_containing(obj);
return !hr->is_pinned();
}
void G1CollectedHeap::register_nmethod(nmethod* nm) {
guarantee(nm != NULL, "sanity");
RegisterNMethodOopClosure reg_cl(this, nm);
nm->oops_do(&reg_cl);
}
void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
CollectedHeap::unregister_nmethod(nm);
guarantee(nm != NULL, "sanity");
UnregisterNMethodOopClosure reg_cl(this, nm);
nm->oops_do(&reg_cl, true);
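A side note on the declaration change above (illustrative only): moving the qualifier from "volatile CompiledMethod*" to "CompiledMethod* volatile" makes the pointer itself volatile rather than the pointee, which is the shape the typed Atomic::cmpxchg expects:

struct CompiledMethodLike { int id; };

volatile CompiledMethodLike* p1;   // pointer to a volatile object;
                                   // the pointer variable itself is plain
CompiledMethodLike* volatile p2;   // volatile pointer to a plain object;
                                   // this is what atomic updates target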

View File

@ -968,6 +968,8 @@ public:
jint initialize();
virtual void stop();
virtual void safepoint_synchronize_begin();
virtual void safepoint_synchronize_end();
// Return the (conservative) maximum heap alignment for any G1 heap
static size_t conservative_max_heap_alignment();
@ -1282,8 +1284,6 @@ public:
inline bool is_in_young(const oop obj);
virtual bool is_scavengable(const void* addr);
// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
@ -1395,6 +1395,9 @@ public:
// Optimized nmethod scanning support routines
// Is an oop scavengeable
virtual bool is_scavengable(oop obj);
// Register the given nmethod with the G1 heap.
virtual void register_nmethod(nmethod* nm);

View File

@ -1870,7 +1870,7 @@ G1ConcurrentMark::claim_region(uint worker_id) {
HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
// Is the gap between reading the finger and doing the CAS too long?
HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
if (res == finger && curr_region != NULL) {
// we succeeded
HeapWord* bottom = curr_region->bottom();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,17 +29,17 @@
#include "runtime/atomic.hpp"
inline void G1EvacStats::add_direct_allocated(size_t value) {
Atomic::add_ptr(value, &_direct_allocated);
Atomic::add(value, &_direct_allocated);
}
inline void G1EvacStats::add_region_end_waste(size_t value) {
Atomic::add_ptr(value, &_region_end_waste);
Atomic::add_ptr(1, &_regions_filled);
Atomic::add(value, &_region_end_waste);
Atomic::inc(&_regions_filled);
}
inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) {
Atomic::add_ptr(used, &_failure_used);
Atomic::add_ptr(waste, &_failure_waste);
Atomic::add(used, &_failure_used);
Atomic::add(waste, &_failure_waste);
}
#endif // SHARE_VM_GC_G1_G1EVACSTATS_INLINE_HPP

View File

@ -74,9 +74,9 @@ jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
// card_ptr in favor of the other option, which would be starting over. This
// should be OK since card_ptr will likely be the older card already when/if
// this ever happens.
jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
&_hot_cache[masked_index],
current_ptr);
jbyte* previous_ptr = Atomic::cmpxchg(card_ptr,
&_hot_cache[masked_index],
current_ptr);
return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
}

View File

@ -251,7 +251,7 @@ public:
virtual void work(uint worker_id) {
size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
while (true) {
char* touch_addr = (char*)Atomic::add_ptr((intptr_t)actual_chunk_size, (volatile void*) &_cur_addr) - actual_chunk_size;
char* touch_addr = Atomic::add(actual_chunk_size, &_cur_addr) - actual_chunk_size;
if (touch_addr < _start_addr || touch_addr >= _end_addr) {
break;
}

View File

@ -203,12 +203,12 @@ G1StringDedupUnlinkOrOopsDoClosure::~G1StringDedupUnlinkOrOopsDoClosure() {
// Atomically claims the next available queue for exclusive access by
// the current thread. Returns the queue number of the claimed queue.
size_t G1StringDedupUnlinkOrOopsDoClosure::claim_queue() {
return (size_t)Atomic::add_ptr(1, &_next_queue) - 1;
return Atomic::add((size_t)1, &_next_queue) - 1;
}
// Atomically claims the next available table partition for exclusive
// access by the current thread. Returns the table bucket number where
// the claimed partition starts.
size_t G1StringDedupUnlinkOrOopsDoClosure::claim_table_partition(size_t partition_size) {
return (size_t)Atomic::add_ptr(partition_size, &_next_bucket) - partition_size;
return Atomic::add(partition_size, &_next_bucket) - partition_size;
}
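For illustration (standalone sketch, not part of this patch): claim_queue and claim_table_partition now use the typed Atomic::add, which returns the new value, hence the trailing subtraction above. The standard C++ fetch_add returns the value before the addition, so the equivalent sketch needs no subtraction:

#include <atomic>
#include <cstddef>

std::atomic<size_t> next_queue{0};

size_t claim_queue() {
  return next_queue.fetch_add(1);                // prior value is the claimed index
}

size_t claim_partition(size_t partition_size) {
  return next_queue.fetch_add(partition_size);   // start of the claimed range
}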

View File

@ -59,7 +59,7 @@ inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size,
size_t want_to_allocate = MIN2(available, desired_word_size);
if (want_to_allocate >= min_word_size) {
HeapWord* new_top = obj + want_to_allocate;
HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
// result can be one of two:
// the old top value: the exchange succeeded
// otherwise: the new value of the top is returned.

View File

@ -113,9 +113,7 @@ protected:
public:
HeapRegion* hr() const {
return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
}
HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); }
jint occupied() const {
// Overkill, but if we ever need it...
@ -133,7 +131,7 @@ public:
_bm.clear();
// Make sure that the bitmap clearing above has been finished before publishing
// this PRT to concurrent threads.
OrderAccess::release_store_ptr(&_hr, hr);
OrderAccess::release_store(&_hr, hr);
}
void add_reference(OopOrNarrowOopStar from) {
@ -182,7 +180,7 @@ public:
while (true) {
PerRegionTable* fl = _free_list;
last->set_next(fl);
PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
PerRegionTable* res = Atomic::cmpxchg(prt, &_free_list, fl);
if (res == fl) {
return;
}
@ -199,9 +197,7 @@ public:
PerRegionTable* fl = _free_list;
while (fl != NULL) {
PerRegionTable* nxt = fl->next();
PerRegionTable* res =
(PerRegionTable*)
Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
PerRegionTable* res = Atomic::cmpxchg(nxt, &_free_list, fl);
if (res == fl) {
fl->init(hr, true);
return fl;
@ -416,7 +412,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
// some mark bits may not yet seem cleared or a 'later' update
// performed by a concurrent thread could be undone when the
// zeroing becomes visible). This requires store ordering.
OrderAccess::release_store_ptr((volatile PerRegionTable*)&_fine_grain_regions[ind], prt);
OrderAccess::release_store(&_fine_grain_regions[ind], prt);
_n_fine_entries++;
if (G1HRRSUseSparseTable) {

View File

@ -292,9 +292,7 @@ void SparsePRT::add_to_expanded_list(SparsePRT* sprt) {
SparsePRT* hd = _head_expanded_list;
while (true) {
sprt->_next_expanded = hd;
SparsePRT* res =
(SparsePRT*)
Atomic::cmpxchg_ptr(sprt, &_head_expanded_list, hd);
SparsePRT* res = Atomic::cmpxchg(sprt, &_head_expanded_list, hd);
if (res == hd) return;
else hd = res;
}
@ -305,9 +303,7 @@ SparsePRT* SparsePRT::get_from_expanded_list() {
SparsePRT* hd = _head_expanded_list;
while (hd != NULL) {
SparsePRT* next = hd->next_expanded();
SparsePRT* res =
(SparsePRT*)
Atomic::cmpxchg_ptr(next, &_head_expanded_list, hd);
SparsePRT* res = Atomic::cmpxchg(next, &_head_expanded_list, hd);
if (res == hd) {
hd->set_next_expanded(NULL);
return hd;

View File

@ -77,8 +77,7 @@ GCTaskTimeStamp* GCTaskThread::time_stamp_at(uint index) {
if (_time_stamps == NULL) {
// We allocate the _time_stamps array lazily since logging can be enabled dynamically
GCTaskTimeStamp* time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
void* old = Atomic::cmpxchg_ptr(time_stamps, &_time_stamps, NULL);
if (old != NULL) {
if (Atomic::cmpxchg(time_stamps, &_time_stamps, (GCTaskTimeStamp*)NULL) != NULL) {
// Someone already set up the time stamps
FREE_C_HEAP_ARRAY(GCTaskTimeStamp, time_stamps);
}
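For illustration (standalone sketch, not part of this patch): the rewritten lazy allocation above is the standard race-and-discard idiom, where the losing thread frees its own allocation. In standard C++:

#include <atomic>

struct TimeStamps { long data[64]; };
std::atomic<TimeStamps*> g_stamps{nullptr};

TimeStamps* lazy_stamps() {
  TimeStamps* cur = g_stamps.load(std::memory_order_acquire);
  if (cur == nullptr) {
    TimeStamps* fresh = new TimeStamps();
    TimeStamps* expected = nullptr;
    if (g_stamps.compare_exchange_strong(expected, fresh)) {
      cur = fresh;       // this thread won the race
    } else {
      delete fresh;      // another thread installed its array first
      cur = expected;    // expected now holds the winner's pointer
    }
  }
  return cur;
}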

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -862,7 +862,7 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
if (p != NULL) {
HeapWord* cur_top, *cur_chunk_top = p + size;
while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
if (Atomic::cmpxchg(cur_chunk_top, top_addr(), cur_top) == cur_top) {
break;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -192,7 +192,7 @@ HeapWord* MutableSpace::cas_allocate(size_t size) {
HeapWord* obj = top();
if (pointer_delta(end(), obj) >= size) {
HeapWord* new_top = obj + size;
HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
// result can be one of two:
// the old top value: the exchange succeeded
// otherwise: the new value of the top is returned.
@ -211,7 +211,7 @@ HeapWord* MutableSpace::cas_allocate(size_t size) {
// Try to deallocate previous allocation. Returns true upon success.
bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
HeapWord* expected_top = obj + size;
return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top;
return Atomic::cmpxchg(obj, top_addr(), expected_top) == expected_top;
}
void MutableSpace::oop_iterate_no_header(OopClosure* cl) {
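For illustration (standalone sketch, not part of this patch): cas_allocate above is a CAS-based bump-pointer allocator; the typed Atomic::cmpxchg returns the old top on success and the current top otherwise, driving the retry loop. The same loop with standard C++ atomics:

#include <atomic>
#include <cstddef>

std::atomic<char*> top;
char* space_end;

char* cas_allocate(size_t size) {
  char* obj = top.load();
  while (obj != nullptr && (size_t)(space_end - obj) >= size) {
    char* new_top = obj + size;
    // On failure, obj is refreshed to the current top and we retry.
    if (top.compare_exchange_weak(obj, new_top)) {
      return obj;        // the claimed chunk starts at the old top
    }
  }
  return nullptr;        // not enough room left
}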

View File

@ -90,7 +90,7 @@ ParMarkBitMap::mark_obj(HeapWord* addr, size_t size)
bool end_bit_ok = _end_bits.par_set_bit(end_bit);
assert(end_bit_ok, "concurrency problem");
DEBUG_ONLY(Atomic::inc(&mark_bitmap_count));
DEBUG_ONLY(Atomic::add_ptr(size, &mark_bitmap_size));
DEBUG_ONLY(Atomic::add(size, &mark_bitmap_size));
return true;
}
return false;

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/cardTableExtension.hpp"
@ -169,10 +170,6 @@ bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}
bool ParallelScavengeHeap::is_scavengable(const void* addr) {
return is_in_young((oop)addr);
}
// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
@ -665,3 +662,15 @@ void ParallelScavengeHeap::gen_mangle_unused_area() {
}
}
#endif
bool ParallelScavengeHeap::is_scavengable(oop obj) {
return is_in_young(obj);
}
void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
CodeCache::register_scavenge_root_nmethod(nm);
}
void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
CodeCache::verify_scavenge_root_nmethod(nm);
}

View File

@ -134,7 +134,9 @@ class ParallelScavengeHeap : public CollectedHeap {
// can be moved in a partial collection. For currently implemented
// generational collectors that means during a collection of
// the young gen.
virtual bool is_scavengable(const void* addr);
virtual bool is_scavengable(oop obj);
virtual void register_nmethod(nmethod* nm);
virtual void verify_nmethod(nmethod* nmethod);
size_t max_capacity() const;

View File

@ -521,7 +521,7 @@ void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
DEBUG_ONLY(Atomic::inc(&add_obj_count);)
DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
DEBUG_ONLY(Atomic::add(len, &add_obj_size);)
if (beg_region == end_region) {
// All in one region.

View File

@ -586,7 +586,7 @@ inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
#ifdef ASSERT
HeapWord* tmp = _highest_ref;
while (addr > tmp) {
tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
tmp = Atomic::cmpxchg(addr, &_highest_ref, tmp);
}
#endif // #ifdef ASSERT
}

View File

@ -734,8 +734,11 @@ void DefNewGeneration::remove_forwarding_pointers() {
RemoveForwardedPointerClosure rspc;
eden()->object_iterate(&rspc);
from()->object_iterate(&rspc);
restore_preserved_marks();
}
SharedRestorePreservedMarksTaskExecutor task_executor(GenCollectedHeap::heap()->workers());
void DefNewGeneration::restore_preserved_marks() {
SharedRestorePreservedMarksTaskExecutor task_executor(NULL);
_preserved_marks_set.restore(&task_executor);
}
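For illustration (standalone sketch, not part of this patch): restore_preserved_marks is now a virtual hook, so the serial DefNewGeneration above restores with a NULL executor while ParNewGeneration overrides it to hand the work to the CMS heap's WorkGang. The shape of the refactor:

#include <cstdio>

struct GenerationLike {
  virtual ~GenerationLike() {}
  void remove_forwarding_pointers() {
    // ... iterate eden and from-space ...
    restore_preserved_marks();                    // virtual dispatch
  }
  virtual void restore_preserved_marks() {
    std::puts("serial restore, no workers");      // DefNewGeneration case
  }
};

struct ParNewLike : GenerationLike {
  void restore_preserved_marks() override {
    std::puts("parallel restore via WorkGang");   // ParNewGeneration case
  }
};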

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -89,6 +89,8 @@ protected:
// therefore we must remove their forwarding pointers.
void remove_forwarding_pointers();
virtual void restore_preserved_marks();
// Preserved marks
PreservedMarksSet _preserved_marks_set;

View File

@ -135,14 +135,6 @@ void CollectedHeap::print_on_error(outputStream* st) const {
_barrier_set->print_on(st);
}
void CollectedHeap::register_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
}
void CollectedHeap::unregister_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
}
void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
const GCHeapSummary& heap_summary = create_heap_summary();
gc_tracer->report_gc_heap_summary(when, heap_summary);

View File

@ -83,6 +83,7 @@ class GCHeapLog : public EventLogBase<GCMessage> {
// GenCollectedHeap
// G1CollectedHeap
// ParallelScavengeHeap
// CMSHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
friend class VMStructs;
@ -194,7 +195,8 @@ class CollectedHeap : public CHeapObj<mtInternal> {
enum Name {
GenCollectedHeap,
ParallelScavengeHeap,
G1CollectedHeap
G1CollectedHeap,
CMSHeap
};
static inline size_t filler_array_max_size() {
@ -219,6 +221,10 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// Stop any ongoing concurrent work and prepare for exit.
virtual void stop() {}
// Stop and resume concurrent GC threads interfering with safepoint operations
virtual void safepoint_synchronize_begin() {}
virtual void safepoint_synchronize_end() {}
void initialize_reserved_region(HeapWord *start, HeapWord *end);
MemRegion reserved_region() const { return _reserved; }
address base() const { return (address)reserved_region().start(); }
@ -287,10 +293,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
return p == NULL || is_in_closed_subset(p);
}
// An object is scavengable if its location may move during a scavenge.
// (A scavenge is a GC which is not a full GC.)
virtual bool is_scavengable(const void *p) = 0;
void set_gc_cause(GCCause::Cause v) {
if (UsePerfData) {
_gc_lastcause = _gc_cause;
@ -568,10 +570,14 @@ class CollectedHeap : public CHeapObj<mtInternal> {
void print_heap_before_gc();
void print_heap_after_gc();
// An object is scavengable if its location may move during a scavenge.
// (A scavenge is a GC which is not a full GC.)
virtual bool is_scavengable(oop obj) = 0;
// Registering and unregistering an nmethod (compiled code) with the heap.
// Override with specific mechanism for each specialized heap type.
virtual void register_nmethod(nmethod* nm);
virtual void unregister_nmethod(nmethod* nm);
virtual void register_nmethod(nmethod* nm) {}
virtual void unregister_nmethod(nmethod* nm) {}
virtual void verify_nmethod(nmethod* nmethod) {}
void trace_heap_before_gc(const GCTracer* gc_tracer);
void trace_heap_after_gc(const GCTracer* gc_tracer);

View File

@ -58,28 +58,6 @@
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
// The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
GCH_PS_Universe_oops_do,
GCH_PS_JNIHandles_oops_do,
GCH_PS_ObjectSynchronizer_oops_do,
GCH_PS_Management_oops_do,
GCH_PS_SystemDictionary_oops_do,
GCH_PS_ClassLoaderDataGraph_oops_do,
GCH_PS_jvmti_oops_do,
GCH_PS_CodeCache_oops_do,
GCH_PS_aot_oops_do,
GCH_PS_younger_gens,
// Leave this one last.
GCH_PS_NumElements
};
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
CollectedHeap(),
@ -89,15 +67,6 @@ GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
_full_collections_completed(0)
{
assert(policy != NULL, "Sanity check");
if (UseConcMarkSweepGC) {
_workers = new WorkGang("GC Thread", ParallelGCThreads,
/* are_GC_task_threads */true,
/* are_ConcurrentGC_threads */false);
_workers->initialize_workers();
} else {
// Serial GC does not use workers.
_workers = NULL;
}
}
jint GenCollectedHeap::initialize() {
@ -138,15 +107,6 @@ jint GenCollectedHeap::initialize() {
_old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
clear_incremental_collection_failed();
#if INCLUDE_ALL_GCS
// If we are running CMS, create the collector responsible
// for collecting the CMS generations.
if (collector_policy()->is_concurrent_mark_sweep_policy()) {
bool success = create_cms_collector();
if (!success) return JNI_ENOMEM;
}
#endif // INCLUDE_ALL_GCS
return JNI_OK;
}
@ -183,21 +143,22 @@ char* GenCollectedHeap::allocate(size_t alignment,
void GenCollectedHeap::post_initialize() {
ref_processing_init();
assert((_young_gen->kind() == Generation::DefNew) ||
(_young_gen->kind() == Generation::ParNew),
"Wrong youngest generation type");
check_gen_kinds();
DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
_old_gen->kind() == Generation::MarkSweepCompact,
"Wrong generation kind");
_gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
_old_gen->capacity(),
def_new_gen->from()->capacity());
_gen_policy->initialize_gc_policy_counters();
}
void GenCollectedHeap::check_gen_kinds() {
assert(young_gen()->kind() == Generation::DefNew,
"Wrong youngest generation type");
assert(old_gen()->kind() == Generation::MarkSweepCompact,
"Wrong generation kind");
}
void GenCollectedHeap::ref_processing_init() {
_young_gen->ref_processor_init();
_old_gen->ref_processor_init();
@ -309,19 +270,6 @@ bool GenCollectedHeap::must_clear_all_soft_refs() {
_gc_cause == GCCause::_wb_full_gc;
}
bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
if (!UseConcMarkSweepGC) {
return false;
}
switch (cause) {
case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
case GCCause::_java_lang_system_gc:
case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
default: return false;
}
}
void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
bool is_tlab, bool run_verification, bool clear_soft_refs,
bool restore_marks_for_biased_locking) {
@ -553,6 +501,14 @@ void GenCollectedHeap::do_collection(bool full,
#endif
}
void GenCollectedHeap::register_nmethod(nmethod* nm) {
CodeCache::register_scavenge_root_nmethod(nm);
}
void GenCollectedHeap::verify_nmethod(nmethod* nm) {
CodeCache::verify_scavenge_root_nmethod(nm);
}
HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
return gen_policy()->satisfy_failed_allocation(size, is_tlab);
}
@ -674,31 +630,6 @@ void GenCollectedHeap::young_process_roots(StrongRootsScope* scope,
_process_strong_tasks->all_tasks_completed(scope->n_threads());
}
void GenCollectedHeap::cms_process_roots(StrongRootsScope* scope,
bool young_gen_as_roots,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* root_closure,
CLDClosure* cld_closure) {
MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
if (!only_strong_roots) {
process_string_table_roots(scope, root_closure);
}
if (young_gen_as_roots &&
!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
root_closure->set_generation(_young_gen);
_young_gen->oop_iterate(root_closure);
root_closure->reset_generation();
}
_process_strong_tasks->all_tasks_completed(scope->n_threads());
}
void GenCollectedHeap::full_process_roots(StrongRootsScope* scope,
bool is_adjust_phase,
ScanningOption so,
@ -763,14 +694,7 @@ HeapWord** GenCollectedHeap::end_addr() const {
// public collection interfaces
void GenCollectedHeap::collect(GCCause::Cause cause) {
if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
// Mostly concurrent full collection.
collect_mostly_concurrent(cause);
#else // INCLUDE_ALL_GCS
ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
} else if (cause == GCCause::_wb_young_gc) {
if (cause == GCCause::_wb_young_gc) {
// Young collection for the WhiteBox API.
collect(cause, YoungGen);
} else {
@ -817,44 +741,6 @@ void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_g
}
}
#if INCLUDE_ALL_GCS
bool GenCollectedHeap::create_cms_collector() {
assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
"Unexpected generation kinds");
// Skip two header words in the block content verification
NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
assert(_gen_policy->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
CMSCollector* collector =
new CMSCollector((ConcurrentMarkSweepGeneration*)_old_gen,
_rem_set,
_gen_policy->as_concurrent_mark_sweep_policy());
if (collector == NULL || !collector->completed_initialization()) {
if (collector) {
delete collector; // Be nice in embedded situation
}
vm_shutdown_during_initialization("Could not create CMS collector");
return false;
}
return true; // success
}
void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
MutexLocker ml(Heap_lock);
// Read the GC counts while holding the Heap_lock
unsigned int full_gc_count_before = total_full_collections();
unsigned int gc_count_before = total_collections();
{
MutexUnlocker mu(Heap_lock);
VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
VMThread::execute(&op);
}
}
#endif // INCLUDE_ALL_GCS
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
do_full_collection(clear_all_soft_refs, OldGen);
}
@ -1097,8 +983,9 @@ void GenCollectedHeap::save_marks() {
GenCollectedHeap* GenCollectedHeap::heap() {
CollectedHeap* heap = Universe::heap();
assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
return (GenCollectedHeap*)heap;
assert(heap->kind() == CollectedHeap::GenCollectedHeap ||
heap->kind() == CollectedHeap::CMSHeap, "Not a GenCollectedHeap");
return (GenCollectedHeap*) heap;
}
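For illustration (standalone sketch, not part of this patch): with CMSHeap split out as a subclass, the heap() accessor above accepts either kind tag before the downcast, which stays safe because CMSHeap is-a GenCollectedHeap:

#include <cassert>

enum Kind { SerialHeapKind, CMSHeapKind };

struct HeapLike    { Kind kind; };
struct GenHeapLike : HeapLike {};
struct CMSHeapLike : GenHeapLike {};   // CMSHeap is-a GenCollectedHeap

GenHeapLike* as_gen_heap(HeapLike* h) {
  // Either tag is acceptable; the static downcast is then well-defined.
  assert(h->kind == SerialHeapKind || h->kind == CMSHeapKind);
  return static_cast<GenHeapLike*>(h);
}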
void GenCollectedHeap::prepare_for_compaction() {
@ -1126,34 +1013,9 @@ void GenCollectedHeap::print_on(outputStream* st) const {
}
void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
if (workers() != NULL) {
workers()->threads_do(tc);
}
#if INCLUDE_ALL_GCS
if (UseConcMarkSweepGC) {
ConcurrentMarkSweepThread::threads_do(tc);
}
#endif // INCLUDE_ALL_GCS
}
void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
if (UseConcMarkSweepGC) {
workers()->print_worker_threads_on(st);
ConcurrentMarkSweepThread::print_all_on(st);
}
#endif // INCLUDE_ALL_GCS
}
void GenCollectedHeap::print_on_error(outputStream* st) const {
this->CollectedHeap::print_on_error(st);
#if INCLUDE_ALL_GCS
if (UseConcMarkSweepGC) {
st->cr();
CMSCollector::print_on_error(st);
}
#endif // INCLUDE_ALL_GCS
}
void GenCollectedHeap::print_tracing_info() const {
@ -1184,7 +1046,6 @@ class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
void GenCollectedHeap::gc_prologue(bool full) {
assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
always_do_update_barrier = false;
// Fill TLAB's and such
CollectedHeap::accumulate_statistics_all_tlabs();
ensure_parsability(true); // retire TLABs
@ -1222,8 +1083,6 @@ void GenCollectedHeap::gc_epilogue(bool full) {
MetaspaceCounters::update_performance_counters();
CompressedClassSpaceCounters::update_performance_counters();
always_do_update_barrier = UseConcMarkSweepGC;
};
#ifndef PRODUCT
@ -1304,11 +1163,3 @@ jlong GenCollectedHeap::millis_since_last_gc() {
}
return retVal;
}
void GenCollectedHeap::stop() {
#if INCLUDE_ALL_GCS
if (UseConcMarkSweepGC) {
ConcurrentMarkSweepThread::cmst()->stop();
}
#endif
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,21 +78,34 @@ private:
// In support of ExplicitGCInvokesConcurrent functionality
unsigned int _full_collections_completed;
// Data structure for claiming the (potentially) parallel tasks in
// (gen-specific) roots processing.
SubTasksDone* _process_strong_tasks;
// Collects the given generation.
void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
bool run_verification, bool clear_soft_refs,
bool restore_marks_for_biased_locking);
// In block contents verification, the number of header words to skip
NOT_PRODUCT(static size_t _skip_header_HeapWords;)
WorkGang* _workers;
protected:
// The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
GCH_PS_Universe_oops_do,
GCH_PS_JNIHandles_oops_do,
GCH_PS_ObjectSynchronizer_oops_do,
GCH_PS_FlatProfiler_oops_do,
GCH_PS_Management_oops_do,
GCH_PS_SystemDictionary_oops_do,
GCH_PS_ClassLoaderDataGraph_oops_do,
GCH_PS_jvmti_oops_do,
GCH_PS_CodeCache_oops_do,
GCH_PS_aot_oops_do,
GCH_PS_younger_gens,
// Leave this one last.
GCH_PS_NumElements
};
// Data structure for claiming the (potentially) parallel tasks in
// (gen-specific) roots processing.
SubTasksDone* _process_strong_tasks;
// Helper functions for allocation
HeapWord* attempt_allocation(size_t size,
bool is_tlab,
@ -124,8 +137,6 @@ protected:
public:
GenCollectedHeap(GenCollectorPolicy *policy);
WorkGang* workers() const { return _workers; }
// Returns JNI_OK on success
virtual jint initialize();
@ -135,6 +146,8 @@ public:
// Does operations required after initialization has been done.
void post_initialize();
virtual void check_gen_kinds();
// Initialize ("weak") refs processing support
virtual void ref_processing_init();
@ -143,11 +156,7 @@ public:
}
virtual const char* name() const {
if (UseConcMarkSweepGC) {
return "Concurrent Mark Sweep";
} else {
return "Serial";
}
return "Serial";
}
Generation* young_gen() const { return _young_gen; }
@ -190,7 +199,7 @@ public:
// Perform a full collection of the heap; intended for use in implementing
// "System.gc". This implies as full a collection as the CollectedHeap
// supports. Caller does not hold the Heap_lock on entry.
void collect(GCCause::Cause cause);
virtual void collect(GCCause::Cause cause);
// The same as above but assume that the caller holds the Heap_lock.
void collect_locked(GCCause::Cause cause);
@ -207,12 +216,8 @@ public:
bool is_in(const void* p) const;
// override
bool is_in_closed_subset(const void* p) const {
if (UseConcMarkSweepGC) {
return is_in_reserved(p);
} else {
return is_in(p);
}
virtual bool is_in_closed_subset(const void* p) const {
return is_in(p);
}
// Returns true if the reference is to an object in the reserved space
@ -224,10 +229,14 @@ public:
bool is_in_partial_collection(const void* p);
#endif
virtual bool is_scavengable(const void* addr) {
return is_in_young((oop)addr);
virtual bool is_scavengable(oop obj) {
return is_in_young(obj);
}
// Optimized nmethod scanning support routines
virtual void register_nmethod(nmethod* nm);
virtual void verify_nmethod(nmethod* nmethod);
// Iteration functions.
void oop_iterate_no_header(OopClosure* cl);
void oop_iterate(ExtendedOopClosure* cl);
@ -278,7 +287,7 @@ public:
}
virtual bool card_mark_must_follow_store() const {
return UseConcMarkSweepGC;
return false;
}
// We don't need barriers for stores to objects in the
@ -344,7 +353,6 @@ public:
virtual void print_gc_threads_on(outputStream* st) const;
virtual void gc_threads_do(ThreadClosure* tc) const;
virtual void print_tracing_info() const;
virtual void print_on_error(outputStream* st) const;
void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;
@ -383,7 +391,7 @@ public:
SO_ScavengeCodeCache = 0x10
};
private:
protected:
void process_roots(StrongRootsScope* scope,
ScanningOption so,
OopClosure* strong_roots,
@ -395,24 +403,20 @@ public:
void process_string_table_roots(StrongRootsScope* scope,
OopClosure* root_closure);
// Accessor for memory state verification support
NOT_PRODUCT(
virtual size_t skip_header_HeapWords() { return 0; }
)
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
public:
void young_process_roots(StrongRootsScope* scope,
OopsInGenClosure* root_closure,
OopsInGenClosure* old_gen_closure,
CLDClosure* cld_closure);
// If "young_gen_as_roots" is false, younger generations are
// not scanned as roots; in this case, the caller must be arranging to
// scan the younger generations itself. (For example, a generation might
// explicitly mark reachable objects in younger generations, to avoid
// excess storage retention.)
void cms_process_roots(StrongRootsScope* scope,
bool young_gen_as_roots,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* root_closure,
CLDClosure* cld_closure);
void full_process_roots(StrongRootsScope* scope,
bool is_adjust_phase,
ScanningOption so,
@ -479,12 +483,8 @@ public:
oop obj,
size_t obj_size);
private:
// Accessor for memory state verification support
NOT_PRODUCT(
static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
)
private:
// Override
void check_for_non_bad_heap_word_value(HeapWord* addr,
size_t size) PRODUCT_RETURN;
@ -499,22 +499,8 @@ private:
// collect() and collect_locked(). Caller holds the Heap_lock on entry.
void collect_locked(GCCause::Cause cause, GenerationType max_generation);
// Returns success or failure.
bool create_cms_collector();
// In support of ExplicitGCInvokesConcurrent functionality
bool should_do_concurrent_full_gc(GCCause::Cause cause);
void collect_mostly_concurrent(GCCause::Cause cause);
// Save the tops of the spaces in all generations
void record_gen_tops_before_GC() PRODUCT_RETURN;
protected:
void gc_prologue(bool full);
void gc_epilogue(bool full);
public:
void stop();
};
#endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,19 +43,19 @@ inline HeapWord* PLAB::allocate_aligned(size_t word_sz, unsigned short alignment
}
void PLABStats::add_allocated(size_t v) {
Atomic::add_ptr(v, &_allocated);
Atomic::add(v, &_allocated);
}
void PLABStats::add_unused(size_t v) {
Atomic::add_ptr(v, &_unused);
Atomic::add(v, &_unused);
}
void PLABStats::add_wasted(size_t v) {
Atomic::add_ptr(v, &_wasted);
Atomic::add(v, &_wasted);
}
void PLABStats::add_undo_wasted(size_t v) {
Atomic::add_ptr(v, &_undo_wasted);
Atomic::add(v, &_undo_wasted);
}
#endif // SHARE_VM_GC_SHARED_PLAB_INLINE_HPP

View File

@ -631,7 +631,7 @@ inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
HeapWord* obj = top();
if (pointer_delta(end(), obj) >= size) {
HeapWord* new_top = obj + size;
HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
// result can be one of two:
// the old top value: the exchange succeeded
// otherwise: the new value of the top is returned.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -259,9 +259,7 @@ GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
template <unsigned int N, MEMFLAGS F>
inline typename TaskQueueSuper<N, F>::Age TaskQueueSuper<N, F>::Age::cmpxchg(const Age new_age, const Age old_age) volatile {
return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
(volatile intptr_t *)&_data,
(intptr_t)old_age._data);
return Atomic::cmpxchg(new_age._data, &_data, old_age._data);
}
template<class E, MEMFLAGS F, unsigned int N>
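The old cmpxchg_ptr call needed three casts through intptr_t; the templated Atomic::cmpxchg deduces a single type for the exchange value, the destination, and the compare value. A rough standalone model of that shape, built on std::atomic (the hotspot version operates on raw memory with platform-specific backends, so this is only an analogue):

#include <atomic>

// Returns the value observed at *dest; the swap happened iff the return
// value equals `compare` -- the contract Age::cmpxchg's caller relies on.
template <typename T>
T cmpxchg(T exchange, std::atomic<T>* dest, T compare) {
  dest->compare_exchange_strong(compare, exchange);
  return compare;  // holds the observed value on failure, `compare` on success
}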


@ -705,7 +705,7 @@ BytecodeInterpreter::run(interpreterState istate) {
if (hash != markOopDesc::no_hash) {
header = header->copy_set_hash(hash);
}
if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
if (Atomic::cmpxchg(header, rcvr->mark_addr(), mark) == mark) {
if (PrintBiasedLockingStatistics)
(*BiasedLocking::revoked_lock_entry_count_addr())++;
}
@ -715,7 +715,7 @@ BytecodeInterpreter::run(interpreterState istate) {
if (hash != markOopDesc::no_hash) {
new_header = new_header->copy_set_hash(hash);
}
if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), mark) == mark) {
if (PrintBiasedLockingStatistics) {
(* BiasedLocking::rebiased_lock_entry_count_addr())++;
}
@ -734,7 +734,7 @@ BytecodeInterpreter::run(interpreterState istate) {
markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
// Debugging hint.
DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), header) == header) {
if (PrintBiasedLockingStatistics) {
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
}
@ -750,7 +750,7 @@ BytecodeInterpreter::run(interpreterState istate) {
markOop displaced = rcvr->mark()->set_unlocked();
mon->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors;
if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
if (call_vm || Atomic::cmpxchg((markOop)mon, rcvr->mark_addr(), displaced) != displaced) {
// Is it the simple recursive case?
if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
mon->lock()->set_displaced_header(NULL);
@ -903,7 +903,7 @@ BytecodeInterpreter::run(interpreterState istate) {
if (hash != markOopDesc::no_hash) {
header = header->copy_set_hash(hash);
}
if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
if (PrintBiasedLockingStatistics) {
(*BiasedLocking::revoked_lock_entry_count_addr())++;
}
@ -914,7 +914,7 @@ BytecodeInterpreter::run(interpreterState istate) {
if (hash != markOopDesc::no_hash) {
new_header = new_header->copy_set_hash(hash);
}
if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
if (Atomic::cmpxchg(new_header, lockee->mark_addr(), mark) == mark) {
if (PrintBiasedLockingStatistics) {
(* BiasedLocking::rebiased_lock_entry_count_addr())++;
}
@ -932,7 +932,7 @@ BytecodeInterpreter::run(interpreterState istate) {
markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
// debugging hint
DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) {
if (PrintBiasedLockingStatistics) {
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
}
@ -948,7 +948,7 @@ BytecodeInterpreter::run(interpreterState istate) {
markOop displaced = lockee->mark()->set_unlocked();
entry->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors;
if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) {
// Is it the simple recursive case?
if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
entry->lock()->set_displaced_header(NULL);
@ -1844,7 +1844,7 @@ run:
if (hash != markOopDesc::no_hash) {
header = header->copy_set_hash(hash);
}
if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
if (PrintBiasedLockingStatistics)
(*BiasedLocking::revoked_lock_entry_count_addr())++;
}
@ -1855,7 +1855,7 @@ run:
if (hash != markOopDesc::no_hash) {
new_header = new_header->copy_set_hash(hash);
}
if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
if (Atomic::cmpxchg(new_header, lockee->mark_addr(), mark) == mark) {
if (PrintBiasedLockingStatistics)
(* BiasedLocking::rebiased_lock_entry_count_addr())++;
}
@ -1875,7 +1875,7 @@ run:
markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
// debugging hint
DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) {
if (PrintBiasedLockingStatistics)
(* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
}
@ -1891,7 +1891,7 @@ run:
markOop displaced = lockee->mark()->set_unlocked();
entry->lock()->set_displaced_header(displaced);
bool call_vm = UseHeavyMonitors;
if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) {
// Is it the simple recursive case?
if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
entry->lock()->set_displaced_header(NULL);
@ -1923,7 +1923,8 @@ run:
bool call_vm = UseHeavyMonitors;
// If it isn't recursive, we must either swap the old header back or call the runtime
if (header != NULL || call_vm) {
if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
markOop old_header = markOopDesc::encode(lock);
if (call_vm || lockee->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
most_recent->set_obj(lockee);
CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
@ -2189,7 +2190,7 @@ run:
HeapWord* compare_to = *Universe::heap()->top_addr();
HeapWord* new_top = compare_to + obj_size;
if (new_top <= *Universe::heap()->end_addr()) {
if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
if (Atomic::cmpxchg(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
goto retry;
}
result = (oop) compare_to;
@ -2975,7 +2976,8 @@ run:
if (!lockee->mark()->has_bias_pattern()) {
// If it isn't recursive, we must either swap the old header back or call the runtime
if (header != NULL) {
if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
markOop old_header = markOopDesc::encode(lock);
if (lockee->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
end->set_obj(lockee);
{
@ -3050,7 +3052,8 @@ run:
base->set_obj(NULL);
// If it isn't recursive, we must either swap the old header back or call the runtime
if (header != NULL) {
if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
markOop old_header = markOopDesc::encode(lock);
if (rcvr->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
base->set_obj(rcvr);
{
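These monitorexit paths all follow one shape: monitorenter replaced the object's mark word with a pointer to a stack lock, so exit must CAS the displaced header back, but only while the mark still encodes this lock (markOopDesc::encode supplies that encoded value, which is what the new old_header variable holds). A much-simplified standalone analogue, assuming a raw uintptr_t mark word and illustrative names:

#include <atomic>
#include <cstdint>

struct BasicLock { uintptr_t displaced_header; };

// Fast-path unlock: restore the displaced header iff the mark word still
// points at our stack lock; any other value (inflation, a concurrent hash
// install) means the caller must take the runtime slow path instead.
bool fast_unlock(std::atomic<uintptr_t>& mark, BasicLock* lock) {
  uintptr_t expected = reinterpret_cast<uintptr_t>(lock);  // encode(lock) analogue
  return mark.compare_exchange_strong(expected, lock->displaced_header);
}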


@ -448,11 +448,11 @@ OopMapCache::~OopMapCache() {
}
OopMapCacheEntry* OopMapCache::entry_at(int i) const {
return (OopMapCacheEntry*)OrderAccess::load_ptr_acquire(&(_array[i % _size]));
return OrderAccess::load_acquire(&(_array[i % _size]));
}
bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
return Atomic::cmpxchg_ptr (entry, &_array[i % _size], old) == old;
return Atomic::cmpxchg(entry, &_array[i % _size], old) == old;
}
void OopMapCache::flush() {
@ -564,7 +564,7 @@ void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
do {
head = _old_entries;
entry->_next = head;
success = Atomic::cmpxchg_ptr (entry, &_old_entries, head) == head;
success = Atomic::cmpxchg(entry, &_old_entries, head) == head;
} while (!success);
if (log_is_enabled(Debug, interpreter, oopmap)) {
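enqueue_for_cleanup is a textbook lock-free stack push: link the new entry to the current head, then CAS the head to the entry, retrying on contention. The same loop in standalone form (std::atomic assumed; Entry is an illustrative stand-in for OopMapCacheEntry):

#include <atomic>

struct Entry { Entry* next; };

// Point the new entry at the current head, then CAS the head forward;
// on failure old_head is refreshed and the link is redone.
void push(std::atomic<Entry*>& head, Entry* e) {
  Entry* old_head = head.load(std::memory_order_relaxed);
  do {
    e->next = old_head;
  } while (!head.compare_exchange_weak(old_head, e,
                                       std::memory_order_release,
                                       std::memory_order_relaxed));
}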


@ -1499,7 +1499,7 @@ size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
}
size_t MetaspaceGC::capacity_until_GC() {
size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
assert(value >= MetaspaceSize, "Not initialized properly?");
return value;
}
@ -1507,16 +1507,16 @@ size_t MetaspaceGC::capacity_until_GC() {
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
assert_is_aligned(v, Metaspace::commit_alignment());
size_t capacity_until_GC = (size_t) _capacity_until_GC;
size_t new_value = capacity_until_GC + v;
intptr_t capacity_until_GC = _capacity_until_GC;
intptr_t new_value = capacity_until_GC + v;
if (new_value < capacity_until_GC) {
// The addition wrapped around; clamp new_value to the aligned maximum value.
new_value = align_down(max_uintx, Metaspace::commit_alignment());
}
intptr_t expected = (intptr_t) capacity_until_GC;
intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
intptr_t expected = _capacity_until_GC;
intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
if (expected != actual) {
return false;
@ -1534,7 +1534,7 @@ bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size
size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
assert_is_aligned(v, Metaspace::commit_alignment());
return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
}
void MetaspaceGC::initialize() {
@ -2398,7 +2398,7 @@ void SpaceManager::inc_size_metrics(size_t words) {
void SpaceManager::inc_used_metrics(size_t words) {
// Add to the per SpaceManager total
Atomic::add_ptr(words, &_allocated_blocks_words);
Atomic::add(words, &_allocated_blocks_words);
// Add to the global total
MetaspaceAux::inc_used(mdtype(), words);
}
@ -2753,8 +2753,7 @@ void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
// sweep which is a concurrent phase. Protection by the expand_lock()
// is not enough since allocation is on a per Metaspace basis
// and protected by the Metaspace lock.
jlong minus_words = (jlong) - (jlong) words;
Atomic::add_ptr(minus_words, &_used_words[mdtype]);
Atomic::sub(words, &_used_words[mdtype]);
}
void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
@ -2762,7 +2761,7 @@ void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
// each piece of metadata. Those allocations are
// generally done concurrently by different application
// threads so must be done atomically.
Atomic::add_ptr(words, &_used_words[mdtype]);
Atomic::add(words, &_used_words[mdtype]);
}
size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
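inc_capacity_until_GC combines an overflow check with a single CAS: compute the proposed capacity, clamp it if the signed addition wrapped, then publish only if no other thread raced in between. A standalone sketch under those assumptions (names are illustrative, std::atomic stands in for the raw _capacity_until_GC field):

#include <atomic>
#include <cstdint>

// Returns true iff this thread installed the new capacity; false means a
// concurrent update won and the caller should re-read and decide again.
bool inc_capacity(std::atomic<intptr_t>& capacity, intptr_t v, intptr_t max_value) {
  intptr_t expected = capacity.load(std::memory_order_relaxed);
  intptr_t desired  = expected + v;
  if (desired < expected) {  // the signed addition wrapped around
    desired = max_value;     // clamp, as the hotspot code aligns down max_uintx
  }
  return capacity.compare_exchange_strong(expected, desired);
}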


@ -84,6 +84,7 @@
#include "utilities/preserveException.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
@ -536,7 +537,7 @@ bool Universe::has_reference_pending_list() {
oop Universe::swap_reference_pending_list(oop list) {
assert_pll_locked(is_locked);
return (oop)Atomic::xchg_ptr(list, &_reference_pending_list);
return Atomic::xchg(list, &_reference_pending_list);
}
#undef assert_pll_locked
@ -758,7 +759,7 @@ CollectedHeap* Universe::create_heap() {
} else if (UseG1GC) {
return Universe::create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
} else if (UseConcMarkSweepGC) {
return Universe::create_heap_with_policy<GenCollectedHeap, ConcurrentMarkSweepPolicy>();
return Universe::create_heap_with_policy<CMSHeap, ConcurrentMarkSweepPolicy>();
#endif
} else if (UseSerialGC) {
return Universe::create_heap_with_policy<GenCollectedHeap, MarkSweepPolicy>();
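swap_reference_pending_list needs no compare: it hands over the VM's pending-reference list in one unconditional atomic exchange, so the old head comes back and no reference can slip in between a separate read and write. Reduced to its core (std::atomic assumed; T is an illustrative placeholder for oop):

#include <atomic>

// Publish a new list head and receive the previous one atomically --
// the essence of the Atomic::xchg call on the pending-reference list.
template <typename T>
T* swap_list_head(std::atomic<T*>& head, T* new_list) {
  return head.exchange(new_list);
}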


@ -44,7 +44,7 @@
// T is an integral type, and is the value_type.
// v is an integral constant, and is the value.
template<typename T, T v>
struct IntegralConstant : AllStatic {
struct IntegralConstant VALUE_OBJ_CLASS_SPEC {
typedef T value_type;
static const value_type value = v;
typedef IntegralConstant<T, v> type;
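Changing the base from AllStatic to VALUE_OBJ_CLASS_SPEC makes IntegralConstant instantiable as a value, matching how std::integral_constant behaves. The pattern in standalone C++ (TrueType and FalseType are added here purely for illustration):

template <typename T, T v>
struct IntegralConstant {
  typedef T value_type;
  static const value_type value = v;
  typedef IntegralConstant<T, v> type;
};

// Tag types for compile-time dispatch, the usual consumers of this metafunction.
typedef IntegralConstant<bool, true>  TrueType;
typedef IntegralConstant<bool, false> FalseType;

static_assert(TrueType::value && !FalseType::value, "compile-time constants");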


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,11 +29,11 @@
#include "oops/arrayKlass.hpp"
inline Klass* ArrayKlass::higher_dimension_acquire() const {
return (Klass*) OrderAccess::load_ptr_acquire(&_higher_dimension);
return OrderAccess::load_acquire(&_higher_dimension);
}
inline void ArrayKlass::release_set_higher_dimension(Klass* k) {
OrderAccess::release_store_ptr(&_higher_dimension, k);
OrderAccess::release_store(&_higher_dimension, k);
}
#endif // SHARE_VM_OOPS_ARRAYKLASS_INLINE_HPP
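load_ptr_acquire/release_store_ptr becoming the typed load_acquire/release_store is the standard safe-publication pairing: the writer fully initializes the Klass, then release-stores the pointer; any reader that acquire-loads a non-NULL pointer may safely dereference it. A standalone analogue with std::atomic (class and field names are illustrative; the hotspot field is a volatile raw pointer, not a std::atomic). The same pairing underlies the ConstantPool klass-slot stores below.

#include <atomic>

struct Klass;

class ArrayKlassLike {
  std::atomic<Klass*> _higher_dimension{nullptr};
 public:
  // Reader: acquire guarantees the Klass' initialization is visible
  // before any access through the returned pointer.
  Klass* higher_dimension_acquire() const {
    return _higher_dimension.load(std::memory_order_acquire);
  }
  // Writer: release orders all prior initializing stores before the publish.
  void release_set_higher_dimension(Klass* k) {
    _higher_dimension.store(k, std::memory_order_release);
  }
};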


@ -226,7 +226,7 @@ void ConstantPool::klass_at_put(int class_index, int name_index, int resolved_kl
symbol_at_put(name_index, name);
name->increment_refcount();
Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
OrderAccess::release_store_ptr((Klass* volatile *)adr, k);
OrderAccess::release_store(adr, k);
// The interpreter assumes that when the tag is stored, the klass is resolved
// and the Klass* is non-NULL, so we need hardware store ordering here.
@ -243,7 +243,7 @@ void ConstantPool::klass_at_put(int class_index, Klass* k) {
CPKlassSlot kslot = klass_slot_at(class_index);
int resolved_klass_index = kslot.resolved_klass_index();
Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
OrderAccess::release_store_ptr((Klass* volatile *)adr, k);
OrderAccess::release_store(adr, k);
// The interpreter assumes that when the tag is stored, the klass is resolved
// and the Klass* is non-NULL, so we need hardware store ordering here.
@ -511,7 +511,7 @@ Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which,
trace_class_resolution(this_cp, k);
}
Klass** adr = this_cp->resolved_klasses()->adr_at(resolved_klass_index);
OrderAccess::release_store_ptr((Klass* volatile *)adr, k);
OrderAccess::release_store(adr, k);
// The interpreter assumes that when the tag is stored, the klass is resolved
// and the Klass* stored in _resolved_klasses is non-NULL, so we need
// hardware store ordering here.


@ -145,7 +145,7 @@ class ConstantPool : public Metadata {
assert(is_within_bounds(which), "index out of bounds");
assert(!tag_at(which).is_unresolved_klass() && !tag_at(which).is_unresolved_klass_in_error(), "Corrupted constant pool");
// Uses volatile because the klass slot changes without a lock.
volatile intptr_t adr = (intptr_t)OrderAccess::load_ptr_acquire(obj_at_addr_raw(which));
intptr_t adr = OrderAccess::load_acquire(obj_at_addr_raw(which));
assert(adr != 0 || which == 0, "cp entry for klass should not be zero");
return CPSlot(adr);
}
@ -407,7 +407,7 @@ class ConstantPool : public Metadata {
assert(tag_at(kslot.name_index()).is_symbol(), "sanity");
Klass** adr = resolved_klasses()->adr_at(kslot.resolved_klass_index());
return (Klass*)OrderAccess::load_ptr_acquire(adr);
return OrderAccess::load_acquire(adr);
}
// RedefineClasses() API support:


@ -91,7 +91,7 @@ void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
// Need to flush pending stores here before bytecode is written.
OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift));
OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift));
}
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
@ -101,19 +101,13 @@ void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
// Need to flush pending stores here before bytecode is written.
OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift));
OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift));
}
// Sets f1, ordering with previous writes.
void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
assert(f1 != NULL, "");
OrderAccess::release_store_ptr((HeapWord*) &_f1, f1);
}
// Sets flags, but only if the value was previously zero.
bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) {
intptr_t result = Atomic::cmpxchg_ptr(flags, &_flags, 0);
return (result == 0);
OrderAccess::release_store(&_f1, f1);
}
// Note that concurrent update of both bytecodes can leave one of them
@ -154,7 +148,8 @@ void ConstantPoolCacheEntry::set_parameter_size(int value) {
// bother trying to update it once it's nonzero but always make
// sure that the final parameter size agrees with what was passed.
if (_flags == 0) {
Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0);
intx newflags = (value & parameter_size_mask);
Atomic::cmpxchg(newflags, &_flags, (intx)0);
}
guarantee(parameter_size() == value,
"size must not change: parameter_size=%d, value=%d", parameter_size(), value);

Some files were not shown because too many files have changed in this diff Show More