Mirror of https://github.com/openjdk/jdk.git, synced 2026-01-28 12:09:14 +00:00
Merge branch 'master' into 8344116
Commit: c526f0211c
.github/workflows/build-alpine-linux.yml (vendored, 2 lines changed)

@@ -59,7 +59,7 @@ on:
 jobs:
   build-linux:
     name: build
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-24.04
     container:
       image: alpine:3.20
.github/workflows/build-cross-compile.yml (vendored, 2 lines changed)

@@ -48,7 +48,7 @@ on:
 jobs:
   build-cross-compile:
     name: build
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-24.04

     strategy:
       fail-fast: false
.github/workflows/build-linux.yml (vendored, 20 lines changed)

@@ -75,7 +75,7 @@ on:
 jobs:
   build-linux:
     name: build
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-24.04

     strategy:
       fail-fast: false
@@ -115,9 +115,21 @@ jobs:
          if [[ '${{ inputs.apt-architecture }}' != '' ]]; then
            sudo dpkg --add-architecture ${{ inputs.apt-architecture }}
          fi
-          sudo apt-get update
-          sudo apt-get install --only-upgrade apt
-          sudo apt-get install gcc-${{ inputs.gcc-major-version }}${{ inputs.gcc-package-suffix }} g++-${{ inputs.gcc-major-version }}${{ inputs.gcc-package-suffix }} libxrandr-dev${{ steps.arch.outputs.suffix }} libxtst-dev${{ steps.arch.outputs.suffix }} libcups2-dev${{ steps.arch.outputs.suffix }} libasound2-dev${{ steps.arch.outputs.suffix }} ${{ inputs.apt-extra-packages }}
+          sudo apt update
+          sudo apt install --only-upgrade apt
+          sudo apt install \
+              gcc-${{ inputs.gcc-major-version }}${{ inputs.gcc-package-suffix }} \
+              g++-${{ inputs.gcc-major-version }}${{ inputs.gcc-package-suffix }} \
+              libasound2-dev${{ steps.arch.outputs.suffix }} \
+              libcups2-dev${{ steps.arch.outputs.suffix }} \
+              libfontconfig1-dev${{ steps.arch.outputs.suffix }} \
+              libx11-dev${{ steps.arch.outputs.suffix }} \
+              libxext-dev${{ steps.arch.outputs.suffix }} \
+              libxrandr-dev${{ steps.arch.outputs.suffix }} \
+              libxrender-dev${{ steps.arch.outputs.suffix }} \
+              libxt-dev${{ steps.arch.outputs.suffix }} \
+              libxtst-dev${{ steps.arch.outputs.suffix }} \
+              ${{ inputs.apt-extra-packages }}
          sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${{ inputs.gcc-major-version }} 100 --slave /usr/bin/g++ g++ /usr/bin/g++-${{ inputs.gcc-major-version }}

      - name: 'Configure'
.github/workflows/main.yml (vendored, 6 lines changed)

@@ -57,7 +57,7 @@ jobs:

   prepare:
     name: 'Prepare the run'
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-24.04
     env:
       # List of platforms to exclude by default
       EXCLUDED_PLATFORMS: 'alpine-linux-x64'
@@ -405,7 +405,7 @@ jobs:
     with:
       platform: linux-x64
       bootjdk-platform: linux-x64
-      runs-on: ubuntu-22.04
+      runs-on: ubuntu-24.04
       dry-run: ${{ needs.prepare.outputs.dry-run == 'true' }}
       debug-suffix: -debug

@@ -419,7 +419,7 @@ jobs:
     with:
       platform: linux-x64
       bootjdk-platform: linux-x64
-      runs-on: ubuntu-22.04
+      runs-on: ubuntu-24.04
       dry-run: ${{ needs.prepare.outputs.dry-run == 'true' }}
       static-suffix: "-static"
@@ -1,7 +1,7 @@
 [general]
 project=jdk
 jbs=JDK
-version=26
+version=27

 [checks]
 error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists,copyright
@@ -38,7 +38,7 @@
 #   directory.
 # - open a terminal program and run these commands:
 #     cd "${JDK_CHECKOUT}"/src/jdk.compiler/share/data/symbols
-#     bash ../../../../../make/scripts/generate-symbol-data.sh "${JDK_N_INSTALL}"
+#     bash ../../../../../bin/generate-symbol-data.sh "${JDK_N_INSTALL}"
 # - this command will generate or update data for "--release N" into the ${JDK_CHECKOUT}/src/jdk.compiler/share/data/symbols
 #   directory, updating all registration necessary. If the goal was to update the data, and there are no
 #   new or changed files in the ${JDK_CHECKOUT}/src/jdk.compiler/share/data/symbols directory after running this script,
@@ -541,6 +541,11 @@ href="#apple-xcode">Apple Xcode</a> on some strategies to deal with
 this.</p>
 <p>It is recommended that you use at least macOS 14 and Xcode 15.4, but
 earlier versions may also work.</p>
+<p>Starting with Xcode 26, introduced in macOS 26, the Metal toolchain
+no longer comes bundled with Xcode, so it needs to be installed
+separately. This can be done either via Xcode's Settings/Components
+UI, or on the command line by calling
+<code>xcodebuild -downloadComponent metalToolchain</code>.</p>
 <p>The standard macOS environment contains the basic tooling needed to
 build, but for external libraries a package manager is recommended. The
 JDK uses <a href="https://brew.sh/">homebrew</a> in the examples, but
@@ -352,6 +352,11 @@ on some strategies to deal with this.
 It is recommended that you use at least macOS 14 and Xcode 15.4, but
 earlier versions may also work.

+Starting with Xcode 26, introduced in macOS 26, the Metal toolchain no longer
+comes bundled with Xcode, so it needs to be installed separately. This can be
+done either via Xcode's Settings/Components UI, or on the command line by
+calling `xcodebuild -downloadComponent metalToolchain`.
+
 The standard macOS environment contains the basic tooling needed to build, but
 for external libraries a package manager is recommended. The JDK uses
 [homebrew](https://brew.sh/) in the examples, but feel free to use whatever
@@ -1037,8 +1037,8 @@ running destructors at exit can lead to problems.</p>
 <p>Some of the approaches used in HotSpot to avoid dynamic
 initialization include:</p>
 <ul>
-<li><p>Use the <code>Deferred<T></code> class template. Add a call
-to its initialization function at an appropriate place during VM
+<li><p>Use the <code>DeferredStatic<T></code> class template. Add
+a call to its initialization function at an appropriate place during VM
 initialization. The underlying object is never destroyed.</p></li>
 <li><p>For objects of class type, use a variable whose value is a
 pointer to the class, initialized to <code>nullptr</code>. Provide an
@@ -954,7 +954,7 @@ destructors at exit can lead to problems.
 Some of the approaches used in HotSpot to avoid dynamic initialization
 include:

-* Use the `Deferred<T>` class template. Add a call to its initialization
+* Use the `DeferredStatic<T>` class template. Add a call to its initialization
   function at an appropriate place during VM initialization. The underlying
   object is never destroyed.
@@ -119,6 +119,9 @@ cover the new source version</li>
 and
 <code>test/langtools/tools/javac/preview/classReaderTest/Client.preview.out</code>:
 update expected messages for preview errors and warnings</li>
+<li><code>test/langtools/tools/javac/versions/Versions.java</code>: add
+new source version to the set of valid sources and add new enum constant
+for the new class file version.</li>
 </ul>
 </body>
 </html>
@@ -65,4 +65,4 @@ to be updated for a particular release.
 * `test/langtools/tools/javac/lib/JavacTestingAbstractProcessor.java`
   update annotation processor extended by `javac` tests to cover the new source version
 * `test/langtools/tools/javac/preview/classReaderTest/Client.nopreview.out` and `test/langtools/tools/javac/preview/classReaderTest/Client.preview.out`: update expected messages for preview errors and warnings
-
+* `test/langtools/tools/javac/versions/Versions.java`: add new source version to the set of valid sources and add new enum constant for the new class file version.
@@ -125,13 +125,6 @@ define SetupBundleFileBody
	      && $(TAR) cf - -$(TAR_INCLUDE_PARAM) $$($1_$$d_LIST_FILE) \
	          $(TAR_IGNORE_EXIT_VALUE) ) \
	      | ( $(CD) $(SUPPORT_OUTPUTDIR)/bundles/$1/$$($1_SUBDIR) && $(TAR) xf - )$$(NEWLINE) )
-	# Rename stripped pdb files
-	ifeq ($(call isTargetOs, windows)+$(SHIP_DEBUG_SYMBOLS), true+public)
-	  for f in `$(FIND) $(SUPPORT_OUTPUTDIR)/bundles/$1/$$($1_SUBDIR) -name "*.stripped.pdb"`; do \
-	    $(ECHO) Renaming $$$${f} to $$$${f%stripped.pdb}pdb $(LOG_INFO); \
-	    $(MV) $$$${f} $$$${f%stripped.pdb}pdb; \
-	  done
-	endif
	# Unzip any zipped debuginfo files
	ifeq ($$($1_UNZIP_DEBUGINFO), true)
	  for f in `$(FIND) $(SUPPORT_OUTPUTDIR)/bundles/$1/$$($1_SUBDIR) -name "*.diz"`; do \
@@ -222,14 +215,6 @@ ifneq ($(filter product-bundles% legacy-bundles, $(MAKECMDGOALS)), )
   ifeq ($(call isTargetOs, windows), true)
     ifeq ($(SHIP_DEBUG_SYMBOLS), )
       JDK_SYMBOLS_EXCLUDE_PATTERN := %.pdb
-    else
-      ifeq ($(SHIP_DEBUG_SYMBOLS), public)
-        JDK_SYMBOLS_EXCLUDE_PATTERN := \
-            $(filter-out \
-                %.stripped.pdb, \
-                $(filter %.pdb, $(ALL_JDK_FILES)) \
-            )
-      endif
     endif
   endif

@@ -244,10 +229,7 @@ ifneq ($(filter product-bundles% legacy-bundles, $(MAKECMDGOALS)), )
       )

   JDK_SYMBOLS_BUNDLE_FILES := \
-      $(filter-out \
-          %.stripped.pdb, \
-          $(call FindFiles, $(SYMBOLS_IMAGE_DIR)) \
-      )
+      $(call FindFiles, $(SYMBOLS_IMAGE_DIR))

   TEST_DEMOS_BUNDLE_FILES := $(filter $(JDK_DEMOS_IMAGE_HOMEDIR)/demo/%, \
       $(ALL_JDK_DEMOS_FILES))
@@ -267,14 +249,6 @@ ifneq ($(filter product-bundles% legacy-bundles, $(MAKECMDGOALS)), )
   ifeq ($(call isTargetOs, windows), true)
     ifeq ($(SHIP_DEBUG_SYMBOLS), )
       JRE_SYMBOLS_EXCLUDE_PATTERN := %.pdb
-    else
-      ifeq ($(SHIP_DEBUG_SYMBOLS), public)
-        JRE_SYMBOLS_EXCLUDE_PATTERN := \
-            $(filter-out \
-                %.stripped.pdb, \
-                $(filter %.pdb, $(ALL_JRE_FILES)) \
-            )
-      endif
     endif
   endif
@@ -282,29 +282,33 @@ else
 endif
 CMDS_TARGET_SUBDIR := bin

-# Param 1 - either JDK or JRE
+# Copy debug info files into symbols bundle.
+# In case of Windows and --with-external-symbols-in-bundles=public, take care to remove *.stripped.pdb files
 SetupCopyDebuginfo = \
     $(foreach m, $(ALL_$1_MODULES), \
+      $(eval dbgfiles := $(call FindDebuginfoFiles, $(SUPPORT_OUTPUTDIR)/modules_libs/$m)) \
+      $(eval dbgfiles := $(if $(filter true+public,$(call isTargetOs,windows)+$(SHIP_DEBUG_SYMBOLS)), \
+          $(filter-out %.stripped.pdb,$(dbgfiles)),$(dbgfiles)) \
+      ) \
       $(eval $(call SetupCopyFiles, COPY_$1_LIBS_DEBUGINFO_$m, \
           SRC := $(SUPPORT_OUTPUTDIR)/modules_libs/$m, \
           DEST := $($1_IMAGE_DIR)/$(LIBS_TARGET_SUBDIR), \
-          FILES := $(call FindDebuginfoFiles, \
-              $(SUPPORT_OUTPUTDIR)/modules_libs/$m), \
+          FILES := $(dbgfiles), \
       )) \
       $(eval $1_TARGETS += $$(COPY_$1_LIBS_DEBUGINFO_$m)) \
+      $(eval dbgfiles := $(call FindDebuginfoFiles, $(SUPPORT_OUTPUTDIR)/modules_cmds/$m)) \
+      $(eval dbgfiles := $(if $(filter true+public,$(call isTargetOs,windows)+$(SHIP_DEBUG_SYMBOLS)), \
+          $(filter-out %.stripped.pdb,$(dbgfiles)),$(dbgfiles)) \
+      ) \
       $(eval $(call SetupCopyFiles, COPY_$1_CMDS_DEBUGINFO_$m, \
           SRC := $(SUPPORT_OUTPUTDIR)/modules_cmds/$m, \
           DEST := $($1_IMAGE_DIR)/$(CMDS_TARGET_SUBDIR), \
-          FILES := $(call FindDebuginfoFiles, \
-              $(SUPPORT_OUTPUTDIR)/modules_cmds/$m), \
+          FILES := $(dbgfiles), \
       )) \
       $(eval $1_TARGETS += $$(COPY_$1_CMDS_DEBUGINFO_$m)) \
     )

-# No space before argument to avoid having to put $(strip ) everywhere in implementation above.
+# No space before argument to avoid having to put $(strip ) everywhere in
+# implementation above.
 $(call SetupCopyDebuginfo,JDK)
 $(call SetupCopyDebuginfo,JRE)
+$(call SetupCopyDebuginfo,SYMBOLS)

 ################################################################################
@@ -873,7 +873,7 @@ define SetupRunJtregTestBody
     $1_JTREG_BASIC_OPTIONS += -testThreadFactoryPath:$$(JTREG_TEST_THREAD_FACTORY_JAR)
     $1_JTREG_BASIC_OPTIONS += -testThreadFactory:$$(JTREG_TEST_THREAD_FACTORY)
     $1_JTREG_BASIC_OPTIONS += $$(addprefix $$(JTREG_PROBLEM_LIST_PREFIX), $$(wildcard \
-        $$(addprefix $$($1_TEST_ROOT)/, ProblemList-$$(JTREG_TEST_THREAD_FACTORY).txt) \
+      $$(addprefix $$($1_TEST_ROOT)/, ProblemList-$$(JTREG_TEST_THREAD_FACTORY).txt) \
     ))
   endif

@@ -881,8 +881,8 @@ define SetupRunJtregTestBody
     AGENT := $$(LIBRARY_PREFIX)JvmtiStressAgent$$(SHARED_LIBRARY_SUFFIX)=$$(JTREG_JVMTI_STRESS_AGENT)
     $1_JTREG_BASIC_OPTIONS += -javaoption:'-agentpath:$(TEST_IMAGE_DIR)/hotspot/jtreg/native/$$(AGENT)'
     $1_JTREG_BASIC_OPTIONS += $$(addprefix $$(JTREG_PROBLEM_LIST_PREFIX), $$(wildcard \
-        $$(addprefix $$($1_TEST_ROOT)/, ProblemList-jvmti-stress-agent.txt) \
-    ))
+      $$(addprefix $$($1_TEST_ROOT)/, ProblemList-jvmti-stress-agent.txt) \
+    ))
   endif

@@ -1092,7 +1092,7 @@ define SetupRunJtregTestBody
	$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR) \
	    $$($1_TEST_TMP_DIR))
	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/jtreg, \
-	    $$(COV_ENVIRONMENT) $$($1_COMMAND_LINE) \
+	  $$(COV_ENVIRONMENT) $$($1_COMMAND_LINE) \
	)

	$1_RESULT_FILE := $$($1_TEST_RESULTS_DIR)/text/stats.txt
@@ -1102,11 +1102,11 @@ define SetupRunJtregTestBody
	$$(call LogWarn, Test report is stored in $$(strip \
	    $$(subst $$(TOPDIR)/, , $$($1_TEST_RESULTS_DIR))))

-	# Read jtreg documentation to learn on the test stats categories:
-	# https://github.com/openjdk/jtreg/blob/master/src/share/doc/javatest/regtest/faq.md#what-do-all-those-numbers-in-the-test-results-line-mean
-	# In jtreg, "skipped:" category accounts for tests that threw jtreg.SkippedException at runtime.
-	# At the same time these tests contribute to "passed:" tests.
-	# In here we don't want that and so we substract number of "skipped:" from "passed:".
+	# Read the jtreg documentation to learn about the test stats categories:
+	# https://github.com/openjdk/jtreg/blob/master/src/share/doc/javatest/regtest/faq.md#what-do-all-those-numbers-in-the-test-results-line-mean
+	# In jtreg, the "skipped:" category accounts for tests that threw jtreg.SkippedException at runtime.
+	# At the same time these tests contribute to the "passed:" count.
+	# We don't want that here, so we subtract the number of "skipped:" from "passed:".

	$$(if $$(wildcard $$($1_RESULT_FILE)), \
	$$(eval $1_PASSED_AND_RUNTIME_SKIPPED := $$(shell $$(AWK) '{ gsub(/[,;]/, ""); \
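The adjustment that comment describes is easy to show in isolation. A minimal Java sketch (purely illustrative; the makefile itself does this with AWK over stats.txt), parsing a jtreg results line such as "passed: 310; skipped: 4; failed: 1":

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Illustrative only: subtract runtime-skipped tests from the reported
    // "passed:" count, since jtreg counts SkippedException tests as both.
    class JtregStats {
        private static final Pattern STATS =
            Pattern.compile("passed: (\\d+)(?:.*?skipped: (\\d+))?");

        static int effectivePassed(String statsLine) {
            Matcher m = STATS.matcher(statsLine);
            if (!m.find()) throw new IllegalArgumentException(statsLine);
            int passed = Integer.parseInt(m.group(1));
            int skipped = m.group(2) != null ? Integer.parseInt(m.group(2)) : 0;
            return passed - skipped; // tests that only threw SkippedException
        }
    }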
@@ -79,7 +79,7 @@ TOOL_GENERATEEXTRAPROPERTIES = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_too
     build.tools.generateextraproperties.GenerateExtraProperties

 TOOL_GENERATECASEFOLDING = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
-    build.tools.generatecharacter.CaseFolding
+    build.tools.generatecharacter.GenerateCaseFolding

 TOOL_MAKEZIPREPRODUCIBLE = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
     build.tools.makezipreproducible.MakeZipReproducible
@@ -353,7 +353,12 @@ AC_DEFUN_ONCE([BASIC_SETUP_DEVKIT],
       [set up toolchain on Mac OS using a path to an Xcode installation])])

   UTIL_DEPRECATED_ARG_WITH(sys-root)
-  UTIL_DEPRECATED_ARG_WITH(tools-dir)
+
+  AC_ARG_WITH([tools-dir], [AS_HELP_STRING([--with-tools-dir],
+      [Point to a nonstandard Visual Studio installation location on Windows by
+      specifying any existing directory 2 or 3 levels below the installation
+      root.])]
+  )

   if test "x$with_xcode_path" != x; then
     if test "x$OPENJDK_BUILD_OS" = "xmacosx"; then
@@ -34,7 +34,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS],
   FLAGS_SETUP_LDFLAGS_CPU_DEP([TARGET])

   # Setup the build toolchain
-  FLAGS_SETUP_LDFLAGS_CPU_DEP([BUILD], [OPENJDK_BUILD_])
+  FLAGS_SETUP_LDFLAGS_CPU_DEP([BUILD], [OPENJDK_BUILD_], [BUILD_])

   AC_SUBST(ADLC_LDFLAGS)
 ])
@@ -52,11 +52,6 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
     # add --no-as-needed to disable default --as-needed link flag on some GCC toolchains
     # add --icf=all (Identical Code Folding - merges identical functions)
     BASIC_LDFLAGS="-Wl,-z,defs -Wl,-z,relro -Wl,-z,now -Wl,--no-as-needed -Wl,--exclude-libs,ALL"
-    if test "x$LINKER_TYPE" = "xgold"; then
-      if test x$DEBUG_LEVEL = xrelease; then
-        BASIC_LDFLAGS="$BASIC_LDFLAGS -Wl,--icf=all"
-      fi
-    fi

     # Linux : remove unused code+data in link step
     if test "x$ENABLE_LINKTIME_GC" = xtrue; then
@@ -108,6 +103,9 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],

   # Setup OS-dependent LDFLAGS
   if test "x$OPENJDK_TARGET_OS" = xmacosx && test "x$TOOLCHAIN_TYPE" = xclang; then
+    if test x$DEBUG_LEVEL = xrelease; then
+      BASIC_LDFLAGS_JDK_ONLY="$BASIC_LDFLAGS_JDK_ONLY -Wl,-dead_strip"
+    fi
     # FIXME: We should really generalize SetSharedLibraryOrigin instead.
     OS_LDFLAGS_JVM_ONLY="-Wl,-rpath,@loader_path/. -Wl,-rpath,@loader_path/.."
     OS_LDFLAGS="-mmacosx-version-min=$MACOSX_VERSION_MIN -Wl,-reproducible"
@@ -166,7 +164,8 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
 ################################################################################
 # $1 - Either BUILD or TARGET to pick the correct OS/CPU variables to check
 #      conditionals against.
-# $2 - Optional prefix for each variable defined.
+# $2 - Optional prefix for each variable defined (OPENJDK_BUILD_ or nothing).
+# $3 - Optional prefix for toolchain variables (BUILD_ or nothing).
 AC_DEFUN([FLAGS_SETUP_LDFLAGS_CPU_DEP],
 [
   # Setup CPU-dependent basic LDFLAGS. These can differ between the target and
@@ -200,6 +199,12 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_CPU_DEP],
     fi
   fi

+  if test "x${$3LD_TYPE}" = "xgold"; then
+    if test x$DEBUG_LEVEL = xrelease; then
+      $1_CPU_LDFLAGS="${$1_CPU_LDFLAGS} -Wl,--icf=all"
+    fi
+  fi
+
   # Export variables according to old definitions, prefix with $2 if present.
   LDFLAGS_JDK_COMMON="$BASIC_LDFLAGS $BASIC_LDFLAGS_JDK_ONLY \
       $OS_LDFLAGS $DEBUGLEVEL_LDFLAGS_JDK_ONLY ${$2EXTRA_LDFLAGS}"
@@ -516,7 +516,7 @@ AC_DEFUN([TOOLCHAIN_EXTRACT_LD_VERSION],
   if [ [[ "$LINKER_VERSION_STRING" == *gold* ]] ]; then
     [ LINKER_VERSION_NUMBER=`$ECHO $LINKER_VERSION_STRING | \
         $SED -e 's/.* \([0-9][0-9]*\(\.[0-9][0-9]*\)*\).*) .*/\1/'` ]
-    LINKER_TYPE=gold
+    $1_TYPE=gold
   else
     [ LINKER_VERSION_NUMBER=`$ECHO $LINKER_VERSION_STRING | \
         $SED -e 's/.* \([0-9][0-9]*\(\.[0-9][0-9]*\)*\).*/\1/'` ]
@@ -229,6 +229,11 @@ define SetupLinkerFlags
   # TOOLCHAIN_TYPE plus OPENJDK_TARGET_OS
   ifeq ($$($1_LINK_TIME_OPTIMIZATION), true)
     $1_EXTRA_LDFLAGS += $(LDFLAGS_LTO)
+    # Instruct the ld64 linker not to delete the temporary object file
+    # generated during Link Time Optimization
+    ifeq ($(call isTargetOs, macosx), true)
+      $1_EXTRA_LDFLAGS += -Wl,-object_path_lto,$$($1_OBJECT_DIR)/$$($1_NAME)_lto_helper.o
+    endif
   endif

   $1_EXTRA_LDFLAGS += $$($1_LDFLAGS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_LDFLAGS_$(OPENJDK_TARGET_OS)) \
@@ -26,17 +26,17 @@
 # Default version, product, and vendor information to use,
 # unless overridden by configure

-DEFAULT_VERSION_FEATURE=26
+DEFAULT_VERSION_FEATURE=27
 DEFAULT_VERSION_INTERIM=0
 DEFAULT_VERSION_UPDATE=0
 DEFAULT_VERSION_PATCH=0
 DEFAULT_VERSION_EXTRA1=0
 DEFAULT_VERSION_EXTRA2=0
 DEFAULT_VERSION_EXTRA3=0
-DEFAULT_VERSION_DATE=2026-03-17
-DEFAULT_VERSION_CLASSFILE_MAJOR=70 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
+DEFAULT_VERSION_DATE=2026-09-15
+DEFAULT_VERSION_CLASSFILE_MAJOR=71 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
 DEFAULT_VERSION_CLASSFILE_MINOR=0
 DEFAULT_VERSION_DOCS_API_SINCE=11
-DEFAULT_ACCEPTABLE_BOOT_VERSIONS="25 26"
-DEFAULT_JDK_SOURCE_TARGET_VERSION=26
+DEFAULT_ACCEPTABLE_BOOT_VERSIONS="25 26 27"
+DEFAULT_JDK_SOURCE_TARGET_VERSION=27
 DEFAULT_PROMOTED_VERSION_PRE=ea
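The classfile bump follows the formula quoted in the comment above: the class-file major version is the feature release number plus 44. As a one-line illustrative Java rendering (a hypothetical helper, not JDK code):

    // JDK N emits class files with major version N + 44; for JDK 27 this is 71.
    static int classFileMajor(int featureRelease) {
        return featureRelease + 44;
    }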
@@ -151,6 +151,12 @@ JVM_STRIPFLAGS ?= $(STRIPFLAGS)
 # This source set is reused so save in cache.
 $(call FillFindCache, $(JVM_SRC_DIRS))

+ifeq ($(SHIP_DEBUG_SYMBOLS), full)
+  CFLAGS_SHIP_DEBUGINFO := -DSHIP_DEBUGINFO_FULL
+else ifeq ($(SHIP_DEBUG_SYMBOLS), public)
+  CFLAGS_SHIP_DEBUGINFO := -DSHIP_DEBUGINFO_PUBLIC
+endif
+
 ifeq ($(call isTargetOs, windows), true)
   ifeq ($(STATIC_LIBS), true)
     WIN_EXPORT_FILE := $(JVM_OUTPUTDIR)/static-win-exports.def
@@ -158,10 +164,6 @@ ifeq ($(call isTargetOs, windows), true)
     WIN_EXPORT_FILE := $(JVM_OUTPUTDIR)/win-exports.def
   endif

-  ifeq ($(SHIP_DEBUG_SYMBOLS), public)
-    CFLAGS_STRIPPED_DEBUGINFO := -DHAS_STRIPPED_DEBUGINFO
-  endif
-
   JVM_LDFLAGS += -def:$(WIN_EXPORT_FILE)
 endif

@@ -187,7 +189,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJVM, \
     CFLAGS := $(JVM_CFLAGS), \
     abstract_vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
     arguments.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
-    whitebox.cpp_CXXFLAGS := $(CFLAGS_STRIPPED_DEBUGINFO), \
+    whitebox.cpp_CXXFLAGS := $(CFLAGS_SHIP_DEBUGINFO), \
     DISABLED_WARNINGS_gcc := $(DISABLED_WARNINGS_gcc), \
    DISABLED_WARNINGS_gcc_ad_$(HOTSPOT_TARGET_CPU_ARCH).cpp := nonnull, \
    DISABLED_WARNINGS_gcc_bytecodeInterpreter.cpp := unused-label, \
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package build.tools.generatecharacter;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.nio.file.StandardOpenOption;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-public class CaseFolding {
-
-    public static void main(String[] args) throws Throwable {
-        if (args.length != 3) {
-            System.err.println("Usage: java CaseFolding TemplateFile CaseFolding.txt CaseFolding.java");
-            System.exit(1);
-        }
-        var templateFile = Paths.get(args[0]);
-        var caseFoldingTxt = Paths.get(args[1]);
-        var genSrcFile = Paths.get(args[2]);
-        var supportedTypes = "^.*; [CTS]; .*$";
-        var caseFoldingEntries = Files.lines(caseFoldingTxt)
-            .filter(line -> !line.startsWith("#") && line.matches(supportedTypes))
-            .map(line -> {
-                String[] cols = line.split("; ");
-                return new String[] {cols[0], cols[1], cols[2]};
-            })
-            .filter(cols -> {
-                // the folding case doesn't map back to the original char.
-                var cp1 = Integer.parseInt(cols[0], 16);
-                var cp2 = Integer.parseInt(cols[2], 16);
-                return Character.toUpperCase(cp2) != cp1 && Character.toLowerCase(cp2) != cp1;
-            })
-            .map(cols -> String.format("            entry(0x%s, 0x%s)", cols[0], cols[2]))
-            .collect(Collectors.joining(",\n", "", ""));
-
-        // hack, hack, hack! the logic does not pick 0131. just add manually to support 'I's.
-        // 0049; T; 0131; # LATIN CAPITAL LETTER I
-        final String T_0x0131_0x49 = String.format("            entry(0x%04x, 0x%04x),\n", 0x0131, 0x49);
-
-        // Generate .java file
-        Files.write(
-            genSrcFile,
-            Files.lines(templateFile)
-                .map(line -> line.contains("%%%Entries") ? T_0x0131_0x49 + caseFoldingEntries : line)
-                .collect(Collectors.toList()),
-            StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
-    }
-}
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package build.tools.generatecharacter;
+
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.util.Arrays;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+public class GenerateCaseFolding {
+
+    public static void main(String[] args) throws Throwable {
+        if (args.length != 3) {
+            System.err.println("Usage: java GenerateCaseFolding TemplateFile CaseFolding.txt CaseFolding.java");
+            System.exit(1);
+        }
+        var templateFile = Paths.get(args[0]);
+        var caseFoldingTxt = Paths.get(args[1]);
+        var genSrcFile = Paths.get(args[2]);
+
+        // java.lang
+        var supportedTypes = "^.*; [CF]; .*$"; // full/1:M case folding
+        String[][] caseFoldings = Files.lines(caseFoldingTxt)
+            .filter(line -> !line.startsWith("#") && line.matches(supportedTypes))
+            .map(line -> {
+                var fields = line.split("; ");
+                var cp = fields[0];
+                fields = fields[2].trim().split(" ");
+                var folding = new String[fields.length + 1];
+                folding[0] = cp;
+                System.arraycopy(fields, 0, folding, 1, fields.length);
+                return folding;
+            })
+            .toArray(size -> new String[size][]);
+
+        // util.regex
+        var expandedSupportedTypes = "^.*; [CTS]; .*$";
+        var expanded_caseFoldingEntries = Files.lines(caseFoldingTxt)
+            .filter(line -> !line.startsWith("#") && line.matches(expandedSupportedTypes))
+            .map(line -> {
+                String[] cols = line.split("; ");
+                return new String[]{cols[0], cols[1], cols[2]};
+            })
+            .filter(cols -> {
+                // the folding case doesn't map back to the original char.
+                var cp1 = Integer.parseInt(cols[0], 16);
+                var cp2 = Integer.parseInt(cols[2], 16);
+                return Character.toUpperCase(cp2) != cp1 && Character.toLowerCase(cp2) != cp1;
+            })
+            .map(cols -> String.format("            entry(0x%s, 0x%s)", cols[0], cols[2]))
+            .collect(Collectors.joining(",\n", "", ""));
+
+        // hack, hack, hack! the logic does not pick 0131. just add manually to support 'I's.
+        // 0049; T; 0131; # LATIN CAPITAL LETTER I
+        final String T_0x0131_0x49 = String.format("            entry(0x%04x, 0x%04x),\n", 0x0131, 0x49);
+
+        Files.write(
+            genSrcFile,
+            Files.lines(templateFile)
+                .map(line -> line.contains("%%%Entries") ? genFoldingEntries(caseFoldings) : line)
+                .map(line -> line.contains("%%%Expanded_Case_Map_Entries") ? T_0x0131_0x49 + expanded_caseFoldingEntries : line)
+                .collect(Collectors.toList()),
+            StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
+    }
+
+    private static long foldingToLong(String[] folding) {
+        int cp = Integer.parseInt(folding[0], 16);
+        long value = (long)Integer.parseInt(folding[1], 16);
+        if (!Character.isSupplementaryCodePoint(cp) && folding.length != 2) {
+            var shift = 16;
+            for (int j = 2; j < folding.length; j++) {
+                value |= (long)Integer.parseInt(folding[j], 16) << shift;
+                shift <<= 1;
+            }
+            value = value | (long) (folding.length - 1) << 48;
+        }
+        return value;
+    }
+
+    private static String genFoldingEntries(String[][] foldings) {
+        StringBuilder sb = new StringBuilder();
+        sb.append("    private static final int[] CASE_FOLDING_CPS = {\n");
+        int width = 10;
+        for (int i = 0; i < foldings.length; i++) {
+            if (i % width == 0)
+                sb.append("        ");
+            sb.append(String.format("0X%s", foldings[i][0]));
+            if (i < foldings.length - 1)
+                sb.append(", ");
+            if (i % width == width - 1 || i == foldings.length - 1)
+                sb.append("\n");
+        }
+        sb.append("    };\n\n");
+
+        sb.append("    private static final long[] CASE_FOLDING_VALUES = {\n");
+        width = 6;
+        for (int i = 0; i < foldings.length; i++) {
+            if (i % width == 0)
+                sb.append("        "); // indent
+            sb.append(String.format("0x%013xL", foldingToLong(foldings[i])));
+            if (i < foldings.length - 1)
+                sb.append(", ");
+            if (i % width == width - 1 || i == foldings.length - 1) {
+                sb.append("\n");
+            }
+        }
+        sb.append("    };\n");
+        return sb.toString();
+    }
+}
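For reference, the packed layout that foldingToLong() above emits can be read back as follows. This decoder is an illustrative sketch, not part of the patch, and assumes only what the encoder establishes: mapped characters sit in consecutive 16-bit fields starting at bit 0, and bits 48 and up hold the number of mapped characters (zero meaning a single mapping stored as a plain value, which is also how supplementary code points are kept):

    // Illustrative inverse of foldingToLong() above (not part of the patch).
    static int[] decodeFolding(long value) {
        int count = (int) (value >>> 48);
        if (count == 0) {
            // Single mapping (possibly a supplementary code point), stored plain.
            return new int[] { (int) value };
        }
        int[] folded = new int[count];
        for (int i = 0; i < count; i++) {
            folded[i] = (int) ((value >>> (16 * i)) & 0xFFFF);
        }
        return folded;
    }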
@@ -120,3 +120,25 @@ $(INTPOLY_GEN_DONE): $(INTPLOY_HEADER) $(BUILD_TOOLS_JDK)
 TARGETS += $(INTPOLY_GEN_DONE)

 ################################################################################
+
+RELEASE_FILE_TEMPLATE := $(TOPDIR)/src/java.base/share/classes/jdk/internal/misc/resources/release.txt.template
+RELEASE_FILE_TARGET := $(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)/jdk/internal/misc/resources/release.txt
+
+RELEASE_FILE_VARDEPS := $(COMPANY_NAME) $(VERSION_STRING) $(VERSION_DATE)
+RELEASE_FILE_VARDEPS_FILE := $(call DependOnVariable, RELEASE_FILE_VARDEPS, \
+    $(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)/jlink_release_txt.vardeps)
+
+$(eval $(call SetupTextFileProcessing, BUILD_RELEASE_FILE, \
+    SOURCE_FILES := $(RELEASE_FILE_TEMPLATE), \
+    OUTPUT_FILE := $(RELEASE_FILE_TARGET), \
+    REPLACEMENTS := \
+        @@COMPANY_NAME@@ => $(COMPANY_NAME) ; \
+        @@VERSION_STRING@@ => $(VERSION_STRING) ; \
+        @@VERSION_DATE@@ => $(VERSION_DATE) , \
+))
+
+$(BUILD_RELEASE_FILE): $(RELEASE_FILE_VARDEPS_FILE)
+
+TARGETS += $(BUILD_RELEASE_FILE)
+
+################################################################################
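The REPLACEMENTS list above substitutes @@-delimited placeholders in release.txt.template. As a plain-Java sketch of that substitution (illustrative only; the build performs it with SetupTextFileProcessing, not with Java):

    import java.util.Map;

    // Illustrative only: expand @@KEY@@ placeholders the way the
    // SetupTextFileProcessing rule above does for release.txt.template.
    class TemplateExpander {
        static String expand(String template, Map<String, String> values) {
            for (Map.Entry<String, String> e : values.entrySet()) {
                template = template.replace("@@" + e.getKey() + "@@", e.getValue());
            }
            return template;
        }
    }

For example, expand(template, Map.of("VERSION_STRING", "27-ea")) fills every @@VERSION_STRING@@ occurrence.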
@@ -34,7 +34,7 @@
 DOCLINT += -Xdoclint:all/protected \
     '-Xdoclint/package:java.*,javax.*'
 JAVAC_FLAGS += -XDstringConcat=inline
-COPY += .icu .dat .spp .nrm content-types.properties \
+COPY += .icu .dat .spp .nrm .txt content-types.properties \
     hijrah-config-Hijrah-umalqura_islamic-umalqura.properties
 CLEAN += intrinsic.properties
@@ -72,5 +72,22 @@ TARGETS += $(GENSRC_CHARACTERDATA)

 ################################################################################

+GENSRC_STRINGCASEFOLDING := $(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/lang/CaseFolding.java
+
+STRINGCASEFOLDING_TEMPLATE := $(MODULE_SRC)/share/classes/jdk/internal/lang/CaseFolding.java.template
+CASEFOLDINGTXT := $(MODULE_SRC)/share/data/unicodedata/CaseFolding.txt
+
+$(GENSRC_STRINGCASEFOLDING): $(BUILD_TOOLS_JDK) $(STRINGCASEFOLDING_TEMPLATE) $(CASEFOLDINGTXT)
+	$(call LogInfo, Generating $@)
+	$(call MakeTargetDir)
+	$(TOOL_GENERATECASEFOLDING) \
+	    $(STRINGCASEFOLDING_TEMPLATE) \
+	    $(CASEFOLDINGTXT) \
+	    $(GENSRC_STRINGCASEFOLDING)
+
+TARGETS += $(GENSRC_STRINGCASEFOLDING)
+
+################################################################################
+
 endif # include guard
 include MakeIncludeEnd.gmk
@@ -50,22 +50,5 @@ TARGETS += $(GENSRC_INDICCONJUNCTBREAK)

 ################################################################################

-GENSRC_CASEFOLDING := $(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/util/regex/CaseFolding.java
-
-CASEFOLDINGTEMP := $(MODULE_SRC)/share/classes/jdk/internal/util/regex/CaseFolding.java.template
-CASEFOLDINGTXT := $(MODULE_SRC)/share/data/unicodedata/CaseFolding.txt
-
-$(GENSRC_CASEFOLDING): $(BUILD_TOOLS_JDK) $(CASEFOLDINGTEMP) $(CASEFOLDINGTXT)
-	$(call LogInfo, Generating $@)
-	$(call MakeTargetDir)
-	$(TOOL_GENERATECASEFOLDING) \
-	    $(CASEFOLDINGTEMP) \
-	    $(CASEFOLDINGTXT) \
-	    $(GENSRC_CASEFOLDING)
-
-TARGETS += $(GENSRC_CASEFOLDING)
-
-################################################################################
-
 endif # include guard
 include MakeIncludeEnd.gmk
@@ -164,6 +164,24 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)

   ifeq ($(USE_EXTERNAL_LIBPNG), false)
     LIBSPLASHSCREEN_HEADER_DIRS += libsplashscreen/libpng
+    LIBSPLASHSCREEN_CFLAGS += -DPNG_NO_MMX_CODE -DPNG_ARM_NEON_OPT=0 \
+        -DPNG_ARM_NEON_IMPLEMENTATION=0 -DPNG_LOONGARCH_LSX_OPT=0
+
+    ifeq ($(call isTargetOs, linux)+$(call isTargetCpuArch, ppc), true+true)
+      LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0
+    endif
+
+    # The libpng bundled with the jdk is a reduced version which does not
+    # contain .png_init_filter_functions_vsx.
+    # Therefore we need to disable PNG_POWERPC_VSX_OPT explicitly by setting
+    # it to 0. If this define is not set, it would be automatically set to 2,
+    # because
+    # "#if defined(__PPC64__) && defined(__ALTIVEC__) && defined(__VSX__)"
+    # expands to true. This would result in
+    # .png_init_filter_functions_vsx being needed in libpng.
+    ifeq ($(call isTargetOs, aix), true)
+      LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0
+    endif
   else
     LIBSPLASHSCREEN_EXCLUDES += libpng
   endif
@@ -176,25 +194,7 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
     LIBSPLASHSCREEN_STATIC_LIB_EXCLUDE_OBJS += $(LIBZIP_OBJS)
   endif

-  LIBSPLASHSCREEN_CFLAGS += -DSPLASHSCREEN -DPNG_NO_MMX_CODE \
-      -DPNG_ARM_NEON_OPT=0 -DPNG_ARM_NEON_IMPLEMENTATION=0 \
-      -DPNG_LOONGARCH_LSX_OPT=0
-
-  ifeq ($(call isTargetOs, linux)+$(call isTargetCpuArch, ppc), true+true)
-    LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0
-  endif
-
-  # The external libpng submitted in the jdk is a reduced version
-  # which does not contain .png_init_filter_functions_vsx.
-  # Therefore we need to disable PNG_POWERPC_VSX_OPT explicitly by setting
-  # it to 0. If this define is not set, it would be automatically set to 2,
-  # because
-  # "#if defined(__PPC64__) && defined(__ALTIVEC__) && defined(__VSX__)"
-  # expands to true. This would result in
-  # .png_init_filter_functions_vsx being needed in libpng.
-  ifeq ($(call isTargetOs, aix), true)
-    LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0
-  endif
+  LIBSPLASHSCREEN_CFLAGS += -DSPLASHSCREEN

   ifeq ($(call isTargetOs, macosx), true)
     # libsplashscreen on macosx does not use the unix code
@@ -2003,6 +2003,9 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r

   if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
     uint ireg = ideal_reg();
+    DEBUG_ONLY(int algm = MIN2(RegMask::num_registers(ireg), (int)Matcher::stack_alignment_in_slots()) * VMRegImpl::stack_slot_size);
+    assert((src_lo_rc != rc_stack) || is_aligned(src_offset, algm), "unaligned vector spill sp offset %d (src)", src_offset);
+    assert((dst_lo_rc != rc_stack) || is_aligned(dst_offset, algm), "unaligned vector spill sp offset %d (dst)", dst_offset);
     if (ireg == Op_VecA && masm) {
       int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
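The new asserts check that the stack offsets of vector spills are aligned to the smaller of the vector register footprint and the stack alignment. The underlying power-of-two alignment test, as an illustrative Java sketch (HotSpot itself uses its C++ is_aligned() helper):

    // Illustrative only: an offset is aligned when its low bits below the
    // (power-of-two) alignment are all zero.
    static boolean isAligned(int offset, int alignment) {
        return (offset & (alignment - 1)) == 0;
    }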
@@ -695,7 +695,7 @@ instruct getAndSetP(indirect mem, iRegP newval, iRegPNoSp oldval) %{
 instruct getAndSetIAcq(indirect mem, iRegI newval, iRegINoSp oldval) %{
   predicate(needs_acquiring_load_exclusive(n));
   match(Set oldval (GetAndSetI mem newval));
-  ins_cost(2*VOLATILE_REF_COST);
+  ins_cost(VOLATILE_REF_COST);
   format %{ "atomic_xchgw_acq  $oldval, $newval, [$mem]" %}
   ins_encode %{
     __ atomic_xchgalw($oldval$$Register, $newval$$Register, as_Register($mem$$base));
@@ -706,7 +706,7 @@ instruct getAndSetIAcq(indirect mem, iRegI newval, iRegINoSp oldval) %{
 instruct getAndSetLAcq(indirect mem, iRegL newval, iRegLNoSp oldval) %{
   predicate(needs_acquiring_load_exclusive(n));
   match(Set oldval (GetAndSetL mem newval));
-  ins_cost(2*VOLATILE_REF_COST);
+  ins_cost(VOLATILE_REF_COST);
   format %{ "atomic_xchg_acq  $oldval, $newval, [$mem]" %}
   ins_encode %{
     __ atomic_xchgal($oldval$$Register, $newval$$Register, as_Register($mem$$base));
@@ -717,7 +717,7 @@ instruct getAndSetLAcq(indirect mem, iRegL newval, iRegLNoSp oldval) %{
 instruct getAndSetNAcq(indirect mem, iRegN newval, iRegNNoSp oldval) %{
   predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
   match(Set oldval (GetAndSetN mem newval));
-  ins_cost(2*VOLATILE_REF_COST);
+  ins_cost(VOLATILE_REF_COST);
   format %{ "atomic_xchgw_acq  $oldval, $newval, [$mem]" %}
   ins_encode %{
     __ atomic_xchgalw($oldval$$Register, $newval$$Register, as_Register($mem$$base));
@@ -728,7 +728,7 @@ instruct getAndSetNAcq(indirect mem, iRegN newval, iRegNNoSp oldval) %{
 instruct getAndSetPAcq(indirect mem, iRegP newval, iRegPNoSp oldval) %{
   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
   match(Set oldval (GetAndSetP mem newval));
-  ins_cost(2*VOLATILE_REF_COST);
+  ins_cost(VOLATILE_REF_COST);
   format %{ "atomic_xchg_acq  $oldval, $newval, [$mem]" %}
   ins_encode %{
     __ atomic_xchgal($oldval$$Register, $newval$$Register, as_Register($mem$$base));
@@ -187,7 +187,7 @@ ifelse($1$3,PAcq,INDENT(predicate(needs_acquiring_load_exclusive(n) && (n->as_Lo
 $3,Acq,INDENT(predicate(needs_acquiring_load_exclusive(n));),
 `dnl')
   match(Set oldval (GetAndSet$1 mem newval));
-  ins_cost(`'ifelse($4,Acq,,2*)VOLATILE_REF_COST);
+  ins_cost(`'ifelse($3,Acq,,2*)VOLATILE_REF_COST);
   format %{ "atomic_xchg$2`'ifelse($3,Acq,_acq)  $oldval, $newval, [$mem]" %}
   ins_encode %{
     __ atomic_xchg`'ifelse($3,Acq,al)$2($oldval$$Register, $newval$$Register, as_Register($mem$$base));
@@ -85,7 +85,7 @@ void Relocation::pd_set_call_destination(address x) {
   } else {
     MacroAssembler::pd_patch_instruction(addr(), x);
   }
-  assert(pd_call_destination(addr()) == x, "fail in reloc");
+  guarantee(pd_call_destination(addr()) == x, "fail in reloc");
 }

 void trampoline_stub_Relocation::pd_fix_owner_after_move() {
@@ -2879,7 +2879,7 @@ class StubGenerator: public StubCodeGenerator {
   // Inputs:
   //   c_rarg0 - source byte array address
   //   c_rarg1 - destination byte array address
-  //   c_rarg2 - K (key) in little endian int array
+  //   c_rarg2 - sessionKe (key) in little endian int array
   //
   address generate_aescrypt_encryptBlock() {
     __ align(CodeEntryAlignment);
@@ -2912,7 +2912,7 @@ class StubGenerator: public StubCodeGenerator {
   // Inputs:
   //   c_rarg0 - source byte array address
   //   c_rarg1 - destination byte array address
-  //   c_rarg2 - K (key) in little endian int array
+  //   c_rarg2 - sessionKd (key) in little endian int array
   //
   address generate_aescrypt_decryptBlock() {
     assert(UseAES, "need AES cryptographic extension support");
@@ -2946,7 +2946,7 @@ class StubGenerator: public StubCodeGenerator {
   // Inputs:
   //   c_rarg0 - source byte array address
   //   c_rarg1 - destination byte array address
-  //   c_rarg2 - K (key) in little endian int array
+  //   c_rarg2 - sessionKe (key) in little endian int array
   //   c_rarg3 - r vector byte array address
   //   c_rarg4 - input length
   //
@@ -3051,7 +3051,7 @@ class StubGenerator: public StubCodeGenerator {
   // Inputs:
   //   c_rarg0 - source byte array address
   //   c_rarg1 - destination byte array address
-  //   c_rarg2 - K (key) in little endian int array
+  //   c_rarg2 - sessionKd (key) in little endian int array
   //   c_rarg3 - r vector byte array address
   //   c_rarg4 - input length
   //
@@ -3178,7 +3178,7 @@ class StubGenerator: public StubCodeGenerator {
   // Inputs:
   //   c_rarg0 - source byte array address
   //   c_rarg1 - destination byte array address
-  //   c_rarg2 - K (key) in little endian int array
+  //   c_rarg2 - sessionKe (key) in little endian int array
   //   c_rarg3 - counter vector byte array address
   //   c_rarg4 - input length
   //   c_rarg5 - saved encryptedCounter start
@@ -1795,10 +1795,13 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
     return size;   // Self copy, no move.

   if (bottom_type()->isa_vect() != nullptr && ideal_reg() == Op_VecX) {
+    int src_offset = ra_->reg2offset(src_lo);
+    int dst_offset = ra_->reg2offset(dst_lo);
+    DEBUG_ONLY(int algm = MIN2(RegMask::num_registers(ideal_reg()), (int)Matcher::stack_alignment_in_slots()) * VMRegImpl::stack_slot_size);
+    assert((src_lo_rc != rc_stack) || is_aligned(src_offset, algm), "unaligned vector spill sp offset %d (src)", src_offset);
+    assert((dst_lo_rc != rc_stack) || is_aligned(dst_offset, algm), "unaligned vector spill sp offset %d (dst)", dst_offset);
     // Memory->Memory Spill.
     if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
-      int src_offset = ra_->reg2offset(src_lo);
-      int dst_offset = ra_->reg2offset(dst_lo);
       if (masm) {
         __ ld(R0, src_offset, R1_SP);
         __ std(R0, dst_offset, R1_SP);
@@ -1806,26 +1809,20 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
         __ std(R0, dst_offset+8, R1_SP);
       }
       size += 16;
+#ifndef PRODUCT
+      if (st != nullptr) {
+        st->print("%-7s [R1_SP + #%d] -> [R1_SP + #%d] \t// vector spill copy", "SPILL", src_offset, dst_offset);
+      }
+#endif // !PRODUCT
     }
     // VectorRegister->Memory Spill.
     else if (src_lo_rc == rc_vec && dst_lo_rc == rc_stack) {
       VectorSRegister Rsrc = as_VectorRegister(Matcher::_regEncode[src_lo]).to_vsr();
-      int dst_offset = ra_->reg2offset(dst_lo);
       if (PowerArchitecturePPC64 >= 9) {
-        if (is_aligned(dst_offset, 16)) {
-          if (masm) {
-            __ stxv(Rsrc, dst_offset, R1_SP); // matches storeV16_Power9
-          }
-          size += 4;
-        } else {
-          // Other alignment can be used by Vector API (VectorPayload in rearrangeOp,
-          // observed with VectorRearrangeTest.java on Power9).
-          if (masm) {
-            __ addi(R0, R1_SP, dst_offset);
-            __ stxvx(Rsrc, R0); // matches storeV16_Power9 (regarding element ordering)
-          }
-          size += 8;
-        }
+        if (masm) {
+          __ stxv(Rsrc, dst_offset, R1_SP); // matches storeV16_Power9
+        }
+        size += 4;
       } else {
         if (masm) {
           __ addi(R0, R1_SP, dst_offset);
@@ -1833,24 +1830,25 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
         }
         size += 8;
       }
+#ifndef PRODUCT
+      if (st != nullptr) {
+        if (PowerArchitecturePPC64 >= 9) {
+          st->print("%-7s %s, [R1_SP + #%d] \t// vector spill copy", "STXV", Matcher::regName[src_lo], dst_offset);
+        } else {
+          st->print("%-7s R0, R1_SP, %d \t// vector spill copy\n\t"
+                    "%-7s %s, [R0] \t// vector spill copy", "ADDI", dst_offset, "STXVD2X", Matcher::regName[src_lo]);
+        }
+      }
+#endif // !PRODUCT
     }
     // Memory->VectorRegister Spill.
     else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vec) {
       VectorSRegister Rdst = as_VectorRegister(Matcher::_regEncode[dst_lo]).to_vsr();
-      int src_offset = ra_->reg2offset(src_lo);
       if (PowerArchitecturePPC64 >= 9) {
-        if (is_aligned(src_offset, 16)) {
-          if (masm) {
-            __ lxv(Rdst, src_offset, R1_SP);
-          }
-          size += 4;
-        } else {
-          if (masm) {
-            __ addi(R0, R1_SP, src_offset);
-            __ lxvx(Rdst, R0);
-          }
-          size += 8;
-        }
+        if (masm) {
+          __ lxv(Rdst, src_offset, R1_SP);
+        }
+        size += 4;
       } else {
         if (masm) {
           __ addi(R0, R1_SP, src_offset);
@@ -1858,6 +1856,16 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
         }
         size += 8;
       }
+#ifndef PRODUCT
+      if (st != nullptr) {
+        if (PowerArchitecturePPC64 >= 9) {
+          st->print("%-7s %s, [R1_SP + #%d] \t// vector spill copy", "LXV", Matcher::regName[dst_lo], src_offset);
+        } else {
+          st->print("%-7s R0, R1_SP, %d \t// vector spill copy\n\t"
+                    "%-7s %s, [R0] \t// vector spill copy", "ADDI", src_offset, "LXVD2X", Matcher::regName[dst_lo]);
+        }
+      }
+#endif // !PRODUCT
     }
     // VectorRegister->VectorRegister.
     else if (src_lo_rc == rc_vec && dst_lo_rc == rc_vec) {
@@ -1867,6 +1875,12 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
         __ xxlor(Rdst, Rsrc, Rsrc);
       }
       size += 4;
+#ifndef PRODUCT
+      if (st != nullptr) {
+        st->print("%-7s %s, %s, %s\t// vector spill copy",
+                  "XXLOR", Matcher::regName[dst_lo], Matcher::regName[src_lo], Matcher::regName[src_lo]);
+      }
+#endif // !PRODUCT
     }
     else {
       ShouldNotReachHere(); // No VR spill.
@@ -6321,8 +6335,36 @@ instruct loadConD_Ex(regD dst, immD src) %{
 // Prefetch instructions.
 // Must be safe to execute with invalid address (cannot fault).

+// Special prefetch versions which use the dcbz instruction.
+instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{
+  match(PrefetchAllocation (AddP mem src));
+  predicate(AllocatePrefetchStyle == 3);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many with zero" %}
+  size(4);
+  ins_encode %{
+    __ dcbz($src$$Register, $mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{
+  match(PrefetchAllocation mem);
+  predicate(AllocatePrefetchStyle == 3);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "PREFETCH $mem, 2 \t// Prefetch write-many with zero" %}
+  size(4);
+  ins_encode %{
+    __ dcbz($mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
 instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
   match(PrefetchAllocation (AddP mem src));
+  predicate(AllocatePrefetchStyle != 3);
   ins_cost(MEMORY_REF_COST);

   format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many" %}
@@ -6335,6 +6377,7 @@ instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{

 instruct prefetch_alloc_no_offset(indirectMemory mem) %{
   match(PrefetchAllocation mem);
+  predicate(AllocatePrefetchStyle != 3);
   ins_cost(MEMORY_REF_COST);

   format %{ "PREFETCH $mem, 2 \t// Prefetch write-many" %}
@@ -2956,7 +2956,7 @@ class StubGenerator: public StubCodeGenerator {
   // Arguments for generated stub:
   //   R3_ARG1 - source byte array address
   //   R4_ARG2 - destination byte array address
-  //   R5_ARG3 - K (key) in little endian int array
+  //   R5_ARG3 - sessionKe (key) in little endian int array
   address generate_aescrypt_decryptBlock() {
     assert(UseAES, "need AES instructions and misaligned SSE support");
     StubId stub_id = StubId::stubgen_aescrypt_decryptBlock_id;
@@ -2463,7 +2463,7 @@ class StubGenerator: public StubCodeGenerator {
   // Inputs:
   //   c_rarg0 - source byte array address
   //   c_rarg1 - destination byte array address
-  //   c_rarg2 - K (key) in little endian int array
+  //   c_rarg2 - sessionKe (key) in little endian int array
   //
   address generate_aescrypt_encryptBlock() {
     assert(UseAESIntrinsics, "need AES instructions (Zvkned extension) support");
@@ -2493,8 +2493,8 @@ class StubGenerator: public StubCodeGenerator {
     __ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
     __ vle32_v(res, from);

-    __ mv(t2, 52);
-    __ blt(keylen, t2, L_aes128);
+    __ mv(t2, 52); // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
+    __ bltu(keylen, t2, L_aes128);
     __ beq(keylen, t2, L_aes192);
     // Else we fallthrough to the biggest case (256-bit key size)

@@ -2542,7 +2542,7 @@ class StubGenerator: public StubCodeGenerator {
   // Inputs:
   //   c_rarg0 - source byte array address
   //   c_rarg1 - destination byte array address
-  //   c_rarg2 - K (key) in little endian int array
+  //   c_rarg2 - sessionKe (key) in little endian int array
   //
   address generate_aescrypt_decryptBlock() {
     assert(UseAESIntrinsics, "need AES instructions (Zvkned extension) support");
@@ -2572,8 +2572,8 @@ class StubGenerator: public StubCodeGenerator {
     __ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
     __ vle32_v(res, from);

-    __ mv(t2, 52);
-    __ blt(keylen, t2, L_aes128);
+    __ mv(t2, 52); // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
+    __ bltu(keylen, t2, L_aes128);
     __ beq(keylen, t2, L_aes192);
     // Else we fallthrough to the biggest case (256-bit key size)
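The comment added to these branches reflects how the expanded key encodes the AES variant: the key array holds 4 * (rounds + 1) 32-bit words, so its length is 44, 52, or 60 ints for AES-128, AES-192, or AES-256. An illustrative Java mapping (a hypothetical helper, not part of the patch):

    // Illustrative only: expanded-key length in ints -> number of AES rounds.
    static int aesRounds(int expandedKeyInts) {
        return switch (expandedKeyInts) {
            case 44 -> 10; // AES-128
            case 52 -> 12; // AES-192
            case 60 -> 14; // AES-256
            default -> throw new IllegalArgumentException(
                "unexpected key length: " + expandedKeyInts);
        };
    }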
@ -2606,6 +2606,223 @@ class StubGenerator: public StubCodeGenerator {
|
||||
return start;
|
||||
}
|
||||
|
||||
// Load big-endian 128-bit from memory.
|
||||
void be_load_counter_128(Register counter_hi, Register counter_lo, Register counter) {
|
||||
__ ld(counter_lo, Address(counter, 8)); // Load 128-bits from counter
|
||||
__ ld(counter_hi, Address(counter));
|
||||
__ rev8(counter_lo, counter_lo); // Convert big-endian to little-endian
|
||||
__ rev8(counter_hi, counter_hi);
|
||||
}
|
||||
|
||||
// Little-endian 128-bit + 64-bit -> 128-bit addition.
|
||||
void add_counter_128(Register counter_hi, Register counter_lo) {
|
||||
assert_different_registers(counter_hi, counter_lo, t0);
|
||||
__ addi(counter_lo, counter_lo, 1);
|
||||
__ seqz(t0, counter_lo); // Check for result overflow
|
||||
__ add(counter_hi, counter_hi, t0); // Add 1 if overflow otherwise 0
|
||||
}
|
||||
|
||||
// Store big-endian 128-bit to memory.
|
||||
void be_store_counter_128(Register counter_hi, Register counter_lo, Register counter) {
|
||||
assert_different_registers(counter_hi, counter_lo, t0, t1);
|
||||
__ rev8(t0, counter_lo); // Convert little-endian to big-endian
|
||||
__ rev8(t1, counter_hi);
|
||||
__ sd(t0, Address(counter, 8)); // Store 128-bits to counter
|
||||
__ sd(t1, Address(counter));
|
||||
}
|
||||
|
||||
void counterMode_AESCrypt(int round, Register in, Register out, Register key, Register counter,
|
||||
Register input_len, Register saved_encrypted_ctr, Register used_ptr) {
|
||||
// Algorithm:
|
||||
//
|
||||
// generate_aes_loadkeys();
|
||||
// load_counter_128(counter_hi, counter_lo, counter);
|
||||
//
|
||||
// L_next:
|
||||
// if (used >= BLOCK_SIZE) goto L_main_loop;
|
||||
//
|
||||
// L_encrypt_next:
|
||||
// *out = *in ^ saved_encrypted_ctr[used]);
|
||||
// out++; in++; used++; len--;
|
||||
// if (len == 0) goto L_exit;
|
||||
// goto L_next;
|
||||
//
|
||||
// L_main_loop:
|
||||
// if (len == 0) goto L_exit;
|
||||
// saved_encrypted_ctr = generate_aes_encrypt(counter);
|
||||
//
|
||||
// add_counter_128(counter_hi, counter_lo);
|
||||
// be_store_counter_128(counter_hi, counter_lo, counter);
|
||||
// used = 0;
|
||||
//
|
||||
// if(len < BLOCK_SIZE) goto L_encrypt_next;
|
||||
//
|
||||
// v_in = load_16Byte(in);
|
||||
// v_out = load_16Byte(out);
|
||||
// v_saved_encrypted_ctr = load_16Byte(saved_encrypted_ctr);
|
||||
// v_out = v_in ^ v_saved_encrypted_ctr;
|
||||
// out += BLOCK_SIZE;
|
||||
// in += BLOCK_SIZE;
|
||||
// len -= BLOCK_SIZE;
|
||||
// used = BLOCK_SIZE;
|
||||
// goto L_main_loop;
|
||||
//
|
||||
//
|
||||
// L_exit:
|
||||
// store(used);
|
||||
// result = input_len
|
||||
// return result;
|
||||
|
||||
const Register used = x28;
|
||||
const Register len = x29;
|
||||
const Register counter_hi = x30;
|
||||
const Register counter_lo = x31;
|
||||
const Register block_size = t2;
|
||||
|
||||
const unsigned int BLOCK_SIZE = 16;
|
||||
|
||||
VectorRegister working_vregs[] = {
|
||||
v1, v2, v3, v4, v5, v6, v7, v8,
|
||||
v9, v10, v11, v12, v13, v14, v15
|
||||
};
|
||||
|
||||
__ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
|
||||
|
||||
__ lwu(used, Address(used_ptr));
|
||||
__ mv(len, input_len);
|
||||
__ mv(block_size, BLOCK_SIZE);
|
||||
|
||||
// load keys to working_vregs according to round
|
||||
generate_aes_loadkeys(key, working_vregs, round);
|
||||
|
||||
// 128-bit big-endian load
|
||||
be_load_counter_128(counter_hi, counter_lo, counter);
|
||||
|
||||
Label L_next, L_encrypt_next, L_main_loop, L_exit;
|
||||
// Check how much of the last saved_encrypted_ctr was used; we fall through
// to L_encrypt_next while the used value is lower than block_size
__ bind(L_next);
__ bgeu(used, block_size, L_main_loop);

// Fewer than block_size bytes remain (after L_main_loop, or from a
// partially consumed keystream block), so we encrypt them one byte at a time.
__ bind(L_encrypt_next);
__ add(t0, saved_encrypted_ctr, used);
__ lbu(t1, Address(t0));
__ lbu(t0, Address(in));
__ xorr(t1, t1, t0);
__ sb(t1, Address(out));
__ addi(in, in, 1);
__ addi(out, out, 1);
__ addi(used, used, 1);
__ subi(len, len, 1);
__ beqz(len, L_exit);
__ j(L_next);

// Compute the next saved_encrypted_ctr and encrypt the data block by block
// until less than a full block remains, as long as len is not zero
__ bind(L_main_loop);
__ beqz(len, L_exit);
__ vle32_v(v16, counter);

// encrypt counter according to round
generate_aes_encrypt(v16, working_vregs, round);

__ vse32_v(v16, saved_encrypted_ctr);

// 128-bit little-endian increment
add_counter_128(counter_hi, counter_lo);
// 128-bit big-endian store
be_store_counter_128(counter_hi, counter_lo, counter);

__ mv(used, 0);
// Check if we have a full block_size
__ bltu(len, block_size, L_encrypt_next);

// We have at least one full block to encrypt
__ vle32_v(v17, in);
__ vxor_vv(v16, v16, v17);
__ vse32_v(v16, out);
__ add(out, out, block_size);
__ add(in, in, block_size);
__ sub(len, len, block_size);
__ mv(used, block_size);
__ j(L_main_loop);

__ bind(L_exit);
__ sw(used, Address(used_ptr));
__ mv(x10, input_len);
__ leave();
__ ret();
}

// CTR AES crypt.
// Arguments:
//
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg3 - counter vector byte array address
// c_rarg4 - input length
// c_rarg5 - saved encryptedCounter start
// c_rarg6 - saved used length
//
// Output:
// x10 - input length
//
address generate_counterMode_AESCrypt() {
assert(UseZvkn, "need AES instructions (Zvkned extension) support");
assert(UseAESCTRIntrinsics, "need AES/CTR intrinsics support");
assert(UseZbb, "need basic bit manipulation (Zbb extension) support");

__ align(CodeEntryAlignment);
StubId stub_id = StubId::stubgen_counterMode_AESCrypt_id;
StubCodeMark mark(this, stub_id);

const Register in = c_rarg0;
const Register out = c_rarg1;
const Register key = c_rarg2;
const Register counter = c_rarg3;
const Register input_len = c_rarg4;
const Register saved_encrypted_ctr = c_rarg5;
const Register used_len_ptr = c_rarg6;

const Register keylen = c_rarg7; // temporary register

const address start = __ pc();
__ enter();

Label L_exit;
__ beqz(input_len, L_exit);

Label L_aes128, L_aes192;
// Compute #rounds for AES based on the length of the key array
__ lwu(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
__ mv(t0, 52); // the expanded key array length can only be {11, 13, 15} * 4 = {44, 52, 60} ints
__ bltu(keylen, t0, L_aes128);
__ beq(keylen, t0, L_aes192);
// Else we fall through to the biggest case (256-bit key size)

// Note: the following function performs crypt with key += 15*16
counterMode_AESCrypt(15, in, out, key, counter, input_len, saved_encrypted_ctr, used_len_ptr);

// Note: the following function performs crypt with key += 13*16
__ bind(L_aes192);
counterMode_AESCrypt(13, in, out, key, counter, input_len, saved_encrypted_ctr, used_len_ptr);

// Note: the following function performs crypt with key += 11*16
__ bind(L_aes128);
counterMode_AESCrypt(11, in, out, key, counter, input_len, saved_encrypted_ctr, used_len_ptr);

__ bind(L_exit);
__ mv(x10, input_len);
__ leave();
__ ret();

return start;
}

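// Editor's note: the dispatch above keys off the expanded-key array length
// read from the Java int[] (44, 52 or 60 ints). A hedged helper showing the
// same mapping; 11/13/15 are the 16-byte round-key blocks (rounds + 1).
static int round_key_blocks_sketch(int expanded_key_ints) {
  switch (expanded_key_ints) {
    case 44: return 11; // AES-128: 10 rounds + 1
    case 52: return 13; // AES-192: 12 rounds + 1
    case 60: return 15; // AES-256: 14 rounds + 1
    default: return -1; // invalid key array
  }
}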
// code for comparing 8 characters of strings with Latin1 and Utf16 encoding
void compare_string_8_x_LU(Register tmpL, Register tmpU,
Register strL, Register strU, Label& DIFF) {
@ -6826,6 +7043,10 @@ static const int64_t right_3_bits = right_n_bits(3);
StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
}

if (UseAESCTRIntrinsics) {
StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt();
}

if (UsePoly1305Intrinsics) {
StubRoutines::_poly1305_processBlocks = generate_poly1305_processBlocks();
}

@ -434,6 +434,15 @@ void VM_Version::c2_initialize() {
warning("UseAESIntrinsics enabled, but UseAES not, enabling");
UseAES = true;
}

if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics) && UseZbb) {
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
}

if (UseAESCTRIntrinsics && !UseZbb) {
warning("Cannot enable UseAESCTRIntrinsics on a CPU without UseZbb support.");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
} else {
if (UseAES) {
warning("AES instructions are not available on this CPU");
@ -443,11 +452,10 @@ void VM_Version::c2_initialize() {
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
}

if (UseAESCTRIntrinsics) {
warning("AES/CTR intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
if (UseAESCTRIntrinsics) {
warning("Cannot enable UseAESCTRIntrinsics on a CPU without UseZvkn support.");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
}
}


@ -73,7 +73,7 @@
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(compiler, 109000 WINDOWS_ONLY(+2000)) \
do_arch_blob(compiler, 120000 WINDOWS_ONLY(+2000)) \
do_stub(compiler, vector_float_sign_mask) \
do_arch_entry(x86, compiler, vector_float_sign_mask, \
vector_float_sign_mask, vector_float_sign_mask) \

@ -480,7 +480,7 @@ address StubGenerator::generate_counterMode_VectorAESCrypt() {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
// c_rarg3 - counter vector byte array address
// Linux
// c_rarg4 - input length
@ -1063,7 +1063,7 @@ address StubGenerator::generate_cipherBlockChaining_decryptVectorAESCrypt() {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
//
address StubGenerator::generate_aescrypt_encryptBlock() {
assert(UseAES, "need AES instructions and misaligned SSE support");
@ -1158,7 +1158,7 @@ address StubGenerator::generate_aescrypt_encryptBlock() {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKd (key) in little endian int array
//
address StubGenerator::generate_aescrypt_decryptBlock() {
assert(UseAES, "need AES instructions and misaligned SSE support");
@ -1255,7 +1255,7 @@ address StubGenerator::generate_aescrypt_decryptBlock() {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKe (key) in little endian int array
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//
@ -1407,7 +1407,7 @@ address StubGenerator::generate_cipherBlockChaining_encryptAESCrypt() {
// Inputs:
// c_rarg0 - source byte array address
// c_rarg1 - destination byte array address
// c_rarg2 - K (key) in little endian int array
// c_rarg2 - sessionKd (key) in little endian int array
// c_rarg3 - r vector byte array address
// c_rarg4 - input length
//

@ -3386,6 +3386,11 @@ bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
return false;
}
break;
case Op_VectorBlend:
if (UseAVX == 0 && size_in_bits < 128) {
return false;
}
break;
case Op_VectorTest:
if (UseSSE < 4) {
return false; // Implementation limitation

@ -4305,7 +4305,7 @@ OSReturn os::get_native_priority(const Thread* const thread,
// For reference, please, see IEEE Std 1003.1-2004:
// http://www.unix.org/single_unix_specification

jlong os::Linux::total_thread_cpu_time(clockid_t clockid) {
jlong os::Linux::thread_cpu_time(clockid_t clockid) {
struct timespec tp;
int status = clock_gettime(clockid, &tp);
assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
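// Editor's note: a hedged sketch of the conversion performed by
// os::Linux::thread_cpu_time() above -- clock_gettime() on a thread CPU-time
// clock yields seconds plus nanoseconds, folded into one nanosecond count.
#include <time.h>
#include <stdint.h>

static int64_t thread_cpu_time_nanos_sketch(clockid_t clockid) {
  struct timespec tp;
  if (clock_gettime(clockid, &tp) != 0) {
    return -1;                                   // error
  }
  return (int64_t)tp.tv_sec * 1000000000 + tp.tv_nsec;
}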
@ -4960,20 +4960,42 @@ int os::open(const char *path, int oflag, int mode) {
return fd;
}

// Since kernel v2.6.12 the Linux ABI has had support for encoding the clock
// types in the last three bits. Bit 2 indicates whether a CPU clock refers to a
// thread or a process. Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or
// FD=3. The clock CPUCLOCK_VIRT (0b001) reports the thread's consumed user
// time. POSIX-compliant implementations of pthread_getcpuclockid return the
// clock CPUCLOCK_SCHED (0b010), which reports the thread's consumed system+user
// time (as mandated by the POSIX standard POSIX.1-2024/IEEE Std 1003.1-2024
// §3.90).
static bool get_thread_clockid(Thread* thread, clockid_t* clockid, bool total) {
constexpr clockid_t CLOCK_TYPE_MASK = 3;
constexpr clockid_t CPUCLOCK_VIRT = 1;

int rc = pthread_getcpuclockid(thread->osthread()->pthread_id(), clockid);
if (rc != 0) {
// It's possible to encounter a terminated native thread that failed
// to detach itself from the VM - which should result in ESRCH.
assert_status(rc == ESRCH, rc, "pthread_getcpuclockid failed");
return false;
}

if (!total) {
clockid_t clockid_tmp = *clockid;
clockid_tmp = (clockid_tmp & ~CLOCK_TYPE_MASK) | CPUCLOCK_VIRT;
*clockid = clockid_tmp;
}

return true;
}

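// Editor's note: a hedged usage sketch for get_thread_clockid() above --
// pthread_getcpuclockid() hands back a CPUCLOCK_SCHED (system+user) clock,
// and rewriting the two type bits down to CPUCLOCK_VIRT turns it into a
// user-time-only clock that clock_gettime() can sample directly.
#include <pthread.h>
#include <time.h>

static int user_cpu_clock_sketch(pthread_t thread, clockid_t* out) {
  clockid_t cid;
  int rc = pthread_getcpuclockid(thread, &cid);
  if (rc != 0) {
    return rc;                       // e.g. ESRCH for a terminated thread
  }
  *out = (cid & ~(clockid_t)3) | 1;  // type bits: SCHED(2) -> VIRT(1)
  return 0;
}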
static jlong user_thread_cpu_time(Thread *thread);

static jlong total_thread_cpu_time(Thread *thread) {
clockid_t clockid;
int rc = pthread_getcpuclockid(thread->osthread()->pthread_id(),
&clockid);
if (rc == 0) {
return os::Linux::total_thread_cpu_time(clockid);
} else {
// It's possible to encounter a terminated native thread that failed
// to detach itself from the VM - which should result in ESRCH.
assert_status(rc == ESRCH, rc, "pthread_getcpuclockid failed");
return -1;
}
clockid_t clockid;
bool success = get_thread_clockid(thread, &clockid, true);

return success ? os::Linux::thread_cpu_time(clockid) : -1;
}

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
@ -4984,7 +5006,7 @@ static jlong total_thread_cpu_time(Thread *thread) {
// the fast estimate available on the platform.

jlong os::current_thread_cpu_time() {
return os::Linux::total_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
return os::Linux::thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
}

jlong os::thread_cpu_time(Thread* thread) {
@ -4993,7 +5015,7 @@ jlong os::thread_cpu_time(Thread* thread) {

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
if (user_sys_cpu_time) {
return os::Linux::total_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
return os::Linux::thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
} else {
return user_thread_cpu_time(Thread::current());
}
@ -5007,46 +5029,11 @@ jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
}
}

// -1 on error.
static jlong user_thread_cpu_time(Thread *thread) {
pid_t tid = thread->osthread()->thread_id();
char *s;
char stat[2048];
size_t statlen;
char proc_name[64];
int count;
long sys_time, user_time;
char cdummy;
int idummy;
long ldummy;
FILE *fp;
clockid_t clockid;
bool success = get_thread_clockid(thread, &clockid, false);

os::snprintf_checked(proc_name, 64, "/proc/self/task/%d/stat", tid);
fp = os::fopen(proc_name, "r");
if (fp == nullptr) return -1;
statlen = fread(stat, 1, 2047, fp);
stat[statlen] = '\0';
fclose(fp);

// Skip pid and the command string. Note that we could be dealing with
// weird command names, e.g. user could decide to rename java launcher
// to "java 1.4.2 :)", then the stat file would look like
// 1234 (java 1.4.2 :)) R ... ...
// We don't really need to know the command string, just find the last
// occurrence of ")" and then start parsing from there. See bug 4726580.
s = strrchr(stat, ')');
if (s == nullptr) return -1;

// Skip blank chars
do { s++; } while (s && isspace((unsigned char) *s));

count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
&cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
&ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
&user_time, &sys_time);
if (count != 13) return -1;

return (jlong)user_time * (1000000000 / os::Posix::clock_tics_per_second());
return success ? os::Linux::thread_cpu_time(clockid) : -1;
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {

@ -142,7 +142,7 @@ class os::Linux {
static bool manually_expand_stack(JavaThread * t, address addr);
static void expand_stack_to(address bottom);

static jlong total_thread_cpu_time(clockid_t clockid);
static jlong thread_cpu_time(clockid_t clockid);

static jlong sendfile(int out_fd, int in_fd, jlong* offset, jlong count);


@ -90,7 +90,7 @@ typedef CodeBuffer::csize_t csize_t; // file-local definition

// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(*this))) {
CodeBuffer::CodeBuffer(const CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(*this))) {
// Provide code buffer with meaningful name
initialize_misc(blob->name());
initialize(blob->content_begin(), blob->content_size());

@ -672,7 +672,7 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
}

// (2) CodeBuffer referring to pre-allocated CodeBlob.
CodeBuffer(CodeBlob* blob);
CodeBuffer(const CodeBlob* blob);

// (3) code buffer allocating codeBlob memory for code & relocation
// info but with lazy initialization. The name must be something

@ -86,9 +86,9 @@ void AOTMappedHeapWriter::init() {
if (CDSConfig::is_dumping_heap()) {
Universe::heap()->collect(GCCause::_java_lang_system_gc);

_buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
_buffer_offset_to_source_obj_table = new (mtClassShared) BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
_dumped_interned_strings = new (mtClass)DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
_fillers = new FillersTable();
_fillers = new (mtClassShared) FillersTable();
_requested_bottom = nullptr;
_requested_top = nullptr;


@ -184,6 +184,7 @@ static size_t archive_object_size(oopDesc* archive_object) {
oop AOTStreamedHeapLoader::allocate_object(oopDesc* archive_object, markWord mark, size_t size, TRAPS) {
assert(!archive_object->is_stackChunk(), "no such objects are archived");

NoJvmtiEventsMark njem;
oop heap_object;

Klass* klass = archive_object->klass();

@ -63,7 +63,7 @@ void AOTThread::initialize() {
// This is important because this thread runs before JVMTI monitors are set up appropriately.
// Therefore, callbacks would not work as intended. JVMTI has no business peeking at how we
// materialize primordial objects from the AOT cache.
thread->toggle_is_disable_suspend();
thread->disable_jvmti_events();
#endif

JavaThread::vm_exit_on_osthread_failure(thread);

@ -357,7 +357,7 @@ InstanceKlass* LambdaProxyClassDictionary::load_and_init_lambda_proxy_class(Inst
InstanceKlass* nest_host = caller_ik->nest_host(THREAD);
assert(nest_host == shared_nest_host, "mismatched nest host");

EventClassLoad class_load_start_event;
EventClassLoad class_load_event;

// Add to class hierarchy, and do possible deoptimizations.
lambda_ik->add_to_hierarchy(THREAD);
@ -368,8 +368,8 @@ InstanceKlass* LambdaProxyClassDictionary::load_and_init_lambda_proxy_class(Inst
if (JvmtiExport::should_post_class_load()) {
JvmtiExport::post_class_load(THREAD, lambda_ik);
}
if (class_load_start_event.should_commit()) {
SystemDictionary::post_class_load_event(&class_load_start_event, lambda_ik, ClassLoaderData::class_loader_data(class_loader()));
if (class_load_event.should_commit()) {
JFR_ONLY(SystemDictionary::post_class_load_event(&class_load_event, lambda_ik, ClassLoaderData::class_loader_data(class_loader()));)
}

lambda_ik->initialize(CHECK_NULL);

@ -149,6 +149,10 @@ public:
assert(is_loaded(), "must be loaded");
return _flags;
}

// Fetch Klass::access_flags.
jint access_flags() { return flags().as_int(); }

bool has_finalizer() {
assert(is_loaded(), "must be loaded");
return _has_finalizer; }

@ -216,15 +216,6 @@ jint ciKlass::modifier_flags() {
)
}

// ------------------------------------------------------------------
// ciKlass::access_flags
jint ciKlass::access_flags() {
assert(is_loaded(), "not loaded");
GUARDED_VM_ENTRY(
return get_Klass()->access_flags().as_unsigned_short();
)
}

// ------------------------------------------------------------------
// ciKlass::misc_flags
klass_flags_t ciKlass::misc_flags() {

@ -122,9 +122,6 @@ public:
// Fetch modifier flags.
jint modifier_flags();

// Fetch Klass::access_flags.
jint access_flags();

// Fetch Klass::misc_flags.
klass_flags_t misc_flags();


@ -89,9 +89,6 @@
#if INCLUDE_CDS
#include "classfile/systemDictionaryShared.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/support/jfrTraceIdExtension.hpp"
#endif

// We generally try to create the oops directly when parsing, rather than
// allocating temporary data structures and copying the bytes twice. A
@ -157,6 +154,8 @@

#define JAVA_26_VERSION 70

#define JAVA_27_VERSION 71

void ClassFileParser::set_class_bad_constant_seen(short bad_constant) {
assert((bad_constant == JVM_CONSTANT_Module ||
bad_constant == JVM_CONSTANT_Package) && _major_version >= JAVA_9_VERSION,
@ -5272,8 +5271,6 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik,
}
}

JFR_ONLY(INIT_ID(ik);)

// If we reach here, all is well.
// Now remove the InstanceKlass* from the _klass_to_deallocate field
// in order for it to not be destroyed in the ClassFileParser destructor.

@ -500,6 +500,8 @@ class ClassFileParser {

InstanceKlass* create_instance_klass(bool cf_changed_in_CFLH, const ClassInstanceInfo& cl_inst_info, TRAPS);

const ClassFileStream& stream() const { return *_stream; }

const ClassFileStream* clone_stream() const;

void set_klass_to_deallocate(InstanceKlass* klass);

@ -439,7 +439,7 @@ class MethodFamily : public ResourceObj {
StreamIndentor si(str, indent * 2);
str->print("Selected method: ");
print_method(str, _selected_target);
Klass* method_holder = _selected_target->method_holder();
InstanceKlass* method_holder = _selected_target->method_holder();
if (!method_holder->is_interface()) {
str->print(" : in superclass");
}

@ -1091,10 +1091,6 @@ void java_lang_Class::allocate_mirror(Klass* k, bool is_scratch, Handle protecti
// Set the modifiers flag.
u2 computed_modifiers = k->compute_modifier_flags();
set_modifiers(mirror(), computed_modifiers);
// Set the raw access_flags, this is used by reflection instead of modifier flags.
// The Java code for array classes gets the access flags from the element type.
assert(!k->is_array_klass() || k->access_flags().as_unsigned_short() == 0, "access flags are not set for arrays");
set_raw_access_flags(mirror(), k->access_flags().as_unsigned_short());

InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
assert(oop_size(mirror()) == mk->instance_size(k), "should have been set");
@ -1103,6 +1099,8 @@ void java_lang_Class::allocate_mirror(Klass* k, bool is_scratch, Handle protecti

// It might also have a component mirror. This mirror must already exist.
if (k->is_array_klass()) {
// The Java code for array classes gets the access flags from the element type.
set_raw_access_flags(mirror(), 0);
if (k->is_typeArray_klass()) {
BasicType type = TypeArrayKlass::cast(k)->element_type();
if (is_scratch) {
@ -1129,6 +1127,8 @@ void java_lang_Class::allocate_mirror(Klass* k, bool is_scratch, Handle protecti
// and java_mirror in this klass.
} else {
assert(k->is_instance_klass(), "Must be");
// Set the raw access_flags, this is used by reflection instead of modifier flags.
set_raw_access_flags(mirror(), InstanceKlass::cast(k)->access_flags().as_unsigned_short());
initialize_mirror_fields(InstanceKlass::cast(k), mirror, protection_domain, classData, THREAD);
if (HAS_PENDING_EXCEPTION) {
// If any of the fields throws an exception like OOM remove the klass field
@ -1684,8 +1684,8 @@ int java_lang_Thread::_name_offset;
int java_lang_Thread::_contextClassLoader_offset;
int java_lang_Thread::_eetop_offset;
int java_lang_Thread::_jvmti_thread_state_offset;
int java_lang_Thread::_jvmti_VTMS_transition_disable_count_offset;
int java_lang_Thread::_jvmti_is_in_VTMS_transition_offset;
int java_lang_Thread::_vthread_transition_disable_count_offset;
int java_lang_Thread::_is_in_vthread_transition_offset;
int java_lang_Thread::_interrupted_offset;
int java_lang_Thread::_interruptLock_offset;
int java_lang_Thread::_tid_offset;
@ -1745,34 +1745,34 @@ void java_lang_Thread::set_jvmti_thread_state(oop java_thread, JvmtiThreadState*
java_thread->address_field_put(_jvmti_thread_state_offset, (address)state);
}

int java_lang_Thread::VTMS_transition_disable_count(oop java_thread) {
return java_thread->int_field(_jvmti_VTMS_transition_disable_count_offset);
int java_lang_Thread::vthread_transition_disable_count(oop java_thread) {
jint* addr = java_thread->field_addr<jint>(_vthread_transition_disable_count_offset);
return AtomicAccess::load(addr);
}

void java_lang_Thread::inc_VTMS_transition_disable_count(oop java_thread) {
assert(JvmtiVTMSTransition_lock->owned_by_self(), "Must be locked");
int val = VTMS_transition_disable_count(java_thread);
java_thread->int_field_put(_jvmti_VTMS_transition_disable_count_offset, val + 1);
void java_lang_Thread::inc_vthread_transition_disable_count(oop java_thread) {
assert(VThreadTransition_lock->owned_by_self(), "Must be locked");
jint* addr = java_thread->field_addr<jint>(_vthread_transition_disable_count_offset);
int val = AtomicAccess::load(addr);
AtomicAccess::store(addr, val + 1);
}

void java_lang_Thread::dec_VTMS_transition_disable_count(oop java_thread) {
assert(JvmtiVTMSTransition_lock->owned_by_self(), "Must be locked");
int val = VTMS_transition_disable_count(java_thread);
assert(val > 0, "VTMS_transition_disable_count should never be negative");
java_thread->int_field_put(_jvmti_VTMS_transition_disable_count_offset, val - 1);
void java_lang_Thread::dec_vthread_transition_disable_count(oop java_thread) {
assert(VThreadTransition_lock->owned_by_self(), "Must be locked");
jint* addr = java_thread->field_addr<jint>(_vthread_transition_disable_count_offset);
int val = AtomicAccess::load(addr);
AtomicAccess::store(addr, val - 1);
}

bool java_lang_Thread::is_in_VTMS_transition(oop java_thread) {
return java_thread->bool_field_volatile(_jvmti_is_in_VTMS_transition_offset);
bool java_lang_Thread::is_in_vthread_transition(oop java_thread) {
jboolean* addr = java_thread->field_addr<jboolean>(_is_in_vthread_transition_offset);
return AtomicAccess::load(addr);
}

void java_lang_Thread::set_is_in_VTMS_transition(oop java_thread, bool val) {
assert(is_in_VTMS_transition(java_thread) != val, "already %s transition", val ? "inside" : "outside");
java_thread->bool_field_put_volatile(_jvmti_is_in_VTMS_transition_offset, val);
}

int java_lang_Thread::is_in_VTMS_transition_offset() {
return _jvmti_is_in_VTMS_transition_offset;
void java_lang_Thread::set_is_in_vthread_transition(oop java_thread, bool val) {
assert(is_in_vthread_transition(java_thread) != val, "already %s transition", val ? "inside" : "outside");
jboolean* addr = java_thread->field_addr<jboolean>(_is_in_vthread_transition_offset);
AtomicAccess::store(addr, (jboolean)val);
}

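// Editor's note: the accessors above all follow one pattern -- take the
// address of the injected field with field_addr<T>() and go through
// AtomicAccess for the load and store so concurrent readers see a
// consistent value while the caller holds the transition lock. A condensed
// sketch of that shape (HotSpot-internal API, shown for orientation only):
//
//   jint* addr = java_thread->field_addr<jint>(_some_counter_offset);
//   jint  old  = AtomicAccess::load(addr);   // atomic read
//   AtomicAccess::store(addr, old + 1);      // atomic write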
void java_lang_Thread::clear_scopedValueBindings(oop java_thread) {

@ -375,8 +375,8 @@ class java_lang_Class : AllStatic {

#define THREAD_INJECTED_FIELDS(macro) \
macro(java_lang_Thread, jvmti_thread_state, intptr_signature, false) \
macro(java_lang_Thread, jvmti_VTMS_transition_disable_count, int_signature, false) \
macro(java_lang_Thread, jvmti_is_in_VTMS_transition, bool_signature, false) \
macro(java_lang_Thread, vthread_transition_disable_count, int_signature, false) \
macro(java_lang_Thread, is_in_vthread_transition, bool_signature, false) \
JFR_ONLY(macro(java_lang_Thread, jfr_epoch, short_signature, false))

class java_lang_Thread : AllStatic {
@ -390,8 +390,8 @@ class java_lang_Thread : AllStatic {
static int _contextClassLoader_offset;
static int _eetop_offset;
static int _jvmti_thread_state_offset;
static int _jvmti_VTMS_transition_disable_count_offset;
static int _jvmti_is_in_VTMS_transition_offset;
static int _vthread_transition_disable_count_offset;
static int _is_in_vthread_transition_offset;
static int _interrupted_offset;
static int _interruptLock_offset;
static int _tid_offset;
@ -444,12 +444,15 @@ class java_lang_Thread : AllStatic {

static JvmtiThreadState* jvmti_thread_state(oop java_thread);
static void set_jvmti_thread_state(oop java_thread, JvmtiThreadState* state);
static int VTMS_transition_disable_count(oop java_thread);
static void inc_VTMS_transition_disable_count(oop java_thread);
static void dec_VTMS_transition_disable_count(oop java_thread);
static bool is_in_VTMS_transition(oop java_thread);
static void set_is_in_VTMS_transition(oop java_thread, bool val);
static int is_in_VTMS_transition_offset();

static int vthread_transition_disable_count(oop java_thread);
static void inc_vthread_transition_disable_count(oop java_thread);
static void dec_vthread_transition_disable_count(oop java_thread);
static int vthread_transition_disable_count_offset() { return _vthread_transition_disable_count_offset; }

static bool is_in_vthread_transition(oop java_thread);
static void set_is_in_vthread_transition(oop java_thread, bool val);
static int is_in_vthread_transition_offset() { return _is_in_vthread_transition_offset; }

// Clear all scoped value bindings on error
static void clear_scopedValueBindings(oop java_thread);

@ -37,7 +37,7 @@
#include "runtime/handles.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JFR
#include "jfr/support/jfrKlassExtension.hpp"
#include "jfr/jfr.hpp"
#endif


@ -99,6 +99,9 @@ InstanceKlass* KlassFactory::check_shared_class_file_load_hook(
new_ik->set_classpath_index(path_index);
}


JFR_ONLY(Jfr::on_klass_creation(new_ik, parser, THREAD);)

return new_ik;
}
}
@ -213,7 +216,7 @@ InstanceKlass* KlassFactory::create_from_stream(ClassFileStream* stream,
result->set_cached_class_file(cached_class_file);
}

JFR_ONLY(ON_KLASS_CREATION(result, parser, THREAD);)
JFR_ONLY(Jfr::on_klass_creation(result, parser, THREAD);)

#if INCLUDE_CDS
if (CDSConfig::is_dumping_archive()) {

@ -560,15 +560,6 @@ static InstanceKlass* handle_parallel_loading(JavaThread* current,
return nullptr;
}

void SystemDictionary::post_class_load_event(EventClassLoad* event, const InstanceKlass* k, const ClassLoaderData* init_cld) {
assert(event != nullptr, "invariant");
assert(k != nullptr, "invariant");
event->set_loadedClass(k);
event->set_definingClassLoader(k->class_loader_data());
event->set_initiatingClassLoader(init_cld);
event->commit();
}

// SystemDictionary::resolve_instance_class_or_null is the main function for class name resolution.
// After checking if the InstanceKlass already exists, it checks for ClassCircularityError and
// whether the thread must wait for loading in parallel. It eventually calls load_instance_class,
@ -582,7 +573,7 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
assert(name != nullptr && !Signature::is_array(name) &&
!Signature::has_envelope(name), "invalid class name: %s", name == nullptr ? "nullptr" : name->as_C_string());

EventClassLoad class_load_start_event;
EventClassLoad class_load_event;

HandleMark hm(THREAD);

@ -713,8 +704,8 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
return nullptr;
}

if (class_load_start_event.should_commit()) {
post_class_load_event(&class_load_start_event, loaded_class, loader_data);
if (class_load_event.should_commit()) {
JFR_ONLY(post_class_load_event(&class_load_event, loaded_class, loader_data);)
}

// Make sure we have the right class in the dictionary
@ -789,7 +780,7 @@ InstanceKlass* SystemDictionary::resolve_hidden_class_from_stream(
const ClassLoadInfo& cl_info,
TRAPS) {

EventClassLoad class_load_start_event;
EventClassLoad class_load_event;
ClassLoaderData* loader_data;

// - for hidden classes that are not strong: create a new CLD that has a class holder and
@ -819,15 +810,16 @@ InstanceKlass* SystemDictionary::resolve_hidden_class_from_stream(
k->add_to_hierarchy(THREAD);
// But, do not add to dictionary.

if (class_load_event.should_commit()) {
JFR_ONLY(post_class_load_event(&class_load_event, k, loader_data);)
}

k->link_class(CHECK_NULL);

// notify jvmti
if (JvmtiExport::should_post_class_load()) {
JvmtiExport::post_class_load(THREAD, k);
}
if (class_load_start_event.should_commit()) {
post_class_load_event(&class_load_start_event, k, loader_data);
}

return k;
}
@ -1182,6 +1174,8 @@ void SystemDictionary::preload_class(Handle class_loader, InstanceKlass* ik, TRA
}
#endif

EventClassLoad class_load_event;

ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
oop java_mirror = ik->archived_java_mirror();
precond(java_mirror != nullptr);
@ -1203,11 +1197,26 @@ void SystemDictionary::preload_class(Handle class_loader, InstanceKlass* ik, TRA
update_dictionary(THREAD, ik, loader_data);
}

if (class_load_event.should_commit()) {
JFR_ONLY(post_class_load_event(&class_load_event, ik, loader_data);)
}

assert(ik->is_loaded(), "Must be in at least loaded state");
}

#endif // INCLUDE_CDS

#if INCLUDE_JFR
void SystemDictionary::post_class_load_event(EventClassLoad* event, const InstanceKlass* k, const ClassLoaderData* init_cld) {
assert(event != nullptr, "invariant");
assert(k != nullptr, "invariant");
event->set_loadedClass(k);
event->set_definingClassLoader(k->class_loader_data());
event->set_initiatingClassLoader(init_cld);
event->commit();
}
#endif // INCLUDE_JFR

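// Editor's note: the call sites above share one JFR shape -- construct the
// event object up front, do the work, and only populate and commit it when
// the event is actually enabled; JFR_ONLY() compiles the call away entirely
// in builds without JFR. A condensed sketch of that shape:
//
//   EventClassLoad class_load_event;
//   ... load the class ...
//   if (class_load_event.should_commit()) {
//     JFR_ONLY(post_class_load_event(&class_load_event, loaded_class, loader_data);)
//   }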
InstanceKlass* SystemDictionary::load_instance_class_impl(Symbol* class_name, Handle class_loader, TRAPS) {

if (class_loader.is_null()) {
@ -1380,15 +1389,6 @@ InstanceKlass* SystemDictionary::load_instance_class(Symbol* name,
return loaded_class;
}

static void post_class_define_event(InstanceKlass* k, const ClassLoaderData* def_cld) {
EventClassDefine event;
if (event.should_commit()) {
event.set_definedClass(k);
event.set_definingClassLoader(def_cld);
event.commit();
}
}

void SystemDictionary::define_instance_class(InstanceKlass* k, Handle class_loader, TRAPS) {

ClassLoaderData* loader_data = k->class_loader_data();
@ -1440,7 +1440,6 @@ void SystemDictionary::define_instance_class(InstanceKlass* k, Handle class_load
if (JvmtiExport::should_post_class_load()) {
JvmtiExport::post_class_load(THREAD, k);
}
post_class_define_event(k, loader_data);
}

// Support parallel classloading
@ -2173,9 +2172,10 @@ static bool is_always_visible_class(oop mirror) {
return true; // primitive array
}
assert(klass->is_instance_klass(), "%s", klass->external_name());
return klass->is_public() &&
(InstanceKlass::cast(klass)->is_same_class_package(vmClasses::Object_klass()) || // java.lang
InstanceKlass::cast(klass)->is_same_class_package(vmClasses::MethodHandle_klass())); // java.lang.invoke
InstanceKlass* ik = InstanceKlass::cast(klass);
return ik->is_public() &&
(ik->is_same_class_package(vmClasses::Object_klass()) || // java.lang
ik->is_same_class_package(vmClasses::MethodHandle_klass())); // java.lang.invoke
}

// Find or construct the Java mirror (java.lang.Class instance) for

@ -326,11 +326,10 @@ private:
static void restore_archived_method_handle_intrinsics_impl(TRAPS) NOT_CDS_RETURN;

protected:
// Used by AOTLinkedClassBulkLoader, LambdaProxyClassDictionary, and SystemDictionaryShared
// Used by AOTLinkedClassBulkLoader, LambdaProxyClassDictionary, VMClasses and SystemDictionaryShared

static bool add_loader_constraint(Symbol* name, Klass* klass_being_linked, Handle loader1,
Handle loader2);
static void post_class_load_event(EventClassLoad* event, const InstanceKlass* k, const ClassLoaderData* init_cld);
static InstanceKlass* load_shared_class(InstanceKlass* ik,
Handle class_loader,
Handle protection_domain,
@ -342,6 +341,9 @@ protected:
static InstanceKlass* find_or_define_instance_class(Symbol* class_name,
Handle class_loader,
InstanceKlass* k, TRAPS);
JFR_ONLY(static void post_class_load_event(EventClassLoad* event,
const InstanceKlass* k,
const ClassLoaderData* init_cld);)

public:
static bool is_system_class_loader(oop class_loader);

@ -35,6 +35,7 @@
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
@ -240,6 +241,8 @@ void vmClasses::resolve_shared_class(InstanceKlass* klass, ClassLoaderData* load
return;
}

EventClassLoad class_load_event;

// add super and interfaces first
InstanceKlass* super = klass->super();
if (super != nullptr && super->class_loader_data() == nullptr) {
@ -261,6 +264,10 @@ void vmClasses::resolve_shared_class(InstanceKlass* klass, ClassLoaderData* load
dictionary->add_klass(THREAD, klass->name(), klass);
klass->add_to_hierarchy(THREAD);
assert(klass->is_loaded(), "Must be in at least loaded state");

if (class_load_event.should_commit()) {
JFR_ONLY(SystemDictionary::post_class_load_event(&class_load_event, klass, loader_data);)
}
}

#endif // INCLUDE_CDS

@ -649,10 +649,10 @@ class methodHandle;
do_intrinsic(_Continuation_unpin, jdk_internal_vm_Continuation, unpin_name, void_method_signature, F_SN) \
\
/* java/lang/VirtualThread */ \
do_intrinsic(_notifyJvmtiVThreadStart, java_lang_VirtualThread, notifyJvmtiStart_name, void_method_signature, F_RN) \
do_intrinsic(_notifyJvmtiVThreadEnd, java_lang_VirtualThread, notifyJvmtiEnd_name, void_method_signature, F_RN) \
do_intrinsic(_notifyJvmtiVThreadMount, java_lang_VirtualThread, notifyJvmtiMount_name, bool_void_signature, F_RN) \
do_intrinsic(_notifyJvmtiVThreadUnmount, java_lang_VirtualThread, notifyJvmtiUnmount_name, bool_void_signature, F_RN) \
do_intrinsic(_vthreadEndFirstTransition, java_lang_VirtualThread, endFirstTransition_name, void_method_signature, F_RN) \
do_intrinsic(_vthreadStartFinalTransition, java_lang_VirtualThread, startFinalTransition_name, void_method_signature, F_RN) \
do_intrinsic(_vthreadStartTransition, java_lang_VirtualThread, startTransition_name, bool_void_signature, F_RN) \
do_intrinsic(_vthreadEndTransition, java_lang_VirtualThread, endTransition_name, bool_void_signature, F_RN) \
do_intrinsic(_notifyJvmtiVThreadDisableSuspend, java_lang_VirtualThread, notifyJvmtiDisableSuspend_name, bool_void_signature, F_SN) \
\
/* support for UnsafeConstants */ \

@ -395,10 +395,10 @@ class SerializeClosure;
template(run_finalization_name, "runFinalization") \
template(dispatchUncaughtException_name, "dispatchUncaughtException") \
template(loadClass_name, "loadClass") \
template(notifyJvmtiStart_name, "notifyJvmtiStart") \
template(notifyJvmtiEnd_name, "notifyJvmtiEnd") \
template(notifyJvmtiMount_name, "notifyJvmtiMount") \
template(notifyJvmtiUnmount_name, "notifyJvmtiUnmount") \
template(startTransition_name, "startTransition") \
template(endTransition_name, "endTransition") \
template(startFinalTransition_name, "startFinalTransition") \
template(endFirstTransition_name, "endFirstTransition") \
template(notifyJvmtiDisableSuspend_name, "notifyJvmtiDisableSuspend") \
template(doYield_name, "doYield") \
template(enter_name, "enter") \
@ -497,8 +497,8 @@ class SerializeClosure;
template(java_lang_Boolean_signature, "Ljava/lang/Boolean;") \
template(url_code_signer_array_void_signature, "(Ljava/net/URL;[Ljava/security/CodeSigner;)V") \
template(jvmti_thread_state_name, "jvmti_thread_state") \
template(jvmti_VTMS_transition_disable_count_name, "jvmti_VTMS_transition_disable_count") \
template(jvmti_is_in_VTMS_transition_name, "jvmti_is_in_VTMS_transition") \
template(vthread_transition_disable_count_name, "vthread_transition_disable_count") \
template(is_in_vthread_transition_name, "is_in_vthread_transition") \
template(module_entry_name, "module_entry") \
template(resolved_references_name, "<resolved_references>") \
template(init_lock_name, "<init_lock>") \

@ -1346,18 +1346,16 @@ void AOTCodeAddressTable::init_extrs() {
SET_ADDRESS(_extrs, OptoRuntime::multianewarray4_C);
SET_ADDRESS(_extrs, OptoRuntime::multianewarray5_C);
SET_ADDRESS(_extrs, OptoRuntime::multianewarrayN_C);
#if INCLUDE_JVMTI
SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_start);
SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_end);
SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_mount);
SET_ADDRESS(_extrs, SharedRuntime::notify_jvmti_vthread_unmount);
#endif
SET_ADDRESS(_extrs, OptoRuntime::complete_monitor_locking_C);
SET_ADDRESS(_extrs, OptoRuntime::monitor_notify_C);
SET_ADDRESS(_extrs, OptoRuntime::monitor_notifyAll_C);
SET_ADDRESS(_extrs, OptoRuntime::rethrow_C);
SET_ADDRESS(_extrs, OptoRuntime::slow_arraycopy_C);
SET_ADDRESS(_extrs, OptoRuntime::register_finalizer_C);
SET_ADDRESS(_extrs, OptoRuntime::vthread_end_first_transition_C);
SET_ADDRESS(_extrs, OptoRuntime::vthread_start_final_transition_C);
SET_ADDRESS(_extrs, OptoRuntime::vthread_start_transition_C);
SET_ADDRESS(_extrs, OptoRuntime::vthread_end_transition_C);
#if defined(AARCH64)
SET_ADDRESS(_extrs, JavaThread::verify_cross_modify_fence_failure);
#endif // AARCH64

@ -1498,6 +1498,40 @@ nmethod::nmethod(const nmethod &nm) : CodeBlob(nm._name, nm._kind, nm._size, nm.
// - OOP table
memcpy(consts_begin(), nm.consts_begin(), nm.data_end() - nm.consts_begin());

// Fix relocation
RelocIterator iter(this);
CodeBuffer src(&nm);
CodeBuffer dst(this);
while (iter.next()) {
#ifdef USE_TRAMPOLINE_STUB_FIX_OWNER
// After an nmethod is moved, some direct call sites may end up out of range.
// CallRelocation::fix_relocation_after_move() assumes the target is always
// reachable and does not check branch range. Calling it without range checks
// could cause us to write an offset too large for the instruction.
//
// If a call site has a trampoline, we skip the normal call relocation. The
// associated trampoline_stub_Relocation will handle the call and the
// trampoline, including range checks and updating the branch as needed.
//
// If no trampoline exists, we can assume the call target is always
// reachable and therefore within direct branch range, so calling
// CallRelocation::fix_relocation_after_move() is safe.
if (iter.reloc()->is_call()) {
address trampoline = trampoline_stub_Relocation::get_trampoline_for(iter.reloc()->addr(), this);
if (trampoline != nullptr) {
continue;
}
}
#endif

iter.reloc()->fix_relocation_after_move(&src, &dst);
}

{
MutexLocker ml(NMethodState_lock, Mutex::_no_safepoint_check_flag);
clear_inline_caches();
}

post_init();
}

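// Editor's note: a hedged distillation of the trampoline rule implemented in
// the relocation loop above -- a call relocation is fixed up directly only
// when no trampoline stub owns it; otherwise the trampoline_stub_Relocation
// performs the range-checked update of both the branch and the trampoline.
//
//   bool fix_call_directly(Relocation* r, nmethod* owner) {
//     if (!r->is_call()) return true;  // not a call: always fixed here
//     return trampoline_stub_Relocation::get_trampoline_for(r->addr(), owner) == nullptr;
//   }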
@ -1521,25 +1555,6 @@ nmethod* nmethod::relocate(CodeBlobType code_blob_type) {
return nullptr;
}

// Fix relocation
RelocIterator iter(nm_copy);
CodeBuffer src(this);
CodeBuffer dst(nm_copy);
while (iter.next()) {
#ifdef USE_TRAMPOLINE_STUB_FIX_OWNER
// Direct calls may no longer be in range and the use of a trampoline may now be required.
// Instead, allow trampoline relocations to update their owners and perform the necessary checks.
if (iter.reloc()->is_call()) {
address trampoline = trampoline_stub_Relocation::get_trampoline_for(iter.reloc()->addr(), nm_copy);
if (trampoline != nullptr) {
continue;
}
}
#endif

iter.reloc()->fix_relocation_after_move(&src, &dst);
}

// To make dependency checking during class loading fast, record
// the nmethod dependencies in the classes it is dependent on.
// This allows the dependency checking code to simply walk the
@ -1569,8 +1584,6 @@ nmethod* nmethod::relocate(CodeBlobType code_blob_type) {
if (!is_marked_for_deoptimization() && is_in_use()) {
assert(method() != nullptr && method()->code() == this, "should be if is in use");

nm_copy->clear_inline_caches();

// Attempt to start using the copy
if (nm_copy->make_in_use()) {
ICache::invalidate_range(nm_copy->code_begin(), nm_copy->code_size());
@ -1578,7 +1591,7 @@ nmethod* nmethod::relocate(CodeBlobType code_blob_type) {
methodHandle mh(Thread::current(), nm_copy->method());
nm_copy->method()->set_code(mh, nm_copy);

make_not_used();
make_not_entrant(InvalidationReason::RELOCATED);

nm_copy->post_compiled_method_load_event();


@ -499,6 +499,7 @@ public:
UNCOMMON_TRAP,
WHITEBOX_DEOPTIMIZATION,
ZOMBIE,
RELOCATED,
INVALIDATION_REASONS_COUNT
};

@ -543,6 +544,8 @@ public:
return "whitebox deoptimization";
case InvalidationReason::ZOMBIE:
return "zombie";
case InvalidationReason::RELOCATED:
return "relocated";
default: {
assert(false, "Unhandled reason");
return "Unknown";

@ -891,9 +891,23 @@ void ParallelScavengeHeap::resize_after_young_gc(bool is_survivor_overflowing) {

// Consider whether we should shrink the old gen
if (!is_survivor_overflowing) {
// Upper bound for a single step shrink
size_t max_shrink_bytes = SpaceAlignment;
assert(old_gen()->capacity_in_bytes() >= old_gen()->min_gen_size(), "inv");

// Old gen min_gen_size constraint.
const size_t max_shrink_bytes_gen_size_constraint = old_gen()->capacity_in_bytes() - old_gen()->min_gen_size();

// Per-step delta to avoid too aggressive shrinking.
const size_t max_shrink_bytes_per_step_constraint = SpaceAlignment;

// Combining the above two constraints.
const size_t max_shrink_bytes = MIN2(max_shrink_bytes_gen_size_constraint,
max_shrink_bytes_per_step_constraint);

size_t shrink_bytes = _size_policy->compute_old_gen_shrink_bytes(old_gen()->free_in_bytes(), max_shrink_bytes);

assert(old_gen()->capacity_in_bytes() >= shrink_bytes, "inv");
assert(old_gen()->capacity_in_bytes() - shrink_bytes >= old_gen()->min_gen_size(), "inv");

if (shrink_bytes != 0) {
if (MinHeapFreeRatio != 0) {
size_t new_capacity = old_gen()->capacity_in_bytes() - shrink_bytes;

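// Editor's note: a worked example of the two shrink constraints above, with
// illustrative numbers. If old-gen capacity is 96M, min_gen_size is 64M and
// SpaceAlignment is 2M, the gen-size constraint allows 96M - 64M = 32M while
// the per-step constraint allows only 2M, so
// max_shrink_bytes = MIN2(32M, 2M) = 2M for this cycle.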
@ -236,7 +236,10 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
// These values are exported as performance counters.
uintx size = _virtual_space.reserved_size();
_max_survivor_size = compute_survivor_size(size, SpaceAlignment);
_max_eden_size = size - (2*_max_survivor_size);

// Eden might grow to be almost as large as the entire young generation.
// We approximate this as the entire virtual space.
_max_eden_size = size;

// allocate the performance counters


@ -147,7 +147,8 @@ GrowableArray<MemoryPool*> SerialHeap::memory_pools() {

HeapWord* SerialHeap::allocate_loaded_archive_space(size_t word_size) {
MutexLocker ml(Heap_lock);
return old_gen()->allocate(word_size);
HeapWord* const addr = old_gen()->allocate(word_size);
return addr != nullptr ? addr : old_gen()->expand_and_allocate(word_size);
}

void SerialHeap::complete_loaded_archive_space(MemRegion archive_space) {

@ -37,6 +37,7 @@
#include "utilities/copy.hpp"

size_t ThreadLocalAllocBuffer::_max_size = 0;
int ThreadLocalAllocBuffer::_reserve_for_allocation_prefetch = 0;
unsigned int ThreadLocalAllocBuffer::_target_refills = 0;

ThreadLocalAllocBuffer::ThreadLocalAllocBuffer() :
@ -224,6 +225,30 @@ void ThreadLocalAllocBuffer::startup_initialization() {
// abort during VM initialization.
_target_refills = MAX2(_target_refills, 2U);

#ifdef COMPILER2
// If the C2 compiler is present, extra space is needed at the end of
// TLABs, otherwise prefetching instructions generated by the C2
// compiler will fault (due to accessing memory outside of heap).
// The amount of space is the max of the number of lines to
// prefetch for array and for instance allocations. (Extra space must be
// reserved to accommodate both types of allocations.)
//
// Only SPARC-specific BIS instructions are known to fault. (Those
// instructions are generated if AllocatePrefetchStyle==3 and
// AllocatePrefetchInstr==1). To be on the safe side, however,
// extra space is reserved for all combinations of
// AllocatePrefetchStyle and AllocatePrefetchInstr.
//
// If the C2 compiler is not present, no space is reserved.

// +1 for rounding up to next cache line, +1 to be safe
if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
int lines = MAX2(AllocatePrefetchLines, AllocateInstancePrefetchLines) + 2;
_reserve_for_allocation_prefetch = (AllocatePrefetchDistance + AllocatePrefetchStepSize * lines) /
(int)HeapWordSize;
}
#endif

// During jvm startup, the main thread is initialized
// before the heap is initialized. So reinitialize it now.
guarantee(Thread::current()->is_Java_thread(), "tlab initialization thread not Java thread");
@ -429,7 +454,8 @@ void ThreadLocalAllocStats::publish() {
}

size_t ThreadLocalAllocBuffer::end_reserve() {
return CollectedHeap::lab_alignment_reserve();
size_t reserve_size = CollectedHeap::lab_alignment_reserve();
return MAX2(reserve_size, (size_t)_reserve_for_allocation_prefetch);
}

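// Editor's note: a worked example for end_reserve() above, with illustrative
// values. With AllocatePrefetchDistance = 192, AllocatePrefetchStepSize = 64
// and lines = 3 + 2 = 5, the prefetch reserve is (192 + 64 * 5) / 8 = 64
// words; end_reserve() then returns MAX2(lab_alignment_reserve(), 64).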
const HeapWord* ThreadLocalAllocBuffer::start_relaxed() const {

@ -58,6 +58,7 @@ private:
size_t _allocated_before_last_gc; // total bytes allocated up until the last gc

static size_t _max_size; // maximum size of any TLAB
static int _reserve_for_allocation_prefetch; // Reserve at the end of the TLAB
static unsigned _target_refills; // expected number of refills between GCs

unsigned _number_of_refills;

@ -83,16 +83,15 @@ public:
return "PLAB";
default:
ShouldNotReachHere();
return "";
}
}

private:
// When ShenandoahElasticTLAB is enabled, the request cannot be made smaller than _min_size.
size_t _min_size;
size_t const _min_size;

// The size of the request in words.
size_t _requested_size;
size_t const _requested_size;

// The allocation may be increased for padding or decreased to fit in the remaining space of a region.
size_t _actual_size;
@ -104,7 +103,7 @@ private:
size_t _waste;

// This is the type of the request.
Type _alloc_type;
Type const _alloc_type;

#ifdef ASSERT
// Check that this is set before being read.
@ -209,6 +208,10 @@ public:
return (_alloc_type & bit_old_alloc) == 0;
}

inline bool is_cds() const {
return _alloc_type == _alloc_cds;
}

inline ShenandoahAffiliation affiliation() const {
return (_alloc_type & bit_old_alloc) == 0 ? YOUNG_GENERATION : OLD_GENERATION ;
}

@ -144,13 +144,12 @@ public:
{
ShenandoahReentrantLocker locker(nm_data->lock());

// Heal oops and disarm
// Heal oops
if (_bs->is_armed(nm)) {
ShenandoahEvacOOMScope oom_evac_scope;
ShenandoahNMethod::heal_nmethod_metadata(nm_data);
// Code cache unloading needs to know about on-stack nmethods. Arm the nmethods to get
// mark_as_maybe_on_stack() callbacks when they are used again.
_bs->arm(nm);
// Must remain armed to complete remaining work in nmethod entry barrier
assert(_bs->is_armed(nm), "Should remain armed");
}
}


@ -175,7 +175,6 @@ ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, Shena
void ShenandoahFreeSet::account_for_pip_regions(size_t mutator_regions, size_t mutator_bytes,
size_t collector_regions, size_t collector_bytes) {
shenandoah_assert_heaplocked();
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

// We have removed all of these regions from their respective partition. Each pip region is "in" the NotFree partition.
// We want to account for all pip pad memory as if it had been consumed from within the Mutator partition.
@ -1370,7 +1369,7 @@ template<typename Iter>
HeapWord* ShenandoahFreeSet::allocate_from_regions(Iter& iterator, ShenandoahAllocRequest &req, bool &in_new_region) {
for (idx_t idx = iterator.current(); iterator.has_next(); idx = iterator.next()) {
ShenandoahHeapRegion* r = _heap->get_region(idx);
size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab) ? req.min_size() : req.size();
size_t min_size = req.is_lab_alloc() ? req.min_size() : req.size();
if (alloc_capacity(r) >= min_size * HeapWordSize) {
HeapWord* result = try_allocate_in(r, req, in_new_region);
if (result != nullptr) {
@ -1502,7 +1501,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah

if (in_new_region) {
log_debug(gc, free)("Using new region (%zu) for %s (" PTR_FORMAT ").",
r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
r->index(), req.type_string(), p2i(&req));
assert(!r->is_affiliated(), "New region %zu should be unaffiliated", r->index());
r->set_affiliation(req.affiliation());
if (r->is_old()) {
@ -1521,7 +1520,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
assert(ctx->is_bitmap_range_within_region_clear(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
#endif
log_debug(gc, free)("Using new region (%zu) for %s (" PTR_FORMAT ").",
r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
r->index(), req.type_string(), p2i(&req));
} else {
assert(r->is_affiliated(), "Region %zu that is not new should be affiliated", r->index());
if (r->affiliation() != req.affiliation()) {
@ -1535,8 +1534,8 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
if (req.is_lab_alloc()) {
size_t adjusted_size = req.size();
size_t free = r->free(); // free represents bytes available within region r
if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
// This is a PLAB allocation
if (req.is_old()) {
// This is a PLAB allocation (lab alloc in old gen)
assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, r->index()),
"PLABS must be allocated in old_collector_free regions");
@ -1597,26 +1596,19 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
|
||||
r->set_update_watermark(r->top());
|
||||
if (r->is_old()) {
|
||||
_partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, (req.actual_size() + req.waste()) * HeapWordSize);
|
||||
assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
|
||||
// for plabs, we'll sort the difference between evac and promotion usage when we retire the plab
|
||||
} else {
|
||||
_partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, (req.actual_size() + req.waste()) * HeapWordSize);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
size_t ac = alloc_capacity(r);
|
||||
ShenandoahFreeSetPartitionId orig_partition;
|
||||
ShenandoahGeneration* request_generation = nullptr;
|
||||
if (req.is_mutator_alloc()) {
|
||||
request_generation = _heap->mode()->is_generational()? _heap->young_generation(): _heap->global_generation();
|
||||
orig_partition = ShenandoahFreeSetPartitionId::Mutator;
|
||||
} else if (req.is_old()) {
|
||||
request_generation = _heap->old_generation();
|
||||
orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
|
||||
} else {
|
||||
// Not old collector alloc, so this is a young collector gclab or shared allocation
|
||||
request_generation = _heap->mode()->is_generational()? _heap->young_generation(): _heap->global_generation();
|
||||
orig_partition = ShenandoahFreeSetPartitionId::Collector;
|
||||
}
|
||||
if (alloc_capacity(r) < PLAB::min_size() * HeapWordSize) {
|
||||
@ -1688,7 +1680,6 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo
|
||||
idx_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
|
||||
|
||||
assert(req.is_young(), "Humongous regions always allocated in YOUNG");
|
||||
ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
|
||||
|
||||
// Check if there are enough regions left to satisfy allocation.
|
||||
if (num > (idx_t) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) {
|
||||
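For scale, `required_regions()` above rounds the humongous byte size up to whole regions. A small worked example of that arithmetic, assuming 64-bit heap words and a hypothetical 2 MB region size (the real value comes from `ShenandoahHeapRegion::region_size_bytes()`):

```cpp
#include <cstddef>
#include <cstdio>

constexpr size_t HeapWordSize    = 8;               // 64-bit heap words
constexpr size_t RegionSizeBytes = 2 * 1024 * 1024; // assumed region size

// Mirrors the intent of ShenandoahHeapRegion::required_regions():
// round a byte size up to a whole number of regions.
size_t required_regions(size_t bytes) {
  return (bytes + RegionSizeBytes - 1) / RegionSizeBytes;
}

int main() {
  size_t words = 600 * 1024; // a 600K-word (4.8 MB) humongous allocation
  printf("regions needed: %zu\n", required_regions(words * HeapWordSize)); // 3
}
```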
@@ -1833,107 +1824,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo
}

class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionClosure {
private:
  static const ssize_t SentinelUsed = -1;
  static const ssize_t SentinelIndex = -1;
  static const size_t MaxSavedRegions = 128;

  ShenandoahRegionPartitions* _partitions;
  volatile size_t _recycled_region_count;
  ssize_t _region_indices[MaxSavedRegions];
  ssize_t _region_used[MaxSavedRegions];

  void get_lock_and_flush_buffer(size_t region_count, size_t overflow_region_used, size_t overflow_region_index) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahHeapLocker locker(heap->lock());
    size_t recycled_regions = AtomicAccess::load(&_recycled_region_count);
    size_t region_tallies[int(ShenandoahRegionPartitions::NumPartitions)];
    size_t used_byte_tallies[int(ShenandoahRegionPartitions::NumPartitions)];
    for (int p = 0; p < int(ShenandoahRegionPartitions::NumPartitions); p++) {
      region_tallies[p] = 0;
      used_byte_tallies[p] = 0;
    }
    ShenandoahFreeSetPartitionId p = _partitions->membership(overflow_region_index);
    used_byte_tallies[int(p)] += overflow_region_used;
    if (region_count <= recycled_regions) {
      // _recycled_region_count has not been decremented after I incremented it to obtain region_count, so I will
      // try to flush the buffer.

      // Multiple worker threads may attempt to flush this buffer. The first thread to acquire the lock does the work.
      // _recycled_region_count is only decreased while holding the heap lock.
      if (region_count > recycled_regions) {
        region_count = recycled_regions;
      }
      for (size_t i = 0; i < region_count; i++) {
        ssize_t used;
        // wait for other threads to finish updating their entries within the region buffer before processing entry
        do {
          used = _region_used[i];
        } while (used == SentinelUsed);
        ssize_t index;
        do {
          index = _region_indices[i];
        } while (index == SentinelIndex);

        ShenandoahFreeSetPartitionId p = _partitions->membership(index);
        assert(p != ShenandoahFreeSetPartitionId::NotFree, "Trashed regions should be in a free partition");
        used_byte_tallies[int(p)] += used;
        region_tallies[int(p)]++;
      }
      if (region_count > 0) {
        for (size_t i = 0; i < MaxSavedRegions; i++) {
          _region_indices[i] = SentinelIndex;
          _region_used[i] = SentinelUsed;
        }
      }

      // The almost last thing we do before releasing the lock is to set the _recycled_region_count to 0. What happens next?
      //
      // 1. Any worker thread that attempted to buffer a new region while we were flushing the buffer will have seen
      //    that _recycled_region_count > MaxSavedRegions. All such worker threads will first wait for the lock, then
      //    discover that the _recycled_region_count is zero, then, while holding the lock, they will process the
      //    region so it doesn't have to be placed into the buffer. This handles the large majority of cases.
      //
      // 2. However, there's a race that can happen, which will result in somewhat different behavior. Suppose
      //    this thread resets _recycled_region_count to 0. Then some other worker thread increments _recycled_region_count
      //    in order to store its region into the buffer and suppose this happens before all of the other worker threads
      //    which are waiting to acquire the heap lock have finished their efforts to flush the buffer. If this happens,
      //    then the workers who are waiting to acquire the heap lock and flush the buffer will find that _recycled_region_count
      //    has decreased from the value it held when they last tried to increment its value. In this case, these worker
      //    threads will process their overflow region while holding the lock, but they will not attempt to process regions
      //    newly placed into the buffer. Otherwise, confusion could result.
      //
      // Assumption: all worker threads who are attempting to acquire lock and flush buffer will finish their efforts before
      // the buffer once again overflows.
      // How could we avoid depending on this assumption?
      //  1. Let MaxSavedRegions be as large as number of regions, or at least as large as the collection set.
      //  2. Keep a count of how many times the buffer has been flushed per instantiation of the
      //     ShenandoahRecycleTrashedRegionClosure object, and only consult/update this value while holding the heap lock.
      //     Need to think about how this helps resolve the race.
      _recycled_region_count = 0;
    } else {
      // Some other thread has already processed the buffer, resetting _recycled_region_count to zero. Its current value
      // may be greater than zero because other workers may have accumulated entries into the buffer. But it is "extremely"
      // unlikely that it will overflow again before all waiting workers have had a chance to clear their state. While I've
      // got the heap lock, I'll go ahead and update the global state for my overflow region. I'll let other heap regions
      // accumulate in the buffer to be processed when the buffer is once again full.
      region_count = 0;
    }
    for (size_t p = 0; p < int(ShenandoahRegionPartitions::NumPartitions); p++) {
      _partitions->decrease_used(ShenandoahFreeSetPartitionId(p), used_byte_tallies[p]);
    }
  }

public:
  ShenandoahRecycleTrashedRegionClosure(ShenandoahRegionPartitions* p): ShenandoahHeapRegionClosure() {
    _partitions = p;
    _recycled_region_count = 0;
    for (size_t i = 0; i < MaxSavedRegions; i++) {
      _region_indices[i] = SentinelIndex;
      _region_used[i] = SentinelUsed;
    }
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    r->try_recycle();
  }
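The closure removed here buffered recycled regions in a fixed-size array and flushed per-partition tallies under the heap lock, using sentinel values so a flushing thread can spin until each slot is fully published. A condensed sketch of that buffer-and-flush pattern, with `std::atomic` and `std::mutex` standing in for the VM's `AtomicAccess` and `ShenandoahHeapLocker`:

```cpp
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <mutex>

constexpr ptrdiff_t kSentinel = -1;
constexpr size_t    kMaxSaved = 128;

struct RecycleBuffer {
  std::atomic<size_t>    count{0};
  std::atomic<ptrdiff_t> used[kMaxSaved];
  std::mutex             heap_lock; // stands in for the Shenandoah heap lock

  RecycleBuffer() { for (auto& u : used) u.store(kSentinel); }

  // Worker side: claim a slot with fetch_add; on overflow, fall back to
  // flushing under the lock (the "overflow region" path in the original).
  void record(ptrdiff_t used_bytes) {
    size_t slot = count.fetch_add(1);
    if (slot >= kMaxSaved) {
      flush_under_lock(used_bytes);
      return;
    }
    used[slot].store(used_bytes); // publish; the flusher spins past the sentinel
  }

  void flush_under_lock(ptrdiff_t overflow_bytes) {
    std::lock_guard<std::mutex> g(heap_lock);
    size_t n = count.load();
    if (n > kMaxSaved) {
      n = kMaxSaved;
    }
    ptrdiff_t tally = overflow_bytes;
    for (size_t i = 0; i < n; i++) {
      ptrdiff_t v;
      // Wait for the publishing worker to overwrite the sentinel.
      do { v = used[i].load(); } while (v == kSentinel);
      tally += v;
      used[i].store(kSentinel); // reset the slot for the next round
    }
    printf("flushed %zu entries, %td bytes\n", n, tally);
    count.store(0); // reopen the buffer; see the race discussion above
  }
};

int main() {
  RecycleBuffer buf;
  buf.record(1024);
  buf.record(2048);
  buf.flush_under_lock(0); // as if triggered by an overflowing worker
}
```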
@@ -1950,14 +1841,12 @@ void ShenandoahFreeSet::recycle_trash() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());

  ShenandoahRecycleTrashedRegionClosure closure(&_partitions);
  ShenandoahRecycleTrashedRegionClosure closure;
  heap->parallel_heap_region_iterate(&closure);
}

bool ShenandoahFreeSet::transfer_one_region_from_mutator_to_old_collector(size_t idx, size_t alloc_capacity) {
  ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
  ShenandoahYoungGeneration* young_gen = gen_heap->young_generation();
  ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  assert(alloc_capacity == region_size_bytes, "Region must be empty");
  if (young_unaffiliated_regions() > 0) {
@@ -1985,7 +1874,6 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
  assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
  assert(can_allocate_from(r), "Should not be allocated");

  ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
  const size_t region_alloc_capacity = alloc_capacity(r);

  if (transfer_one_region_from_mutator_to_old_collector(idx, region_alloc_capacity)) {
@@ -2133,7 +2021,6 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
  size_t total_mutator_regions = 0;
  size_t total_old_collector_regions = 0;

  bool is_generational = _heap->mode()->is_generational();
  size_t num_regions = _heap->num_regions();
  for (size_t idx = 0; idx < num_regions; idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
@@ -2222,7 +2109,6 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
      }
    } else {
      assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Region should have been retired");
      size_t ac = alloc_capacity(region);
      size_t humongous_waste_bytes = 0;
      if (region->is_humongous_start()) {
        oop obj = cast_to_oop(region->bottom());
@@ -3120,7 +3006,6 @@ void ShenandoahFreeSet::log_status() {
  size_t total_used = 0;
  size_t total_free = 0;
  size_t total_free_ext = 0;
  size_t total_trashed_free = 0;

  for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
       idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx++) {

@@ -76,6 +76,9 @@ public:
    }
  }

  // Bitmap reset task is heavy-weight and benefits from much smaller tasks than the default.
  size_t parallel_region_stride() override { return 8; }

  bool is_thread_safe() override { return true; }
};

@@ -524,7 +527,6 @@ size_t ShenandoahGeneration::select_aged_regions(const size_t old_promotion_rese
  assert_no_in_place_promotions();

  auto const heap = ShenandoahGenerationalHeap::heap();
  ShenandoahYoungGeneration* young_gen = heap->young_generation();
  ShenandoahFreeSet* free_set = heap->free_set();
  bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions();
  ShenandoahMarkingContext* const ctx = heap->marking_context();
@@ -562,7 +564,6 @@ size_t ShenandoahGeneration::select_aged_regions(const size_t old_promotion_rese
  size_t pip_mutator_bytes = 0;
  size_t pip_collector_bytes = 0;

  size_t min_remnant_size = PLAB::min_size() * HeapWordSize;
  for (idx_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* const r = heap->get_region(i);
    if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) {

@@ -688,19 +688,6 @@ void ShenandoahGenerationalHeap::reset_generation_reserves() {
  old_generation()->set_promoted_reserve(0);
}

void ShenandoahGenerationalHeap::TransferResult::print_on(const char* when, outputStream* ss) const {
  auto heap = ShenandoahGenerationalHeap::heap();
  ShenandoahYoungGeneration* const young_gen = heap->young_generation();
  ShenandoahOldGeneration* const old_gen = heap->old_generation();
  const size_t young_available = young_gen->available();
  const size_t old_available = old_gen->available();
  ss->print_cr("After %s, %s %zu regions to %s to prepare for next gc, old available: "
               PROPERFMT ", young_available: " PROPERFMT,
               when,
               success ? "successfully transferred" : "failed to transfer", region_count, region_destination,
               PROPERFMTARGS(old_available), PROPERFMTARGS(young_available));
}

void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {
  class ShenandoahGlobalCoalesceAndFill : public WorkerTask {
  private:

@@ -132,24 +132,12 @@ public:

  bool requires_barriers(stackChunkOop obj) const override;

  // Used for logging the result of a region transfer outside the heap lock
  struct TransferResult {
    bool success;
    size_t region_count;
    const char* region_destination;

    void print_on(const char* when, outputStream* ss) const;
  };

  // Zeros out the evacuation and promotion reserves
  void reset_generation_reserves();

  // Computes the optimal size for the old generation, represented as a surplus or deficit of old regions
  void compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions);

  // Transfers surplus old regions to young, or takes regions from young to satisfy old region deficit
  TransferResult balance_generations();

  // Balances generations, coalesces and fills old regions if necessary
  void complete_degenerated_cycle();
  void complete_concurrent_cycle();

@@ -985,7 +985,7 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = %zu, actual = %zu",
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
            req.type_string(), requested, actual);
  }

  return result;
@@ -1014,8 +1014,9 @@ HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req

  // Record the plab configuration for this result and register the object.
  if (result != nullptr && req.is_old()) {
    old_generation()->configure_plab_for_current_thread(req);
    if (!req.is_lab_alloc()) {
    if (req.is_lab_alloc()) {
      old_generation()->configure_plab_for_current_thread(req);
    } else {
      // Register the newly allocated object while we're holding the global lock since there's no synchronization
      // built in to the implementation of register_object(). There are potential races when multiple independent
      // threads are allocating objects, some of which might span the same card region. For example, consider
@@ -1035,6 +1036,13 @@ HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req
      // last-start representing object b while first-start represents object c. This is why we need to require all
      // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
      old_generation()->card_scan()->register_object(result);

      if (req.is_promotion()) {
        // Shared promotion.
        const size_t actual_size = req.actual_size() * HeapWordSize;
        log_debug(gc, plab)("Expend shared promotion of %zu bytes", actual_size);
        old_generation()->expend_promoted(actual_size);
      }
    }
  }
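The comment above is the reason `register_object()` stays under the global heap lock: a card's object-start bookkeeping is a read-modify-write, so two allocations touching the same card can lose an update without mutual exclusion. A toy illustration with a hypothetical per-card entry (the real card scanner packs its object-start metadata differently):

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical per-card metadata: word offsets of the first and last
// object that start within one 512-byte card.
struct CardEntry {
  int16_t first_start = -1;
  int16_t last_start  = -1;
};

// Both fields are read-modify-write. If two threads register objects on
// the same card concurrently, one of the updates can be lost, which is why
// the VM performs all register_object() calls while holding the heap lock.
void register_start(CardEntry& card, int16_t offset) {
  if (card.first_start < 0 || offset < card.first_start) {
    card.first_start = offset;
  }
  if (offset > card.last_start) {
    card.last_start = offset;
  }
}

int main() {
  CardEntry card;
  register_start(card, 12); // object b starts at word 12 of the card
  register_start(card, 40); // object c starts at word 40 of the same card
  printf("first=%d last=%d\n", card.first_start, card.last_start); // first=12 last=40
}
```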
@@ -1962,7 +1970,7 @@ void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* b
  assert(blk->is_thread_safe(), "Only thread-safe closures here");
  const uint active_workers = workers()->active_workers();
  const size_t n_regions = num_regions();
  size_t stride = ShenandoahParallelRegionStride;
  size_t stride = blk->parallel_region_stride();
  if (stride == 0 && active_workers > 1) {
    // Automatically derive the stride to balance the work between threads
    // evenly. Do not try to split work if below the reasonable threshold.

@@ -113,6 +113,7 @@ public:
class ShenandoahHeapRegionClosure : public StackObj {
public:
  virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
  virtual size_t parallel_region_stride() { return ShenandoahParallelRegionStride; }
  virtual bool is_thread_safe() { return false; }
};
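Together, these two hunks move the stride choice from the global `ShenandoahParallelRegionStride` flag onto the closure itself, so heavy-weight closures (the bitmap reset above asks for 8) can opt into finer-grained chunks. A stripped-down sketch of that dispatch, with a hypothetical serial driver in place of `parallel_heap_region_iterate()`:

```cpp
#include <cstddef>
#include <cstdio>

constexpr size_t kDefaultStride = 0; // 0 = derive automatically

struct RegionClosure {
  virtual void region_do(size_t idx) = 0;
  // Default: defer to the global policy (auto-derived when 0).
  virtual size_t parallel_region_stride() { return kDefaultStride; }
  virtual ~RegionClosure() = default;
};

// Heavy work per region: request small chunks so workers rebalance often.
struct BitmapResetClosure : RegionClosure {
  void region_do(size_t idx) override { printf("reset region %zu\n", idx); }
  size_t parallel_region_stride() override { return 8; }
};

// Hypothetical driver mirroring the hunk above: the stride now comes from
// the closure instead of a global flag.
void iterate(RegionClosure* blk, size_t num_regions, unsigned workers) {
  size_t stride = blk->parallel_region_stride();
  if (stride == 0) {
    // Assumed auto-derivation: a few chunks per worker.
    stride = (workers > 1) ? num_regions / (workers * 4) + 1 : num_regions;
  }
  for (size_t i = 0; i < num_regions; i += stride) {
    // Each [i, i + stride) chunk would go to a worker; serial stand-in here.
    for (size_t j = i; j < i + stride && j < num_regions; j++) {
      blk->region_do(j);
    }
  }
}

int main() {
  BitmapResetClosure cl;
  iterate(&cl, 20, 4); // stride 8 comes from the closure, not the flag
}
```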
@@ -447,7 +447,7 @@ public:
    return (bottom() <= p) && (p < top());
  }

  inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t);
  inline void adjust_alloc_metadata(const ShenandoahAllocRequest &req, size_t);
  void reset_alloc_metadata();
  size_t get_shared_allocs() const;
  size_t get_tlab_allocs() const;

@@ -71,7 +71,7 @@ HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocReq
  }

  make_regular_allocation(req.affiliation());
  adjust_alloc_metadata(req.type(), size);
  adjust_alloc_metadata(req, size);

  HeapWord* new_top = aligned_obj + size;
  assert(new_top <= end(), "PLAB cannot span end of heap region");
@@ -111,7 +111,7 @@ HeapWord* ShenandoahHeapRegion::allocate(size_t size, const ShenandoahAllocReque
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    make_regular_allocation(req.affiliation());
    adjust_alloc_metadata(req.type(), size);
    adjust_alloc_metadata(req, size);

    HeapWord* new_top = obj + size;
    set_top(new_top);
@@ -125,26 +125,16 @@ HeapWord* ShenandoahHeapRegion::allocate(size_t size, const ShenandoahAllocReque
  }
}

inline void ShenandoahHeapRegion::adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t size) {
  switch (type) {
    case ShenandoahAllocRequest::_alloc_shared:
    case ShenandoahAllocRequest::_alloc_shared_gc:
    case ShenandoahAllocRequest::_alloc_shared_gc_old:
    case ShenandoahAllocRequest::_alloc_shared_gc_promotion:
    case ShenandoahAllocRequest::_alloc_cds:
      // Counted implicitly by tlab/gclab allocs
      break;
    case ShenandoahAllocRequest::_alloc_tlab:
inline void ShenandoahHeapRegion::adjust_alloc_metadata(const ShenandoahAllocRequest &req, size_t size) {
  // Only need to update alloc metadata for lab alloc, shared alloc is counted implicitly by tlab/gclab allocs
  if (req.is_lab_alloc()) {
    if (req.is_mutator_alloc()) {
      _tlab_allocs += size;
      break;
    case ShenandoahAllocRequest::_alloc_gclab:
      _gclab_allocs += size;
      break;
    case ShenandoahAllocRequest::_alloc_plab:
    } else if (req.is_old()) {
      _plab_allocs += size;
      break;
    default:
      ShouldNotReachHere();
    } else {
      _gclab_allocs += size;
    }
  }
}
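The switch over allocation types collapses into two predicates: a mutator LAB is a TLAB, an old-gen LAB is a PLAB, and any other LAB is a GCLAB, while shared allocations stay uncounted because region usage already covers them. A hypothetical condensation of that mapping:

```cpp
#include <cstdio>

// Hypothetical reduction of ShenandoahAllocRequest's predicates to flags.
struct Req {
  bool lab;     // is_lab_alloc()
  bool mutator; // is_mutator_alloc()
  bool old_gen; // is_old()
};

// Which per-region counter a request lands in under the new scheme.
const char* counter_for(const Req& r) {
  if (!r.lab)    return "none (shared allocs are counted implicitly)";
  if (r.mutator) return "_tlab_allocs";
  if (r.old_gen) return "_plab_allocs";
  return "_gclab_allocs";
}

int main() {
  printf("%s\n", counter_for({true, true,  false})); // TLAB
  printf("%s\n", counter_for({true, false, true}));  // PLAB
  printf("%s\n", counter_for({true, false, false})); // GCLAB
  printf("%s\n", counter_for({false, true, false})); // shared
}
```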
|
||||
@ -157,7 +147,7 @@ inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) {
|
||||
}
|
||||
|
||||
inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) {
|
||||
size_t new_live_data = AtomicAccess::add(&_live_data, s, memory_order_relaxed);
|
||||
AtomicAccess::add(&_live_data, s, memory_order_relaxed);
|
||||
}
|
||||
|
||||
inline void ShenandoahHeapRegion::clear_live_data() {
|
||||
|
||||
@ -44,6 +44,10 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
size_t parallel_region_stride() override {
|
||||
return _closure->parallel_region_stride();
|
||||
}
|
||||
|
||||
bool is_thread_safe() override {
|
||||
return _closure->is_thread_safe();
|
||||
}
|
||||
@ -64,6 +68,10 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
size_t parallel_region_stride() override {
|
||||
return _closure->parallel_region_stride();
|
||||
}
|
||||
|
||||
bool is_thread_safe() override {
|
||||
return _closure->is_thread_safe();
|
||||
}
|
||||
|
||||
@ -168,7 +168,7 @@ size_t ShenandoahOldGeneration::get_promoted_expended() const {
|
||||
}
|
||||
|
||||
bool ShenandoahOldGeneration::can_allocate(const ShenandoahAllocRequest &req) const {
|
||||
assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "GCLAB pertains only to young-gen memory");
|
||||
assert(req.is_old(), "Must be old allocation request");
|
||||
|
||||
const size_t requested_bytes = req.size() * HeapWordSize;
|
||||
// The promotion reserve may also be used for evacuations. If we can promote this object,
|
||||
@ -180,7 +180,7 @@ bool ShenandoahOldGeneration::can_allocate(const ShenandoahAllocRequest &req) co
|
||||
return true;
|
||||
}
|
||||
|
||||
if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
|
||||
if (req.is_lab_alloc()) {
|
||||
// The promotion reserve cannot accommodate this plab request. Check if we still have room for
|
||||
// evacuations. Note that we cannot really know how much of the plab will be used for evacuations,
|
||||
// so here we only check that some evacuation reserve still exists.
|
||||
@ -195,37 +195,29 @@ bool ShenandoahOldGeneration::can_allocate(const ShenandoahAllocRequest &req) co
|
||||
|
||||
void
|
||||
ShenandoahOldGeneration::configure_plab_for_current_thread(const ShenandoahAllocRequest &req) {
|
||||
// Note: Even when a mutator is performing a promotion outside a LAB, we use a 'shared_gc' request.
|
||||
if (req.is_gc_alloc()) {
|
||||
const size_t actual_size = req.actual_size() * HeapWordSize;
|
||||
if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
|
||||
// We've created a new plab. Now we configure it whether it will be used for promotions
|
||||
// and evacuations - or just evacuations.
|
||||
Thread* thread = Thread::current();
|
||||
ShenandoahThreadLocalData::reset_plab_promoted(thread);
|
||||
assert(req.is_gc_alloc() && req.is_old() && req.is_lab_alloc(), "Must be a plab alloc request");
|
||||
const size_t actual_size = req.actual_size() * HeapWordSize;
|
||||
// We've created a new plab. Now we configure it whether it will be used for promotions
|
||||
// and evacuations - or just evacuations.
|
||||
Thread* thread = Thread::current();
|
||||
ShenandoahThreadLocalData::reset_plab_promoted(thread);
|
||||
|
||||
// The actual size of the allocation may be larger than the requested bytes (due to alignment on card boundaries).
|
||||
// If this puts us over our promotion budget, we need to disable future PLAB promotions for this thread.
|
||||
if (can_promote(actual_size)) {
|
||||
// Assume the entirety of this PLAB will be used for promotion. This prevents promotion from overreach.
|
||||
// When we retire this plab, we'll unexpend what we don't really use.
|
||||
log_debug(gc, plab)("Thread can promote using PLAB of %zu bytes. Expended: %zu, available: %zu",
|
||||
actual_size, get_promoted_expended(), get_promoted_reserve());
|
||||
expend_promoted(actual_size);
|
||||
ShenandoahThreadLocalData::enable_plab_promotions(thread);
|
||||
ShenandoahThreadLocalData::set_plab_actual_size(thread, actual_size);
|
||||
} else {
|
||||
// Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations.
|
||||
ShenandoahThreadLocalData::disable_plab_promotions(thread);
|
||||
ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
|
||||
log_debug(gc, plab)("Thread cannot promote using PLAB of %zu bytes. Expended: %zu, available: %zu, mixed evacuations? %s",
|
||||
actual_size, get_promoted_expended(), get_promoted_reserve(), BOOL_TO_STR(ShenandoahHeap::heap()->collection_set()->has_old_regions()));
|
||||
}
|
||||
} else if (req.is_promotion()) {
|
||||
// Shared promotion.
|
||||
log_debug(gc, plab)("Expend shared promotion of %zu bytes", actual_size);
|
||||
expend_promoted(actual_size);
|
||||
}
|
||||
// The actual size of the allocation may be larger than the requested bytes (due to alignment on card boundaries).
|
||||
// If this puts us over our promotion budget, we need to disable future PLAB promotions for this thread.
|
||||
if (can_promote(actual_size)) {
|
||||
// Assume the entirety of this PLAB will be used for promotion. This prevents promotion from overreach.
|
||||
// When we retire this plab, we'll unexpend what we don't really use.
|
||||
log_debug(gc, plab)("Thread can promote using PLAB of %zu bytes. Expended: %zu, available: %zu",
|
||||
actual_size, get_promoted_expended(), get_promoted_reserve());
|
||||
expend_promoted(actual_size);
|
||||
ShenandoahThreadLocalData::enable_plab_promotions(thread);
|
||||
ShenandoahThreadLocalData::set_plab_actual_size(thread, actual_size);
|
||||
} else {
|
||||
// Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations.
|
||||
ShenandoahThreadLocalData::disable_plab_promotions(thread);
|
||||
ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
|
||||
log_debug(gc, plab)("Thread cannot promote using PLAB of %zu bytes. Expended: %zu, available: %zu, mixed evacuations? %s",
|
||||
actual_size, get_promoted_expended(), get_promoted_reserve(), BOOL_TO_STR(ShenandoahHeap::heap()->collection_set()->has_old_regions()));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
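The budget logic above charges the whole PLAB against the promotion reserve up front (refunding the unused part when the PLAB is retired); if the reserve cannot cover it, promotions are disabled for the thread and the PLAB serves evacuations only. A standalone sketch of that decision, with plain structs standing in for `ShenandoahOldGeneration` and the thread-local PLAB state:

```cpp
#include <cstddef>
#include <cstdio>

// Stand-in for the old generation's promotion accounting.
struct OldGenBudget {
  size_t promoted_reserve;  // bytes set aside for promotions this cycle
  size_t promoted_expended; // bytes already claimed by PLABs/shared promotions

  bool can_promote(size_t bytes) const {
    return promoted_expended + bytes <= promoted_reserve;
  }
};

// Stand-in for the per-thread PLAB configuration.
struct PlabConfig {
  bool   promotions_enabled;
  size_t plab_actual_size;
};

// Mirrors configure_plab_for_current_thread(): pessimistically expend the
// entire PLAB from the promotion reserve; on failure, evacuations only.
PlabConfig configure_plab(OldGenBudget& old_gen, size_t plab_bytes) {
  if (old_gen.can_promote(plab_bytes)) {
    old_gen.promoted_expended += plab_bytes; // unexpended later at retirement
    return {true, plab_bytes};
  }
  return {false, 0};
}

int main() {
  OldGenBudget old_gen = {1024 * 1024, 900 * 1024}; // 1 MB reserve, 900 KB used
  PlabConfig cfg = configure_plab(old_gen, 256 * 1024);
  printf("promotions enabled: %d\n", cfg.promotions_enabled); // 0: over budget
}
```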
@@ -62,7 +62,6 @@ class ShenandoahRegulatorThread: public ConcurrentGCThread {
  bool start_old_cycle() const;
  bool start_young_cycle() const;
  bool start_global_cycle() const;
  bool resume_old_cycle();

  // The generational mode can only unload classes in a global cycle. The regulator
  // thread itself will trigger a global cycle if metaspace is out of memory.

@@ -335,7 +335,6 @@ HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, con
    if (ctx->is_marked(p)) {
      oop obj = cast_to_oop(p);
      assert(oopDesc::is_oop(obj), "Should be an object");
      assert(Klass::is_valid(obj->klass()), "Not a valid klass ptr");
      assert(p + obj->size() > left, "This object should span start of card");
      assert(p < right, "Result must precede right");
      return p;
@@ -362,15 +361,15 @@ HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, con

  // Recall that we already dealt with the co-initial object case above
  assert(p < left, "obj should start before left");
  // While it is safe to ask an object its size in the loop that
  // follows, the (ifdef'd out) loop should never be needed.
  // While it is safe to ask an object its size in the block that
  // follows, the (ifdef'd out) block should never be needed.
  // 1. we ask this question only for regions in the old generation, and those
  //    that are not humongous regions
  // 2. there is no direct allocation ever by mutators in old generation
  //    regions walked by this code. Only GC will ever allocate in old regions,
  //    and then too only during promotion/evacuation phases. Thus there is no danger
  //    of races between reading from and writing to the object start array,
  //    or of asking partially initialized objects their size (in the loop below).
  //    or of asking partially initialized objects their size (in the ifdef below).
  //    Furthermore, humongous regions (and their dirty cards) are never processed
  //    by this code.
  // 3. only GC asks this question during phases when it is not concurrently
@@ -382,15 +381,6 @@ HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, con
#ifdef ASSERT
  oop obj = cast_to_oop(p);
  assert(oopDesc::is_oop(obj), "Should be an object");
  while (p + obj->size() < left) {
    p += obj->size();
    obj = cast_to_oop(p);
    assert(oopDesc::is_oop(obj), "Should be an object");
    assert(Klass::is_valid(obj->klass()), "Not a valid klass ptr");
    // Check assumptions in previous block comment if this assert fires
    fatal("Should never need forward walk in block start");
  }
  assert(p <= left, "p should start at or before left end of card");
  assert(p + obj->size() > left, "obj should end after left end of card");
#endif // ASSERT
  return p;
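The block comment and the ifdef'd-out loop above encode an invariant of the object-start metadata: the recorded start at or below the card's left boundary already belongs to the object spanning that boundary, so only a backward lookup is ever needed and the forward walk is a debug-only assertion. A toy model of the lookup over a hypothetical flat object layout:

```cpp
#include <cstdio>
#include <vector>

// Hypothetical flat layout: each old-gen object is a [start, start + size)
// range of word indices, in address order.
struct Obj { size_t start; size_t size; };

// Find the object spanning word index `left`, searching backwards only.
// Old-gen regions are parsable and contiguous, so the first object that
// starts at or before `left` must also extend past it.
const Obj* object_spanning(const std::vector<Obj>& objs, size_t left) {
  for (auto it = objs.rbegin(); it != objs.rend(); ++it) {
    if (it->start <= left) {
      return (it->start + it->size > left) ? &*it : nullptr;
    }
  }
  return nullptr;
}

int main() {
  std::vector<Obj> objs = {{0, 10}, {10, 30}, {40, 5}};
  const Obj* o = object_spanning(objs, 25);
  printf("spanning object starts at %zu\n", o->start); // 10
}
```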
@@ -233,8 +233,6 @@ public:
  inline bool is_write_card_dirty(size_t card_index) const;
  inline void mark_card_as_dirty(size_t card_index);
  inline void mark_range_as_dirty(size_t card_index, size_t num_cards);
  inline void mark_card_as_clean(size_t card_index);
  inline void mark_range_as_clean(size_t card_index, size_t num_cards);
  inline bool is_card_dirty(HeapWord* p) const;
  inline bool is_write_card_dirty(HeapWord* p) const;
  inline void mark_card_as_dirty(HeapWord* p);

@@ -217,11 +217,10 @@ static void deoptimize_allocation(JavaThread* thread) {

void ZBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
  const ZPage* const page = ZHeap::heap()->page(to_zaddress(new_obj));
  const ZPageAge age = page->age();
  if (age == ZPageAge::old) {
  if (!page->allows_raw_null()) {
    // We promised C2 that its allocations would end up in young gen. This object
    // breaks that promise. Take a few steps in the interpreter instead, which has
    // no such assumptions about where an object resides.
    // is too old to guarantee that. Take a few steps in the interpreter instead,
    // which does not elide barriers based on the age of an object.
    deoptimize_allocation(thread);
  }
}

@@ -190,7 +190,8 @@ void ZGeneration::flip_age_pages(const ZRelocationSetSelector* selector) {
  ZRendezvousHandshakeClosure cl;
  Handshake::execute(&cl);

  _relocate.barrier_flip_promoted_pages(_relocation_set.flip_promoted_pages());
  _relocate.barrier_promoted_pages(_relocation_set.flip_promoted_pages(),
                                   _relocation_set.relocate_promoted_pages());
}

static double fragmentation_limit(ZGenerationId generation) {

@@ -41,7 +41,8 @@ ZPage::ZPage(ZPageType type, ZPageAge age, const ZVirtualMemory& vmem, ZMultiPar
    _top(to_zoffset_end(start())),
    _livemap(object_max_count()),
    _remembered_set(),
    _multi_partition_tracker(multi_partition_tracker) {
    _multi_partition_tracker(multi_partition_tracker),
    _relocate_promoted(false) {
  assert(!_virtual.is_null(), "Should not be null");
  assert((_type == ZPageType::small && size() == ZPageSizeSmall) ||
         (_type == ZPageType::medium && ZPageSizeMediumMin <= size() && size() <= ZPageSizeMediumMax) ||
@@ -70,6 +71,14 @@ ZPage* ZPage::clone_for_promotion() const {
  return page;
}

bool ZPage::allows_raw_null() const {
  return is_young() && !AtomicAccess::load(&_relocate_promoted);
}

void ZPage::set_is_relocate_promoted() {
  AtomicAccess::store(&_relocate_promoted, true);
}

ZGeneration* ZPage::generation() {
  return ZGeneration::generation(_generation_id);
}
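`allows_raw_null()` is the predicate the C2 slow path now keys on: raw (uncolored) null stores are safe only while the page is young and not yet selected for relocation-promotion. A minimal model of the flag and the resulting deoptimization decision, with `std::atomic` standing in for `AtomicAccess`:

```cpp
#include <atomic>
#include <cstdio>

// Minimal model of the new ZPage flag: a page stops allowing raw null
// stores once it is selected for promotion by relocation, even though it
// is still formally young until the relocation happens.
struct Page {
  bool is_young;
  std::atomic<bool> relocate_promoted{false};

  bool allows_raw_null() const {
    return is_young && !relocate_promoted.load();
  }
};

// Mirrors ZBarrierSet::on_slowpath_allocation_exit(): compiled code assumed
// the new object would tolerate raw nulls; if not, bail to the interpreter.
bool must_deoptimize(const Page& page) {
  return !page.allows_raw_null();
}

int main() {
  Page p{true};
  printf("deopt before selection: %d\n", must_deoptimize(p)); // 0
  p.relocate_promoted.store(true); // set_is_relocate_promoted()
  printf("deopt after selection: %d\n", must_deoptimize(p));  // 1
}
```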
@@ -52,6 +52,7 @@ private:
  ZLiveMap _livemap;
  ZRememberedSet _remembered_set;
  ZMultiPartitionTracker* const _multi_partition_tracker;
  volatile bool _relocate_promoted;

  const char* type_to_string() const;

@@ -103,6 +104,9 @@ public:

  ZPageAge age() const;

  bool allows_raw_null() const;
  void set_is_relocate_promoted();

  uint32_t seqnum() const;
  bool is_allocating() const;
  bool is_relocatable() const;

@@ -1366,27 +1366,35 @@ public:

class ZPromoteBarrierTask : public ZTask {
private:
  ZArrayParallelIterator<ZPage*> _iter;
  ZArrayParallelIterator<ZPage*> _flip_promoted_iter;
  ZArrayParallelIterator<ZPage*> _relocate_promoted_iter;

public:
  ZPromoteBarrierTask(const ZArray<ZPage*>* pages)
  ZPromoteBarrierTask(const ZArray<ZPage*>* flip_promoted_pages,
                      const ZArray<ZPage*>* relocate_promoted_pages)
    : ZTask("ZPromoteBarrierTask"),
      _iter(pages) {}
      _flip_promoted_iter(flip_promoted_pages),
      _relocate_promoted_iter(relocate_promoted_pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;

    for (ZPage* page; _iter.next(&page);) {
      // When promoting an object (and before relocate start), we must ensure that all
      // contained zpointers are store good. The marking code ensures that for non-null
      // pointers, but null pointers are ignored. This code ensures that even null pointers
      // are made store good, for the promoted objects.
      page->object_iterate([&](oop obj) {
        ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
      });
    auto promote_barriers = [&](ZArrayParallelIterator<ZPage*>* iter) {
      for (ZPage* page; iter->next(&page);) {
        // When promoting an object (and before relocate start), we must ensure that all
        // contained zpointers are store good. The marking code ensures that for non-null
        // pointers, but null pointers are ignored. This code ensures that even null pointers
        // are made store good, for the promoted objects.
        page->object_iterate([&](oop obj) {
          ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
        });

        SuspendibleThreadSet::yield();
      }
      SuspendibleThreadSet::yield();
    }
    };

    promote_barriers(&_flip_promoted_iter);
    promote_barriers(&_relocate_promoted_iter);
  }
};
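The rewrite above folds the per-page barrier work into one lambda applied to both page sets, rather than a task hard-wired to flip-promoted pages. A skeletal sketch of that shape, with `std::vector` standing in for `ZArrayParallelIterator` and a stub `heal()` for the promote barrier that makes even null zpointers store good:

```cpp
#include <cstdio>
#include <vector>

struct Page { int id; };

// Stub for the promote barrier: iterate the page's objects and make all
// contained pointers (including nulls) store good.
static void heal(Page* page) {
  printf("healing page %d\n", page->id);
}

// One helper, two page sets: the same shape as the lambda in the task above.
static void promote_barriers(const std::vector<Page*>& pages) {
  for (Page* page : pages) {
    heal(page);
    // SuspendibleThreadSet::yield() would go here in the VM task.
  }
}

int main() {
  Page a{1}, b{2};
  std::vector<Page*> flip_promoted{&a};     // promoted by flipping page age
  std::vector<Page*> relocate_promoted{&b}; // promoted via relocation
  promote_barriers(flip_promoted);
  promote_barriers(relocate_promoted);
}
```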
@@ -1395,8 +1403,9 @@ void ZRelocate::flip_age_pages(const ZArray<ZPage*>* pages) {
  workers()->run(&flip_age_task);
}

void ZRelocate::barrier_flip_promoted_pages(const ZArray<ZPage*>* pages) {
  ZPromoteBarrierTask promote_barrier_task(pages);
void ZRelocate::barrier_promoted_pages(const ZArray<ZPage*>* flip_promoted_pages,
                                       const ZArray<ZPage*>* relocate_promoted_pages) {
  ZPromoteBarrierTask promote_barrier_task(flip_promoted_pages, relocate_promoted_pages);
  workers()->run(&promote_barrier_task);
}

@@ -119,7 +119,8 @@ public:
  void relocate(ZRelocationSet* relocation_set);

  void flip_age_pages(const ZArray<ZPage*>* pages);
  void barrier_flip_promoted_pages(const ZArray<ZPage*>* pages);
  void barrier_promoted_pages(const ZArray<ZPage*>* flip_promoted_pages,
                              const ZArray<ZPage*>* relocate_promoted_pages);

  void synchronize();
  void desynchronize();

@@ -38,6 +38,7 @@

class ZRelocationSetInstallTask : public ZTask {
private:
  ZRelocationSet* _relocation_set;
  ZForwardingAllocator* const _allocator;
  ZForwarding** _forwardings;
  const size_t _nforwardings;
@@ -54,16 +55,6 @@ private:
    page->log_msg(" (relocation selected)");

    _forwardings[index] = forwarding;

    if (forwarding->is_promotion()) {
      // Before promoting an object (and before relocate start), we must ensure that all
      // contained zpointers are store good. The marking code ensures that for non-null
      // pointers, but null pointers are ignored. This code ensures that even null pointers
      // are made store good, for the promoted objects.
      page->object_iterate([&](oop obj) {
        ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
      });
    }
  }

  void install_small(ZForwarding* forwarding, size_t index) {
@@ -78,10 +69,18 @@ private:
    return ZRelocate::compute_to_age(page->age());
  }

  void track_if_promoted(ZPage* page, ZForwarding* forwarding, ZArray<ZPage*>& relocate_promoted) {
    if (forwarding->is_promotion()) {
      page->set_is_relocate_promoted();
      relocate_promoted.append(page);
    }
  }

public:
  ZRelocationSetInstallTask(ZForwardingAllocator* allocator, const ZRelocationSetSelector* selector)
  ZRelocationSetInstallTask(ZRelocationSet* relocation_set, const ZRelocationSetSelector* selector)
    : ZTask("ZRelocationSetInstallTask"),
      _allocator(allocator),
      _relocation_set(relocation_set),
      _allocator(&relocation_set->_allocator),
      _forwardings(nullptr),
      _nforwardings((size_t)selector->selected_small()->length() + (size_t)selector->selected_medium()->length()),
      _small(selector->selected_small()),
@@ -108,11 +107,14 @@ public:
    // Join the STS to block out VMThreads while running promote_barrier_on_young_oop_field
    SuspendibleThreadSetJoiner sts_joiner;

    ZArray<ZPage*> relocate_promoted;

    // Allocate and install forwardings for small pages
    for (size_t page_index; _small_iter.next_index(&page_index);) {
      ZPage* page = _small->at(int(page_index));
      ZForwarding* const forwarding = ZForwarding::alloc(_allocator, page, to_age(page));
      install_small(forwarding, (size_t)_medium->length() + page_index);
      track_if_promoted(page, forwarding, relocate_promoted);

      SuspendibleThreadSet::yield();
    }
@@ -122,9 +124,12 @@ public:
      ZPage* page = _medium->at(int(page_index));
      ZForwarding* const forwarding = ZForwarding::alloc(_allocator, page, to_age(page));
      install_medium(forwarding, page_index);
      track_if_promoted(page, forwarding, relocate_promoted);

      SuspendibleThreadSet::yield();
    }

    _relocation_set->register_relocate_promoted(relocate_promoted);
  }

  ZForwarding** forwardings() const {
@@ -143,6 +148,7 @@ ZRelocationSet::ZRelocationSet(ZGeneration* generation)
    _nforwardings(0),
    _promotion_lock(),
    _flip_promoted_pages(),
    _relocate_promoted_pages(),
    _in_place_relocate_promoted_pages() {}

ZWorkers* ZRelocationSet::workers() const {
@@ -157,9 +163,13 @@ ZArray<ZPage*>* ZRelocationSet::flip_promoted_pages() {
  return &_flip_promoted_pages;
}

ZArray<ZPage*>* ZRelocationSet::relocate_promoted_pages() {
  return &_relocate_promoted_pages;
}

void ZRelocationSet::install(const ZRelocationSetSelector* selector) {
  // Install relocation set
  ZRelocationSetInstallTask task(&_allocator, selector);
  ZRelocationSetInstallTask task(this, selector);
  workers()->run(&task);

  _forwardings = task.forwardings();
@@ -189,6 +199,7 @@ void ZRelocationSet::reset(ZPageAllocator* page_allocator) {

  destroy_and_clear(page_allocator, &_in_place_relocate_promoted_pages);
  destroy_and_clear(page_allocator, &_flip_promoted_pages);
  _relocate_promoted_pages.clear();
}

void ZRelocationSet::register_flip_promoted(const ZArray<ZPage*>& pages) {
@@ -199,6 +210,18 @@ void ZRelocationSet::register_flip_promoted(const ZArray<ZPage*>& pages) {
  }
}

void ZRelocationSet::register_relocate_promoted(const ZArray<ZPage*>& pages) {
  if (pages.is_empty()) {
    return;
  }

  ZLocker<ZLock> locker(&_promotion_lock);
  for (ZPage* const page : pages) {
    assert(!_relocate_promoted_pages.contains(page), "no duplicates allowed");
    _relocate_promoted_pages.append(page);
  }
}

void ZRelocationSet::register_in_place_relocate_promoted(ZPage* page) {
  ZLocker<ZLock> locker(&_promotion_lock);
  assert(!_in_place_relocate_promoted_pages.contains(page), "no duplicates allowed");
@@ -37,6 +37,7 @@ class ZWorkers;

class ZRelocationSet {
  template <bool> friend class ZRelocationSetIteratorImpl;
  friend class ZRelocationSetInstallTask;

private:
  ZGeneration* _generation;
@@ -45,6 +46,7 @@ private:
  size_t _nforwardings;
  ZLock _promotion_lock;
  ZArray<ZPage*> _flip_promoted_pages;
  ZArray<ZPage*> _relocate_promoted_pages;
  ZArray<ZPage*> _in_place_relocate_promoted_pages;

  ZWorkers* workers() const;
@@ -58,8 +60,10 @@ public:
  void reset(ZPageAllocator* page_allocator);
  ZGeneration* generation() const;
  ZArray<ZPage*>* flip_promoted_pages();
  ZArray<ZPage*>* relocate_promoted_pages();

  void register_flip_promoted(const ZArray<ZPage*>& pages);
  void register_relocate_promoted(const ZArray<ZPage*>& pages);
  void register_in_place_relocate_promoted(ZPage* page);
};
@@ -1100,16 +1100,16 @@ JVM_GetEnclosingMethodInfo(JNIEnv* env, jclass ofClass);
 * Virtual thread support.
 */
JNIEXPORT void JNICALL
JVM_VirtualThreadStart(JNIEnv* env, jobject vthread);
JVM_VirtualThreadEndFirstTransition(JNIEnv* env, jobject vthread);

JNIEXPORT void JNICALL
JVM_VirtualThreadEnd(JNIEnv* env, jobject vthread);
JVM_VirtualThreadStartFinalTransition(JNIEnv* env, jobject vthread);

JNIEXPORT void JNICALL
JVM_VirtualThreadMount(JNIEnv* env, jobject vthread, jboolean hide);
JVM_VirtualThreadStartTransition(JNIEnv* env, jobject vthread, jboolean is_mount);

JNIEXPORT void JNICALL
JVM_VirtualThreadUnmount(JNIEnv* env, jobject vthread, jboolean hide);
JVM_VirtualThreadEndTransition(JNIEnv* env, jobject vthread, jboolean is_mount);

JNIEXPORT void JNICALL
JVM_VirtualThreadDisableSuspend(JNIEnv* env, jclass clazz, jboolean enter);

@@ -1427,10 +1427,10 @@ static void transform(InstanceKlass*& ik, ClassFileParser& parser, JavaThread* t
  } else {
    JfrClassTransformer::cache_class_file_data(new_ik, stream, thread);
  }
  JfrClassTransformer::copy_traceid(ik, new_ik);
  if (is_instrumented && JdkJfrEvent::is_subklass(new_ik)) {
    bless_commit_method(new_ik);
  }
  JfrClassTransformer::copy_traceid(ik, new_ik);
  JfrClassTransformer::rewrite_klass_pointer(ik, new_ik, parser, thread);
}

@@ -22,6 +22,7 @@
 *
 */

#include "classfile/classFileParser.hpp"
#include "jfr/instrumentation/jfrEventClassTransformer.hpp"
#include "jfr/jfr.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
@@ -31,6 +32,7 @@
#include "jfr/recorder/repository/jfrEmergencyDump.hpp"
#include "jfr/recorder/repository/jfrRepository.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/support/jfrClassDefineEvent.hpp"
#include "jfr/support/jfrKlassExtension.hpp"
#include "jfr/support/jfrResolution.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
@@ -78,13 +80,15 @@ void Jfr::on_unloading_classes() {
}

void Jfr::on_klass_creation(InstanceKlass*& ik, ClassFileParser& parser, TRAPS) {
  JfrTraceId::assign(ik);
  if (IS_EVENT_OR_HOST_KLASS(ik)) {
    JfrEventClassTransformer::on_klass_creation(ik, parser, THREAD);
    return;
  }
  if (JfrMethodTracer::in_use()) {
  } else if (JfrMethodTracer::in_use()) {
    JfrMethodTracer::on_klass_creation(ik, parser, THREAD);
  }
  if (!parser.is_internal()) {
    JfrClassDefineEvent::on_creation(ik, parser, THREAD);
  }
}

void Jfr::on_klass_redefinition(const InstanceKlass* ik, const InstanceKlass* scratch_klass) {
@@ -168,3 +172,13 @@ bool Jfr::on_flight_recorder_option(const JavaVMOption** option, char* delimiter
bool Jfr::on_start_flight_recording_option(const JavaVMOption** option, char* delimiter) {
  return JfrOptionSet::parse_start_flight_recording_option(option, delimiter);
}

#if INCLUDE_CDS
void Jfr::on_restoration(const Klass* k, JavaThread* jt) {
  assert(k != nullptr, "invariant");
  JfrTraceId::restore(k);
  if (k->is_instance_klass()) {
    JfrClassDefineEvent::on_restoration(InstanceKlass::cast(k), jt);
  }
}
#endif
@@ -25,6 +25,7 @@
#ifndef SHARE_JFR_JFR_HPP
#define SHARE_JFR_JFR_HPP

#include "jfr/utilities/jfrTypes.hpp"
#include "memory/allStatic.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/exceptions.hpp"
@@ -78,6 +79,7 @@ class Jfr : AllStatic {
  static void initialize_main_thread(JavaThread* jt);
  static bool has_sample_request(JavaThread* jt);
  static void check_and_process_sample_request(JavaThread* jt);
  CDS_ONLY(static void on_restoration(const Klass* k, JavaThread* jt);)
};

#endif // SHARE_JFR_JFR_HPP

@@ -215,6 +215,7 @@
<Event name="ClassDefine" category="Java Virtual Machine, Class Loading" label="Class Define" thread="true" stackTrace="true" startTime="false">
  <Field type="Class" name="definedClass" label="Defined Class" />
  <Field type="ClassLoader" name="definingClassLoader" label="Defining Class Loader" />
  <Field type="Symbol" name="source" label="Source" />
</Event>

<Event name="ClassRedefinition" category="Java Virtual Machine, Class Loading" label="Class Redefinition" thread="false" stackTrace="false" startTime="false">

@@ -30,11 +30,12 @@
#include "classfile/vmClasses.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/recorder/checkpoint/types/jfrTypeSet.hpp"
#include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp"
#include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/support/jfrKlassUnloading.hpp"
#include "jfr/support/jfrSymbolTable.inline.hpp"
#include "jfr/support/methodtracer/jfrInstrumentedClass.hpp"
#include "jfr/support/methodtracer/jfrMethodTracer.hpp"
#include "jfr/utilities/jfrHashtable.hpp"
@@ -1262,7 +1263,7 @@ static size_t teardown() {
    clear_klasses_and_methods();
    clear_method_tracer_klasses();
    JfrKlassUnloading::clear();
    _artifacts->increment_checkpoint_id();
    _artifacts->clear();
    _initial_type_set = true;
  } else {
    _initial_type_set = false;