diff --git a/doc/building.html b/doc/building.html
index 99eb3e0c473..19313ebf43a 100644
--- a/doc/building.html
+++ b/doc/building.html
@@ -668,7 +668,7 @@ update.
(Note that this version is often presented as "MSVC 14.28", and reported
by cl.exe as 19.28.) Older versions will not be accepted by
configure and will not work. The maximum accepted version
-of Visual Studio is 2022.
+of Visual Studio is 2026.
If you have multiple versions of Visual Studio installed,
configure will by default pick the latest. You can request
a specific version to be used by setting
diff --git a/doc/building.md b/doc/building.md
index 047255d1848..1fbd395a9d1 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -468,7 +468,7 @@ available for this update.
The minimum accepted version is Visual Studio 2019 version 16.8. (Note that
this version is often presented as "MSVC 14.28", and reported by cl.exe as
19.28.) Older versions will not be accepted by `configure` and will not work.
-The maximum accepted version of Visual Studio is 2022.
+The maximum accepted version of Visual Studio is 2026.
If you have multiple versions of Visual Studio installed, `configure` will by
default pick the latest. You can request a specific version to be used by
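For example (a minimal sketch, assuming the `--with-toolchain-version` configure option that this paragraph goes on to describe):

    bash configure --with-toolchain-version=2026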
diff --git a/doc/testing.html b/doc/testing.html
index b9838735e4f..31f4fbd1778 100644
--- a/doc/testing.html
+++ b/doc/testing.html
@@ -535,6 +535,8 @@ failure. This helps to reproduce intermittent test failures. Defaults to
REPORT
Use this report style when reporting test results (sent to JTReg as
-report). Defaults to files.
+MANUAL
+Set to true to run only manual tests (passes -manual to JTReg instead of -automatic).
Gtest keywords
REPEAT
The number of times to repeat the tests
diff --git a/doc/testing.md b/doc/testing.md
index 0144610a5bf..b95f59de9fd 100644
--- a/doc/testing.md
+++ b/doc/testing.md
@@ -512,6 +512,10 @@ helps to reproduce intermittent test failures. Defaults to 0.
Use this report style when reporting test results (sent to JTReg as `-report`).
Defaults to `files`.
+#### MANUAL
+
+Set to `true` to run only manual tests (passes `-manual` to JTReg instead of `-automatic`).
+
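A usage sketch (the test selector is only an illustration; any `TEST` value works):

    make test TEST="jtreg:test/jdk/java/awt" JTREG="MANUAL=true"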
### Gtest keywords
#### REPEAT
diff --git a/make/RunTests.gmk b/make/RunTests.gmk
index 947389f64f9..1f50b97531b 100644
--- a/make/RunTests.gmk
+++ b/make/RunTests.gmk
@@ -206,7 +206,7 @@ $(eval $(call ParseKeywordVariable, JTREG, \
SINGLE_KEYWORDS := JOBS TIMEOUT_FACTOR FAILURE_HANDLER_TIMEOUT \
TEST_MODE ASSERT VERBOSE RETAIN TEST_THREAD_FACTORY JVMTI_STRESS_AGENT \
MAX_MEM RUN_PROBLEM_LISTS RETRY_COUNT REPEAT_COUNT MAX_OUTPUT REPORT \
- AOT_JDK $(CUSTOM_JTREG_SINGLE_KEYWORDS), \
+ AOT_JDK MANUAL $(CUSTOM_JTREG_SINGLE_KEYWORDS), \
STRING_KEYWORDS := OPTIONS JAVA_OPTIONS VM_OPTIONS KEYWORDS \
EXTRA_PROBLEM_LISTS LAUNCHER_OPTIONS \
$(CUSTOM_JTREG_STRING_KEYWORDS), \
@@ -911,7 +911,13 @@ define SetupRunJtregTestBody
-vmoption:-Dtest.boot.jdk="$$(BOOT_JDK)" \
-vmoption:-Djava.io.tmpdir="$$($1_TEST_TMP_DIR)"
- $1_JTREG_BASIC_OPTIONS += -automatic -ignore:quiet
+ $1_JTREG_BASIC_OPTIONS += -ignore:quiet
+
+ ifeq ($$(JTREG_MANUAL), true)
+ $1_JTREG_BASIC_OPTIONS += -manual
+ else
+ $1_JTREG_BASIC_OPTIONS += -automatic
+ endif
# Make it possible to specify the JIB_DATA_DIR for tests using the
# JIB Artifact resolver
@@ -1151,6 +1157,7 @@ define SetupRunJtregTestBody
$$(EXPR) $$($1_PASSED) + $$($1_FAILED) + $$($1_ERROR) + $$($1_SKIPPED))) \
, \
$$(eval $1_PASSED_AND_RUNTIME_SKIPPED := 0) \
+ $$(eval $1_PASSED := 0) \
$$(eval $1_RUNTIME_SKIPPED := 0) \
$$(eval $1_SKIPPED := 0) \
$$(eval $1_FAILED := 0) \
diff --git a/make/autoconf/flags-cflags.m4 b/make/autoconf/flags-cflags.m4
index 9d58a280998..6298bcae416 100644
--- a/make/autoconf/flags-cflags.m4
+++ b/make/autoconf/flags-cflags.m4
@@ -282,10 +282,17 @@ AC_DEFUN([FLAGS_SETUP_OPTIMIZATION],
C_O_FLAG_DEBUG_JVM="-O0"
C_O_FLAG_NONE="-O0"
+ if test "x$TOOLCHAIN_TYPE" = xgcc; then
+ C_O_FLAG_LTO="-flto=auto -fuse-linker-plugin -fno-strict-aliasing -fno-fat-lto-objects"
+ else
+ C_O_FLAG_LTO="-flto -fno-strict-aliasing"
+ fi
+
if test "x$TOOLCHAIN_TYPE" = xclang && test "x$OPENJDK_TARGET_OS" = xaix; then
C_O_FLAG_HIGHEST_JVM="${C_O_FLAG_HIGHEST_JVM} -finline-functions"
C_O_FLAG_HIGHEST="${C_O_FLAG_HIGHEST} -finline-functions"
C_O_FLAG_HI="${C_O_FLAG_HI} -finline-functions"
+ C_O_FLAG_LTO="${C_O_FLAG_LTO} -ffat-lto-objects"
fi
# -D_FORTIFY_SOURCE=2 hardening option needs optimization (at least -O1) enabled
@@ -317,6 +324,7 @@ AC_DEFUN([FLAGS_SETUP_OPTIMIZATION],
C_O_FLAG_DEBUG_JVM=""
C_O_FLAG_NONE="-Od"
C_O_FLAG_SIZE="-O1"
+ C_O_FLAG_LTO="-GL"
fi
# Now copy to C++ flags
@@ -328,6 +336,7 @@ AC_DEFUN([FLAGS_SETUP_OPTIMIZATION],
CXX_O_FLAG_DEBUG_JVM="$C_O_FLAG_DEBUG_JVM"
CXX_O_FLAG_NONE="$C_O_FLAG_NONE"
CXX_O_FLAG_SIZE="$C_O_FLAG_SIZE"
+ CXX_O_FLAG_LTO="$C_O_FLAG_LTO"
# Adjust optimization flags according to debug level.
case $DEBUG_LEVEL in
@@ -360,12 +369,15 @@ AC_DEFUN([FLAGS_SETUP_OPTIMIZATION],
AC_SUBST(C_O_FLAG_NORM)
AC_SUBST(C_O_FLAG_NONE)
AC_SUBST(C_O_FLAG_SIZE)
+ AC_SUBST(C_O_FLAG_LTO)
+
AC_SUBST(CXX_O_FLAG_HIGHEST_JVM)
AC_SUBST(CXX_O_FLAG_HIGHEST)
AC_SUBST(CXX_O_FLAG_HI)
AC_SUBST(CXX_O_FLAG_NORM)
AC_SUBST(CXX_O_FLAG_NONE)
AC_SUBST(CXX_O_FLAG_SIZE)
+ AC_SUBST(CXX_O_FLAG_LTO)
])
AC_DEFUN([FLAGS_SETUP_CFLAGS],
diff --git a/make/autoconf/flags-ldflags.m4 b/make/autoconf/flags-ldflags.m4
index 66f8904db89..ad131b20e27 100644
--- a/make/autoconf/flags-ldflags.m4
+++ b/make/autoconf/flags-ldflags.m4
@@ -61,6 +61,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
fi
BASIC_LDFLAGS_JVM_ONLY=""
+ LDFLAGS_LTO="-flto=auto -fuse-linker-plugin -fno-strict-aliasing"
LDFLAGS_CXX_PARTIAL_LINKING="$MACHINE_FLAG -r"
@@ -68,6 +69,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
BASIC_LDFLAGS_JVM_ONLY="-mno-omit-leaf-frame-pointer -mstack-alignment=16 \
-fPIC"
+    LDFLAGS_LTO="-flto -fno-strict-aliasing"
LDFLAGS_CXX_PARTIAL_LINKING="$MACHINE_FLAG -r"
if test "x$OPENJDK_TARGET_OS" = xlinux; then
@@ -87,6 +89,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
BASIC_LDFLAGS="-opt:ref"
BASIC_LDFLAGS_JDK_ONLY="-incremental:no"
BASIC_LDFLAGS_JVM_ONLY="-opt:icf,8 -subsystem:windows"
+ LDFLAGS_LTO="-LTCG:INCREMENTAL"
fi
if (test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang) \
@@ -148,6 +151,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
# Export some intermediate variables for compatibility
LDFLAGS_CXX_JDK="$DEBUGLEVEL_LDFLAGS_JDK_ONLY"
+ AC_SUBST(LDFLAGS_LTO)
AC_SUBST(LDFLAGS_CXX_JDK)
AC_SUBST(LDFLAGS_CXX_PARTIAL_LINKING)
])
diff --git a/make/autoconf/spec.gmk.template b/make/autoconf/spec.gmk.template
index 0b336721d65..b3d58704c50 100644
--- a/make/autoconf/spec.gmk.template
+++ b/make/autoconf/spec.gmk.template
@@ -513,12 +513,14 @@ C_O_FLAG_HI := @C_O_FLAG_HI@
C_O_FLAG_NORM := @C_O_FLAG_NORM@
C_O_FLAG_NONE := @C_O_FLAG_NONE@
C_O_FLAG_SIZE := @C_O_FLAG_SIZE@
+C_O_FLAG_LTO := @C_O_FLAG_LTO@
CXX_O_FLAG_HIGHEST_JVM := @CXX_O_FLAG_HIGHEST_JVM@
CXX_O_FLAG_HIGHEST := @CXX_O_FLAG_HIGHEST@
CXX_O_FLAG_HI := @CXX_O_FLAG_HI@
CXX_O_FLAG_NORM := @CXX_O_FLAG_NORM@
CXX_O_FLAG_NONE := @CXX_O_FLAG_NONE@
CXX_O_FLAG_SIZE := @CXX_O_FLAG_SIZE@
+CXX_O_FLAG_LTO := @CXX_O_FLAG_LTO@
GENDEPS_FLAGS := @GENDEPS_FLAGS@
@@ -587,6 +589,9 @@ LDFLAGS_CXX_JDK := @LDFLAGS_CXX_JDK@
# LDFLAGS specific to partial linking.
LDFLAGS_CXX_PARTIAL_LINKING := @LDFLAGS_CXX_PARTIAL_LINKING@
+# LDFLAGS specific to link time optimization.
+LDFLAGS_LTO := @LDFLAGS_LTO@
+
# Sometimes a different linker is needed for c++ libs
LDCXX := @LDCXX@
# The flags for linking libstdc++ linker.
diff --git a/make/autoconf/toolchain_microsoft.m4 b/make/autoconf/toolchain_microsoft.m4
index 17ad2666b3a..f577cf1a2a1 100644
--- a/make/autoconf/toolchain_microsoft.m4
+++ b/make/autoconf/toolchain_microsoft.m4
@@ -25,7 +25,7 @@
################################################################################
# The order of these defines the priority by which we try to find them.
-VALID_VS_VERSIONS="2022 2019"
+VALID_VS_VERSIONS="2022 2019 2026"
VS_DESCRIPTION_2019="Microsoft Visual Studio 2019"
VS_VERSION_INTERNAL_2019=142
@@ -57,6 +57,21 @@ VS_SDK_PLATFORM_NAME_2022=
VS_SUPPORTED_2022=true
VS_TOOLSET_SUPPORTED_2022=true
+VS_DESCRIPTION_2026="Microsoft Visual Studio 2026"
+VS_VERSION_INTERNAL_2026=145
+VS_MSVCR_2026=vcruntime140.dll
+VS_VCRUNTIME_1_2026=vcruntime140_1.dll
+VS_MSVCP_2026=msvcp140.dll
+VS_ENVVAR_2026="VS180COMNTOOLS"
+VS_USE_UCRT_2026="true"
+VS_VS_INSTALLDIR_2026="Microsoft Visual Studio/18"
+VS_EDITIONS_2026="BuildTools Community Professional Enterprise"
+VS_SDK_INSTALLDIR_2026=
+VS_VS_PLATFORM_NAME_2026="v145"
+VS_SDK_PLATFORM_NAME_2026=
+VS_SUPPORTED_2026=true
+VS_TOOLSET_SUPPORTED_2026=true
+
################################################################################
AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT],
diff --git a/make/common/NativeCompilation.gmk b/make/common/NativeCompilation.gmk
index 9721f1c0aca..28e186adf5f 100644
--- a/make/common/NativeCompilation.gmk
+++ b/make/common/NativeCompilation.gmk
@@ -98,6 +98,7 @@ include native/Paths.gmk
# SYSROOT_CFLAGS the compiler flags for using the specific sysroot
# SYSROOT_LDFLAGS the linker flags for using the specific sysroot
# OPTIMIZATION sets optimization level to NONE, LOW, HIGH, HIGHEST, HIGHEST_JVM, SIZE
+# LINK_TIME_OPTIMIZATION if set to true, enables link time optimization
# DISABLED_WARNINGS_ Disable the given warnings for the specified toolchain
# DISABLED_WARNINGS__ Disable the given warnings for the specified
# toolchain and target OS
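As an illustration of the new argument, a minimal recipe sketch (BUILD_LIBFOO and its NAME are hypothetical; the real usage this patch adds is the LINK_TIME_OPTIMIZATION := true line in ClientLibraries.gmk further down):

    # Hypothetical recipe -- only the LINK_TIME_OPTIMIZATION line is new behavior.
    $(eval $(call SetupJdkLibrary, BUILD_LIBFOO, \
        NAME := foo, \
        OPTIMIZATION := SIZE, \
        LINK_TIME_OPTIMIZATION := true, \
    ))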
diff --git a/make/common/native/Flags.gmk b/make/common/native/Flags.gmk
index 747e090b816..843701cb4db 100644
--- a/make/common/native/Flags.gmk
+++ b/make/common/native/Flags.gmk
@@ -194,6 +194,11 @@ define SetupCompilerFlags
$1_EXTRA_CXXFLAGS += $(CFLAGS_WARNINGS_ARE_ERRORS)
endif
+ ifeq (true, $$($1_LINK_TIME_OPTIMIZATION))
+ $1_EXTRA_CFLAGS += $(C_O_FLAG_LTO)
+ $1_EXTRA_CXXFLAGS += $(CXX_O_FLAG_LTO)
+ endif
+
ifeq (NONE, $$($1_OPTIMIZATION))
$1_OPT_CFLAGS := $(C_O_FLAG_NONE)
$1_OPT_CXXFLAGS := $(CXX_O_FLAG_NONE)
@@ -222,6 +227,10 @@ define SetupLinkerFlags
# Pickup extra OPENJDK_TARGET_OS_TYPE, OPENJDK_TARGET_OS and TOOLCHAIN_TYPE
# dependent variables for LDFLAGS and LIBS, and additionally the pair dependent
# TOOLCHAIN_TYPE plus OPENJDK_TARGET_OS
+ ifeq ($$($1_LINK_TIME_OPTIMIZATION), true)
+ $1_EXTRA_LDFLAGS += $(LDFLAGS_LTO)
+ endif
+
$1_EXTRA_LDFLAGS += $$($1_LDFLAGS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_LDFLAGS_$(OPENJDK_TARGET_OS)) \
$$($1_LDFLAGS_$(TOOLCHAIN_TYPE)) $$($1_LDFLAGS_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS))
$1_EXTRA_LIBS += $$($1_LIBS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_LIBS_$(OPENJDK_TARGET_OS)) \
diff --git a/make/hotspot/lib/CompileGtest.gmk b/make/hotspot/lib/CompileGtest.gmk
index d615e254f5a..60912992134 100644
--- a/make/hotspot/lib/CompileGtest.gmk
+++ b/make/hotspot/lib/CompileGtest.gmk
@@ -95,6 +95,7 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBJVM, \
EXTRA_OBJECT_FILES := $(BUILD_LIBJVM_ALL_OBJS), \
DEFAULT_CFLAGS := false, \
CFLAGS := $(JVM_CFLAGS) \
+ -DHOTSPOT_GTEST \
-I$(GTEST_FRAMEWORK_SRC)/googletest/include \
-I$(GTEST_FRAMEWORK_SRC)/googlemock/include \
$(addprefix -I, $(GTEST_TEST_SRC)), \
diff --git a/make/hotspot/lib/CompileJvm.gmk b/make/hotspot/lib/CompileJvm.gmk
index a8b90c92e4d..b0ea27e5081 100644
--- a/make/hotspot/lib/CompileJvm.gmk
+++ b/make/hotspot/lib/CompileJvm.gmk
@@ -234,6 +234,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJVM, \
LDFLAGS := $(JVM_LDFLAGS), \
LIBS := $(JVM_LIBS), \
OPTIMIZATION := $(JVM_OPTIMIZATION), \
+ LINK_TIME_OPTIMIZATION := $(JVM_LTO), \
OBJECT_DIR := $(JVM_OUTPUTDIR)/objs, \
STRIPFLAGS := $(JVM_STRIPFLAGS), \
EMBED_MANIFEST := true, \
diff --git a/make/hotspot/lib/JvmFeatures.gmk b/make/hotspot/lib/JvmFeatures.gmk
index 79bbd6a4106..90ea8a985e3 100644
--- a/make/hotspot/lib/JvmFeatures.gmk
+++ b/make/hotspot/lib/JvmFeatures.gmk
@@ -175,22 +175,12 @@ ifeq ($(call check-jvm-feature, link-time-opt), true)
# Set JVM_OPTIMIZATION directly so other jvm-feature flags can override it
# later on if desired
JVM_OPTIMIZATION := HIGHEST_JVM
- ifeq ($(call isCompiler, gcc), true)
- JVM_CFLAGS_FEATURES += -flto=auto -fuse-linker-plugin -fno-strict-aliasing \
- -fno-fat-lto-objects
- JVM_LDFLAGS_FEATURES += $(CXX_O_FLAG_HIGHEST_JVM) -flto=auto \
- -fuse-linker-plugin -fno-strict-aliasing
- else ifeq ($(call isCompiler, clang), true)
- JVM_CFLAGS_FEATURES += -flto -fno-strict-aliasing
- ifeq ($(call isBuildOs, aix), true)
- JVM_CFLAGS_FEATURES += -ffat-lto-objects
- endif
- JVM_LDFLAGS_FEATURES += $(CXX_O_FLAG_HIGHEST_JVM) -flto -fno-strict-aliasing
- else ifeq ($(call isCompiler, microsoft), true)
- JVM_CFLAGS_FEATURES += -GL
- JVM_LDFLAGS_FEATURES += -LTCG:INCREMENTAL
+ JVM_LTO := true
+ ifneq ($(call isCompiler, microsoft), true)
+ JVM_LDFLAGS_FEATURES += $(CXX_O_FLAG_HIGHEST_JVM)
endif
else
+ JVM_LTO := false
ifeq ($(call isCompiler, gcc), true)
JVM_LDFLAGS_FEATURES += -O1
endif
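For the JVM itself the recipe is unchanged: the existing link-time-opt JVM feature now simply sets JVM_LTO, and the actual flags come from configure. A sketch for enabling the feature (assuming the standard --with-jvm-features option):

    bash configure --with-jvm-features=link-time-opt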
diff --git a/make/jdk/src/classes/build/tools/taglet/SealedGraph.java b/make/jdk/src/classes/build/tools/taglet/SealedGraph.java
index 17867b99595..3e93826c180 100644
--- a/make/jdk/src/classes/build/tools/taglet/SealedGraph.java
+++ b/make/jdk/src/classes/build/tools/taglet/SealedGraph.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -219,13 +219,13 @@ public final class SealedGraph implements Taglet {
// This implies the module is always the same.
private String relativeLink(TypeElement node) {
var util = SealedGraph.this.docletEnvironment.getElementUtils();
- var rootPackage = util.getPackageOf(rootNode);
var nodePackage = util.getPackageOf(node);
- var backNavigator = rootPackage.getQualifiedName().toString().chars()
+ // Note: SVG files for nested types use the simple names of containing types as parent directories.
+ // We therefore need to convert all dots in the qualified name to "../" below.
+ var backNavigator = rootNode.getQualifiedName().toString().chars()
.filter(c -> c == '.')
.mapToObj(c -> "../")
- .collect(joining()) +
- "../";
+ .collect(joining());
var forwardNavigator = nodePackage.getQualifiedName().toString()
.replace(".", "/");
diff --git a/make/modules/java.desktop/lib/ClientLibraries.gmk b/make/modules/java.desktop/lib/ClientLibraries.gmk
index 2c29092cdd6..b036973b776 100644
--- a/make/modules/java.desktop/lib/ClientLibraries.gmk
+++ b/make/modules/java.desktop/lib/ClientLibraries.gmk
@@ -226,6 +226,7 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
EXCLUDE_FILES := imageioJPEG.c jpegdecoder.c pngtest.c, \
EXCLUDES := $(LIBSPLASHSCREEN_EXCLUDES), \
OPTIMIZATION := SIZE, \
+ LINK_TIME_OPTIMIZATION := true, \
CFLAGS := $(LIBSPLASHSCREEN_CFLAGS) \
$(GIFLIB_CFLAGS) $(LIBJPEG_CFLAGS) $(PNG_CFLAGS) $(LIBZ_CFLAGS) \
$(ICONV_CFLAGS), \
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index e8f9733fe7e..364db407bd3 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -1,6 +1,7 @@
//
// Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, 2024, Red Hat, Inc. All rights reserved.
+// Copyright 2025 Arm Limited and/or its affiliates.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -1194,15 +1195,10 @@ class HandlerImpl {
public:
- static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
- static uint size_exception_handler() {
- return MacroAssembler::far_codestub_branch_size();
- }
-
static uint size_deopt_handler() {
- // count one adr and one far branch instruction
+ // count one branch instruction and one far call instruction sequence
return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
}
};
@@ -2261,25 +2257,6 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const
//=============================================================================
-// Emit exception handler code.
-int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
-{
- // mov rscratch1 #exception_blob_entry_point
- // br rscratch1
- // Note that the code buffer's insts_mark is always relative to insts.
- // That's why we must use the macroassembler to generate a handler.
- address base = __ start_a_stub(size_exception_handler());
- if (base == nullptr) {
- ciEnv::current()->record_failure("CodeCache is full");
- return 0; // CodeBuffer::expand failed
- }
- int offset = __ offset();
- __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
- assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
- __ end_a_stub();
- return offset;
-}
-
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
@@ -2290,14 +2267,20 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
- int offset = __ offset();
- __ adr(lr, __ pc());
- __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+ int offset = __ offset();
+ Label start;
+ __ bind(start);
+ __ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+
+ int entry_offset = __ offset();
+ __ b(start);
assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
+ assert(__ offset() - entry_offset >= NativePostCallNop::first_check_size,
+ "out of bounds read in post-call NOP check");
__ end_a_stub();
- return offset;
+ return entry_offset;
}
// REQUIRED MATCHER CODE
@@ -3388,28 +3371,28 @@ encode %{
// aarch64_enc_cmpxchg_acq is that we use load-acquire in the
// CompareAndSwap sequence to serve as a barrier on acquiring a
// lock.
- enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
+ enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegL oldval, iRegL newval) %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
%}
- enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
+ enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegI oldval, iRegI newval) %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
%}
- enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
+ enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegI oldval, iRegI newval) %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
%}
- enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
+ enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegI oldval, iRegI newval) %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ true, /*release*/ true,
@@ -3417,7 +3400,7 @@ encode %{
%}
// auxiliary used for CompareAndSwapX to set result register
- enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
+ enc_class aarch64_enc_cset_eq(iRegI res) %{
Register res_reg = as_Register($res$$reg);
__ cset(res_reg, Assembler::EQ);
%}
@@ -8403,7 +8386,7 @@ instruct castVVMask(pRegGov dst)
// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
// can't match them
-instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
+instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapB mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
@@ -8421,7 +8404,7 @@ instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoS
ins_pipe(pipe_slow);
%}
-instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
+instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapS mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
@@ -8439,7 +8422,7 @@ instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoS
ins_pipe(pipe_slow);
%}
-instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
+instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapI mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
@@ -8457,7 +8440,7 @@ instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoS
ins_pipe(pipe_slow);
%}
-instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
+instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapL mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
@@ -8494,7 +8477,7 @@ instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval
ins_pipe(pipe_slow);
%}
-instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
+instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
predicate(n->as_LoadStore()->barrier_data() == 0);
@@ -8515,7 +8498,7 @@ instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoS
// alternative CompareAndSwapX when we are eliding barriers
-instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
+instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapB mem (Binary oldval newval)));
@@ -8534,7 +8517,7 @@ instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegI
ins_pipe(pipe_slow);
%}
-instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
+instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapS mem (Binary oldval newval)));
@@ -8553,7 +8536,7 @@ instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegI
ins_pipe(pipe_slow);
%}
-instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
+instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapI mem (Binary oldval newval)));
@@ -8572,7 +8555,7 @@ instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegI
ins_pipe(pipe_slow);
%}
-instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
+instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapL mem (Binary oldval newval)));
@@ -8610,7 +8593,7 @@ instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP new
ins_pipe(pipe_slow);
%}
-instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
+instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index 9ab463125fe..37a6a130e0d 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -449,12 +449,20 @@ int LIR_Assembler::emit_deopt_handler() {
int offset = code_offset();
- __ adr(lr, pc());
- __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+ Label start;
+ __ bind(start);
+
+ __ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+
+ int entry_offset = __ offset();
+ __ b(start);
+
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
+ assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
+ "out of bounds read in post-call NOP check");
__ end_a_stub();
- return offset;
+ return entry_offset;
}
void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
index 12b941fc4f7..729cd2827b7 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
@@ -71,7 +71,7 @@ friend class ArrayCopyStub;
// CompiledDirectCall::to_trampoline_stub_size()
_call_stub_size = 13 * NativeInstruction::instruction_size,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
- _deopt_handler_size = 7 * NativeInstruction::instruction_size
+ _deopt_handler_size = 4 * NativeInstruction::instruction_size
};
public:
diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
index 5d4f0801ec6..07a2d6fbfa0 100644
--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
@@ -879,7 +879,6 @@ void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) {
ShouldNotReachHere();
}
- OrderAccess::fence();
ICache::invalidate_word((address)patch_addr);
}
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
index 5a7fececafa..f2003dd9b55 100644
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
@@ -394,12 +394,6 @@ void NativePostCallNop::make_deopt() {
NativeDeoptInstruction::insert(addr_at(0));
}
-#ifdef ASSERT
-static bool is_movk_to_zr(uint32_t insn) {
- return ((insn & 0xffe0001f) == 0xf280001f);
-}
-#endif
-
bool NativePostCallNop::patch(int32_t oopmap_slot, int32_t cb_offset) {
if (((oopmap_slot & 0xff) != oopmap_slot) || ((cb_offset & 0xffffff) != cb_offset)) {
return false; // cannot encode
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
index df5d97c2376..c30cb911d96 100644
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
@@ -526,14 +526,31 @@ inline NativeLdSt* NativeLdSt_at(address addr) {
// can store an offset from the initial nop to the nmethod.
class NativePostCallNop: public NativeInstruction {
+private:
+ static bool is_movk_to_zr(uint32_t insn) {
+ return ((insn & 0xffe0001f) == 0xf280001f);
+ }
+
public:
+ enum AArch64_specific_constants {
+ // The two parts should be checked separately to prevent out of bounds access in case
+ // the return address points to the deopt handler stub code entry point, which could be
+ // at the end of a page.
+ first_check_size = instruction_size
+ };
+
bool check() const {
- uint64_t insns = *(uint64_t*)addr_at(0);
- // Check for two instructions: nop; movk zr, xx
- // These instructions only ever appear together in a post-call
- // NOP, so it's unnecessary to check that the third instruction is
- // a MOVK as well.
- return (insns & 0xffe0001fffffffff) == 0xf280001fd503201f;
+ // Check the first instruction is NOP.
+ if (is_nop()) {
+ uint32_t insn = *(uint32_t*)addr_at(first_check_size);
+ // Check next instruction is MOVK zr, xx.
+ // These instructions only ever appear together in a post-call
+ // NOP, so it's unnecessary to check that the third instruction is
+ // a MOVK as well.
+ return is_movk_to_zr(insn);
+ }
+
+ return false;
}
bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const {
diff --git a/src/hotspot/cpu/aarch64/runtime_aarch64.cpp b/src/hotspot/cpu/aarch64/runtime_aarch64.cpp
index d45f9865bd2..e36aa21b567 100644
--- a/src/hotspot/cpu/aarch64/runtime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/runtime_aarch64.cpp
@@ -260,8 +260,6 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
//------------------------------generate_exception_blob---------------------------
// creates exception blob at the end
-// Using exception blob, this code is jumped from a compiled method.
-// (see emit_exception_handler in aarch64.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
diff --git a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
index c1eabed8ade..dd70c98797f 100644
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
@@ -1375,7 +1375,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ ldr(r10, Address(rmethod, Method::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
__ lea(rscratch2, unsatisfied);
- __ ldr(rscratch2, rscratch2);
__ cmp(r10, rscratch2);
__ br(Assembler::NE, L);
__ call_VM(noreg,
diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad
index 92c0df68deb..af010caf616 100644
--- a/src/hotspot/cpu/arm/arm.ad
+++ b/src/hotspot/cpu/arm/arm.ad
@@ -105,14 +105,8 @@ class HandlerImpl {
public:
- static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
- static uint size_exception_handler() {
- return ( 3 * 4 );
- }
-
-
static uint size_deopt_handler() {
return ( 9 * 4 );
}
@@ -876,26 +870,6 @@ uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
//=============================================================================
-// Emit exception handler code.
-int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm) {
- address base = __ start_a_stub(size_exception_handler());
- if (base == nullptr) {
- ciEnv::current()->record_failure("CodeCache is full");
- return 0; // CodeBuffer::expand failed
- }
-
- int offset = __ offset();
-
- // OK to trash LR, because exception blob will kill it
- __ jump(OptoRuntime::exception_blob()->entry_point(), relocInfo::runtime_call_type, LR_tmp);
-
- assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
-
- __ end_a_stub();
-
- return offset;
-}
-
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
// Can't use any of the current frame's registers as we may have deopted
// at a poll and everything can be live.
@@ -906,19 +880,28 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
}
int offset = __ offset();
- address deopt_pc = __ pc();
- __ sub(SP, SP, wordSize); // make room for saved PC
- __ push(LR); // save LR that may be live when we get here
- __ mov_relative_address(LR, deopt_pc);
- __ str(LR, Address(SP, wordSize)); // save deopt PC
- __ pop(LR); // restore LR
+ Label start;
+ __ bind(start);
+
__ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
+ int entry_offset = __ offset();
+ address deopt_pc = __ pc();
+ // Preserve R0 and reserve space for the address of the entry point
+ __ push(RegisterSet(R0) | RegisterSet(R1));
+ // Store the entry point address
+ __ mov_relative_address(R0, deopt_pc);
+ __ str(R0, Address(SP, wordSize));
+ __ pop(R0); // restore R0
+ __ b(start);
+
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
+ assert(__ offset() - entry_offset >= NativePostCallNop::first_check_size,
+ "out of bounds read in post-call NOP check");
__ end_a_stub();
- return offset;
+ return entry_offset;
}
bool Matcher::match_rule_supported(int opcode) {
diff --git a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
index 219c49d1f14..f168a34f140 100644
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
@@ -272,14 +272,22 @@ int LIR_Assembler::emit_deopt_handler() {
int offset = code_offset();
- __ mov_relative_address(LR, __ pc());
- __ push(LR); // stub expects LR to be saved
+ Label start;
+ __ bind(start);
+
__ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
+ int entry_offset = __ offset();
+ __ mov_relative_address(LR, __ pc());
+ __ push(LR); // stub expects LR to be saved
+ __ b(start);
+
assert(code_offset() - offset <= deopt_handler_size(), "overflow");
+ assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
+ "out of bounds read in post-call NOP check");
__ end_a_stub();
- return offset;
+ return entry_offset;
}
diff --git a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.hpp b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.hpp
index 77d13532685..615d2f188ff 100644
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.hpp
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.hpp
@@ -54,7 +54,7 @@
enum {
_call_stub_size = 16,
_exception_handler_size = PRODUCT_ONLY(68) NOT_PRODUCT(68+60),
- _deopt_handler_size = 16
+ _deopt_handler_size = 20
};
public:
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
index ee856bcfe60..82385bf0244 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
@@ -430,6 +430,13 @@ inline NativeCall* nativeCall_before(address return_address) {
class NativePostCallNop: public NativeInstruction {
public:
+ enum arm_specific_constants {
+ // If the check is adjusted to read beyond the size of the instruction sequence at the deopt
+ // handler stub code entry point, it has to happen in two stages to prevent out of bounds
+ // access in case the return address points to the entry point, which could be at
+ // the end of a page.
+ first_check_size = instruction_size
+ };
bool check() const { return is_nop(); }
bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const { return false; }
bool patch(int32_t oopmap_slot, int32_t cb_offset) { return false; }
diff --git a/src/hotspot/cpu/arm/runtime_arm.cpp b/src/hotspot/cpu/arm/runtime_arm.cpp
index 8d48de5795a..29fd0aa0a10 100644
--- a/src/hotspot/cpu/arm/runtime_arm.cpp
+++ b/src/hotspot/cpu/arm/runtime_arm.cpp
@@ -182,8 +182,6 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
//------------------------------ generate_exception_blob ---------------------------
// creates exception blob at the end
-// Using exception blob, this code is jumped from a compiled method.
-// (see emit_exception_handler in sparc.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
index 108da2039f6..0b48653ae64 100644
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
@@ -264,12 +264,19 @@ int LIR_Assembler::emit_deopt_handler() {
}
int offset = code_offset();
+ Label start;
+
+ __ bind(start);
__ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);
+ int entry_offset = __ offset();
+ __ b(start);
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
+ assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
+ "out of bounds read in post-call NOP check");
__ end_a_stub();
- return offset;
+ return entry_offset;
}
diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.hpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.hpp
index e4de2eb5c46..6a2f6264850 100644
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.hpp
@@ -63,7 +63,7 @@ enum {
_static_call_stub_size = 4 * BytesPerInstWord + MacroAssembler::b64_patchable_size, // or smaller
_call_stub_size = _static_call_stub_size + MacroAssembler::trampoline_stub_size, // or smaller
_exception_handler_size = MacroAssembler::b64_patchable_size, // or smaller
- _deopt_handler_size = MacroAssembler::bl64_patchable_size
+ _deopt_handler_size = MacroAssembler::bl64_patchable_size + BytesPerInstWord
};
// '_static_call_stub_size' is only used on ppc (see LIR_Assembler::emit_static_call_stub()
diff --git a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp
index dcb5c2bb3cb..75ca50674bf 100644
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp
@@ -51,8 +51,6 @@ class NativeInstruction {
friend class Relocation;
public:
- bool is_post_call_nop() const { return MacroAssembler::is_post_call_nop(long_at(0)); }
-
bool is_jump() const { return Assembler::is_b(long_at(0)); } // See NativeGeneralJump.
bool is_sigtrap_ic_miss_check() {
@@ -531,6 +529,14 @@ class NativePostCallNop: public NativeInstruction {
};
public:
+ enum ppc_specific_constants {
+ // If the check is adjusted to read beyond the size of the instruction at the deopt handler stub
+ // code entry point, it has to happen in two stages to prevent out of bounds access in case
+ // the return address points to the entry point, which could be at the end of a page.
+ first_check_size = BytesPerInstWord
+ };
+
+ bool is_post_call_nop() const { return MacroAssembler::is_post_call_nop(long_at(0)); }
bool check() const { return is_post_call_nop(); }
bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const {
uint32_t instr_bits = long_at(0);
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index c169d673aaf..762536df07f 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -2088,17 +2088,11 @@ class HandlerImpl {
public:
- static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
- static uint size_exception_handler() {
- // The exception_handler is a b64_patchable.
- return MacroAssembler::b64_patchable_size;
- }
-
static uint size_deopt_handler() {
// The deopt_handler is a bl64_patchable.
- return MacroAssembler::bl64_patchable_size;
+ return MacroAssembler::bl64_patchable_size + BytesPerInstWord;
}
};
@@ -2114,22 +2108,6 @@ public:
source %{
-int HandlerImpl::emit_exception_handler(C2_MacroAssembler *masm) {
- address base = __ start_a_stub(size_exception_handler());
- if (base == nullptr) {
- ciEnv::current()->record_failure("CodeCache is full");
- return 0; // CodeBuffer::expand failed
- }
-
- int offset = __ offset();
- __ b64_patchable((address)OptoRuntime::exception_blob()->content_begin(),
- relocInfo::runtime_call_type);
- assert(__ offset() - offset == (int)size_exception_handler(), "must be fixed size");
- __ end_a_stub();
-
- return offset;
-}
-
// The deopt_handler is like the exception handler, but it calls to
// the deoptimization blob instead of jumping to the exception blob.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
@@ -2140,12 +2118,23 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
}
int offset = __ offset();
+
+ Label start;
+ __ bind(start);
+
__ bl64_patchable((address)SharedRuntime::deopt_blob()->unpack(),
relocInfo::runtime_call_type);
+
+ int entry_offset = __ offset();
+
+ __ b(start);
+
assert(__ offset() - offset == (int) size_deopt_handler(), "must be fixed size");
+ assert(__ offset() - entry_offset >= NativePostCallNop::first_check_size,
+ "out of bounds read in post-call NOP check");
__ end_a_stub();
- return offset;
+ return entry_offset;
}
//=============================================================================
diff --git a/src/hotspot/cpu/ppc/runtime_ppc.cpp b/src/hotspot/cpu/ppc/runtime_ppc.cpp
index 2654075f702..ab658e9de58 100644
--- a/src/hotspot/cpu/ppc/runtime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/runtime_ppc.cpp
@@ -46,7 +46,6 @@
//------------------------------generate_exception_blob---------------------------
// Creates exception blob at the end.
-// Using exception blob, this code is jumped from a compiled method.
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index db45a2fa4c8..4e427ace404 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -83,7 +83,6 @@ class RegisterSaver {
static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
int* out_frame_size_in_bytes,
bool generate_oop_map,
- int return_pc_adjustment,
ReturnPCLocation return_pc_location,
bool save_vectors = false);
static void restore_live_registers_and_pop_frame(MacroAssembler* masm,
@@ -262,7 +261,6 @@ static const RegisterSaver::LiveRegType RegisterSaver_LiveVecRegs[] = {
OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
int* out_frame_size_in_bytes,
bool generate_oop_map,
- int return_pc_adjustment,
ReturnPCLocation return_pc_location,
bool save_vectors) {
// Push an abi_reg_args-frame and store all registers which may be live.
@@ -271,7 +269,6 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
// propagated to the RegisterMap of the caller frame during
// StackFrameStream construction (needed for deoptimization; see
// compiledVFrame::create_stack_value).
- // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.
// Updated return pc is returned in R31 (if not return_pc_is_pre_saved).
// calculate frame size
@@ -305,14 +302,11 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
// Do the save_LR by hand and adjust the return pc if requested.
switch (return_pc_location) {
case return_pc_is_lr: __ mflr(R31); break;
- case return_pc_is_pre_saved: assert(return_pc_adjustment == 0, "unsupported"); break;
+ case return_pc_is_pre_saved: break;
case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
default: ShouldNotReachHere();
}
if (return_pc_location != return_pc_is_pre_saved) {
- if (return_pc_adjustment != 0) {
- __ addi(R31, R31, return_pc_adjustment);
- }
__ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
}
@@ -2907,22 +2901,15 @@ void SharedRuntime::generate_deopt_blob() {
// deopt_handler: call_deopt_stub
// cur. return pc --> ...
//
- // So currently SR_LR points behind the call in the deopt handler.
- // We adjust it such that it points to the start of the deopt handler.
// The return_pc has been stored in the frame of the deoptee and
// will replace the address of the deopt_handler in the call
// to Deoptimization::fetch_unroll_info below.
- // We can't grab a free register here, because all registers may
- // contain live values, so let the RegisterSaver do the adjustment
- // of the return pc.
- const int return_pc_adjustment_no_exception = -MacroAssembler::bl64_patchable_size;
// Push the "unpack frame"
// Save everything in sight.
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ true,
- return_pc_adjustment_no_exception,
RegisterSaver::return_pc_is_lr);
assert(map != nullptr, "OopMap must have been created");
@@ -2957,7 +2944,6 @@ void SharedRuntime::generate_deopt_blob() {
RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
- /*return_pc_adjustment_exception=*/ 0,
RegisterSaver::return_pc_is_pre_saved);
// Deopt during an exception. Save exec mode for unpack_frames.
@@ -2975,7 +2961,6 @@ void SharedRuntime::generate_deopt_blob() {
RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
- /*return_pc_adjustment_reexecute=*/ 0,
RegisterSaver::return_pc_is_pre_saved);
__ li(exec_mode_reg, Deoptimization::Unpack_reexecute);
#endif
@@ -3266,7 +3251,6 @@ SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr)
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&frame_size_in_bytes,
/*generate_oop_map=*/ true,
- /*return_pc_adjustment=*/0,
return_pc_location, save_vectors);
// The following is basically a call_VM. However, we need the precise
@@ -3367,7 +3351,6 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&frame_size_in_bytes,
/*generate_oop_map*/ true,
- /*return_pc_adjustment*/ 0,
RegisterSaver::return_pc_is_lr);
// Use noreg as last_Java_pc, the return pc will be reconstructed
diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
index 9d8ae770ccf..e77a2067e89 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
@@ -377,12 +377,20 @@ int LIR_Assembler::emit_deopt_handler() {
int offset = code_offset();
- __ auipc(ra, 0);
- __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+ Label start;
+ __ bind(start);
+
+ __ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+
+ int entry_offset = __ offset();
+ __ j(start);
+
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
+ assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
+ "out of bounds read in post-call NOP check");
__ end_a_stub();
- return offset;
+ return entry_offset;
}
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp
index e4efb2c171d..ed2ab0c4861 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp
@@ -72,7 +72,7 @@ private:
// See emit_exception_handler for detail
_exception_handler_size = DEBUG_ONLY(256) NOT_DEBUG(32), // or smaller
// See emit_deopt_handler for detail
- // auipc (1) + far_jump (2)
+ // far_call (2) + j (1)
_deopt_handler_size = 1 * MacroAssembler::instruction_size +
2 * MacroAssembler::instruction_size
};
diff --git a/src/hotspot/cpu/riscv/nativeInst_riscv.hpp b/src/hotspot/cpu/riscv/nativeInst_riscv.hpp
index d990cfbc50d..b28e33759b2 100644
--- a/src/hotspot/cpu/riscv/nativeInst_riscv.hpp
+++ b/src/hotspot/cpu/riscv/nativeInst_riscv.hpp
@@ -311,12 +311,19 @@ inline bool NativeInstruction::is_jump_or_nop() {
// can store an offset from the initial nop to the nmethod.
class NativePostCallNop: public NativeInstruction {
public:
+ enum RISCV_specific_constants {
+ // The two parts should be checked separately to prevent out of bounds access in
+ // case the return address points to the deopt handler stub code entry point,
+ // which could be at the end of a page.
+ first_check_size = instruction_size
+ };
+
bool check() const {
// Check for two instructions: nop; lui zr, hi20
// These instructions only ever appear together in a post-call
// NOP, so it's unnecessary to check that the third instruction is
// an addiw as well.
- return is_nop() && MacroAssembler::is_lui_to_zr_at(addr_at(4));
+ return is_nop() && MacroAssembler::is_lui_to_zr_at(addr_at(first_check_size));
}
bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const;
bool patch(int32_t oopmap_slot, int32_t cb_offset);
diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad
index 7acbb5a478b..bb2ed57ef82 100644
--- a/src/hotspot/cpu/riscv/riscv.ad
+++ b/src/hotspot/cpu/riscv/riscv.ad
@@ -1049,15 +1049,10 @@ class HandlerImpl {
public:
- static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
- static uint size_exception_handler() {
- return MacroAssembler::far_branch_size();
- }
-
static uint size_deopt_handler() {
- // count auipc + far branch
+ // count far call + j
return NativeInstruction::instruction_size + MacroAssembler::far_branch_size();
}
};
@@ -1838,25 +1833,6 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const
//=============================================================================
-// Emit exception handler code.
-int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
-{
- // auipc t1, #exception_blob_entry_point
- // jr (offset)t1
- // Note that the code buffer's insts_mark is always relative to insts.
- // That's why we must use the macroassembler to generate a handler.
- address base = __ start_a_stub(size_exception_handler());
- if (base == nullptr) {
- ciEnv::current()->record_failure("CodeCache is full");
- return 0; // CodeBuffer::expand failed
- }
- int offset = __ offset();
- __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
- assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
- __ end_a_stub();
- return offset;
-}
-
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
@@ -1867,12 +1843,19 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
}
int offset = __ offset();
- __ auipc(ra, 0);
- __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+ Label start;
+ __ bind(start);
+
+ __ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+
+ int entry_offset = __ offset();
+ __ j(start);
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
+ assert(__ offset() - entry_offset >= NativePostCallNop::first_check_size,
+ "out of bounds read in post-call NOP check");
__ end_a_stub();
- return offset;
+ return entry_offset;
}
// REQUIRED MATCHER CODE
diff --git a/src/hotspot/cpu/riscv/runtime_riscv.cpp b/src/hotspot/cpu/riscv/runtime_riscv.cpp
index e1add8dbb82..c52d5a31066 100644
--- a/src/hotspot/cpu/riscv/runtime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/runtime_riscv.cpp
@@ -249,8 +249,6 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
//------------------------------generate_exception_blob---------------------------
// creates exception blob at the end
-// Using exception blob, this code is jumped from a compiled method.
-// (see emit_exception_handler in riscv.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.hpp b/src/hotspot/cpu/riscv/vm_version_riscv.hpp
index 16f2e5d8f5b..168a3a576d0 100644
--- a/src/hotspot/cpu/riscv/vm_version_riscv.hpp
+++ b/src/hotspot/cpu/riscv/vm_version_riscv.hpp
@@ -89,11 +89,12 @@ class VM_Version : public Abstract_VM_Version {
FLAG_SET_DEFAULT(flag, true); \
} else { \
FLAG_SET_DEFAULT(flag, false); \
- stringStream ss; \
- deps_string(ss, dep0, ##__VA_ARGS__); \
- warning("Cannot enable " #flag ", it's missing dependent extension(s) %s", ss.as_string(true)); \
/* Sync CPU features with flags */ \
disable_feature(); \
+ stringStream ss; \
+ ss.print("missing dependent extension(s): "); \
+ deps_string(ss, dep0, ##__VA_ARGS__); \
+ log_disabled(ss.as_string(true)); \
} \
} else { \
/* Sync CPU features with flags */ \
@@ -101,11 +102,12 @@ class VM_Version : public Abstract_VM_Version {
disable_feature(); \
} else if (!deps_all_enabled(dep0, ##__VA_ARGS__)) { \
FLAG_SET_DEFAULT(flag, false); \
- stringStream ss; \
- deps_string(ss, dep0, ##__VA_ARGS__); \
- warning("Cannot enable " #flag ", it's missing dependent extension(s) %s", ss.as_string(true)); \
/* Sync CPU features with flags */ \
disable_feature(); \
+ stringStream ss; \
+ ss.print("missing dependent extension(s): "); \
+ deps_string(ss, dep0, ##__VA_ARGS__); \
+ log_disabled(ss.as_string(true)); \
} \
} \
} \
@@ -136,6 +138,7 @@ class VM_Version : public Abstract_VM_Version {
RVExtFeatures::current()->clear_feature(_cpu_feature_index);
}
void log_enabled();
+ void log_disabled(const char* reason);
protected:
bool deps_all_enabled(RVExtFeatureValue* dep0, ...) {
diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
index 298234156c3..93d6051aa76 100644
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
@@ -272,14 +272,27 @@ int LIR_Assembler::emit_deopt_handler() {
// Not enough space left for the handler.
bailout("deopt handler overflow");
return -1;
- } int offset = code_offset();
+ }
+
+ int offset = code_offset();
+
+ Label start;
+ __ bind(start);
+
// Size must be constant (see HandlerImpl::emit_deopt_handler).
__ load_const(Z_R1_scratch, SharedRuntime::deopt_blob()->unpack());
__ call(Z_R1_scratch);
+
+ int entry_offset = __ offset();
+
+ __ z_bru(start);
+
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
+ assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
+ "out of bounds read in post-call NOP check");
__ end_a_stub();
- return offset;
+ return entry_offset;
}
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
diff --git a/src/hotspot/cpu/s390/nativeInst_s390.hpp b/src/hotspot/cpu/s390/nativeInst_s390.hpp
index 16400df3f26..9852bc410b1 100644
--- a/src/hotspot/cpu/s390/nativeInst_s390.hpp
+++ b/src/hotspot/cpu/s390/nativeInst_s390.hpp
@@ -649,6 +649,13 @@ class NativeGeneralJump: public NativeInstruction {
class NativePostCallNop: public NativeInstruction {
public:
+ enum z_specific_constants {
+ // Once the check is implemented, this has to specify the number of bytes checked on the
+ // first read. If the check would read beyond the size of the instruction at the deopt handler
+ // stub code entry point, then it has to happen in two stages to prevent out of bounds access
+ // in case the return address points to the entry point, which could be at the end of a page.
+ first_check_size = 0 // check is unimplemented
+ };
bool check() const { Unimplemented(); return false; }
bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const { return false; }
bool patch(int32_t oopmap_slot, int32_t cb_offset) { Unimplemented(); return false; }
diff --git a/src/hotspot/cpu/s390/runtime_s390.cpp b/src/hotspot/cpu/s390/runtime_s390.cpp
index 314c407af91..658fba069b4 100644
--- a/src/hotspot/cpu/s390/runtime_s390.cpp
+++ b/src/hotspot/cpu/s390/runtime_s390.cpp
@@ -43,8 +43,6 @@
//------------------------------generate_exception_blob---------------------------
// creates exception blob at the end
-// Using exception blob, this code is jumped from a compiled method.
-// (see emit_exception_handler in s390.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad
index cab3965ecfa..6fe051b55c7 100644
--- a/src/hotspot/cpu/s390/s390.ad
+++ b/src/hotspot/cpu/s390/s390.ad
@@ -1649,15 +1649,10 @@ source_hpp %{ // Header information of the source block.
class HandlerImpl {
public:
- static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
- static uint size_exception_handler() {
- return NativeJump::max_instruction_size();
- }
-
static uint size_deopt_handler() {
- return NativeCall::max_instruction_size();
+ return NativeCall::max_instruction_size() + MacroAssembler::jump_pcrelative_size();
}
};
@@ -1672,43 +1667,6 @@ public:
source %{
-// This exception handler code snippet is placed after the method's
-// code. It is the return point if an exception occurred. it jumps to
-// the exception blob.
-//
-// If the method gets deoptimized, the method and this code snippet
-// get patched.
-//
-// 1) Trampoline code gets patched into the end of this exception
-// handler. the trampoline code jumps to the deoptimization blob.
-//
-// 2) The return address in the method's code will get patched such
-// that it jumps to the trampoline.
-//
-// 3) The handler will get patched such that it does not jump to the
-// exception blob, but to an entry in the deoptimization blob being
-// aware of the exception.
-int HandlerImpl::emit_exception_handler(C2_MacroAssembler *masm) {
- Register temp_reg = Z_R1;
-
- address base = __ start_a_stub(size_exception_handler());
- if (base == nullptr) {
- ciEnv::current()->record_failure("CodeCache is full");
- return 0; // CodeBuffer::expand failed
- }
-
- int offset = __ offset();
- // Use unconditional pc-relative jump with 32-bit range here.
- __ load_const_optimized(temp_reg, (address)OptoRuntime::exception_blob()->content_begin());
- __ z_br(temp_reg);
-
- assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
-
- __ end_a_stub();
-
- return offset;
-}
-
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
address base = __ start_a_stub(size_deopt_handler());
@@ -1720,14 +1678,24 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
int offset = __ offset();
+ Label start;
+ __ bind(start);
+
// Size_deopt_handler() must be exact on zarch, so for simplicity
// we do not use load_const_opt here.
__ load_const(Z_R1, SharedRuntime::deopt_blob()->unpack());
__ call(Z_R1);
+
+ int entry_offset = __ offset();
+
+ __ z_bru(start);
+
assert(__ offset() - offset == (int) size_deopt_handler(), "must be fixed size");
+ assert(__ offset() - entry_offset >= NativePostCallNop::first_check_size,
+ "out of bounds read in post-call NOP check");
__ end_a_stub();
- return offset;
+ return entry_offset;
}
//=============================================================================
diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
index a3605f649cc..5b6f7dcd984 100644
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
@@ -2544,14 +2544,10 @@ void SharedRuntime::generate_deopt_blob() {
// Normal entry (non-exception case)
//
// We have been called from the deopt handler of the deoptee.
- // Z_R14 points behind the call in the deopt handler. We adjust
- // it such that it points to the start of the deopt handler.
+ // Z_R14 points to the entry point of the deopt handler.
// The return_pc has been stored in the frame of the deoptee and
// will replace the address of the deopt_handler in the call
// to Deoptimization::fetch_unroll_info below.
- // The (int) cast is necessary, because -((unsigned int)14)
- // is an unsigned int.
- __ add2reg(Z_R14, -(int)NativeCall::max_instruction_size());
const Register exec_mode_reg = Z_tmp_1;
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
index edeb0baea0e..a2ea7af606d 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -450,14 +450,22 @@ int LIR_Assembler::emit_deopt_handler() {
}
int offset = code_offset();
- InternalAddress here(__ pc());
- __ pushptr(here.addr(), rscratch1);
- __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+ Label start;
+ __ bind(start);
+
+ __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+
+ int entry_offset = __ offset();
+
+ __ jmp(start);
+
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
+ assert(code_offset() - entry_offset >= NativePostCallNop::first_check_size,
+ "out of bounds read in post-call NOP check");
__ end_a_stub();
- return offset;
+ return entry_offset;
}
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp
index 8524dc90276..33f7b063e77 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp
@@ -48,7 +48,7 @@
enum {
_call_stub_size = 28,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
- _deopt_handler_size = 17
+ _deopt_handler_size = 7
};
public:
diff --git a/src/hotspot/cpu/x86/nativeInst_x86.hpp b/src/hotspot/cpu/x86/nativeInst_x86.hpp
index 3e767006480..ec7fc3b154a 100644
--- a/src/hotspot/cpu/x86/nativeInst_x86.hpp
+++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp
@@ -73,6 +73,7 @@ class NativeInstruction {
s_char sbyte_at(int offset) const { return *(s_char*) addr_at(offset); }
u_char ubyte_at(int offset) const { return *(u_char*) addr_at(offset); }
+ jshort short_at(int offset) const { return *(jshort*) addr_at(offset); }
jint int_at(int offset) const { return *(jint*) addr_at(offset); }
intptr_t ptr_at(int offset) const { return *(intptr_t*) addr_at(offset); }
@@ -578,10 +579,15 @@ public:
instruction_code = 0x0f,
instruction_size = 8,
instruction_offset = 0,
- displacement_offset = 4
+ displacement_offset = 4,
+
+ // The two parts should be checked separately to prevent an out-of-bounds access in case
+ // the return address points to the deopt handler stub code entry point, which could be
+ // at the end of a page.
+ first_check_size = 2
};
- bool check() const { return int_at(0) == 0x841f0f; }
+ bool check() const { return short_at(0) == 0x1f0f && short_at(first_check_size) == 0x0084; }
bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const {
int32_t data = int_at(displacement_offset);
if (data == 0) {
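
A minimal standalone sketch of the split check above, assuming the post-call NOP is the 8-byte sequence 0F 1F 84 00 followed by a 32-bit displacement (as the enum states): reading the four leading bytes with one 32-bit load from the deopt handler entry point could touch the following, possibly unmapped, page, so the first read covers only first_check_size (2) bytes and the second half is read only if the first half matches. The helper name is hypothetical.

#include <cstdint>
#include <cstring>

// Hypothetical helper mirroring NativePostCallNop::check(): two 16-bit reads
// instead of one 32-bit read, so the next page is never touched unless the
// first two bytes already look like the NOP opcode (0F 1F).
static bool looks_like_post_call_nop(const uint8_t* addr) {
  uint16_t first;
  std::memcpy(&first, addr, sizeof(first));        // bytes 0-1, little-endian -> 0x1f0f
  if (first != 0x1f0f) {
    return false;                                  // no second read needed
  }
  uint16_t second;
  std::memcpy(&second, addr + 2, sizeof(second));  // bytes 2-3 -> 0x0084
  return second == 0x0084;
}
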
diff --git a/src/hotspot/cpu/x86/runtime_x86_64.cpp b/src/hotspot/cpu/x86/runtime_x86_64.cpp
index 7b98cf4fad7..5bf65299a0c 100644
--- a/src/hotspot/cpu/x86/runtime_x86_64.cpp
+++ b/src/hotspot/cpu/x86/runtime_x86_64.cpp
@@ -242,8 +242,6 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
//------------------------------generate_exception_blob---------------------------
// creates exception blob at the end
-// Using exception blob, this code is jumped from a compiled method.
-// (see emit_exception_handler in x86_64.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad
index a9748617e1f..be9889b0a99 100644
--- a/src/hotspot/cpu/x86/x86.ad
+++ b/src/hotspot/cpu/x86/x86.ad
@@ -2767,21 +2767,11 @@ class HandlerImpl {
public:
- static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
- static uint size_exception_handler() {
- // NativeCall instruction size is the same as NativeJump.
- // exception handler starts out as jump and can be patched to
- // a call be deoptimization. (4932387)
- // Note that this value is also credited (in output.cpp) to
- // the size of the code section.
- return NativeJump::instruction_size;
- }
-
static uint size_deopt_handler() {
- // three 5 byte instructions plus one move for unreachable address.
- return 15+3;
+ // one call and one jmp.
+ return 7;
}
};
@@ -2873,24 +2863,6 @@ int MachNode::compute_padding(int current_offset) const {
}
}
-// Emit exception handler code.
-// Stuff framesize into a register and call a VM stub routine.
-int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm) {
-
- // Note that the code buffer's insts_mark is always relative to insts.
- // That's why we must use the macroassembler to generate a handler.
- address base = __ start_a_stub(size_exception_handler());
- if (base == nullptr) {
- ciEnv::current()->record_failure("CodeCache is full");
- return 0; // CodeBuffer::expand failed
- }
- int offset = __ offset();
- __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
- assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
- __ end_a_stub();
- return offset;
-}
-
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
@@ -2903,21 +2875,20 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
}
int offset = __ offset();
- address the_pc = (address) __ pc();
- Label next;
- // push a "the_pc" on the stack without destroying any registers
- // as they all may be live.
+ Label start;
+ __ bind(start);
- // push address of "next"
- __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
- __ bind(next);
- // adjust it so it matches "the_pc"
- __ subptr(Address(rsp, 0), __ offset() - offset);
+ __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+
+ int entry_offset = __ offset();
+
+ __ jmp(start);
- __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow %d", (__ offset() - offset));
+ assert(__ offset() - entry_offset >= NativePostCallNop::first_check_size,
+ "out of bounds read in post-call NOP check");
__ end_a_stub();
- return offset;
+ return entry_offset;
}
static Assembler::Width widthForType(BasicType bt) {
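
For reference, a sketch of the size accounting behind size_deopt_handler() returning 7 and emit_deopt_handler() now returning entry_offset. The byte sizes assume a rel32 call followed by a short (rel8) backward jmp, which is what the asserts above imply; the constant names are made up.

// Assumed instruction sizes (x86-64): E8 rel32 call = 5 bytes, EB rel8 jmp = 2 bytes.
constexpr int kCallRel32Bytes = 5;
constexpr int kShortJmpBytes  = 2;

// Total stub size matches HandlerImpl::size_deopt_handler() == 7.
constexpr int kDeoptHandlerBytes = kCallRel32Bytes + kShortJmpBytes;
static_assert(kDeoptHandlerBytes == 7, "one call and one jmp");

// The returned entry offset points just past the call, i.e. at the backward jmp,
// which is also the return address the call pushes. The bytes following the
// entry point must cover at least NativePostCallNop::first_check_size (2).
constexpr int kEntryOffsetFromStart = kCallRel32Bytes;
static_assert(kDeoptHandlerBytes - kEntryOffsetFromStart >= 2,
              "post-call NOP check must not read past the stub");
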
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index 5f81912c0d6..48bd5e05816 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -1747,6 +1747,9 @@ size_t os::pd_pretouch_memory(void* first, void* last, size_t page_size) {
return page_size;
}
+void os::numa_set_thread_affinity(Thread *thread, int node) {
+}
+
void os::numa_make_global(char *addr, size_t bytes) {
}
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index 3e5fa8b84e1..0b37cb100f6 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -1581,6 +1581,9 @@ size_t os::pd_pretouch_memory(void* first, void* last, size_t page_size) {
return page_size;
}
+void os::numa_set_thread_affinity(Thread *thread, int node) {
+}
+
void os::numa_make_global(char *addr, size_t bytes) {
}
diff --git a/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
index 8aeb64ef18c..f556bc57f26 100644
--- a/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
@@ -209,14 +209,14 @@ class CgroupV1Subsystem: public CgroupSubsystem {
bool pids_max(uint64_t& result) override;
bool pids_current(uint64_t& result) override;
- bool is_containerized();
+ bool is_containerized() override;
- const char * container_type() {
+ const char * container_type() override {
return "cgroupv1";
}
- CachingCgroupController* memory_controller() { return _memory; }
- CachingCgroupController* cpu_controller() { return _cpu; }
- CgroupCpuacctController* cpuacct_controller() { return _cpuacct; }
+ CachingCgroupController* memory_controller() override { return _memory; }
+ CachingCgroupController* cpu_controller() override { return _cpu; }
+ CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; }
private:
/* controllers */
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index a345663dd5b..30033903ca3 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -1770,7 +1770,9 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
{EM_LOONGARCH, EM_LOONGARCH, ELFCLASS64, ELFDATA2LSB, (char*)"LoongArch"},
};
-#if (defined AMD64)
+#if (defined IA32)
+ static Elf32_Half running_arch_code=EM_386;
+#elif (defined AMD64) || (defined X32)
static Elf32_Half running_arch_code=EM_X86_64;
#elif (defined __sparc) && (defined _LP64)
static Elf32_Half running_arch_code=EM_SPARCV9;
@@ -1804,7 +1806,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
static Elf32_Half running_arch_code=EM_LOONGARCH;
#else
#error Method os::dll_load requires that one of following is defined:\
- AARCH64, ALPHA, ARM, AMD64, LOONGARCH64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, RISCV, S390, SH, __sparc
+ AARCH64, ALPHA, ARM, AMD64, IA32, LOONGARCH64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, RISCV, S390, SH, __sparc
#endif
// Identify compatibility class for VM's architecture and library's architecture
@@ -1866,6 +1868,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
}
void * os::Linux::dlopen_helper(const char *filename, char *ebuf, int ebuflen) {
+#ifndef IA32
bool ieee_handling = IEEE_subnormal_handling_OK();
if (!ieee_handling) {
Events::log_dll_message(nullptr, "IEEE subnormal handling check failed before loading %s", filename);
@@ -1888,9 +1891,14 @@ void * os::Linux::dlopen_helper(const char *filename, char *ebuf, int ebuflen) {
// numerical "accuracy", but we need to protect Java semantics first
// and foremost. See JDK-8295159.
+ // This workaround is ineffective on IA32 systems because the MXCSR
+ // register (which controls flush-to-zero mode) is not stored in the
+ // legacy fenv.
+
fenv_t default_fenv;
int rtn = fegetenv(&default_fenv);
assert(rtn == 0, "fegetenv must succeed");
+#endif // IA32
void* result;
JFR_ONLY(NativeLibraryLoadEvent load_event(filename, &result);)
@@ -1910,6 +1918,7 @@ void * os::Linux::dlopen_helper(const char *filename, char *ebuf, int ebuflen) {
} else {
Events::log_dll_message(nullptr, "Loaded shared library %s", filename);
log_info(os)("shared library load of %s was successful", filename);
+#ifndef IA32
// Quickly test to make sure subnormals are correctly handled.
if (! IEEE_subnormal_handling_OK()) {
// We just dlopen()ed a library that mangled the floating-point flags.
@@ -1935,6 +1944,7 @@ void * os::Linux::dlopen_helper(const char *filename, char *ebuf, int ebuflen) {
assert(false, "fesetenv didn't work");
}
}
+#endif // IA32
}
return result;
}
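
The #ifndef IA32 guards above wrap a save/check/restore pattern around dlopen(). Below is a self-contained sketch of that pattern; the subnormal re-probe is elided and load_with_fp_guard is a hypothetical name (the code above only restores the snapshot when the re-probe fails, while this sketch restores unconditionally for brevity). As the added comment notes, on IA32 the guard would be pointless because the legacy fenv does not capture MXCSR, where the flush-to-zero bits live.

#include <dlfcn.h>
#include <cfenv>

// Sketch only: capture the FP environment before dlopen() and restore it
// afterwards, discarding any flush-to-zero / denormals-are-zero mode a badly
// behaved library may have enabled in its static initializers.
static void* load_with_fp_guard(const char* path) {
  std::fenv_t saved;
  if (std::fegetenv(&saved) != 0) {
    return nullptr;                   // cannot snapshot the FP environment, bail out
  }
  void* handle = dlopen(path, RTLD_LAZY);
  // ... a real implementation would re-probe subnormal handling here ...
  std::fesetenv(&saved);              // put the snapshot back
  return handle;
}
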
@@ -2433,6 +2443,7 @@ void os::Linux::print_uptime_info(outputStream* st) {
if (ret == 0) {
os::print_dhm(st, "OS uptime:", (long) sinfo.uptime);
}
+ assert(ret == 0, "sysinfo failed: %s", os::strerror(errno));
}
bool os::Linux::print_container_info(outputStream* st) {
@@ -2597,7 +2608,8 @@ void os::print_memory_info(outputStream* st) {
// values in struct sysinfo are "unsigned long"
struct sysinfo si;
- sysinfo(&si);
+ int ret = sysinfo(&si);
+ assert(ret == 0, "sysinfo failed: %s", os::strerror(errno));
physical_memory_size_type phys_mem = physical_memory();
st->print(", physical " PHYS_MEM_TYPE_FORMAT "k",
phys_mem >> 10);
@@ -2605,10 +2617,12 @@ void os::print_memory_info(outputStream* st) {
(void)os::available_memory(avail_mem);
st->print("(" PHYS_MEM_TYPE_FORMAT "k free)",
avail_mem >> 10);
- st->print(", swap " UINT64_FORMAT "k",
- ((jlong)si.totalswap * si.mem_unit) >> 10);
- st->print("(" UINT64_FORMAT "k free)",
- ((jlong)si.freeswap * si.mem_unit) >> 10);
+ if (ret == 0) {
+ st->print(", swap " UINT64_FORMAT "k",
+ ((jlong)si.totalswap * si.mem_unit) >> 10);
+ st->print("(" UINT64_FORMAT "k free)",
+ ((jlong)si.freeswap * si.mem_unit) >> 10);
+ }
st->cr();
st->print("Page Sizes: ");
_page_sizes.print_on(st);
@@ -2991,6 +3005,10 @@ size_t os::pd_pretouch_memory(void* first, void* last, size_t page_size) {
return page_size;
}
+void os::numa_set_thread_affinity(Thread* thread, int node) {
+ Linux::numa_set_thread_affinity(thread->osthread()->thread_id(), node);
+}
+
void os::numa_make_global(char *addr, size_t bytes) {
Linux::numa_interleave_memory(addr, bytes);
}
@@ -3173,6 +3191,8 @@ bool os::Linux::libnuma_init() {
libnuma_dlsym(handle, "numa_set_bind_policy")));
set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
libnuma_dlsym(handle, "numa_bitmask_isbitset")));
+ set_numa_bitmask_clearbit(CAST_TO_FN_PTR(numa_bitmask_clearbit_func_t,
+ libnuma_dlsym(handle, "numa_bitmask_clearbit")));
set_numa_bitmask_equal(CAST_TO_FN_PTR(numa_bitmask_equal_func_t,
libnuma_dlsym(handle, "numa_bitmask_equal")));
set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
@@ -3187,20 +3207,32 @@ bool os::Linux::libnuma_init() {
libnuma_dlsym(handle, "numa_set_preferred")));
set_numa_get_run_node_mask(CAST_TO_FN_PTR(numa_get_run_node_mask_func_t,
libnuma_v2_dlsym(handle, "numa_get_run_node_mask")));
+ set_numa_sched_setaffinity(CAST_TO_FN_PTR(numa_sched_setaffinity_func_t,
+ libnuma_v2_dlsym(handle, "numa_sched_setaffinity")));
+ set_numa_allocate_cpumask(CAST_TO_FN_PTR(numa_allocate_cpumask_func_t,
+ libnuma_v2_dlsym(handle, "numa_allocate_cpumask")));
if (numa_available() != -1) {
set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
+ set_numa_all_cpus_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_cpus_ptr"));
set_numa_interleave_bitmask(_numa_get_interleave_mask());
set_numa_membind_bitmask(_numa_get_membind());
set_numa_cpunodebind_bitmask(_numa_get_run_node_mask());
+
// Create an index -> node mapping, since nodes are not always consecutive
_nindex_to_node = new (mtInternal) GrowableArray<int>(0, mtInternal);
rebuild_nindex_to_node_map();
+
// Create a cpu -> node mapping
_cpu_to_node = new (mtInternal) GrowableArray<int>(0, mtInternal);
rebuild_cpu_to_node_map();
+
+ // Create a node -> CPUs mapping
+ _numa_affinity_masks = new (mtInternal) GrowableArray<struct bitmask*>(0, mtInternal);
+ build_numa_affinity_masks();
+
return true;
}
}
@@ -3236,6 +3268,42 @@ size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : os::vm_page_size());
}
+void os::Linux::build_numa_affinity_masks() {
+ // We only build the affinity masks if running libnuma v2 (_numa_node_to_cpus_v2
+ // is available) and we have the affinity mask of the process when it started.
+ if (_numa_node_to_cpus_v2 == nullptr || _numa_all_cpus_ptr == nullptr) {
+ return;
+ }
+
+ // It's important that we respect any user configuration by removing the
+ // CPUs we're not allowed to run on from the affinity mask. For example,
+ // if the user runs the JVM with "numactl -C 0-1,4-5" on a machine with
+ // the following NUMA setup:
+ // NUMA 0: CPUs 0-3, NUMA 1: CPUs 4-7
+ // We expect to get the following affinity masks:
+ // Affinity masks: idx 0 = (0, 1), idx 1 = (4, 5)
+
+ const int num_nodes = get_existing_num_nodes();
+ const unsigned num_cpus = (unsigned)os::processor_count();
+
+ for (int i = 0; i < num_nodes; i++) {
+ struct bitmask* affinity_mask = _numa_allocate_cpumask();
+
+ // Fill the affinity mask with all CPUs belonging to NUMA node i
+ _numa_node_to_cpus_v2(i, affinity_mask);
+
+ // Clear the bits of all CPUs that the process is not allowed to
+ // execute tasks on
+ for (unsigned j = 0; j < num_cpus; j++) {
+ if (!_numa_bitmask_isbitset(_numa_all_cpus_ptr, j)) {
+ _numa_bitmask_clearbit(affinity_mask, j);
+ }
+ }
+
+ _numa_affinity_masks->push(affinity_mask);
+ }
+}
+
void os::Linux::rebuild_nindex_to_node_map() {
int highest_node_number = Linux::numa_max_node();
@@ -3351,6 +3419,25 @@ int os::Linux::numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen)
return -1;
}
+void os::Linux::numa_set_thread_affinity(pid_t tid, int node) {
+ // We only set affinity if running libnuma v2 (_numa_sched_setaffinity
+ // is available) and we have all the affinity masks
+ if (_numa_sched_setaffinity == nullptr ||
+ _numa_all_cpus_ptr == nullptr ||
+ _numa_affinity_masks->is_empty()) {
+ return;
+ }
+
+ if (node == -1) {
+ // If the node is -1, the affinity is reverted to the original affinity
+ // of the thread when the VM was started
+ _numa_sched_setaffinity(tid, _numa_all_cpus_ptr);
+ } else {
+ // Normal case, set the affinity to the corresponding affinity mask
+ _numa_sched_setaffinity(tid, _numa_affinity_masks->at(node));
+ }
+}
+
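
As a standalone illustration of the same intersection-and-pin idea as build_numa_affinity_masks() plus numa_set_thread_affinity(), the sketch below talks to libnuma v2 directly (it assumes <numa.h>, numa_all_cpus_ptr and the numa_* symbols resolved above are available); error handling and caching of the per-node masks are omitted, and pin_tid_to_node is a made-up name.

#include <numa.h>        // libnuma v2
#include <sys/types.h>

// Pin a kernel thread (by tid) to the CPUs of one NUMA node, but never to a
// CPU the process was excluded from at startup (e.g. via "numactl -C 0-1,4-5").
static void pin_tid_to_node(pid_t tid, int node) {
  if (numa_available() == -1) {
    return;                                        // libnuma unusable
  }
  struct bitmask* mask = numa_allocate_cpumask();
  numa_node_to_cpus(node, mask);                   // all CPUs of the node
  for (unsigned cpu = 0; cpu < mask->size; cpu++) {
    if (!numa_bitmask_isbitset(numa_all_cpus_ptr, cpu)) {
      numa_bitmask_clearbit(mask, cpu);            // respect the process affinity
    }
  }
  numa_sched_setaffinity(tid, mask);
  numa_free_cpumask(mask);
}
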
int os::Linux::get_node_by_cpu(int cpu_id) {
if (cpu_to_node() != nullptr && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
return cpu_to_node()->at(cpu_id);
@@ -3360,6 +3447,7 @@ int os::Linux::get_node_by_cpu(int cpu_id) {
GrowableArray<int>* os::Linux::_cpu_to_node;
GrowableArray<int>* os::Linux::_nindex_to_node;
+GrowableArray<struct bitmask*>* os::Linux::_numa_affinity_masks;
os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
os::Linux::numa_node_to_cpus_v2_func_t os::Linux::_numa_node_to_cpus_v2;
@@ -3371,17 +3459,21 @@ os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
+os::Linux::numa_bitmask_clearbit_func_t os::Linux::_numa_bitmask_clearbit;
os::Linux::numa_bitmask_equal_func_t os::Linux::_numa_bitmask_equal;
os::Linux::numa_distance_func_t os::Linux::_numa_distance;
os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
os::Linux::numa_get_run_node_mask_func_t os::Linux::_numa_get_run_node_mask;
+os::Linux::numa_sched_setaffinity_func_t os::Linux::_numa_sched_setaffinity;
+os::Linux::numa_allocate_cpumask_func_t os::Linux::_numa_allocate_cpumask;
os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages;
os::Linux::numa_set_preferred_func_t os::Linux::_numa_set_preferred;
os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy;
unsigned long* os::Linux::_numa_all_nodes;
struct bitmask* os::Linux::_numa_all_nodes_ptr;
struct bitmask* os::Linux::_numa_nodes_ptr;
+struct bitmask* os::Linux::_numa_all_cpus_ptr;
struct bitmask* os::Linux::_numa_interleave_bitmask;
struct bitmask* os::Linux::_numa_membind_bitmask;
struct bitmask* os::Linux::_numa_cpunodebind_bitmask;
diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp
index df96a17d8e9..9c0b6723b38 100644
--- a/src/hotspot/os/linux/os_linux.hpp
+++ b/src/hotspot/os/linux/os_linux.hpp
@@ -45,6 +45,10 @@ class os::Linux {
static GrowableArray<int>* _cpu_to_node;
static GrowableArray<int>* _nindex_to_node;
+ static GrowableArray<struct bitmask*>* _numa_affinity_masks;
+
+ static void build_numa_affinity_masks();
+
protected:
static physical_memory_size_type _physical_memory;
@@ -230,8 +234,11 @@ class os::Linux {
typedef void (*numa_set_preferred_func_t)(int node);
typedef void (*numa_set_bind_policy_func_t)(int policy);
typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n);
+ typedef int (*numa_bitmask_clearbit_func_t)(struct bitmask *bmp, unsigned int n);
typedef int (*numa_bitmask_equal_func_t)(struct bitmask *bmp1, struct bitmask *bmp2);
typedef int (*numa_distance_func_t)(int node1, int node2);
+ typedef int (*numa_sched_setaffinity_func_t)(pid_t pid, struct bitmask* mask);
+ typedef struct bitmask* (*numa_allocate_cpumask_func_t)(void);
static sched_getcpu_func_t _sched_getcpu;
static numa_node_to_cpus_func_t _numa_node_to_cpus;
@@ -244,6 +251,7 @@ class os::Linux {
static numa_interleave_memory_v2_func_t _numa_interleave_memory_v2;
static numa_set_bind_policy_func_t _numa_set_bind_policy;
static numa_bitmask_isbitset_func_t _numa_bitmask_isbitset;
+ static numa_bitmask_clearbit_func_t _numa_bitmask_clearbit;
static numa_bitmask_equal_func_t _numa_bitmask_equal;
static numa_distance_func_t _numa_distance;
static numa_get_membind_func_t _numa_get_membind;
@@ -251,9 +259,12 @@ class os::Linux {
static numa_get_interleave_mask_func_t _numa_get_interleave_mask;
static numa_move_pages_func_t _numa_move_pages;
static numa_set_preferred_func_t _numa_set_preferred;
+ static numa_sched_setaffinity_func_t _numa_sched_setaffinity;
+ static numa_allocate_cpumask_func_t _numa_allocate_cpumask;
static unsigned long* _numa_all_nodes;
static struct bitmask* _numa_all_nodes_ptr;
static struct bitmask* _numa_nodes_ptr;
+ static struct bitmask* _numa_all_cpus_ptr;
static struct bitmask* _numa_interleave_bitmask;
static struct bitmask* _numa_membind_bitmask;
static struct bitmask* _numa_cpunodebind_bitmask;
@@ -269,6 +280,7 @@ class os::Linux {
static void set_numa_interleave_memory_v2(numa_interleave_memory_v2_func_t func) { _numa_interleave_memory_v2 = func; }
static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
static void set_numa_bitmask_isbitset(numa_bitmask_isbitset_func_t func) { _numa_bitmask_isbitset = func; }
+ static void set_numa_bitmask_clearbit(numa_bitmask_clearbit_func_t func) { _numa_bitmask_clearbit = func; }
static void set_numa_bitmask_equal(numa_bitmask_equal_func_t func) { _numa_bitmask_equal = func; }
static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; }
static void set_numa_get_membind(numa_get_membind_func_t func) { _numa_get_membind = func; }
@@ -279,9 +291,12 @@ class os::Linux {
static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
static void set_numa_all_nodes_ptr(struct bitmask **ptr) { _numa_all_nodes_ptr = (ptr == nullptr ? nullptr : *ptr); }
static void set_numa_nodes_ptr(struct bitmask **ptr) { _numa_nodes_ptr = (ptr == nullptr ? nullptr : *ptr); }
+ static void set_numa_all_cpus_ptr(struct bitmask **ptr) { _numa_all_cpus_ptr = (ptr == nullptr ? nullptr : *ptr); }
static void set_numa_interleave_bitmask(struct bitmask* ptr) { _numa_interleave_bitmask = ptr ; }
static void set_numa_membind_bitmask(struct bitmask* ptr) { _numa_membind_bitmask = ptr ; }
static void set_numa_cpunodebind_bitmask(struct bitmask* ptr) { _numa_cpunodebind_bitmask = ptr ; }
+ static void set_numa_sched_setaffinity(numa_sched_setaffinity_func_t func) { _numa_sched_setaffinity = func; }
+ static void set_numa_allocate_cpumask(numa_allocate_cpumask_func_t func) { _numa_allocate_cpumask = func; }
static int sched_getcpu_syscall(void);
enum NumaAllocationPolicy{
@@ -292,6 +307,8 @@ class os::Linux {
static NumaAllocationPolicy _current_numa_policy;
public:
+ static void numa_set_thread_affinity(pid_t tid, int node);
+
static int sched_getcpu() { return _sched_getcpu != nullptr ? _sched_getcpu() : -1; }
static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen);
static int numa_max_node() { return _numa_max_node != nullptr ? _numa_max_node() : -1; }
diff --git a/src/hotspot/os/posix/signals_posix.cpp b/src/hotspot/os/posix/signals_posix.cpp
index 5833e324070..625eb63445a 100644
--- a/src/hotspot/os/posix/signals_posix.cpp
+++ b/src/hotspot/os/posix/signals_posix.cpp
@@ -621,7 +621,7 @@ int JVM_HANDLE_XXX_SIGNAL(int sig, siginfo_t* info,
if (cb != nullptr && cb->is_nmethod()) {
nmethod* nm = cb->as_nmethod();
assert(nm->insts_contains_inclusive(pc), "");
- address deopt = nm->deopt_handler_begin();
+ address deopt = nm->deopt_handler_entry();
assert(deopt != nullptr, "");
frame fr = os::fetch_frame_from_context(uc);
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index ce2baeaf46c..8a450a291d3 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -2795,7 +2795,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
if (cb != nullptr && cb->is_nmethod()) {
nmethod* nm = cb->as_nmethod();
frame fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
- address deopt = nm->deopt_handler_begin();
+ address deopt = nm->deopt_handler_entry();
assert(nm->insts_contains_inclusive(pc), "");
nm->set_original_pc(&fr, pc);
// Set pc to handler
@@ -3752,6 +3752,7 @@ size_t os::pd_pretouch_memory(void* first, void* last, size_t page_size) {
return page_size;
}
+void os::numa_set_thread_affinity(Thread *thread, int node) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
diff --git a/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp b/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp
index 0799de014a9..35cbb75e8ff 100644
--- a/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp
+++ b/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp
@@ -104,11 +104,15 @@ uint32_t VM_Version::cpu_vector_length() {
}
void VM_Version::RVExtFeatureValue::log_enabled() {
- log_debug(os, cpu)("Enabled RV64 feature \"%s\"", pretty());
+ log_info(os, cpu)("Enabled RV64 feature \"%s\"", pretty());
+}
+
+void VM_Version::RVExtFeatureValue::log_disabled(const char* reason) {
+ log_info(os, cpu)("Disabled RV64 feature \"%s\" (%s)", pretty(), reason);
}
void VM_Version::RVNonExtFeatureValue::log_enabled() {
- log_debug(os, cpu)("Enabled RV64 feature \"%s\" (%ld)", pretty(), value());
+ log_info(os, cpu)("Enabled RV64 feature \"%s\" (%ld)", pretty(), value());
}
void VM_Version::setup_cpu_available_features() {
@@ -193,7 +197,7 @@ void VM_Version::setup_cpu_available_features() {
// via PR_RISCV_SCOPE_PER_THREAD, i.e. on VM attach/deattach.
int ret = prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_ON, PR_RISCV_SCOPE_PER_PROCESS);
if (ret == 0) {
- log_debug(os, cpu)("UseCtxFencei (PR_RISCV_CTX_SW_FENCEI_ON) enabled.");
+ log_info(os, cpu)("UseCtxFencei (PR_RISCV_CTX_SW_FENCEI_ON) enabled.");
} else {
FLAG_SET_ERGO(UseCtxFencei, false);
log_info(os, cpu)("UseCtxFencei (PR_RISCV_CTX_SW_FENCEI_ON) disabled, unsupported by kernel.");
diff --git a/src/hotspot/share/cds/aotArtifactFinder.hpp b/src/hotspot/share/cds/aotArtifactFinder.hpp
index 405222a8753..05bcde6b0ac 100644
--- a/src/hotspot/share/cds/aotArtifactFinder.hpp
+++ b/src/hotspot/share/cds/aotArtifactFinder.hpp
@@ -39,7 +39,7 @@ class TypeArrayKlass;
// It also decides what Klasses must be cached in aot-initialized state.
//
// ArchiveBuilder uses [1] as roots to scan for all MetaspaceObjs that need to be cached.
-// ArchiveHeapWriter uses [2] to create an image of the archived heap.
+// HeapShared uses [2] to create an image of the archived heap.
//
// [1] is stored in _all_cached_classes in aotArtifactFinder.cpp.
// [2] is stored in HeapShared::archived_object_cache().
diff --git a/src/hotspot/share/cds/aotConstantPoolResolver.cpp b/src/hotspot/share/cds/aotConstantPoolResolver.cpp
index ddf7d32ed70..c4bb26f6fb1 100644
--- a/src/hotspot/share/cds/aotConstantPoolResolver.cpp
+++ b/src/hotspot/share/cds/aotConstantPoolResolver.cpp
@@ -449,7 +449,7 @@ bool AOTConstantPoolResolver::check_lambda_metafactory_signature(ConstantPool* c
}
bool AOTConstantPoolResolver::check_lambda_metafactory_methodtype_arg(ConstantPool* cp, int bsms_attribute_index, int arg_i) {
- int mt_index = cp->bsm_attribute_entry(bsms_attribute_index)->argument_index(arg_i);
+ int mt_index = cp->bsm_attribute_entry(bsms_attribute_index)->argument(arg_i);
if (!cp->tag_at(mt_index).is_method_type()) {
// malformed class?
return false;
@@ -465,7 +465,7 @@ bool AOTConstantPoolResolver::check_lambda_metafactory_methodtype_arg(ConstantPo
}
bool AOTConstantPoolResolver::check_lambda_metafactory_methodhandle_arg(ConstantPool* cp, int bsms_attribute_index, int arg_i) {
- int mh_index = cp->bsm_attribute_entry(bsms_attribute_index)->argument_index(arg_i);
+ int mh_index = cp->bsm_attribute_entry(bsms_attribute_index)->argument(arg_i);
if (!cp->tag_at(mh_index).is_method_handle()) {
// malformed class?
return false;
diff --git a/src/hotspot/share/cds/aotMapLogger.cpp b/src/hotspot/share/cds/aotMapLogger.cpp
index d0a63c56093..a252eae4b84 100644
--- a/src/hotspot/share/cds/aotMapLogger.cpp
+++ b/src/hotspot/share/cds/aotMapLogger.cpp
@@ -796,7 +796,7 @@ void AOTMapLogger::dumptime_log_mapped_heap_region(ArchiveMappedHeapInfo* heap_i
address buffer_start = address(r.start()); // start of the current oop inside the buffer
address buffer_end = address(r.end());
- address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
+ address requested_base = UseCompressedOops ? AOTMappedHeapWriter::narrow_oop_base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
log_region_range("heap", buffer_start, buffer_end, requested_start);
diff --git a/src/hotspot/share/cds/aotMappedHeapWriter.cpp b/src/hotspot/share/cds/aotMappedHeapWriter.cpp
index ff9319d266b..98f400c989c 100644
--- a/src/hotspot/share/cds/aotMappedHeapWriter.cpp
+++ b/src/hotspot/share/cds/aotMappedHeapWriter.cpp
@@ -55,7 +55,7 @@
GrowableArrayCHeap* AOTMappedHeapWriter::_buffer = nullptr;
-// The following are offsets from buffer_bottom()
+bool AOTMappedHeapWriter::_is_writing_deterministic_heap = false;
size_t AOTMappedHeapWriter::_buffer_used;
// Heap root segments
@@ -74,7 +74,7 @@ AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
DumpedInternedStrings *AOTMappedHeapWriter::_dumped_interned_strings = nullptr;
typedef HashTable<
- size_t, // offset of a filler from ArchiveHeapWriter::buffer_bottom()
+ size_t, // offset of a filler from AOTMappedHeapWriter::buffer_bottom()
size_t, // size of this filler (in bytes)
127, // prime number
AnyObj::C_HEAP,
@@ -96,6 +96,45 @@ void AOTMappedHeapWriter::init() {
_source_objs = new GrowableArrayCHeap(10000);
guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
+
+ if (CDSConfig::old_cds_flags_used()) {
+ // With the old CDS workflow, we can guarantee deterministic output: given
+ // the same classlist file, we can generate the same static CDS archive.
+ // To ensure determinism, we always use the same compressed oop encoding
+ // (zero-based, no shift). See set_requested_address_range().
+ _is_writing_deterministic_heap = true;
+ } else {
+ // Deterministic output is not supported by the new AOT workflow, so
+ // we don't force the (zero-based, no shift) encoding. This way, it is more
+ // likely that we can avoid oop relocation in the production run.
+ _is_writing_deterministic_heap = false;
+ }
+ }
+}
+
+// For AOTMappedHeapWriter::narrow_oop_{mode, base, shift}(), see the comments
+// in AOTMappedHeapWriter::set_requested_address_range().
+CompressedOops::Mode AOTMappedHeapWriter::narrow_oop_mode() {
+ if (is_writing_deterministic_heap()) {
+ return CompressedOops::UnscaledNarrowOop;
+ } else {
+ return CompressedOops::mode();
+ }
+}
+
+address AOTMappedHeapWriter::narrow_oop_base() {
+ if (is_writing_deterministic_heap()) {
+ return (address)0;
+ } else {
+ return CompressedOops::base();
+ }
+}
+
+int AOTMappedHeapWriter::narrow_oop_shift() {
+ if (is_writing_deterministic_heap()) {
+ return 0;
+ } else {
+ return CompressedOops::shift();
}
}
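
The deterministic-heap path above fixes the archived encoding at zero base and zero shift, so a narrowOop is simply the low 32 bits of the requested address (which set_requested_address_range() keeps below 0x100000000). A tiny illustration of that encoding, with made-up names:

#include <cstdint>

// requested_addr must lie below 1ull << 32 for the zero-base / zero-shift case.
constexpr uint32_t encode_narrow_oop(uint64_t requested_addr, uint64_t base, int shift) {
  return static_cast<uint32_t>((requested_addr - base) >> shift);
}

// Deterministic ("old" CDS) archives: base == 0, shift == 0, so the encoded
// value is just the low 32 bits and is stable across assembly runs.
static_assert(encode_narrow_oop(0xFFF00000u, 0, 0) == 0xFFF00000u, "low 32 bits");
// Non-deterministic path: whatever CompressedOops::{base,shift}() were during
// the assembly phase, e.g. a base of 0x700000000 with shift 3.
static_assert(encode_narrow_oop(0x700001000ull, 0x700000000ull, 3) == 0x200u, "base+shift case");
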
@@ -116,7 +155,7 @@ void AOTMappedHeapWriter::write(GrowableArrayCHeap* roots,
assert(CDSConfig::is_dumping_heap(), "sanity");
allocate_buffer();
copy_source_objs_to_buffer(roots);
- set_requested_address(heap_info);
+ set_requested_address_range(heap_info);
relocate_embedded_oops(roots, heap_info);
}
@@ -536,14 +575,55 @@ size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
return buffered_obj_offset;
}
-void AOTMappedHeapWriter::set_requested_address(ArchiveMappedHeapInfo* info) {
+// Set the range [_requested_bottom, _requested_top), the requested address range of all
+// the archived heap objects in the production run.
+//
+// (1) UseCompressedOops == true && !is_writing_deterministic_heap()
+//
+// The archived objects are stored using the COOPS encoding of the assembly phase.
+// We pick a range within the heap used by the assembly phase.
+//
+// In the production run, if different COOPS encodings are used:
+// - The heap contents needs to be relocated.
+//
+// (2) UseCompressedOops == true && is_writing_deterministic_heap()
+//
+// We always use zero-based, zero-shift encoding. _requested_top is aligned to 0x10000000.
+//
+// (3) UseCompressedOops == false:
+//
+// In the production run, the heap range is usually picked (randomly) by the OS, so we
+// will almost always need to perform relocation, regardless of how we pick the requested
+// address range.
+//
+// So we just hard code it to NOCOOPS_REQUESTED_BASE.
+//
+void AOTMappedHeapWriter::set_requested_address_range(ArchiveMappedHeapInfo* info) {
assert(!info->is_used(), "only set once");
size_t heap_region_byte_size = _buffer_used;
assert(heap_region_byte_size > 0, "must archived at least one object!");
if (UseCompressedOops) {
- if (UseG1GC) {
+ if (is_writing_deterministic_heap()) {
+ // Pick a heap range so that requested addresses can be encoded with zero-base/no shift.
+ // We align the requested bottom to at least 1 MB: if the production run uses G1 with a small
+ // heap (e.g., -Xmx256m), it's likely that we can map the archived objects at the
+ // requested location to avoid relocation.
+ //
+ // For other collectors or larger heaps, relocation is unavoidable, but is usually
+ // quite cheap. If you really want to avoid relocation, use the AOT workflow instead.
+ address heap_end = (address)0x100000000;
+ size_t alignment = MAX2(MIN_GC_REGION_ALIGNMENT, 1024 * 1024);
+ if (align_up(heap_region_byte_size, alignment) >= (size_t)heap_end) {
+ log_error(aot, heap)("cached heap space is too large: %zu bytes", heap_region_byte_size);
+ AOTMetaspace::unrecoverable_writing_error();
+ }
+ _requested_bottom = align_down(heap_end - heap_region_byte_size, alignment);
+ } else if (UseG1GC) {
+ // For G1, pick the range at the top of the current heap. If the exact same heap sizes
+ // are used in the production run, it's likely that we can map the archived objects
+ // at the requested location to avoid relocation.
address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
log_info(aot, heap)("Heap end = %p", heap_end);
_requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
@@ -612,7 +692,14 @@ oop AOTMappedHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
template <typename T> void AOTMappedHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, oop source_referent, CHeapBitMap* oopmap) {
oop request_referent = source_obj_to_requested_obj(source_referent);
- store_requested_oop_in_buffer(field_addr_in_buffer, request_referent);
+ if (UseCompressedOops && is_writing_deterministic_heap()) {
+ // We use zero-based, 0-shift encoding, so the narrowOop is just the lower
+ // 32 bits of request_referent
+ intptr_t addr = cast_from_oop<intptr_t>(request_referent);
+ *((narrowOop*)field_addr_in_buffer) = checked_cast<narrowOop>(addr);
+ } else {
+ store_requested_oop_in_buffer(field_addr_in_buffer, request_referent);
+ }
if (request_referent != nullptr) {
mark_oop_pointer(field_addr_in_buffer, oopmap);
}
@@ -918,9 +1005,9 @@ AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(ArchiveMappedHe
address buffer_start = address(r.start());
address buffer_end = address(r.end());
- address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
- address requested_start = UseCompressedOops ? buffered_addr_to_requested_addr(buffer_start) : requested_base;
- int requested_shift = CompressedOops::shift();
+ address requested_base = UseCompressedOops ? AOTMappedHeapWriter::narrow_oop_base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
+ address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
+ int requested_shift = AOTMappedHeapWriter::narrow_oop_shift();
intptr_t buffer_to_requested_delta = requested_start - buffer_start;
uint64_t buffer_start_narrow_oop = 0xdeadbeed;
if (UseCompressedOops) {
diff --git a/src/hotspot/share/cds/aotMappedHeapWriter.hpp b/src/hotspot/share/cds/aotMappedHeapWriter.hpp
index 9a85b83d3d1..eafd38ac8bb 100644
--- a/src/hotspot/share/cds/aotMappedHeapWriter.hpp
+++ b/src/hotspot/share/cds/aotMappedHeapWriter.hpp
@@ -29,6 +29,7 @@
#include "cds/heapShared.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
+#include "oops/compressedOops.hpp"
#include "oops/oopHandle.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/exceptions.hpp"
@@ -71,7 +72,7 @@ class AOTMappedHeapWriter : AllStatic {
// These are entered into HeapShared::archived_object_cache().
//
// - "buffered objects" are copies of the "source objects", and are stored in into
- // ArchiveHeapWriter::_buffer, which is a GrowableArray that sits outside of
+ // AOTMappedHeapWriter::_buffer, which is a GrowableArray that sits outside of
// the valid heap range. Therefore we avoid using the addresses of these copies
// as oops. They are usually called "buffered_addr" in the code (of the type "address").
//
@@ -81,26 +82,11 @@ class AOTMappedHeapWriter : AllStatic {
// - Each archived object has a "requested address" -- at run time, if the object
// can be mapped at this address, we can avoid relocation.
//
- // The requested address is implemented differently depending on UseCompressedOops:
+ // The requested address of an archived object is essentially its buffered_addr + delta,
+ // where delta is (_requested_bottom - buffer_bottom());
//
- // UseCompressedOops == true:
- // The archived objects are stored assuming that the runtime COOPS compression
- // scheme is exactly the same as in dump time (or else a more expensive runtime relocation
- // would be needed.)
- //
- // At dump time, we assume that the runtime heap range is exactly the same as
- // in dump time. The requested addresses of the archived objects are chosen such that
- // they would occupy the top end of a G1 heap (TBD when dumping is supported by other
- // collectors. See JDK-8298614).
- //
- // UseCompressedOops == false:
- // At runtime, the heap range is usually picked (randomly) by the OS, so we will almost always
- // need to perform relocation. Hence, the goal of the "requested address" is to ensure that
- // the contents of the archived objects are deterministic. I.e., the oop fields of archived
- // objects will always point to deterministic addresses.
- //
- // For G1, the archived heap is written such that the lowest archived object is placed
- // at NOCOOPS_REQUESTED_BASE. (TBD after JDK-8298614).
+ // The requested addresses of all archived objects are within [_requested_bottom, _requested_top).
+ // See AOTMappedHeapWriter::set_requested_address_range() for more info.
// ----------------------------------------------------------------------
public:
@@ -111,6 +97,15 @@ public:
// Shenandoah heap region size can never be smaller than 256K.
static constexpr int MIN_GC_REGION_ALIGNMENT = 256 * K;
+ // The heap contents are required to be deterministic when dumping "old" CDS archives, in order
+ // to support reproducible lib/server/classes*.jsa when building the JDK.
+ static bool is_writing_deterministic_heap() { return _is_writing_deterministic_heap; }
+
+ // The oop encoding used by the archived heap objects.
+ static CompressedOops::Mode narrow_oop_mode();
+ static address narrow_oop_base();
+ static int narrow_oop_shift();
+
static const int INITIAL_TABLE_SIZE = 15889; // prime number
static const int MAX_TABLE_SIZE = 1000000;
@@ -121,6 +116,7 @@ private:
int _field_offset;
};
+ static bool _is_writing_deterministic_heap;
static GrowableArrayCHeap* _buffer;
// The number of bytes that have written into _buffer (may be smaller than _buffer->length()).
@@ -130,15 +126,15 @@ private:
static HeapRootSegments _heap_root_segments;
// The address range of the requested location of the archived heap objects.
- static address _requested_bottom;
- static address _requested_top;
+ static address _requested_bottom; // The requested address of the lowest archived heap object
+ static address _requested_top; // The exclusive end of the highest archived heap object
static GrowableArrayCHeap* _native_pointers;
static GrowableArrayCHeap* _source_objs;
static DumpedInternedStrings *_dumped_interned_strings;
// We sort _source_objs_order to minimize the number of bits in ptrmap and oopmap.
- // See comments near the body of ArchiveHeapWriter::compare_objs_by_oop_fields().
+ // See comments near the body of AOTMappedHeapWriter::compare_objs_by_oop_fields().
// The objects will be written in the order of:
//_source_objs->at(_source_objs_order->at(0)._index)
// source_objs->at(_source_objs_order->at(1)._index)
@@ -200,7 +196,7 @@ private:
static int filler_array_length(size_t fill_bytes);
static HeapWord* init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);
- static void set_requested_address(ArchiveMappedHeapInfo* info);
+ static void set_requested_address_range(ArchiveMappedHeapInfo* info);
static void mark_native_pointers(oop orig_obj);
static void relocate_embedded_oops(GrowableArrayCHeap* roots, ArchiveMappedHeapInfo* info);
static void compute_ptrmap(ArchiveMappedHeapInfo *info);
diff --git a/src/hotspot/share/cds/aotMetaspace.cpp b/src/hotspot/share/cds/aotMetaspace.cpp
index 8642b1a6de8..42d41e6ae89 100644
--- a/src/hotspot/share/cds/aotMetaspace.cpp
+++ b/src/hotspot/share/cds/aotMetaspace.cpp
@@ -114,6 +114,7 @@ intx AOTMetaspace::_relocation_delta;
char* AOTMetaspace::_requested_base_address;
Array* AOTMetaspace::_archived_method_handle_intrinsics = nullptr;
bool AOTMetaspace::_use_optimized_module_handling = true;
+FileMapInfo* AOTMetaspace::_output_mapinfo = nullptr;
// The CDS archive is divided into the following regions:
// rw - read-write metadata
@@ -322,6 +323,24 @@ void AOTMetaspace::initialize_for_static_dump() {
AOTMetaspace::unrecoverable_writing_error();
}
_symbol_region.init(&_symbol_rs, &_symbol_vs);
+ if (CDSConfig::is_dumping_preimage_static_archive()) {
+ // We are in the AOT training run. User code is executed.
+ //
+ // On Windows, if the user code closes System.out and we open the AOT config file for output
+ // only at VM exit, we might get back the same file HANDLE as stdout, and the AOT config
+ // file may get corrupted by UL logs. By opening early, we ensure that the output
+ // HANDLE is different than stdout so we can avoid such corruption.
+ open_output_mapinfo();
+ } else {
+ // No need for the above as we won't execute any user code.
+ }
+}
+
+void AOTMetaspace::open_output_mapinfo() {
+ const char* static_archive = CDSConfig::output_archive_path();
+ assert(static_archive != nullptr, "sanity");
+ _output_mapinfo = new FileMapInfo(static_archive, true);
+ _output_mapinfo->open_as_output();
}
// Called by universe_post_init()
@@ -655,15 +674,14 @@ private:
public:
- VM_PopulateDumpSharedSpace(StaticArchiveBuilder& b) :
- VM_Operation(), _mapped_heap_info(), _streamed_heap_info(), _map_info(nullptr), _builder(b) {}
+ VM_PopulateDumpSharedSpace(StaticArchiveBuilder& b, FileMapInfo* map_info) :
+ VM_Operation(), _mapped_heap_info(), _streamed_heap_info(), _map_info(map_info), _builder(b) {}
bool skip_operation() const { return false; }
VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
ArchiveMappedHeapInfo* mapped_heap_info() { return &_mapped_heap_info; }
ArchiveStreamedHeapInfo* streamed_heap_info() { return &_streamed_heap_info; }
- FileMapInfo* map_info() const { return _map_info; }
void doit(); // outline because gdb sucks
bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace
@@ -795,12 +813,6 @@ void VM_PopulateDumpSharedSpace::doit() {
CppVtables::zero_archived_vtables();
// Write the archive file
- if (CDSConfig::is_dumping_final_static_archive()) {
- FileMapInfo::free_current_info(); // FIXME: should not free current info
- }
- const char* static_archive = CDSConfig::output_archive_path();
- assert(static_archive != nullptr, "sanity");
- _map_info = new FileMapInfo(static_archive, true);
_map_info->populate_header(AOTMetaspace::core_region_alignment());
_map_info->set_early_serialized_data(early_serialized_data);
_map_info->set_serialized_data(serialized_data);
@@ -1138,7 +1150,14 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
}
#endif
- VM_PopulateDumpSharedSpace op(builder);
+ if (!CDSConfig::is_dumping_preimage_static_archive()) {
+ if (CDSConfig::is_dumping_final_static_archive()) {
+ FileMapInfo::free_current_info(); // FIXME: should not free current info
+ }
+ open_output_mapinfo();
+ }
+
+ VM_PopulateDumpSharedSpace op(builder, _output_mapinfo);
VMThread::execute(&op);
if (AOTCodeCache::is_on_for_dump() && CDSConfig::is_dumping_final_static_archive()) {
@@ -1152,7 +1171,9 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
CDSConfig::disable_dumping_aot_code();
}
- bool status = write_static_archive(&builder, op.map_info(), op.mapped_heap_info(), op.streamed_heap_info());
+ bool status = write_static_archive(&builder, _output_mapinfo, op.mapped_heap_info(), op.streamed_heap_info());
+ assert(!_output_mapinfo->is_open(), "Must be closed already");
+ _output_mapinfo = nullptr;
if (status && CDSConfig::is_dumping_preimage_static_archive()) {
tty->print_cr("%s AOTConfiguration recorded: %s",
CDSConfig::has_temp_aot_config_file() ? "Temporary" : "", AOTConfiguration);
@@ -1173,11 +1194,10 @@ bool AOTMetaspace::write_static_archive(ArchiveBuilder* builder,
// relocate the data so that it can be mapped to AOTMetaspace::requested_base_address()
// without runtime relocation.
builder->relocate_to_requested();
-
- map_info->open_as_output();
if (!map_info->is_open()) {
return false;
}
+ map_info->prepare_for_writing();
builder->write_archive(map_info, mapped_heap_info, streamed_heap_info);
return true;
}
diff --git a/src/hotspot/share/cds/aotMetaspace.hpp b/src/hotspot/share/cds/aotMetaspace.hpp
index bfd9f4bcc75..1712a7865ad 100644
--- a/src/hotspot/share/cds/aotMetaspace.hpp
+++ b/src/hotspot/share/cds/aotMetaspace.hpp
@@ -60,6 +60,7 @@ class AOTMetaspace : AllStatic {
static char* _requested_base_address;
static bool _use_optimized_module_handling;
static Array* _archived_method_handle_intrinsics;
+ static FileMapInfo* _output_mapinfo;
public:
enum {
@@ -185,6 +186,7 @@ public:
private:
static void read_extra_data(JavaThread* current, const char* filename) NOT_CDS_RETURN;
static void fork_and_dump_final_static_archive(TRAPS);
+ static void open_output_mapinfo();
static bool write_static_archive(ArchiveBuilder* builder,
FileMapInfo* map_info,
ArchiveMappedHeapInfo* mapped_heap_info,
diff --git a/src/hotspot/share/cds/dynamicArchive.cpp b/src/hotspot/share/cds/dynamicArchive.cpp
index 85e59e23f8c..8fae8dabf8c 100644
--- a/src/hotspot/share/cds/dynamicArchive.cpp
+++ b/src/hotspot/share/cds/dynamicArchive.cpp
@@ -353,6 +353,7 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data, AOTClassLocatio
assert(dynamic_info != nullptr, "Sanity");
dynamic_info->open_as_output();
+ dynamic_info->prepare_for_writing();
ArchiveBuilder::write_archive(dynamic_info, nullptr, nullptr);
address base = _requested_dynamic_archive_bottom;
diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp
index ae92ce31058..0eeb96bb269 100644
--- a/src/hotspot/share/cds/filemap.cpp
+++ b/src/hotspot/share/cds/filemap.cpp
@@ -216,12 +216,14 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment,
_obj_alignment = ObjectAlignmentInBytes;
_compact_strings = CompactStrings;
_compact_headers = UseCompactObjectHeaders;
+#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_heap()) {
_object_streaming_mode = HeapShared::is_writing_streaming_mode();
- _narrow_oop_mode = CompressedOops::mode();
- _narrow_oop_base = CompressedOops::base();
- _narrow_oop_shift = CompressedOops::shift();
+ _narrow_oop_mode = AOTMappedHeapWriter::narrow_oop_mode();
+ _narrow_oop_base = AOTMappedHeapWriter::narrow_oop_base();
+ _narrow_oop_shift = AOTMappedHeapWriter::narrow_oop_shift();
}
+#endif
_compressed_oops = UseCompressedOops;
_compressed_class_ptrs = UseCompressedClassPointers;
if (UseCompressedClassPointers) {
@@ -777,7 +779,9 @@ void FileMapInfo::open_as_output() {
}
_fd = fd;
_file_open = true;
+}
+void FileMapInfo::prepare_for_writing() {
// Seek past the header. We will write the header after all regions are written
// and their CRCs computed.
size_t header_bytes = header()->header_size();
@@ -911,7 +915,7 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
if (HeapShared::is_writing_mapping_mode()) {
requested_base = (char*)AOTMappedHeapWriter::requested_address();
if (UseCompressedOops) {
- mapping_offset = (size_t)((address)requested_base - CompressedOops::base());
+ mapping_offset = (size_t)((address)requested_base - AOTMappedHeapWriter::narrow_oop_base());
assert((mapping_offset >> CompressedOops::shift()) << CompressedOops::shift() == mapping_offset, "must be");
}
} else {
diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp
index 2a761843e47..fbd3c8e1681 100644
--- a/src/hotspot/share/cds/filemap.hpp
+++ b/src/hotspot/share/cds/filemap.hpp
@@ -365,6 +365,7 @@ public:
// File manipulation.
bool open_as_input() NOT_CDS_RETURN_(false);
void open_as_output();
+ void prepare_for_writing();
void write_header();
void write_region(int region, char* base, size_t size,
bool read_only, bool allow_exec);
diff --git a/src/hotspot/share/cds/heapShared.hpp b/src/hotspot/share/cds/heapShared.hpp
index 2c782f7231b..118c60faa60 100644
--- a/src/hotspot/share/cds/heapShared.hpp
+++ b/src/hotspot/share/cds/heapShared.hpp
@@ -332,7 +332,7 @@ public:
// Used by CDSHeapVerifier.
OopHandle _orig_referrer;
- // The location of this object inside ArchiveHeapWriter::_buffer
+ // The location of this object inside {AOTMappedHeapWriter, AOTStreamedHeapWriter}::_buffer
size_t _buffer_offset;
// One or more fields in this object are pointing to non-null oops.
diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp
index 79ab881e7f6..92bacc4c2c3 100644
--- a/src/hotspot/share/ci/ciEnv.cpp
+++ b/src/hotspot/share/ci/ciEnv.cpp
@@ -1057,7 +1057,9 @@ void ciEnv::register_method(ciMethod* target,
}
assert(offsets->value(CodeOffsets::Deopt) != -1, "must have deopt entry");
- assert(offsets->value(CodeOffsets::Exceptions) != -1, "must have exception entry");
+
+ assert(compiler->type() == compiler_c2 ||
+ offsets->value(CodeOffsets::Exceptions) != -1, "must have exception entry");
nm = nmethod::new_nmethod(method,
compile_id(),
diff --git a/src/hotspot/share/ci/ciInstanceKlass.cpp b/src/hotspot/share/ci/ciInstanceKlass.cpp
index 9bbf005356c..64b9acf9146 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp
@@ -605,7 +605,7 @@ bool ciInstanceKlass::is_leaf_type() {
if (is_shared()) {
return is_final(); // approximately correct
} else {
- return !has_subklass() && (nof_implementors() == 0);
+ return !has_subklass() && (!is_interface() || nof_implementors() == 0);
}
}
@@ -619,6 +619,7 @@ bool ciInstanceKlass::is_leaf_type() {
// This is OK, since any dependencies we decide to assert
// will be checked later under the Compile_lock.
ciInstanceKlass* ciInstanceKlass::implementor() {
+ assert(is_interface(), "required");
ciInstanceKlass* impl = _implementor;
if (impl == nullptr) {
if (is_shared()) {
diff --git a/src/hotspot/share/ci/ciInstanceKlass.hpp b/src/hotspot/share/ci/ciInstanceKlass.hpp
index ec8fc789c7d..1f887771f54 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.hpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.hpp
@@ -259,6 +259,7 @@ public:
ciInstanceKlass* unique_implementor() {
assert(is_loaded(), "must be loaded");
+ assert(is_interface(), "must be");
ciInstanceKlass* impl = implementor();
return (impl != this ? impl : nullptr);
}
diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp
index eb8a2a389b9..68890775051 100644
--- a/src/hotspot/share/classfile/classFileParser.cpp
+++ b/src/hotspot/share/classfile/classFileParser.cpp
@@ -47,6 +47,7 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/annotations.hpp"
+#include "oops/bsmAttribute.inline.hpp"
#include "oops/constantPool.inline.hpp"
#include "oops/fieldInfo.hpp"
#include "oops/fieldStreams.inline.hpp"
@@ -3298,8 +3299,9 @@ void ClassFileParser::parse_classfile_bootstrap_methods_attribute(const ClassFil
TRAPS) {
assert(cfs != nullptr, "invariant");
assert(cp != nullptr, "invariant");
+ const int cp_size = cp->length();
- const u1* const current_start = cfs->current();
+ const u1* const current_before_parsing = cfs->current();
guarantee_property(attribute_byte_length >= sizeof(u2),
"Invalid BootstrapMethods attribute length %u in class file %s",
@@ -3308,57 +3310,40 @@ void ClassFileParser::parse_classfile_bootstrap_methods_attribute(const ClassFil
cfs->guarantee_more(attribute_byte_length, CHECK);
- const int attribute_array_length = cfs->get_u2_fast();
+ const int num_bootstrap_methods = cfs->get_u2_fast();
- guarantee_property(_max_bootstrap_specifier_index < attribute_array_length,
+ guarantee_property(_max_bootstrap_specifier_index < num_bootstrap_methods,
"Short length on BootstrapMethods in class file %s",
CHECK);
+ const u4 bootstrap_methods_u2_len = (attribute_byte_length - sizeof(u2)) / sizeof(u2);
- // The attribute contains a counted array of counted tuples of shorts,
- // represending bootstrap specifiers:
- // length*{bootstrap_method_index, argument_count*{argument_index}}
- const unsigned int operand_count = (attribute_byte_length - (unsigned)sizeof(u2)) / (unsigned)sizeof(u2);
- // operand_count = number of shorts in attr, except for leading length
-
- // The attribute is copied into a short[] array.
- // The array begins with a series of short[2] pairs, one for each tuple.
- const int index_size = (attribute_array_length * 2);
-
- Array* const operands =
- MetadataFactory::new_array(_loader_data, index_size + operand_count, CHECK);
-
- // Eagerly assign operands so they will be deallocated with the constant
+ // Eagerly assign the arrays so that they will be deallocated with the constant
// pool if there is an error.
- cp->set_operands(operands);
+ BSMAttributeEntries::InsertionIterator iter =
+ cp->bsm_entries().start_extension(num_bootstrap_methods,
+ bootstrap_methods_u2_len,
+ _loader_data,
+ CHECK);
- int operand_fill_index = index_size;
- const int cp_size = cp->length();
-
- for (int n = 0; n < attribute_array_length; n++) {
- // Store a 32-bit offset into the header of the operand array.
- ConstantPool::operand_offset_at_put(operands, n, operand_fill_index);
-
- // Read a bootstrap specifier.
+ for (int i = 0; i < num_bootstrap_methods; i++) {
cfs->guarantee_more(sizeof(u2) * 2, CHECK); // bsm, argc
- const u2 bootstrap_method_index = cfs->get_u2_fast();
- const u2 argument_count = cfs->get_u2_fast();
+ u2 bootstrap_method_ref = cfs->get_u2_fast();
+ u2 num_bootstrap_arguments = cfs->get_u2_fast();
guarantee_property(
- valid_cp_range(bootstrap_method_index, cp_size) &&
- cp->tag_at(bootstrap_method_index).is_method_handle(),
- "bootstrap_method_index %u has bad constant type in class file %s",
- bootstrap_method_index,
- CHECK);
+ valid_cp_range(bootstrap_method_ref, cp_size) &&
+ cp->tag_at(bootstrap_method_ref).is_method_handle(),
+ "bootstrap_method_index %u has bad constant type in class file %s",
+ bootstrap_method_ref,
+ CHECK);
+ cfs->guarantee_more(sizeof(u2) * num_bootstrap_arguments, CHECK); // argv[argc]
- guarantee_property((operand_fill_index + 1 + argument_count) < operands->length(),
- "Invalid BootstrapMethods num_bootstrap_methods or num_bootstrap_arguments value in class file %s",
- CHECK);
+ BSMAttributeEntry* entry = iter.reserve_new_entry(bootstrap_method_ref, num_bootstrap_arguments);
+ guarantee_property(entry != nullptr,
+ "Invalid BootstrapMethods num_bootstrap_methods."
+ " The total amount of space reserved for the BootstrapMethod attribute was not sufficient", CHECK);
- operands->at_put(operand_fill_index++, bootstrap_method_index);
- operands->at_put(operand_fill_index++, argument_count);
-
- cfs->guarantee_more(sizeof(u2) * argument_count, CHECK); // argv[argc]
- for (int j = 0; j < argument_count; j++) {
+ for (int argi = 0; argi < num_bootstrap_arguments; argi++) {
const u2 argument_index = cfs->get_u2_fast();
guarantee_property(
valid_cp_range(argument_index, cp_size) &&
@@ -3366,10 +3351,11 @@ void ClassFileParser::parse_classfile_bootstrap_methods_attribute(const ClassFil
"argument_index %u has bad constant type in class file %s",
argument_index,
CHECK);
- operands->at_put(operand_fill_index++, argument_index);
+ entry->set_argument(argi, argument_index);
}
}
- guarantee_property(current_start + attribute_byte_length == cfs->current(),
+ cp->bsm_entries().end_extension(iter, _loader_data, CHECK);
+ guarantee_property(current_before_parsing + attribute_byte_length == cfs->current(),
"Bad length on BootstrapMethods in class file %s",
CHECK);
}
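
As a reminder of the wire format the rewritten loop above walks, here is a minimal standalone sketch of the BootstrapMethods attribute body using plain arrays and hypothetical names; it does not use the new BSMAttributeEntries insertion iterator, only the same counted-tuple layout and bounds checks.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// BootstrapMethods attribute body (after the 6-byte attribute header), as a
// sequence of u2 values:
//   u2 num_bootstrap_methods
//   { u2 bootstrap_method_ref; u2 num_bootstrap_arguments; u2 args[...]; } *
static bool walk_bootstrap_methods(const std::vector<uint16_t>& body) {
  if (body.empty()) return false;
  size_t pos = 0;
  uint16_t num_methods = body[pos++];
  for (unsigned i = 0; i < num_methods; i++) {
    if (pos + 2 > body.size()) return false;        // need bsm ref + arg count
    unsigned bsm_ref = body[pos++];
    unsigned argc    = body[pos++];
    if (pos + argc > body.size()) return false;     // need argv[argc]
    std::printf("bsm #%u: ref=%u argc=%u\n", i, bsm_ref, argc);
    pos += argc;                                    // skip the argument indices
  }
  return pos == body.size();                        // no trailing data allowed
}

int main() {
  // One bootstrap method: constant-pool ref 7, two arguments (indices 9 and 10).
  std::vector<uint16_t> body = {1, 7, 2, 9, 10};
  return walk_bootstrap_methods(body) ? 0 : 1;
}
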
diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp
index 082c745f4c3..12fbda899b9 100644
--- a/src/hotspot/share/classfile/classLoader.cpp
+++ b/src/hotspot/share/classfile/classLoader.cpp
@@ -412,31 +412,30 @@ ClassFileStream* ClassPathImageEntry::open_stream(JavaThread* current, const cha
//
ClassFileStream* ClassPathImageEntry::open_stream_for_loader(JavaThread* current, const char* name, ClassLoaderData* loader_data) {
jlong size;
- JImageLocationRef location = (*JImageFindResource)(jimage_non_null(), "", get_jimage_version_string(), name, &size);
+ JImageLocationRef location = 0;
- if (location == 0) {
- TempNewSymbol class_name = SymbolTable::new_symbol(name);
- TempNewSymbol pkg_name = ClassLoader::package_from_class_name(class_name);
+ TempNewSymbol class_name = SymbolTable::new_symbol(name);
+ TempNewSymbol pkg_name = ClassLoader::package_from_class_name(class_name);
- if (pkg_name != nullptr) {
- if (!Universe::is_module_initialized()) {
- location = (*JImageFindResource)(jimage_non_null(), JAVA_BASE_NAME, get_jimage_version_string(), name, &size);
- } else {
- PackageEntry* package_entry = ClassLoader::get_package_entry(pkg_name, loader_data);
- if (package_entry != nullptr) {
- ResourceMark rm(current);
- // Get the module name
- ModuleEntry* module = package_entry->module();
- assert(module != nullptr, "Boot classLoader package missing module");
- assert(module->is_named(), "Boot classLoader package is in unnamed module");
- const char* module_name = module->name()->as_C_string();
- if (module_name != nullptr) {
- location = (*JImageFindResource)(jimage_non_null(), module_name, get_jimage_version_string(), name, &size);
- }
+ if (pkg_name != nullptr) {
+ if (!Universe::is_module_initialized()) {
+ location = (*JImageFindResource)(jimage_non_null(), JAVA_BASE_NAME, get_jimage_version_string(), name, &size);
+ } else {
+ PackageEntry* package_entry = ClassLoader::get_package_entry(pkg_name, loader_data);
+ if (package_entry != nullptr) {
+ ResourceMark rm(current);
+ // Get the module name
+ ModuleEntry* module = package_entry->module();
+ assert(module != nullptr, "Boot classLoader package missing module");
+ assert(module->is_named(), "Boot classLoader package is in unnamed module");
+ const char* module_name = module->name()->as_C_string();
+ if (module_name != nullptr) {
+ location = (*JImageFindResource)(jimage_non_null(), module_name, get_jimage_version_string(), name, &size);
}
}
}
}
+
if (location != 0) {
if (UsePerfData) {
ClassLoader::perf_sys_classfile_bytes_read()->inc(size);
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index d91af9b4991..c2f8b46f00e 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -1302,7 +1302,7 @@ nmethod::nmethod(
}
// Native wrappers do not have deopt handlers. Make the values
// something that will never match a pc like the nmethod vtable entry
- _deopt_handler_offset = 0;
+ _deopt_handler_entry_offset = 0;
_unwind_handler_offset = 0;
CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
@@ -1442,7 +1442,7 @@ nmethod::nmethod(const nmethod &nm) : CodeBlob(nm._name, nm._kind, nm._size, nm.
_skipped_instructions_size = nm._skipped_instructions_size;
_stub_offset = nm._stub_offset;
_exception_offset = nm._exception_offset;
- _deopt_handler_offset = nm._deopt_handler_offset;
+ _deopt_handler_entry_offset = nm._deopt_handler_entry_offset;
_unwind_handler_offset = nm._unwind_handler_offset;
_num_stack_arg_slots = nm._num_stack_arg_slots;
_oops_size = nm._oops_size;
@@ -1704,19 +1704,26 @@ nmethod::nmethod(
_exception_offset = -1;
}
if (offsets->value(CodeOffsets::Deopt) != -1) {
- _deopt_handler_offset = code_offset() + offsets->value(CodeOffsets::Deopt);
+ _deopt_handler_entry_offset = code_offset() + offsets->value(CodeOffsets::Deopt);
} else {
- _deopt_handler_offset = -1;
+ _deopt_handler_entry_offset = -1;
}
} else
#endif
{
// Exception handler and deopt handler are in the stub section
- assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
assert(offsets->value(CodeOffsets::Deopt ) != -1, "must be set");
- _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
- _deopt_handler_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
+ bool has_exception_handler = (offsets->value(CodeOffsets::Exceptions) != -1);
+ assert(has_exception_handler == (compiler->type() != compiler_c2),
+ "C2 compiler doesn't provide exception handler stub code.");
+ if (has_exception_handler) {
+ _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
+ } else {
+ _exception_offset = -1;
+ }
+
+ _deopt_handler_entry_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
}
if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
// C1 generates UnwindHandler at the end of instructions section.
@@ -4024,7 +4031,7 @@ const char* nmethod::nmethod_section_label(address pos) const {
// Check stub_code before checking exception_handler or deopt_handler.
if (pos == this->stub_begin()) label = "[Stub Code]";
if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin()) label = "[Exception Handler]";
- if (JVMCI_ONLY(_deopt_handler_offset != -1 &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]";
+ if (JVMCI_ONLY(_deopt_handler_entry_offset != -1 &&) pos == deopt_handler_entry()) label = "[Deopt Handler Entry Point]";
return label;
}
diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp
index 34accf428b6..0fa9d7fda9e 100644
--- a/src/hotspot/share/code/nmethod.hpp
+++ b/src/hotspot/share/code/nmethod.hpp
@@ -229,7 +229,7 @@ class nmethod : public CodeBlob {
int _exception_offset;
// All deoptee's will resume execution at this location described by
// this offset.
- int _deopt_handler_offset;
+ int _deopt_handler_entry_offset;
// Offset (from insts_end) of the unwind handler if it exists
int16_t _unwind_handler_offset;
// Number of arguments passed on the stack
@@ -617,7 +617,7 @@ public:
address stub_begin () const { return header_begin() + _stub_offset ; }
address stub_end () const { return code_end() ; }
address exception_begin () const { return header_begin() + _exception_offset ; }
- address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
+ address deopt_handler_entry () const { return header_begin() + _deopt_handler_entry_offset ; }
address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
oop* oops_begin () const { return (oop*) data_begin(); }
oop* oops_end () const { return (oop*) data_end(); }
diff --git a/src/hotspot/share/code/nmethod.inline.hpp b/src/hotspot/share/code/nmethod.inline.hpp
index 44331db669c..ecee3c0c31a 100644
--- a/src/hotspot/share/code/nmethod.inline.hpp
+++ b/src/hotspot/share/code/nmethod.inline.hpp
@@ -34,7 +34,7 @@
inline bool nmethod::is_deopt_pc(address pc) { return is_deopt_entry(pc); }
inline bool nmethod::is_deopt_entry(address pc) {
- return pc == deopt_handler_begin();
+ return pc == deopt_handler_entry();
}
// class ExceptionCache methods
diff --git a/src/hotspot/share/code/relocInfo.cpp b/src/hotspot/share/code/relocInfo.cpp
index 286d407c94b..2a6335e2118 100644
--- a/src/hotspot/share/code/relocInfo.cpp
+++ b/src/hotspot/share/code/relocInfo.cpp
@@ -26,6 +26,7 @@
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/relocInfo.hpp"
+#include "cppstdlib/new.hpp"
#include "cppstdlib/type_traits.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@@ -37,8 +38,6 @@
#include "utilities/checkedCast.hpp"
#include "utilities/copy.hpp"
-#include <new>
-
const RelocationHolder RelocationHolder::none; // its type is relocInfo::none
diff --git a/src/hotspot/share/code/relocInfo.hpp b/src/hotspot/share/code/relocInfo.hpp
index a6a08815d10..6f1778ef479 100644
--- a/src/hotspot/share/code/relocInfo.hpp
+++ b/src/hotspot/share/code/relocInfo.hpp
@@ -25,6 +25,7 @@
#ifndef SHARE_CODE_RELOCINFO_HPP
#define SHARE_CODE_RELOCINFO_HPP
+#include "cppstdlib/new.hpp"
#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/osInfo.hpp"
@@ -32,8 +33,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
-#include <new>
-
class CodeBlob;
class Metadata;
class NativeMovConstReg;
diff --git a/src/hotspot/share/cppstdlib/new.hpp b/src/hotspot/share/cppstdlib/new.hpp
new file mode 100644
index 00000000000..ea9d6c88c87
--- /dev/null
+++ b/src/hotspot/share/cppstdlib/new.hpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CPPSTDLIB_NEW_HPP
+#define SHARE_CPPSTDLIB_NEW_HPP
+
+#include "utilities/compilerWarnings.hpp"
+
+// HotSpot usage:
+// Only the following may be used:
+// * std::nothrow_t, std::nothrow
+// * std::align_val_t
+// * The non-allocating forms of `operator new` and `operator new[]` are
+// implicitly used by the corresponding `new` and `new[]` expressions.
+// - operator new(size_t, void*) noexcept
+// - operator new[](size_t, void*) noexcept
+// Note that the non-allocating forms of `operator delete` and `operator
+// delete[]` are not used, since they are only invoked by a placement new
+// expression that fails by throwing an exception. But they might still
+// end up being referenced in such a situation.
+
+BEGIN_ALLOW_FORBIDDEN_FUNCTIONS
+#include "utilities/vmassert_uninstall.hpp"
+
+#include <new>
+
+#include "utilities/vmassert_reinstall.hpp" // don't reorder
+END_ALLOW_FORBIDDEN_FUNCTIONS
+
+// Deprecation declarations to forbid use of the default global allocator.
+// See C++17 21.6.1 Header synopsis.
+
+namespace std {
+
+#if 0
+// We could deprecate exception types, for completeness, but don't bother. We
+// already have exceptions disabled, and run into compiler bugs when we try.
+//
+// gcc -Wattributes => type attributes ignored after type is already defined
+// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=122167
+//
+// clang -Wignored-attributes => attribute declaration must precede definition
+// The clang warning is https://github.com/llvm/llvm-project/issues/135481,
+// which should be fixed in clang 21.
+class [[deprecated]] bad_alloc;
+class [[deprecated]] bad_array_new_length;
+#endif // #if 0
+
+// Forbid new_handler manipulation by HotSpot code, leaving it untouched for
+// use by application code.
+[[deprecated]] new_handler get_new_handler() noexcept;
+[[deprecated]] new_handler set_new_handler(new_handler) noexcept;
+
+// Prefer HotSpot mechanisms for padding.
+//
+// The syntax for redeclaring these for deprecation is tricky, and not
+// supported by some versions of some compilers. Dispatch on compiler and
+// version to decide whether to redeclare deprecated.
+
+#if defined(__clang__)
+// Some versions of clang with some stdlibs reject the declaration. Others may
+// accept the declaration but go wrong with uses. Different warnings and
+// link-time failures are both possible.
+// Known to have problems at least through clang19.
+
+#elif defined(__GNUC__)
+#if (__GNUC__ > 13) || (__GNUC__ == 13 && __GNUC_MINOR__ >= 2)
+// g++11.5 accepts the declaration and reports deprecation for uses, but also
+// has link-time failure for uses. Haven't tested intermediate versions.
+#define CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES 1
+#endif // restrict gcc version
+
+#elif defined(_MSC_VER)
+// VS2022-17.13.2 => error C2370: '...': redefinition; different storage class
+
+#endif // Compiler dispatch
+
+// Redeclare deprecated if such is supported.
+#ifdef CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES
+[[deprecated]] extern const size_t hardware_destructive_interference_size;
+[[deprecated]] extern const size_t hardware_constructive_interference_size;
+#undef CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES
+#endif // CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES
+
+} // namespace std
+
+// Forbid using the global allocator by HotSpot code.
+// This doesn't provide complete coverage. Some global allocation and
+// deallocation functions are implicitly declared in all translation units,
+// without needing to include <new>; see C++17 6.7.4. So this doesn't remove
+// the need for the link-time verification that these functions aren't used.
+//
+// But don't poison them when compiling gtests. The gtest framework, the
+// HotSpot wrapper around it (gtestMain.cpp), and even some tests, all have
+// new/new[] and delete/delete[] expressions that use the default global
+// allocator. We also don't apply the link-time check for gtests, for the
+// same reason.
+#ifndef HOTSPOT_GTEST
+
+[[deprecated]] void* operator new(std::size_t);
+[[deprecated]] void* operator new(std::size_t, std::align_val_t);
+[[deprecated]] void* operator new(std::size_t, const std::nothrow_t&) noexcept;
+[[deprecated]] void* operator new(std::size_t, std::align_val_t,
+ const std::nothrow_t&) noexcept;
+
+[[deprecated]] void operator delete(void*) noexcept;
+[[deprecated]] void operator delete(void*, std::size_t) noexcept;
+[[deprecated]] void operator delete(void*, std::align_val_t) noexcept;
+[[deprecated]] void operator delete(void*, std::size_t, std::align_val_t) noexcept;
+[[deprecated]] void operator delete(void*, const std::nothrow_t&) noexcept;
+[[deprecated]] void operator delete(void*, std::align_val_t,
+ const std::nothrow_t&) noexcept;
+
+[[deprecated]] void* operator new[](std::size_t);
+[[deprecated]] void* operator new[](std::size_t, std::align_val_t);
+[[deprecated]] void* operator new[](std::size_t, const std::nothrow_t&) noexcept;
+[[deprecated]] void* operator new[](std::size_t, std::align_val_t,
+ const std::nothrow_t&) noexcept;
+
+[[deprecated]] void operator delete[](void*) noexcept;
+[[deprecated]] void operator delete[](void*, std::size_t) noexcept;
+[[deprecated]] void operator delete[](void*, std::align_val_t) noexcept;
+[[deprecated]] void operator delete[](void*, std::size_t, std::align_val_t) noexcept;
+[[deprecated]] void operator delete[](void*, const std::nothrow_t&) noexcept;
+[[deprecated]] void operator delete[](void*, std::align_val_t,
+ const std::nothrow_t&) noexcept;
+
+#endif // HOTSPOT_GTEST
+
+// Allow (don't poison) the non-allocating forms from [new.delete.placement].
+
+#endif // SHARE_CPPSTDLIB_NEW_HPP
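
To illustrate what this header still permits once the default global allocator is deprecated, a small self-contained sketch of the non-allocating (placement) form of operator new; it compiles against plain <new> and does not include the HotSpot wrapper itself.

#include <cstdio>
#include <new>       // std::nothrow_t and the non-allocating operator new

struct Point {
  int x, y;
  Point(int x_, int y_) : x(x_), y(y_) {}
};

int main() {
  // Storage from some already-approved source; a stack buffer stands in here.
  alignas(Point) unsigned char buf[sizeof(Point)];

  // Placement new uses only the non-allocating form, so no call to the
  // (deprecated) default global allocator is emitted.
  Point* p = ::new (static_cast<void*>(buf)) Point(1, 2);
  std::printf("%d %d\n", p->x, p->y);

  // No matching operator delete; just run the destructor explicitly.
  p->~Point();
  return 0;
}
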
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index d18f61ff507..5567d84bee4 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -478,11 +478,6 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(uint node_index, size_t word_
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating %zu words",
Thread::current()->name(), word_size);
- if (is_shutting_down()) {
- stall_for_vm_shutdown();
- return nullptr;
- }
-
// Has the gc overhead limit been reached in the meantime? If so, this mutator
// should receive null even when unsuccessfully scheduling a collection as well
// for global consistency.
@@ -738,11 +733,6 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating %zu",
Thread::current()->name(), word_size);
- if (is_shutting_down()) {
- stall_for_vm_shutdown();
- return nullptr;
- }
-
// Has the gc overhead limit been reached in the meantime? If so, this mutator
// should receive null even when unsuccessfully scheduling a collection as well
// for global consistency.
@@ -1645,6 +1635,10 @@ jint G1CollectedHeap::initialize() {
return JNI_OK;
}
+bool G1CollectedHeap::concurrent_mark_is_terminating() const {
+ return _cm_thread->should_terminate();
+}
+
void G1CollectedHeap::stop() {
// Stop all concurrent threads. We do this to make sure these threads
// do not continue to execute and access resources (e.g. logging)
@@ -1965,8 +1959,8 @@ bool G1CollectedHeap::try_collect_concurrently(size_t allocation_word_size,
}
// If VMOp skipped initiating concurrent marking cycle because
- // we're terminating, then we're done.
- if (is_shutting_down()) {
+ // we're shutting down, then we're done.
+ if (op.is_shutting_down()) {
LOG_COLLECT_CONCURRENTLY(cause, "skipped: terminating");
return false;
}
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index 5dccf41e909..aff7166d391 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -917,6 +917,9 @@ public:
// specified by the policy object.
jint initialize() override;
+ // Returns whether concurrent mark threads (and the VM) are about to terminate.
+ bool concurrent_mark_is_terminating() const;
+
void safepoint_synchronize_begin() override;
void safepoint_synchronize_end() override;
diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp
index 47340fad768..d71108d4d0e 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp
@@ -267,8 +267,6 @@ void G1CollectionSetCandidates::set_candidates_from_marking(G1HeapRegion** candi
// the same MixedGC.
uint group_limit = p->calc_min_old_cset_length(num_candidates);
- uint num_added_to_group = 0;
-
G1CSetCandidateGroup::reset_next_group_id();
G1CSetCandidateGroup* current = nullptr;
@@ -279,7 +277,7 @@ void G1CollectionSetCandidates::set_candidates_from_marking(G1HeapRegion** candi
assert(!contains(r), "must not contain region %u", r->hrm_index());
_contains_map[r->hrm_index()] = CandidateOrigin::Marking;
- if (num_added_to_group == group_limit) {
+ if (current->length() == group_limit) {
if (group_limit != G1OldCSetGroupSize) {
group_limit = G1OldCSetGroupSize;
}
@@ -287,10 +285,8 @@ void G1CollectionSetCandidates::set_candidates_from_marking(G1HeapRegion** candi
_from_marking_groups.append(current);
current = new G1CSetCandidateGroup();
- num_added_to_group = 0;
}
current->add(r);
- num_added_to_group++;
}
_from_marking_groups.append(current);
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index d37fe9ea7ba..456d543fa10 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -1883,7 +1883,7 @@ bool G1ConcurrentMark::concurrent_cycle_abort() {
// nothing, but this situation should be extremely rare (a full gc after shutdown
// has been signalled is already rare), and this work should be negligible compared
// to actual full gc work.
- if (!cm_thread()->in_progress() && !_g1h->is_shutting_down()) {
+ if (!cm_thread()->in_progress() && !_g1h->concurrent_mark_is_terminating()) {
return false;
}
diff --git a/src/hotspot/share/gc/g1/g1HeapRegion.cpp b/src/hotspot/share/gc/g1/g1HeapRegion.cpp
index b1eeb333d8d..361e19d4be5 100644
--- a/src/hotspot/share/gc/g1/g1HeapRegion.cpp
+++ b/src/hotspot/share/gc/g1/g1HeapRegion.cpp
@@ -307,10 +307,6 @@ void G1HeapRegion::add_code_root(nmethod* nm) {
rem_set()->add_code_root(nm);
}
-void G1HeapRegion::remove_code_root(nmethod* nm) {
- rem_set()->remove_code_root(nm);
-}
-
void G1HeapRegion::code_roots_do(NMethodClosure* blk) const {
rem_set()->code_roots_do(blk);
}
diff --git a/src/hotspot/share/gc/g1/g1HeapRegion.hpp b/src/hotspot/share/gc/g1/g1HeapRegion.hpp
index 17ec3055b52..fe915b0dafe 100644
--- a/src/hotspot/share/gc/g1/g1HeapRegion.hpp
+++ b/src/hotspot/share/gc/g1/g1HeapRegion.hpp
@@ -543,7 +543,6 @@ public:
// Routines for managing a list of code roots (attached to the
// this region's RSet) that point into this heap region.
void add_code_root(nmethod* nm);
- void remove_code_root(nmethod* nm);
// Applies blk->do_nmethod() to each of the entries in
// the code roots list for this region
diff --git a/src/hotspot/share/gc/g1/g1IHOPControl.cpp b/src/hotspot/share/gc/g1/g1IHOPControl.cpp
index 34c8cd0366b..43698e9f12b 100644
--- a/src/hotspot/share/gc/g1/g1IHOPControl.cpp
+++ b/src/hotspot/share/gc/g1/g1IHOPControl.cpp
@@ -28,14 +28,63 @@
#include "gc/g1/g1Trace.hpp"
#include "logging/log.hpp"
-G1IHOPControl::G1IHOPControl(double initial_ihop_percent,
- G1OldGenAllocationTracker const* old_gen_alloc_tracker) :
- _initial_ihop_percent(initial_ihop_percent),
- _target_occupancy(0),
- _last_allocation_time_s(0.0),
- _old_gen_alloc_tracker(old_gen_alloc_tracker)
-{
- assert(_initial_ihop_percent >= 0.0 && _initial_ihop_percent <= 100.0, "Initial IHOP value must be between 0 and 100 but is %.3f", initial_ihop_percent);
+double G1IHOPControl::predict(const TruncatedSeq* seq) const {
+ assert(_is_adaptive, "precondition");
+ assert(_predictor != nullptr, "precondition");
+
+ return _predictor->predict_zero_bounded(seq);
+}
+
+bool G1IHOPControl::have_enough_data_for_prediction() const {
+ assert(_is_adaptive, "precondition");
+
+ return ((size_t)_marking_times_s.num() >= G1AdaptiveIHOPNumInitialSamples) &&
+ ((size_t)_allocation_rate_s.num() >= G1AdaptiveIHOPNumInitialSamples);
+}
+
+double G1IHOPControl::last_marking_length_s() const {
+ return _marking_times_s.last();
+}
+
+size_t G1IHOPControl::actual_target_threshold() const {
+ assert(_is_adaptive, "precondition");
+
+ // The actual target threshold takes the heap reserve and the expected waste in
+ // free space into account.
+ // _heap_reserve is that part of the total heap capacity that is reserved for
+ // eventual promotion failure.
+ // _heap_waste is the amount of space that will never be reclaimed in any
+ // heap, so it cannot be used for allocation during marking and must always be
+ // considered.
+ double safe_total_heap_percentage =
+ MIN2((double)(_heap_reserve_percent + _heap_waste_percent), 100.0);
+
+ return (size_t)MIN2(
+ G1CollectedHeap::heap()->max_capacity() * (100.0 - safe_total_heap_percentage) / 100.0,
+ _target_occupancy * (100.0 - _heap_waste_percent) / 100.0
+ );
+}
+
+G1IHOPControl::G1IHOPControl(double ihop_percent,
+ const G1OldGenAllocationTracker* old_gen_alloc_tracker,
+ bool adaptive,
+ const G1Predictions* predictor,
+ size_t heap_reserve_percent,
+ size_t heap_waste_percent)
+ : _is_adaptive(adaptive),
+ _initial_ihop_percent(ihop_percent),
+ _target_occupancy(0),
+ _heap_reserve_percent(heap_reserve_percent),
+ _heap_waste_percent(heap_waste_percent),
+ _last_allocation_time_s(0.0),
+ _old_gen_alloc_tracker(old_gen_alloc_tracker),
+ _predictor(predictor),
+ _marking_times_s(10, 0.05),
+ _allocation_rate_s(10, 0.05),
+ _last_unrestrained_young_size(0) {
+ assert(_initial_ihop_percent >= 0.0 && _initial_ihop_percent <= 100.0,
+ "IHOP percent out of range: %.3f", ihop_percent);
+ assert(!_is_adaptive || _predictor != nullptr, "precondition");
}
void G1IHOPControl::update_target_occupancy(size_t new_target_occupancy) {
@@ -50,9 +99,34 @@ void G1IHOPControl::report_statistics(G1NewTracer* new_tracer, size_t non_young_
}
void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t additional_buffer_size) {
- assert(allocation_time_s >= 0.0, "Allocation time must be positive but is %.3f", allocation_time_s);
-
+ assert(allocation_time_s > 0, "Invalid allocation time: %.3f", allocation_time_s);
_last_allocation_time_s = allocation_time_s;
+ double alloc_rate = _old_gen_alloc_tracker->last_period_old_gen_growth() / allocation_time_s;
+ _allocation_rate_s.add(alloc_rate);
+ _last_unrestrained_young_size = additional_buffer_size;
+}
+
+void G1IHOPControl::update_marking_length(double marking_length_s) {
+ assert(marking_length_s >= 0.0, "Invalid marking length: %.3f", marking_length_s);
+ _marking_times_s.add(marking_length_s);
+}
+
+size_t G1IHOPControl::get_conc_mark_start_threshold() {
+ guarantee(_target_occupancy > 0, "Target occupancy must be initialized");
+
+ if (!_is_adaptive || !have_enough_data_for_prediction()) {
+ return (size_t)(_initial_ihop_percent * _target_occupancy / 100.0);
+ }
+
+ double pred_marking_time = predict(&_marking_times_s);
+ double pred_rate = predict(&_allocation_rate_s);
+ size_t pred_bytes = (size_t)(pred_marking_time * pred_rate);
+ size_t predicted_needed = pred_bytes + _last_unrestrained_young_size;
+ size_t internal_threshold = actual_target_threshold();
+
+ return predicted_needed < internal_threshold
+ ? internal_threshold - predicted_needed
+ : 0;
}
void G1IHOPControl::print_log(size_t non_young_occupancy) {
@@ -68,6 +142,23 @@ void G1IHOPControl::print_log(size_t non_young_occupancy) {
_last_allocation_time_s * 1000.0,
_last_allocation_time_s > 0.0 ? _old_gen_alloc_tracker->last_period_old_gen_bytes() / _last_allocation_time_s : 0.0,
last_marking_length_s() * 1000.0);
+
+ if (!_is_adaptive) {
+ return;
+ }
+
+ size_t actual_threshold = actual_target_threshold();
+ log_debug(gc, ihop)("Adaptive IHOP information (value update), threshold: %zuB (%1.2f), internal target threshold: %zuB, "
+ "non-young occupancy: %zuB, additional buffer size: %zuB, predicted old gen allocation rate: %1.2fB/s, "
+ "predicted marking phase length: %1.2fms, prediction active: %s",
+ cur_conc_mark_start_threshold,
+ percent_of(cur_conc_mark_start_threshold, actual_threshold),
+ actual_threshold,
+ non_young_occupancy,
+ _last_unrestrained_young_size,
+ predict(&_allocation_rate_s),
+ predict(&_marking_times_s) * 1000.0,
+ have_enough_data_for_prediction() ? "true" : "false");
}
void G1IHOPControl::send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy) {
@@ -78,121 +169,14 @@ void G1IHOPControl::send_trace_event(G1NewTracer* tracer, size_t non_young_occup
_old_gen_alloc_tracker->last_period_old_gen_bytes(),
_last_allocation_time_s,
last_marking_length_s());
-}
-G1StaticIHOPControl::G1StaticIHOPControl(double ihop_percent,
- G1OldGenAllocationTracker const* old_gen_alloc_tracker) :
- G1IHOPControl(ihop_percent, old_gen_alloc_tracker),
- _last_marking_length_s(0.0) {
-}
-
-G1AdaptiveIHOPControl::G1AdaptiveIHOPControl(double ihop_percent,
- G1OldGenAllocationTracker const* old_gen_alloc_tracker,
- G1Predictions const* predictor,
- size_t heap_reserve_percent,
- size_t heap_waste_percent) :
- G1IHOPControl(ihop_percent, old_gen_alloc_tracker),
- _heap_reserve_percent(heap_reserve_percent),
- _heap_waste_percent(heap_waste_percent),
- _predictor(predictor),
- _marking_times_s(10, 0.05),
- _allocation_rate_s(10, 0.05),
- _last_unrestrained_young_size(0)
-{
-}
-
-size_t G1AdaptiveIHOPControl::actual_target_threshold() const {
- guarantee(_target_occupancy > 0, "Target occupancy still not updated yet.");
- // The actual target threshold takes the heap reserve and the expected waste in
- // free space into account.
- // _heap_reserve is that part of the total heap capacity that is reserved for
- // eventual promotion failure.
- // _heap_waste is the amount of space will never be reclaimed in any
- // heap, so can not be used for allocation during marking and must always be
- // considered.
-
- double safe_total_heap_percentage = MIN2((double)(_heap_reserve_percent + _heap_waste_percent), 100.0);
-
- return (size_t)MIN2(
- G1CollectedHeap::heap()->max_capacity() * (100.0 - safe_total_heap_percentage) / 100.0,
- _target_occupancy * (100.0 - _heap_waste_percent) / 100.0
- );
-}
-
-double G1AdaptiveIHOPControl::predict(TruncatedSeq const* seq) const {
- return _predictor->predict_zero_bounded(seq);
-}
-
-bool G1AdaptiveIHOPControl::have_enough_data_for_prediction() const {
- return ((size_t)_marking_times_s.num() >= G1AdaptiveIHOPNumInitialSamples) &&
- ((size_t)_allocation_rate_s.num() >= G1AdaptiveIHOPNumInitialSamples);
-}
-
-size_t G1AdaptiveIHOPControl::get_conc_mark_start_threshold() {
- if (have_enough_data_for_prediction()) {
- double pred_marking_time = predict(&_marking_times_s);
- double pred_promotion_rate = predict(&_allocation_rate_s);
- size_t pred_promotion_size = (size_t)(pred_marking_time * pred_promotion_rate);
-
- size_t predicted_needed_bytes_during_marking =
- pred_promotion_size +
- // In reality we would need the maximum size of the young gen during
- // marking. This is a conservative estimate.
- _last_unrestrained_young_size;
-
- size_t internal_threshold = actual_target_threshold();
- size_t predicted_initiating_threshold = predicted_needed_bytes_during_marking < internal_threshold ?
- internal_threshold - predicted_needed_bytes_during_marking :
- 0;
- return predicted_initiating_threshold;
- } else {
- // Use the initial value.
- return (size_t)(_initial_ihop_percent * _target_occupancy / 100.0);
+ if (_is_adaptive) {
+ tracer->report_adaptive_ihop_statistics(get_conc_mark_start_threshold(),
+ actual_target_threshold(),
+ non_young_occupancy,
+ _last_unrestrained_young_size,
+ predict(&_allocation_rate_s),
+ predict(&_marking_times_s),
+ have_enough_data_for_prediction());
}
}
-
-double G1AdaptiveIHOPControl::last_mutator_period_old_allocation_rate() const {
- assert(_last_allocation_time_s > 0, "This should not be called when the last GC is full");
-
- return _old_gen_alloc_tracker->last_period_old_gen_growth() / _last_allocation_time_s;
-}
-
-void G1AdaptiveIHOPControl::update_allocation_info(double allocation_time_s,
- size_t additional_buffer_size) {
- G1IHOPControl::update_allocation_info(allocation_time_s, additional_buffer_size);
- _allocation_rate_s.add(last_mutator_period_old_allocation_rate());
-
- _last_unrestrained_young_size = additional_buffer_size;
-}
-
-void G1AdaptiveIHOPControl::update_marking_length(double marking_length_s) {
- assert(marking_length_s >= 0.0, "Marking length must be larger than zero but is %.3f", marking_length_s);
- _marking_times_s.add(marking_length_s);
-}
-
-void G1AdaptiveIHOPControl::print_log(size_t non_young_occupancy) {
- G1IHOPControl::print_log(non_young_occupancy);
- size_t actual_threshold = actual_target_threshold();
- log_debug(gc, ihop)("Adaptive IHOP information (value update), threshold: %zuB (%1.2f), internal target threshold: %zuB, "
- "non-young occupancy: %zuB, additional buffer size: %zuB, predicted old gen allocation rate: %1.2fB/s, "
- "predicted marking phase length: %1.2fms, prediction active: %s",
- get_conc_mark_start_threshold(),
- percent_of(get_conc_mark_start_threshold(), actual_threshold),
- actual_threshold,
- non_young_occupancy,
- _last_unrestrained_young_size,
- predict(&_allocation_rate_s),
- predict(&_marking_times_s) * 1000.0,
- have_enough_data_for_prediction() ? "true" : "false");
-}
-
-void G1AdaptiveIHOPControl::send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy) {
- G1IHOPControl::send_trace_event(tracer, non_young_occupancy);
- tracer->report_adaptive_ihop_statistics(get_conc_mark_start_threshold(),
- actual_target_threshold(),
- non_young_occupancy,
- _last_unrestrained_young_size,
- predict(&_allocation_rate_s),
- predict(&_marking_times_s),
- have_enough_data_for_prediction());
-}
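
For intuition about the adaptive branch consolidated above, a small standalone sketch of the threshold arithmetic with made-up sample numbers; plain doubles stand in for the G1 predictor and for the G1ReservePercent/G1HeapWastePercent flag values.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical inputs, roughly matching the quantities used above.
  const double max_capacity     = 8.0 * 1024 * 1024 * 1024;   // 8 GiB heap
  const double target_occupancy = max_capacity;
  const double reserve_percent  = 10.0;                       // stands in for G1ReservePercent
  const double waste_percent    = 5.0;                        // stands in for G1HeapWastePercent
  const double pred_marking_s   = 2.0;                        // predicted marking length (s)
  const double pred_alloc_rate  = 50.0 * 1024 * 1024;         // predicted old-gen growth (B/s)
  const size_t young_buffer     = 512u * 1024 * 1024;         // additional buffer size

  // actual_target_threshold(): keep the reserve and expected waste out of reach.
  double safe_pct = std::min(reserve_percent + waste_percent, 100.0);
  size_t internal_threshold = (size_t)std::min(
      max_capacity * (100.0 - safe_pct) / 100.0,
      target_occupancy * (100.0 - waste_percent) / 100.0);

  // get_conc_mark_start_threshold(), adaptive branch.
  size_t predicted_needed = (size_t)(pred_marking_s * pred_alloc_rate) + young_buffer;
  size_t start_threshold  = predicted_needed < internal_threshold
                                ? internal_threshold - predicted_needed
                                : 0;

  std::printf("start concurrent marking at ~%zu MiB occupancy\n", start_threshold >> 20);
  return 0;
}
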
diff --git a/src/hotspot/share/gc/g1/g1IHOPControl.hpp b/src/hotspot/share/gc/g1/g1IHOPControl.hpp
index 392a12a785a..b6e80d9b422 100644
--- a/src/hotspot/share/gc/g1/g1IHOPControl.hpp
+++ b/src/hotspot/share/gc/g1/g1IHOPControl.hpp
@@ -32,89 +32,32 @@
class G1Predictions;
class G1NewTracer;
-// Base class for algorithms that calculate the heap occupancy at which
-// concurrent marking should start. This heap usage threshold should be relative
-// to old gen size.
+// Implements two strategies for calculating the concurrent mark starting occupancy threshold:
+// - Static mode: Uses a fixed percentage of the target heap occupancy.
+// - Adaptive mode: Predicts a threshold based on allocation rates and marking durations
+// to ensure the target occupancy is never exceeded during marking.
class G1IHOPControl : public CHeapObj<mtGC> {
- protected:
+ private:
+ const bool _is_adaptive;
+
// The initial IHOP value relative to the target occupancy.
double _initial_ihop_percent;
+
// The target maximum occupancy of the heap. The target occupancy is the number
// of bytes when marking should be finished and reclaim started.
size_t _target_occupancy;
+ // Percentage of maximum heap capacity we should avoid touching.
+ const size_t _heap_reserve_percent;
+
+ // Percentage of free heap that should be considered as waste.
+ const size_t _heap_waste_percent;
+
// Most recent complete mutator allocation period in seconds.
double _last_allocation_time_s;
-
const G1OldGenAllocationTracker* _old_gen_alloc_tracker;
- // Initialize an instance with the old gen allocation tracker and the
- // initial IHOP value in percent. The target occupancy will be updated
- // at the first heap expansion.
- G1IHOPControl(double ihop_percent, G1OldGenAllocationTracker const* old_gen_alloc_tracker);
-
- // Most recent time from the end of the concurrent start to the start of the first
- // mixed gc.
- virtual double last_marking_length_s() const = 0;
-
- virtual void print_log(size_t non_young_occupancy);
- virtual void send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy);
-
-public:
- virtual ~G1IHOPControl() { }
-
- // Get the current non-young occupancy at which concurrent marking should start.
- virtual size_t get_conc_mark_start_threshold() = 0;
-
- // Adjust target occupancy.
- virtual void update_target_occupancy(size_t new_target_occupancy);
- // Update information about time during which allocations in the Java heap occurred,
- // how large these allocations were in bytes, and an additional buffer.
- // The allocations should contain any amount of space made unusable for further
- // allocation, e.g. any waste caused by TLAB allocation, space at the end of
- // humongous objects that can not be used for allocation, etc.
- // Together with the target occupancy, this additional buffer should contain the
- // difference between old gen size and total heap size at the start of reclamation,
- // and space required for that reclamation.
- virtual void update_allocation_info(double allocation_time_s, size_t additional_buffer_size);
- // Update the time spent in the mutator beginning from the end of concurrent start to
- // the first mixed gc.
- virtual void update_marking_length(double marking_length_s) = 0;
-
- void report_statistics(G1NewTracer* tracer, size_t non_young_occupancy);
-};
-
-// The returned concurrent mark starting occupancy threshold is a fixed value
-// relative to the maximum heap size.
-class G1StaticIHOPControl : public G1IHOPControl {
- // Most recent mutator time between the end of concurrent mark to the start of the
- // first mixed gc.
- double _last_marking_length_s;
- protected:
- double last_marking_length_s() const { return _last_marking_length_s; }
- public:
- G1StaticIHOPControl(double ihop_percent, G1OldGenAllocationTracker const* old_gen_alloc_tracker);
-
- size_t get_conc_mark_start_threshold() {
- guarantee(_target_occupancy > 0, "Target occupancy must have been initialized.");
- return (size_t) (_initial_ihop_percent * _target_occupancy / 100.0);
- }
-
- virtual void update_marking_length(double marking_length_s) {
- assert(marking_length_s > 0.0, "Marking length must be larger than zero but is %.3f", marking_length_s);
- _last_marking_length_s = marking_length_s;
- }
-};
-
-// This algorithm tries to return a concurrent mark starting occupancy value that
-// makes sure that during marking the given target occupancy is never exceeded,
-// based on predictions of current allocation rate and time periods between
-// concurrent start and the first mixed gc.
-class G1AdaptiveIHOPControl : public G1IHOPControl {
- size_t _heap_reserve_percent; // Percentage of maximum heap capacity we should avoid to touch
- size_t _heap_waste_percent; // Percentage of free heap that should be considered as waste.
-
- const G1Predictions * _predictor;
+ const G1Predictions* _predictor;
TruncatedSeq _marking_times_s;
TruncatedSeq _allocation_rate_s;
@@ -128,35 +71,48 @@ class G1AdaptiveIHOPControl : public G1IHOPControl {
size_t _last_unrestrained_young_size;
// Get a new prediction bounded below by zero from the given sequence.
- double predict(TruncatedSeq const* seq) const;
+ double predict(const TruncatedSeq* seq) const;
bool have_enough_data_for_prediction() const;
+ double last_marking_length_s() const;
// The "actual" target threshold the algorithm wants to keep during and at the
// end of marking. This is typically lower than the requested threshold, as the
// algorithm needs to consider restrictions by the environment.
size_t actual_target_threshold() const;
- // This method calculates the old gen allocation rate based on the net survived
- // bytes that are allocated in the old generation in the last mutator period.
- double last_mutator_period_old_allocation_rate() const;
- protected:
- virtual double last_marking_length_s() const { return _marking_times_s.last(); }
-
- virtual void print_log(size_t non_young_occupancy);
- virtual void send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy);
+ void print_log(size_t non_young_occupancy);
+ void send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy);
public:
- G1AdaptiveIHOPControl(double ihop_percent,
- G1OldGenAllocationTracker const* old_gen_alloc_tracker,
- G1Predictions const* predictor,
- size_t heap_reserve_percent, // The percentage of total heap capacity that should not be tapped into.
- size_t heap_waste_percent); // The percentage of the free space in the heap that we think is not usable for allocation.
+ G1IHOPControl(double ihop_percent,
+ const G1OldGenAllocationTracker* old_gen_alloc_tracker,
+ bool adaptive,
+ const G1Predictions* predictor,
+ size_t heap_reserve_percent,
+ size_t heap_waste_percent);
- virtual size_t get_conc_mark_start_threshold();
+ // Adjust target occupancy.
+ void update_target_occupancy(size_t new_target_occupancy);
- virtual void update_allocation_info(double allocation_time_s, size_t additional_buffer_size);
- virtual void update_marking_length(double marking_length_s);
+ // Update information about time during which allocations in the Java heap occurred,
+ // how large these allocations were in bytes, and an additional buffer.
+ // The allocations should contain any amount of space made unusable for further
+ // allocation, e.g. any waste caused by TLAB allocation, space at the end of
+ // humongous objects that can not be used for allocation, etc.
+ // Together with the target occupancy, this additional buffer should contain the
+ // difference between old gen size and total heap size at the start of reclamation,
+ // and space required for that reclamation.
+ void update_allocation_info(double allocation_time_s, size_t additional_buffer_size);
+
+ // Update the time spent in the mutator beginning from the end of concurrent start to
+ // the first mixed gc.
+ void update_marking_length(double marking_length_s);
+
+ // Get the current non-young occupancy at which concurrent marking should start.
+ size_t get_conc_mark_start_threshold();
+
+ void report_statistics(G1NewTracer* tracer, size_t non_young_occupancy);
};
#endif // SHARE_GC_G1_G1IHOPCONTROL_HPP
diff --git a/src/hotspot/share/gc/g1/g1OldGenAllocationTracker.hpp b/src/hotspot/share/gc/g1/g1OldGenAllocationTracker.hpp
index 265c7029e14..aa5e3c6c942 100644
--- a/src/hotspot/share/gc/g1/g1OldGenAllocationTracker.hpp
+++ b/src/hotspot/share/gc/g1/g1OldGenAllocationTracker.hpp
@@ -28,8 +28,6 @@
#include "gc/g1/g1HeapRegion.hpp"
#include "memory/allocation.hpp"
-class G1AdaptiveIHOPControl;
-
// Track allocation details in the old generation.
class G1OldGenAllocationTracker : public CHeapObj<mtGC> {
// Total number of bytes allocated in the old generation at the end
diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp
index 19573e11cd7..6eef6cbfa87 100644
--- a/src/hotspot/share/gc/g1/g1Policy.cpp
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp
@@ -669,7 +669,6 @@ bool G1Policy::should_retain_evac_failed_region(uint index) const {
}
void G1Policy::record_pause_start_time() {
- assert(!_g1h->is_shutting_down(), "Invariant!");
Ticks now = Ticks::now();
_cur_pause_start_sec = now.seconds();
@@ -1026,15 +1025,12 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
G1IHOPControl* G1Policy::create_ihop_control(const G1OldGenAllocationTracker* old_gen_alloc_tracker,
const G1Predictions* predictor) {
- if (G1UseAdaptiveIHOP) {
- return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
- old_gen_alloc_tracker,
- predictor,
- G1ReservePercent,
- G1HeapWastePercent);
- } else {
- return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent, old_gen_alloc_tracker);
- }
+ return new G1IHOPControl(InitiatingHeapOccupancyPercent,
+ old_gen_alloc_tracker,
+ G1UseAdaptiveIHOP,
+ predictor,
+ G1ReservePercent,
+ G1HeapWastePercent);
}
bool G1Policy::update_ihop_prediction(double mutator_time_s,
@@ -1280,12 +1276,6 @@ void G1Policy::decide_on_concurrent_start_pause() {
// concurrent start pause).
assert(!collector_state()->in_concurrent_start_gc(), "pre-condition");
- // We should not be starting a concurrent start pause if the concurrent mark
- // thread is terminating.
- if (_g1h->is_shutting_down()) {
- return;
- }
-
if (collector_state()->initiate_conc_mark_if_possible()) {
// We had noticed on a previous pause that the heap occupancy has
// gone over the initiating threshold and we should start a
diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp
index f0bacefd71c..d0633466f37 100644
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp
@@ -992,10 +992,11 @@ class G1MergeHeapRootsTask : public WorkerTask {
}
};
- // Closure to make sure that the marking bitmap is clear for any old region in
- // the collection set.
- // This is needed to be able to use the bitmap for evacuation failure handling.
- class G1ClearBitmapClosure : public G1HeapRegionClosure {
+ // Closure to prepare the collection set regions for evacuation failure, i.e. make
+ // sure that the mark bitmap is clear for any old region in the collection set.
+ //
+ // These mark bitmaps record the evacuation failed objects.
+ class G1PrepareRegionsForEvacFailClosure : public G1HeapRegionClosure {
G1CollectedHeap* _g1h;
G1RemSetScanState* _scan_state;
bool _initial_evacuation;
@@ -1018,18 +1019,12 @@ class G1MergeHeapRootsTask : public WorkerTask {
// the pause occurs during the Concurrent Cleanup for Next Mark phase.
// Only at that point the region's bitmap may contain marks while being in the collection
// set at the same time.
- //
- // There is one exception: shutdown might have aborted the Concurrent Cleanup for Next
- // Mark phase midway, which might have also left stale marks in old generation regions.
- // There might actually have been scheduled multiple collections, but at that point we do
- // not care that much about performance and just do the work multiple times if needed.
- return (_g1h->collector_state()->clear_bitmap_in_progress() ||
- _g1h->is_shutting_down()) &&
- hr->is_old();
+ return _g1h->collector_state()->clear_bitmap_in_progress() &&
+ hr->is_old();
}
public:
- G1ClearBitmapClosure(G1CollectedHeap* g1h, G1RemSetScanState* scan_state, bool initial_evacuation) :
+ G1PrepareRegionsForEvacFailClosure(G1CollectedHeap* g1h, G1RemSetScanState* scan_state, bool initial_evacuation) :
_g1h(g1h),
_scan_state(scan_state),
_initial_evacuation(initial_evacuation)
@@ -1178,8 +1173,8 @@ public:
// Preparation for evacuation failure handling.
{
- G1ClearBitmapClosure clear(g1h, _scan_state, _initial_evacuation);
- g1h->collection_set_iterate_increment_from(&clear, &_hr_claimer, worker_id);
+ G1PrepareRegionsForEvacFailClosure prepare_evac_failure(g1h, _scan_state, _initial_evacuation);
+ g1h->collection_set_iterate_increment_from(&prepare_evac_failure, &_hr_claimer, worker_id);
}
}
};
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index ebaea3ecba4..3a13d0d0535 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -344,11 +344,6 @@ HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size, bool is_tlab) {
assert(is_in_or_null(op.result()), "result not in heap");
return op.result();
}
-
- if (is_shutting_down()) {
- stall_for_vm_shutdown();
- return nullptr;
- }
}
// Was the gc-overhead reached inside the safepoint? If so, this mutator
@@ -370,6 +365,55 @@ void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
}
+bool ParallelScavengeHeap::should_attempt_young_gc() const {
+ const bool ShouldRunYoungGC = true;
+ const bool ShouldRunFullGC = false;
+
+ if (!_young_gen->to_space()->is_empty()) {
+ log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
+ return ShouldRunFullGC;
+ }
+
+ // Check if the predicted promoted bytes will overflow free space in old-gen.
+ PSAdaptiveSizePolicy* policy = _size_policy;
+
+ size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
+ size_t promotion_estimate = MIN2(avg_promoted, _young_gen->used_in_bytes());
+ // Total free size after possible old gen expansion
+ size_t free_in_old_gen_with_expansion = _old_gen->max_gen_size() - _old_gen->used_in_bytes();
+
+ log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",
+ (size_t) policy->average_promoted_in_bytes(),
+ (size_t) policy->padded_average_promoted_in_bytes());
+
+ if (promotion_estimate >= free_in_old_gen_with_expansion) {
+ log_debug(gc, ergo)("Run full-gc; predicted promotion size >= max free space in old-gen: %zu >= %zu",
+ promotion_estimate, free_in_old_gen_with_expansion);
+ return ShouldRunFullGC;
+ }
+
+ if (UseAdaptiveSizePolicy) {
+ // Also checking OS has enough free memory to commit and expand old-gen.
+ // Otherwise, the recorded gc-pause-time might be inflated to include time
+ // of OS preparing free memory, resulting in inaccurate young-gen resizing.
+ assert(_old_gen->committed().byte_size() >= _old_gen->used_in_bytes(), "inv");
+ // Use uint64_t instead of size_t for 32bit compatibility.
+ uint64_t free_mem_in_os;
+ if (os::free_memory(free_mem_in_os)) {
+ size_t actual_free = (size_t)MIN2(_old_gen->committed().byte_size() - _old_gen->used_in_bytes() + free_mem_in_os,
+ (uint64_t)SIZE_MAX);
+ if (promotion_estimate > actual_free) {
+ log_debug(gc, ergo)("Run full-gc; predicted promotion size > free space in old-gen and OS: %zu > %zu",
+ promotion_estimate, actual_free);
+ return ShouldRunFullGC;
+ }
+ }
+ }
+
+ // No particular reasons to run full-gc, so young-gc.
+ return ShouldRunYoungGC;
+}
+
static bool check_gc_heap_free_limit(size_t free_bytes, size_t capacity_bytes) {
return (free_bytes * 100 / capacity_bytes) < GCHeapFreeLimit;
}
@@ -516,17 +560,18 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
VMThread::execute(&op);
}
-void ParallelScavengeHeap::collect_at_safepoint(bool full) {
+void ParallelScavengeHeap::collect_at_safepoint(bool is_full) {
assert(!GCLocker::is_active(), "precondition");
bool clear_soft_refs = GCCause::should_clear_all_soft_refs(_gc_cause);
- if (!full) {
- bool success = PSScavenge::invoke(clear_soft_refs);
- if (success) {
+ if (!is_full && should_attempt_young_gc()) {
+ bool young_gc_success = PSScavenge::invoke(clear_soft_refs);
+ if (young_gc_success) {
return;
}
- // Upgrade to Full-GC if young-gc fails
+ log_debug(gc, heap)("Upgrade to Full-GC since Young-gc failed.");
}
+
const bool should_do_max_compaction = false;
PSParallelCompact::invoke(clear_soft_refs, should_do_max_compaction);
}
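
A compact standalone sketch of the young-versus-full decision that should_attempt_young_gc() now encodes on the heap side; the types and numbers below are hypothetical, and the OS free-memory probe is reduced to a single field.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Hypothetical snapshot of the generations; not the ParallelScavengeHeap API.
struct GenState {
  uint64_t to_space_used;        // bytes live in to-space
  uint64_t young_used;           // bytes used in young gen
  uint64_t padded_avg_promoted;  // size policy's padded promotion average
  uint64_t old_max;              // maximum old-gen size
  uint64_t old_used;             // old-gen bytes used
  uint64_t old_committed;        // old-gen bytes committed
  uint64_t os_free;              // free memory reported by the OS
};

// Returns true for a young GC, false when a full GC looks safer.
static bool should_attempt_young_gc(const GenState& s) {
  if (s.to_space_used != 0) return false;                    // to-space must be empty

  uint64_t promotion_estimate  = std::min(s.padded_avg_promoted, s.young_used);
  uint64_t free_with_expansion = s.old_max - s.old_used;
  if (promotion_estimate >= free_with_expansion) return false;

  // Also require that committed-but-unused old-gen space plus OS free memory
  // can absorb the predicted promotion without stalling on commit.
  uint64_t actual_free = (s.old_committed - s.old_used) + s.os_free;
  if (promotion_estimate > actual_free) return false;

  return true;
}

int main() {
  GenState s{0, 256ull << 20, 64ull << 20, 2048ull << 20,
             1024ull << 20, 1100ull << 20, 512ull << 20};
  std::printf("%s\n", should_attempt_young_gc(s) ? "young gc" : "full gc");
  return 0;
}
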
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
index f9161afc28f..5d8ddbcaaed 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -119,6 +119,9 @@ class ParallelScavengeHeap : public CollectedHeap {
void print_tracing_info() const override;
void stop() override {};
+ // Returns true if a young GC should be attempted, false if a full GC is preferred.
+ bool should_attempt_young_gc() const;
+
public:
ParallelScavengeHeap() :
CollectedHeap(),
@@ -199,7 +202,6 @@ public:
bool requires_barriers(stackChunkOop obj) const override;
MemRegion reserved_region() const { return _reserved; }
- HeapWord* base() const { return _reserved.start(); }
// Memory allocation.
HeapWord* mem_allocate(size_t size) override;
diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp
index e738a13d464..d1d595df529 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp
@@ -313,12 +313,6 @@ bool PSScavenge::invoke(bool clear_soft_refs) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
- // Check for potential problems.
- if (!should_attempt_scavenge()) {
- log_info(gc, ergo)("Young-gc might fail so skipping");
- return false;
- }
-
IsSTWGCActiveMark mark;
_gc_timer.register_gc_start();
@@ -336,8 +330,7 @@ bool PSScavenge::invoke(bool clear_soft_refs) {
PSOldGen* old_gen = heap->old_gen();
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
- assert(young_gen->to_space()->is_empty(),
- "Attempt to scavenge with live objects in to_space");
+ assert(young_gen->to_space()->is_empty(), "precondition");
heap->increment_total_collections();
@@ -520,59 +513,6 @@ void PSScavenge::clean_up_failed_promotion() {
NOT_PRODUCT(ParallelScavengeHeap::heap()->reset_promotion_should_fail();)
}
-bool PSScavenge::should_attempt_scavenge() {
- const bool ShouldRunYoungGC = true;
- const bool ShouldRunFullGC = false;
-
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- PSYoungGen* young_gen = heap->young_gen();
- PSOldGen* old_gen = heap->old_gen();
-
- if (!young_gen->to_space()->is_empty()) {
- log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
- return ShouldRunFullGC;
- }
-
- // Check if the predicted promoted bytes will overflow free space in old-gen.
- PSAdaptiveSizePolicy* policy = heap->size_policy();
-
- size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
- size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
- // Total free size after possible old gen expansion
- size_t free_in_old_gen_with_expansion = old_gen->max_gen_size() - old_gen->used_in_bytes();
-
- log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",
- (size_t) policy->average_promoted_in_bytes(),
- (size_t) policy->padded_average_promoted_in_bytes());
-
- if (promotion_estimate >= free_in_old_gen_with_expansion) {
- log_debug(gc, ergo)("Run full-gc; predicted promotion size >= max free space in old-gen: %zu >= %zu",
- promotion_estimate, free_in_old_gen_with_expansion);
- return ShouldRunFullGC;
- }
-
- if (UseAdaptiveSizePolicy) {
- // Also checking OS has enough free memory to commit and expand old-gen.
- // Otherwise, the recorded gc-pause-time might be inflated to include time
- // of OS preparing free memory, resulting in inaccurate young-gen resizing.
- assert(old_gen->committed().byte_size() >= old_gen->used_in_bytes(), "inv");
- // Use uint64_t instead of size_t for 32bit compatibility.
- uint64_t free_mem_in_os;
- if (os::free_memory(free_mem_in_os)) {
- size_t actual_free = (size_t)MIN2(old_gen->committed().byte_size() - old_gen->used_in_bytes() + free_mem_in_os,
- (uint64_t)SIZE_MAX);
- if (promotion_estimate > actual_free) {
- log_debug(gc, ergo)("Run full-gc; predicted promotion size > free space in old-gen and OS: %zu > %zu",
- promotion_estimate, actual_free);
- return ShouldRunFullGC;
- }
- }
- }
-
- // No particular reasons to run full-gc, so young-gc.
- return ShouldRunYoungGC;
-}
-
// Adaptive size policy support.
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
_young_generation_boundary = v;
diff --git a/src/hotspot/share/gc/parallel/psScavenge.hpp b/src/hotspot/share/gc/parallel/psScavenge.hpp
index c297a46a46e..af9b91f74bc 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.hpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.hpp
@@ -64,8 +64,6 @@ class PSScavenge: AllStatic {
static void clean_up_failed_promotion();
- static bool should_attempt_scavenge();
-
// Private accessors
static PSCardTable* card_table() { assert(_card_table != nullptr, "Sanity"); return _card_table; }
static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; }
diff --git a/src/hotspot/share/gc/serial/serialHeap.cpp b/src/hotspot/share/gc/serial/serialHeap.cpp
index 00d74e691eb..03ad1282f5f 100644
--- a/src/hotspot/share/gc/serial/serialHeap.cpp
+++ b/src/hotspot/share/gc/serial/serialHeap.cpp
@@ -337,11 +337,6 @@ HeapWord* SerialHeap::mem_allocate_work(size_t size, bool is_tlab) {
break;
}
- if (is_shutting_down()) {
- stall_for_vm_shutdown();
- return nullptr;
- }
-
// Give a warning if we seem to be looping forever.
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
diff --git a/src/hotspot/share/gc/shared/bufferNode.cpp b/src/hotspot/share/gc/shared/bufferNode.cpp
index b064f9c7efe..90e50f52e84 100644
--- a/src/hotspot/share/gc/shared/bufferNode.cpp
+++ b/src/hotspot/share/gc/shared/bufferNode.cpp
@@ -22,12 +22,11 @@
*
*/
+#include "cppstdlib/new.hpp"
#include "gc/shared/bufferNode.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/debug.hpp"
-#include <new>
-
BufferNode::AllocatorConfig::AllocatorConfig(size_t size)
: _buffer_capacity(size)
{
diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp
index c8dd39e72be..a59ea3745ab 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp
@@ -62,12 +62,14 @@
class ClassLoaderData;
+bool CollectedHeap::_is_shutting_down = false;
+
size_t CollectedHeap::_lab_alignment_reserve = SIZE_MAX;
Klass* CollectedHeap::_filler_object_klass = nullptr;
size_t CollectedHeap::_filler_array_max_size = 0;
size_t CollectedHeap::_stack_chunk_max_size = 0;
-class GCLogMessage : public FormatBuffer<512> {};
+class GCLogMessage : public FormatBuffer<1024> {};
template <>
void EventLogBase<GCLogMessage>::print(outputStream* st, GCLogMessage& m) {
@@ -377,8 +379,7 @@ MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loa
word_size,
mdtype,
gc_count,
- full_gc_count,
- GCCause::_metadata_GC_threshold);
+ full_gc_count);
VMThread::execute(&op);
@@ -386,11 +387,6 @@ MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loa
return op.result();
}
- if (is_shutting_down()) {
- stall_for_vm_shutdown();
- return nullptr;
- }
-
loop_count++;
if ((QueuedAllocationWarningCount > 0) &&
(loop_count % QueuedAllocationWarningCount == 0)) {
@@ -605,30 +601,20 @@ void CollectedHeap::post_initialize() {
initialize_serviceability();
}
-bool CollectedHeap::is_shutting_down() const {
- return Universe::is_shutting_down();
+bool CollectedHeap::is_shutting_down() {
+ assert(Heap_lock->owned_by_self(), "Protected by this lock");
+ return _is_shutting_down;
}
-void CollectedHeap::stall_for_vm_shutdown() {
- assert(is_shutting_down(), "Precondition");
- // Stall the thread (2 seconds) instead of an indefinite wait to avoid deadlock
- // if the VM shutdown triggers a GC.
- // The 2-seconds sleep is:
- // - long enough to keep daemon threads stalled, while the shutdown
- // sequence completes in the common case.
- // - short enough to avoid excessive stall time if the shutdown itself
- // triggers a GC.
- JavaThread::current()->sleep(2 * MILLIUNITS);
+void CollectedHeap::initiate_shutdown() {
+ {
+ // Acquire the Heap_lock to synchronize with VM_Heap_Sync_Operations,
+ // which may depend on the value of the _is_shutting_down flag.
+ MutexLocker hl(Heap_lock);
+ _is_shutting_down = true;
+ }
- ResourceMark rm;
- log_warning(gc, alloc)("%s: Stall for VM-Shutdown timed out; allocation may fail with OOME", Thread::current()->name());
-}
-
-void CollectedHeap::before_exit() {
print_tracing_info();
-
- // Stop any on-going concurrent work and prepare for exit.
- stop();
}
size_t CollectedHeap::bootstrap_max_memory() const {
diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp
index 6be0057480d..6f335b1cdf4 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp
@@ -96,6 +96,8 @@ class CollectedHeap : public CHeapObj<mtGC> {
friend class MemAllocator;
private:
+ static bool _is_shutting_down;
+
GCHeapLog* _heap_log;
GCMetaspaceLog* _metaspace_log;
@@ -209,11 +211,10 @@ protected:
// Default implementation does nothing.
virtual void print_tracing_info() const = 0;
+ public:
// Stop any onging concurrent work and prepare for exit.
virtual void stop() = 0;
- public:
-
static inline size_t filler_array_max_size() {
return _filler_array_max_size;
}
@@ -245,14 +246,9 @@ protected:
// This is the correct place to place such initialization methods.
virtual void post_initialize();
- bool is_shutting_down() const;
+ static bool is_shutting_down();
- // If the VM is shutting down, we may have skipped VM_CollectForAllocation.
- // In this case, stall the allocation request briefly in the hope that
- // the VM shutdown completes before the allocation request returns.
- void stall_for_vm_shutdown();
-
- void before_exit();
+ void initiate_shutdown();
// Stop and resume concurrent GC threads interfering with safepoint operations
virtual void safepoint_synchronize_begin() {}
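
The _is_shutting_down flag added above is only read and written while Heap_lock is held, which is what the assert in CollectedHeap::is_shutting_down() enforces. A minimal standalone sketch of that lock-protected-flag pattern follows; ShutdownState is an illustrative name and std::mutex stands in for Heap_lock, so this is not HotSpot code.

// Sketch of a flag that may only be observed under a lock; a model of the
// pattern under the assumptions above, not the HotSpot classes.
#include <cassert>
#include <mutex>

class ShutdownState {
  static inline bool _is_shutting_down = false;   // protected by _heap_lock
  static inline std::mutex _heap_lock;            // stand-in for Heap_lock

public:
  static std::mutex& heap_lock() { return _heap_lock; }

  // Caller must already hold the lock, mirroring the assert in
  // CollectedHeap::is_shutting_down(); std::mutex has no owned_by_self(),
  // so callers are simply trusted here.
  static bool is_shutting_down() { return _is_shutting_down; }

  // Writer takes the lock briefly, like CollectedHeap::initiate_shutdown().
  static void initiate_shutdown() {
    std::lock_guard<std::mutex> guard(_heap_lock);
    _is_shutting_down = true;
  }
};

int main() {
  ShutdownState::initiate_shutdown();
  std::lock_guard<std::mutex> guard(ShutdownState::heap_lock());
  assert(ShutdownState::is_shutting_down());
  return 0;
}
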
diff --git a/src/hotspot/share/gc/shared/freeListAllocator.cpp b/src/hotspot/share/gc/shared/freeListAllocator.cpp
index c6801c2be18..990bf88aade 100644
--- a/src/hotspot/share/gc/shared/freeListAllocator.cpp
+++ b/src/hotspot/share/gc/shared/freeListAllocator.cpp
@@ -41,26 +41,26 @@ FreeListAllocator::PendingList::PendingList() :
size_t FreeListAllocator::PendingList::add(FreeNode* node) {
assert(node->next() == nullptr, "precondition");
- FreeNode* old_head = AtomicAccess::xchg(&_head, node);
+ FreeNode* old_head = _head.exchange(node);
if (old_head != nullptr) {
node->set_next(old_head);
} else {
assert(_tail == nullptr, "invariant");
_tail = node;
}
- return AtomicAccess::add(&_count, size_t(1));
+ return _count.add_then_fetch(1u);
}
typename FreeListAllocator::NodeList FreeListAllocator::PendingList::take_all() {
- NodeList result{AtomicAccess::load(&_head), _tail, AtomicAccess::load(&_count)};
- AtomicAccess::store(&_head, (FreeNode*)nullptr);
+ NodeList result{_head.load_relaxed(), _tail, _count.load_relaxed()};
+ _head.store_relaxed(nullptr);
_tail = nullptr;
- AtomicAccess::store(&_count, size_t(0));
+ _count.store_relaxed(0u);
return result;
}
size_t FreeListAllocator::PendingList::count() const {
- return AtomicAccess::load(&_count);
+ return _count.load_relaxed();
}
FreeListAllocator::FreeListAllocator(const char* name, FreeListConfig* config) :
@@ -85,7 +85,7 @@ void FreeListAllocator::delete_list(FreeNode* list) {
}
FreeListAllocator::~FreeListAllocator() {
- uint index = AtomicAccess::load(&_active_pending_list);
+ uint index = _active_pending_list.load_relaxed();
NodeList pending_list = _pending_lists[index].take_all();
delete_list(pending_list._head);
delete_list(_free_list.pop_all());
@@ -93,18 +93,18 @@ FreeListAllocator::~FreeListAllocator() {
// Drop existing nodes and reset all counters
void FreeListAllocator::reset() {
- uint index = AtomicAccess::load(&_active_pending_list);
+ uint index = _active_pending_list.load_relaxed();
_pending_lists[index].take_all();
_free_list.pop_all();
- _free_count = 0;
+ _free_count.store_relaxed(0u);
}
size_t FreeListAllocator::free_count() const {
- return AtomicAccess::load(&_free_count);
+ return _free_count.load_relaxed();
}
size_t FreeListAllocator::pending_count() const {
- uint index = AtomicAccess::load(&_active_pending_list);
+ uint index = _active_pending_list.load_relaxed();
return _pending_lists[index].count();
}
@@ -124,7 +124,7 @@ void* FreeListAllocator::allocate() {
// Decrement count after getting buffer from free list. This, along
// with incrementing count before adding to free list, ensures count
// never underflows.
- size_t count = AtomicAccess::sub(&_free_count, 1u);
+ size_t count = _free_count.sub_then_fetch(1u);
assert((count + 1) != 0, "_free_count underflow");
return node;
} else {
@@ -149,7 +149,7 @@ void FreeListAllocator::release(void* free_node) {
// we're done with what might be the pending list to be transferred.
{
GlobalCounter::CriticalSection cs(Thread::current());
- uint index = AtomicAccess::load_acquire(&_active_pending_list);
+ uint index = _active_pending_list.load_acquire();
size_t count = _pending_lists[index].add(node);
if (count <= _config->transfer_threshold()) return;
}
@@ -164,17 +164,17 @@ void FreeListAllocator::release(void* free_node) {
// in-progress transfer.
bool FreeListAllocator::try_transfer_pending() {
// Attempt to claim the lock.
- if (AtomicAccess::load(&_transfer_lock) || // Skip CAS if likely to fail.
- AtomicAccess::cmpxchg(&_transfer_lock, false, true)) {
+ if (_transfer_lock.load_relaxed() || // Skip CAS if likely to fail.
+ _transfer_lock.compare_exchange(false, true)) {
return false;
}
// Have the lock; perform the transfer.
// Change which pending list is active. Don't need an atomic RMW since
// we have the lock and we're the only writer.
- uint index = AtomicAccess::load(&_active_pending_list);
+ uint index = _active_pending_list.load_relaxed();
uint new_active = (index + 1) % ARRAY_SIZE(_pending_lists);
- AtomicAccess::release_store(&_active_pending_list, new_active);
+ _active_pending_list.release_store(new_active);
// Wait for all critical sections in the buffer life-cycle to complete.
// This includes _free_list pops and adding to the now inactive pending
@@ -186,11 +186,11 @@ bool FreeListAllocator::try_transfer_pending() {
size_t count = transfer_list._entry_count;
if (count > 0) {
// Update count first so no underflow in allocate().
- AtomicAccess::add(&_free_count, count);
+ _free_count.add_then_fetch(count);
_free_list.prepend(*transfer_list._head, *transfer_list._tail);
log_trace(gc, freelist)
("Transferred %s pending to free: %zu", name(), count);
}
- AtomicAccess::release_store(&_transfer_lock, false);
+ _transfer_lock.release_store(false);
return true;
}
diff --git a/src/hotspot/share/gc/shared/freeListAllocator.hpp b/src/hotspot/share/gc/shared/freeListAllocator.hpp
index 07e075a6725..dd163f0fe67 100644
--- a/src/hotspot/share/gc/shared/freeListAllocator.hpp
+++ b/src/hotspot/share/gc/shared/freeListAllocator.hpp
@@ -27,7 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/padded.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/lockFreeStack.hpp"
@@ -62,15 +62,15 @@ public:
// to the free list making them available for re-allocation.
class FreeListAllocator {
struct FreeNode {
- FreeNode* volatile _next;
+ Atomic<FreeNode*> _next;
FreeNode() : _next (nullptr) { }
- FreeNode* next() { return AtomicAccess::load(&_next); }
+ FreeNode* next() { return _next.load_relaxed(); }
- FreeNode* volatile* next_addr() { return &_next; }
+ Atomic<FreeNode*>* next_addr() { return &_next; }
- void set_next(FreeNode* next) { AtomicAccess::store(&_next, next); }
+ void set_next(FreeNode* next) { _next.store_relaxed(next); }
};
struct NodeList {
@@ -85,8 +85,8 @@ class FreeListAllocator {
class PendingList {
FreeNode* _tail;
- FreeNode* volatile _head;
- volatile size_t _count;
+ Atomic<FreeNode*> _head;
+ Atomic<size_t> _count;
NONCOPYABLE(PendingList);
@@ -105,20 +105,20 @@ class FreeListAllocator {
NodeList take_all();
};
- static FreeNode* volatile* next_ptr(FreeNode& node) { return node.next_addr(); }
- typedef LockFreeStack<FreeNode, &next_ptr> Stack;
+ static Atomic<FreeNode*>* next_ptr(FreeNode& node) { return node.next_addr(); }
+ using Stack = LockFreeStack<FreeNode, &next_ptr>;
FreeListConfig* _config;
char _name[DEFAULT_PADDING_SIZE - sizeof(FreeListConfig*)]; // Use name as padding.
#define DECLARE_PADDED_MEMBER(Id, Type, Name) \
Type Name; DEFINE_PAD_MINUS_SIZE(Id, DEFAULT_PADDING_SIZE, sizeof(Type))
- DECLARE_PADDED_MEMBER(1, volatile size_t, _free_count);
+ DECLARE_PADDED_MEMBER(1, Atomic<size_t>, _free_count);
DECLARE_PADDED_MEMBER(2, Stack, _free_list);
- DECLARE_PADDED_MEMBER(3, volatile bool, _transfer_lock);
+ DECLARE_PADDED_MEMBER(3, Atomic<bool>, _transfer_lock);
#undef DECLARE_PADDED_MEMBER
- volatile uint _active_pending_list;
+ Atomic<uint> _active_pending_list;
PendingList _pending_lists[2];
void delete_list(FreeNode* list);
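
The changes above replace AtomicAccess free functions on volatile fields with member calls on an Atomic<T> wrapper: load_relaxed, store_relaxed, load_acquire, release_store, add_then_fetch, sub_then_fetch and compare_exchange. The following stand-in, built on std::atomic, sketches the shape of that API as it is used in this patch; it is an illustration under that assumption, not the HotSpot class.

#include <atomic>
#include <cassert>
#include <cstddef>

template <typename T>
class Atomic {
  std::atomic<T> _value;
public:
  constexpr Atomic(T v = T()) : _value(v) {}

  T    load_relaxed() const  { return _value.load(std::memory_order_relaxed); }
  void store_relaxed(T v)    { _value.store(v, std::memory_order_relaxed); }
  T    load_acquire() const  { return _value.load(std::memory_order_acquire); }
  void release_store(T v)    { _value.store(v, std::memory_order_release); }

  T add_then_fetch(T v) { return _value.fetch_add(v) + v; }
  T sub_then_fetch(T v) { return _value.fetch_sub(v) - v; }

  // Returns the previously observed value, so it equals 'expected' on success.
  T compare_exchange(T expected, T desired) {
    _value.compare_exchange_strong(expected, desired);
    return expected;   // updated to the observed value when the CAS fails
  }
};

int main() {
  Atomic<size_t> count{0};
  assert(count.add_then_fetch(3) == 3);
  assert(count.sub_then_fetch(1) == 2);
  assert(count.compare_exchange(2, 5) == 2);   // success: old value returned
  assert(count.compare_exchange(2, 7) == 5);   // failure: current value returned
  return 0;
}
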
diff --git a/src/hotspot/share/gc/shared/gcVMOperations.cpp b/src/hotspot/share/gc/shared/gcVMOperations.cpp
index 36aa0c9843d..6dbfd56b4e9 100644
--- a/src/hotspot/share/gc/shared/gcVMOperations.cpp
+++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp
@@ -92,6 +92,22 @@ static bool should_use_gclocker() {
return UseSerialGC || UseParallelGC;
}
+static void block_if_java_thread() {
+ Thread* thread = Thread::current();
+ if (thread->is_Java_thread()) {
+ // Block here and allow the shutdown to complete
+ while (true) {
+ // The call to wait has a few important effects:
+ // 1) Block forever (minus spurious wake-ups, hence the loop)
+ // 2) Release the Heap_lock, which is taken by the shutdown code
+ // 3) Transition to blocked state so that the final VM_Exit operation can be scheduled
+ Heap_lock->wait();
+ }
+ } else {
+ assert(thread->is_ConcurrentGC_thread(), "Unexpected thread type");
+ }
+}
+
bool VM_GC_Operation::doit_prologue() {
assert(_gc_cause != GCCause::_no_gc, "Illegal GCCause");
@@ -110,8 +126,15 @@ bool VM_GC_Operation::doit_prologue() {
}
VM_Heap_Sync_Operation::doit_prologue();
+ _is_shutting_down = CollectedHeap::is_shutting_down();
+ if (_is_shutting_down) {
+ // Block forever if a Java thread is triggering a GC after
+ // the GC has started to shut down.
+ block_if_java_thread();
+ }
+
// Check invocations
- if (skip_operation() || Universe::is_shutting_down()) {
+ if (skip_operation() || _is_shutting_down) {
// skip collection
Heap_lock->unlock();
if (should_use_gclocker()) {
@@ -197,9 +220,8 @@ VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData
size_t size,
Metaspace::MetadataType mdtype,
uint gc_count_before,
- uint full_gc_count_before,
- GCCause::Cause gc_cause)
- : VM_GC_Collect_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
+ uint full_gc_count_before)
+ : VM_GC_Collect_Operation(gc_count_before, GCCause::_metadata_GC_threshold, full_gc_count_before, true),
_result(nullptr), _size(size), _mdtype(mdtype), _loader_data(loader_data) {
assert(_size != 0, "An allocation should always be requested with this operation.");
AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
@@ -208,8 +230,11 @@ VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData
void VM_CollectForMetadataAllocation::doit() {
SvcGCMarker sgcm(SvcGCMarker::FULL);
- CollectedHeap* heap = Universe::heap();
- GCCauseSetter gccs(heap, _gc_cause);
+ // Note: GCCauseSetter is intentionally not used here.
+ // The specific GC cause is set directly in downstream calls that initiate
+ // collections, allowing us to accurately reflect different situations:
+ // - A typical metadata allocation failure triggers a collection.
+ // - As a last resort, a collection clears soft references if prior attempts fail.
// Check again if the space is available. Another thread
// may have similarly failed a metadata allocation and induced
@@ -232,8 +257,10 @@ void VM_CollectForMetadataAllocation::doit() {
}
#endif
+ CollectedHeap* heap = Universe::heap();
+
// Don't clear the soft refs yet.
- heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
+ heap->collect_as_vm_thread(_gc_cause);
// After a GC try to allocate without expanding. Could fail
// and expansion will be tried below.
_result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
diff --git a/src/hotspot/share/gc/shared/gcVMOperations.hpp b/src/hotspot/share/gc/shared/gcVMOperations.hpp
index 5048bc3c1ed..a9aee2faf5d 100644
--- a/src/hotspot/share/gc/shared/gcVMOperations.hpp
+++ b/src/hotspot/share/gc/shared/gcVMOperations.hpp
@@ -110,23 +110,23 @@ class VM_GC_Operation: public VM_Heap_Sync_Operation {
uint _full_gc_count_before; // full gc count before acquiring the Heap_lock
bool _full; // whether a "full" collection
bool _prologue_succeeded; // whether doit_prologue succeeded
+ bool _is_shutting_down; // whether the operation found that the GC is shutting down
GCCause::Cause _gc_cause; // the putative cause for this gc op
virtual bool skip_operation() const;
public:
VM_GC_Operation(uint gc_count_before,
- GCCause::Cause _cause,
+ GCCause::Cause cause,
uint full_gc_count_before,
- bool full) : VM_Heap_Sync_Operation() {
- _full = full;
- _prologue_succeeded = false;
- _gc_count_before = gc_count_before;
-
- _gc_cause = _cause;
-
- _full_gc_count_before = full_gc_count_before;
- }
+ bool full)
+ : VM_Heap_Sync_Operation(),
+ _gc_count_before(gc_count_before),
+ _full_gc_count_before(full_gc_count_before),
+ _full(full),
+ _prologue_succeeded(false),
+ _is_shutting_down(false),
+ _gc_cause(cause) {}
virtual const char* cause() const;
@@ -139,6 +139,14 @@ class VM_GC_Operation: public VM_Heap_Sync_Operation {
virtual bool allow_nested_vm_operations() const { return true; }
virtual bool gc_succeeded() const { return _prologue_succeeded; }
+ // This function returns the value of CollectedHeap::is_shutting_down() that
+ // was recorded in the prologue. Unlike CollectedHeap::is_shutting_down(),
+ // this function can be called without acquiring the Heap_lock.
+ //
+ // This function exists so that code that tries to schedule a GC operation
+ // can check if it was refused because the JVM is about to shut down.
+ bool is_shutting_down() const { return _is_shutting_down; }
+
static void notify_gc_begin(bool full = false);
static void notify_gc_end();
};
@@ -214,8 +222,7 @@ class VM_CollectForMetadataAllocation: public VM_GC_Collect_Operation {
size_t size,
Metaspace::MetadataType mdtype,
uint gc_count_before,
- uint full_gc_count_before,
- GCCause::Cause gc_cause);
+ uint full_gc_count_before);
virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
virtual void doit();
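
block_if_java_thread() above parks a Java thread that requests a GC after shutdown has begun: waiting on Heap_lock both blocks the thread and releases the lock so the shutdown sequence can take it, while the prologue records the flag so callers can later query is_shutting_down() without the lock. The following is only a rough standalone model of that behavior; std::condition_variable stands in for Heap_lock's wait(), and the thread roles and names are illustrative.

#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex heap_lock;                 // stand-in for Heap_lock
std::condition_variable heap_cv;
bool is_shutting_down = false;        // written only under heap_lock
bool demo_release = false;            // only so this demo can terminate

// A "Java thread" requesting a GC after shutdown has started.
void gc_requesting_thread() {
  std::unique_lock<std::mutex> lock(heap_lock);  // the prologue takes Heap_lock
  if (is_shutting_down) {
    // Waiting releases heap_lock, so the shutdown sequence can acquire it.
    // The real code loops forever; this sketch waits until the demo ends.
    heap_cv.wait(lock, [] { return demo_release; });
  }
}

int main() {
  {
    std::lock_guard<std::mutex> guard(heap_lock);
    is_shutting_down = true;          // like CollectedHeap::initiate_shutdown()
  }
  std::thread t(gc_requesting_thread);
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
  {
    // The shutdown sequence can still take the lock while the requester waits.
    std::lock_guard<std::mutex> guard(heap_lock);
    std::cout << "shutdown proceeds while the GC requester is parked\n";
    demo_release = true;
  }
  heap_cv.notify_all();
  t.join();
  return 0;
}
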
diff --git a/src/hotspot/share/gc/shared/oopStorage.cpp b/src/hotspot/share/gc/shared/oopStorage.cpp
index d52efc13dac..a1cc3ffa553 100644
--- a/src/hotspot/share/gc/shared/oopStorage.cpp
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp
@@ -28,7 +28,7 @@
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "nmt/memTracker.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@@ -122,7 +122,7 @@ OopStorage::ActiveArray::ActiveArray(size_t size) :
{}
OopStorage::ActiveArray::~ActiveArray() {
- assert(_refcount == 0, "precondition");
+ assert(_refcount.load_relaxed() == 0, "precondition");
}
OopStorage::ActiveArray* OopStorage::ActiveArray::create(size_t size,
@@ -144,32 +144,32 @@ size_t OopStorage::ActiveArray::size() const {
}
size_t OopStorage::ActiveArray::block_count() const {
- return _block_count;
+ return _block_count.load_relaxed();
}
size_t OopStorage::ActiveArray::block_count_acquire() const {
- return AtomicAccess::load_acquire(&_block_count);
+ return _block_count.load_acquire();
}
void OopStorage::ActiveArray::increment_refcount() const {
- int new_value = AtomicAccess::add(&_refcount, 1);
- assert(new_value >= 1, "negative refcount %d", new_value - 1);
+ int old_value = _refcount.fetch_then_add(1);
+ assert(old_value >= 0, "negative refcount %d", old_value);
}
bool OopStorage::ActiveArray::decrement_refcount() const {
- int new_value = AtomicAccess::sub(&_refcount, 1);
+ int new_value = _refcount.sub_then_fetch(1);
assert(new_value >= 0, "negative refcount %d", new_value);
return new_value == 0;
}
bool OopStorage::ActiveArray::push(Block* block) {
- size_t index = _block_count;
+ size_t index = _block_count.load_relaxed();
if (index < _size) {
block->set_active_index(index);
*block_ptr(index) = block;
// Use a release_store to ensure all the setup is complete before
// making the block visible.
- AtomicAccess::release_store(&_block_count, index + 1);
+ _block_count.release_store(index + 1);
return true;
} else {
return false;
@@ -177,19 +177,19 @@ bool OopStorage::ActiveArray::push(Block* block) {
}
void OopStorage::ActiveArray::remove(Block* block) {
- assert(_block_count > 0, "array is empty");
+ assert(_block_count.load_relaxed() > 0, "array is empty");
size_t index = block->active_index();
assert(*block_ptr(index) == block, "block not present");
- size_t last_index = _block_count - 1;
+ size_t last_index = _block_count.load_relaxed() - 1;
Block* last_block = *block_ptr(last_index);
last_block->set_active_index(index);
*block_ptr(index) = last_block;
- _block_count = last_index;
+ _block_count.store_relaxed(last_index);
}
void OopStorage::ActiveArray::copy_from(const ActiveArray* from) {
- assert(_block_count == 0, "array must be empty");
- size_t count = from->_block_count;
+ assert(_block_count.load_relaxed() == 0, "array must be empty");
+ size_t count = from->_block_count.load_relaxed();
assert(count <= _size, "precondition");
Block* const* from_ptr = from->block_ptr(0);
Block** to_ptr = block_ptr(0);
@@ -198,7 +198,7 @@ void OopStorage::ActiveArray::copy_from(const ActiveArray* from) {
assert(block->active_index() == i, "invariant");
*to_ptr++ = block;
}
- _block_count = count;
+ _block_count.store_relaxed(count);
}
// Blocks start with an array of BitsPerWord oop entries. That array
@@ -230,14 +230,17 @@ OopStorage::Block::Block(const OopStorage* owner, void* memory) :
assert(is_aligned(this, block_alignment), "misaligned block");
}
+#ifdef ASSERT
OopStorage::Block::~Block() {
- assert(_release_refcount == 0, "deleting block while releasing");
- assert(_deferred_updates_next == nullptr, "deleting block with deferred update");
+ assert(_release_refcount.load_relaxed() == 0, "deleting block while releasing");
+ assert(_deferred_updates_next.load_relaxed() == nullptr, "deleting block with deferred update");
// Clear fields used by block_for_ptr and entry validation, which
- // might help catch bugs. Volatile to prevent dead-store elimination.
- const_cast<uintx volatile&>(_allocated_bitmask) = 0;
+ // might help catch bugs.
+ _allocated_bitmask.store_relaxed(0);
+ // Volatile to prevent dead-store elimination.
const_cast<intptr_t volatile&>(_owner_address) = 0;
}
+#endif // ASSERT
size_t OopStorage::Block::allocation_size() {
// _data must be first member, so aligning Block aligns _data.
@@ -272,16 +275,16 @@ uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
bool OopStorage::Block::is_safe_to_delete() const {
assert(is_empty(), "precondition");
OrderAccess::loadload();
- return (AtomicAccess::load_acquire(&_release_refcount) == 0) &&
- (AtomicAccess::load_acquire(&_deferred_updates_next) == nullptr);
+ return ((_release_refcount.load_acquire() == 0) &&
+ (_deferred_updates_next.load_acquire() == nullptr));
}
OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
- return _deferred_updates_next;
+ return _deferred_updates_next.load_relaxed();
}
void OopStorage::Block::set_deferred_updates_next(Block* block) {
- _deferred_updates_next = block;
+ _deferred_updates_next.store_relaxed(block);
}
bool OopStorage::Block::contains(const oop* ptr) const {
@@ -321,9 +324,8 @@ void OopStorage::Block::atomic_add_allocated(uintx add) {
// we can use an atomic add to implement the operation. The assert post
// facto verifies the precondition held; if there were any set bits in
// common, then after the add at least one of them will be zero.
- uintx sum = AtomicAccess::add(&_allocated_bitmask, add);
- assert((sum & add) == add, "some already present: %zu:%zu",
- sum, add);
+ uintx sum = _allocated_bitmask.add_then_fetch(add);
+ assert((sum & add) == add, "some already present: %zu:%zu", sum, add);
}
oop* OopStorage::Block::allocate() {
@@ -452,7 +454,7 @@ oop* OopStorage::allocate() {
oop* result = block->allocate();
assert(result != nullptr, "allocation failed");
assert(!block->is_empty(), "postcondition");
- AtomicAccess::inc(&_allocation_count); // release updates outside lock.
+ _allocation_count.add_then_fetch(1u); // release updates outside lock.
if (block->is_full()) {
// Transitioning from not full to full.
// Remove full blocks from consideration by future allocates.
@@ -490,7 +492,7 @@ size_t OopStorage::allocate(oop** ptrs, size_t size) {
assert(!is_empty_bitmask(taken), "invariant");
} // Drop lock, now that we've taken all available entries from block.
size_t num_taken = population_count(taken);
- AtomicAccess::add(&_allocation_count, num_taken);
+ _allocation_count.add_then_fetch(num_taken);
// Fill ptrs from those taken entries.
size_t limit = MIN2(num_taken, size);
for (size_t i = 0; i < limit; ++i) {
@@ -506,7 +508,7 @@ size_t OopStorage::allocate(oop** ptrs, size_t size) {
assert(size == limit, "invariant");
assert(num_taken == (limit + population_count(taken)), "invariant");
block->release_entries(taken, this);
- AtomicAccess::sub(&_allocation_count, num_taken - limit);
+ _allocation_count.sub_then_fetch(num_taken - limit);
}
log_trace(oopstorage, ref)("%s: bulk allocate %zu, returned %zu",
name(), limit, num_taken - limit);
@@ -527,9 +529,9 @@ bool OopStorage::try_add_block() {
if (block == nullptr) return false;
// Add new block to the _active_array, growing if needed.
- if (!_active_array->push(block)) {
+ if (!_active_array.load_relaxed()->push(block)) {
if (expand_active_array()) {
- guarantee(_active_array->push(block), "push failed after expansion");
+ guarantee(_active_array.load_relaxed()->push(block), "push failed after expansion");
} else {
log_debug(oopstorage, blocks)("%s: failed active array expand", name());
Block::delete_block(*block);
@@ -576,7 +578,7 @@ OopStorage::Block* OopStorage::block_for_allocation() {
// indicate allocation failure.
bool OopStorage::expand_active_array() {
assert_lock_strong(_allocation_mutex);
- ActiveArray* old_array = _active_array;
+ ActiveArray* old_array = _active_array.load_relaxed();
size_t new_size = 2 * old_array->size();
log_debug(oopstorage, blocks)("%s: expand active array %zu",
name(), new_size);
@@ -599,7 +601,7 @@ void OopStorage::replace_active_array(ActiveArray* new_array) {
// Update new_array refcount to account for the new reference.
new_array->increment_refcount();
// Install new_array, ensuring its initialization is complete first.
- AtomicAccess::release_store(&_active_array, new_array);
+ _active_array.release_store(new_array);
// Wait for any readers that could read the old array from _active_array.
// Can't use GlobalCounter here, because this is called from allocate(),
// which may be called in the scope of a GlobalCounter critical section
@@ -617,7 +619,7 @@ void OopStorage::replace_active_array(ActiveArray* new_array) {
// using it.
OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
SingleWriterSynchronizer::CriticalSection cs(&_protect_active);
- ActiveArray* result = AtomicAccess::load_acquire(&_active_array);
+ ActiveArray* result = _active_array.load_acquire();
result->increment_refcount();
return result;
}
@@ -625,7 +627,7 @@ OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
// Decrement refcount of array and destroy if refcount is zero.
void OopStorage::relinquish_block_array(ActiveArray* array) const {
if (array->decrement_refcount()) {
- assert(array != _active_array, "invariant");
+ assert(array != _active_array.load_relaxed(), "invariant");
ActiveArray::destroy(array);
}
}
@@ -672,14 +674,14 @@ static void log_release_transitions(uintx releasing,
void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
assert(releasing != 0, "preconditon");
// Prevent empty block deletion when transitioning to empty.
- AtomicAccess::inc(&_release_refcount);
+ _release_refcount.add_then_fetch(1u);
// Atomically update allocated bitmask.
- uintx old_allocated = _allocated_bitmask;
+ uintx old_allocated = _allocated_bitmask.load_relaxed();
while (true) {
assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
uintx new_value = old_allocated ^ releasing;
- uintx fetched = AtomicAccess::cmpxchg(&_allocated_bitmask, old_allocated, new_value);
+ uintx fetched = _allocated_bitmask.compare_exchange(old_allocated, new_value);
if (fetched == old_allocated) break; // Successful update.
old_allocated = fetched; // Retry with updated bitmask.
}
@@ -698,12 +700,12 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
// then someone else has made such a claim and the deferred update has not
// yet been processed and will include our change, so we don't need to do
// anything further.
- if (AtomicAccess::replace_if_null(&_deferred_updates_next, this)) {
+ if (_deferred_updates_next.compare_exchange(nullptr, this) == nullptr) {
// Successfully claimed. Push, with self-loop for end-of-list.
- Block* head = owner->_deferred_updates;
+ Block* head = owner->_deferred_updates.load_relaxed();
while (true) {
- _deferred_updates_next = (head == nullptr) ? this : head;
- Block* fetched = AtomicAccess::cmpxchg(&owner->_deferred_updates, head, this);
+ _deferred_updates_next.store_relaxed((head == nullptr) ? this : head);
+ Block* fetched = owner->_deferred_updates.compare_exchange(head, this);
if (fetched == head) break; // Successful update.
head = fetched; // Retry with updated head.
}
@@ -720,7 +722,7 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
}
}
// Release hold on empty block deletion.
- AtomicAccess::dec(&_release_refcount);
+ _release_refcount.sub_then_fetch(1u);
}
// Process one available deferred update. Returns true if one was processed.
@@ -729,13 +731,13 @@ bool OopStorage::reduce_deferred_updates() {
// Atomically pop a block off the list, if any available.
// No ABA issue because this is only called by one thread at a time.
// The atomicity is wrto pushes by release().
- Block* block = AtomicAccess::load_acquire(&_deferred_updates);
+ Block* block = _deferred_updates.load_acquire();
while (true) {
if (block == nullptr) return false;
// Try atomic pop of block from list.
Block* tail = block->deferred_updates_next();
if (block == tail) tail = nullptr; // Handle self-loop end marker.
- Block* fetched = AtomicAccess::cmpxchg(&_deferred_updates, block, tail);
+ Block* fetched = _deferred_updates.compare_exchange(block, tail);
if (fetched == block) break; // Update successful.
block = fetched; // Retry with updated block.
}
@@ -780,7 +782,7 @@ void OopStorage::release(const oop* ptr) {
assert(block != nullptr, "%s: invalid release " PTR_FORMAT, name(), p2i(ptr));
log_trace(oopstorage, ref)("%s: releasing " PTR_FORMAT, name(), p2i(ptr));
block->release_entries(block->bitmask_for_entry(ptr), this);
- AtomicAccess::dec(&_allocation_count);
+ _allocation_count.sub_then_fetch(1u);
}
void OopStorage::release(const oop* const* ptrs, size_t size) {
@@ -806,7 +808,7 @@ void OopStorage::release(const oop* const* ptrs, size_t size) {
}
// Release the contiguous entries that are in block.
block->release_entries(releasing, this);
- AtomicAccess::sub(&_allocation_count, count);
+ _allocation_count.sub_then_fetch(count);
}
}
@@ -837,7 +839,7 @@ OopStorage::OopStorage(const char* name, MemTag mem_tag) :
_mem_tag(mem_tag),
_needs_cleanup(false)
{
- _active_array->increment_refcount();
+ _active_array.load_relaxed()->increment_refcount();
assert(_active_mutex->rank() < _allocation_mutex->rank(),
"%s: active_mutex must have lower rank than allocation_mutex", _name);
assert(Service_lock->rank() < _active_mutex->rank(),
@@ -852,20 +854,21 @@ void OopStorage::delete_empty_block(const Block& block) {
OopStorage::~OopStorage() {
Block* block;
- while ((block = _deferred_updates) != nullptr) {
- _deferred_updates = block->deferred_updates_next();
+ while ((block = _deferred_updates.load_relaxed()) != nullptr) {
+ _deferred_updates.store_relaxed(block->deferred_updates_next());
block->set_deferred_updates_next(nullptr);
}
while ((block = _allocation_list.head()) != nullptr) {
_allocation_list.unlink(*block);
}
- bool unreferenced = _active_array->decrement_refcount();
+ ActiveArray* array = _active_array.load_relaxed();
+ bool unreferenced = array->decrement_refcount();
assert(unreferenced, "deleting storage while _active_array is referenced");
- for (size_t i = _active_array->block_count(); 0 < i; ) {
- block = _active_array->at(--i);
+ for (size_t i = array->block_count(); 0 < i; ) {
+ block = array->at(--i);
Block::delete_block(*block);
}
- ActiveArray::destroy(_active_array);
+ ActiveArray::destroy(array);
os::free(const_cast<char*>(_name));
}
@@ -894,7 +897,7 @@ bool OopStorage::should_report_num_dead() const {
// face of frequent explicit ServiceThread wakeups, hence the defer period.
// Global cleanup request state.
-static volatile bool needs_cleanup_requested = false;
+static Atomic<bool> needs_cleanup_requested{false};
// Time after which a cleanup is permitted.
static jlong cleanup_permit_time = 0;
@@ -906,12 +909,11 @@ const jlong cleanup_defer_period = 500 * NANOSECS_PER_MILLISEC;
bool OopStorage::has_cleanup_work_and_reset() {
assert_lock_strong(Service_lock);
- if (AtomicAccess::load_acquire(&needs_cleanup_requested) &&
- os::javaTimeNanos() > cleanup_permit_time) {
- cleanup_permit_time =
- os::javaTimeNanos() + cleanup_defer_period;
+ if (needs_cleanup_requested.load_acquire() &&
+ (os::javaTimeNanos() > cleanup_permit_time)) {
+ cleanup_permit_time = os::javaTimeNanos() + cleanup_defer_period;
// Set the request flag false and return its old value.
- AtomicAccess::release_store(&needs_cleanup_requested, false);
+ needs_cleanup_requested.release_store(false);
return true;
} else {
return false;
@@ -923,22 +925,22 @@ bool OopStorage::has_cleanup_work_and_reset() {
void OopStorage::record_needs_cleanup() {
// Set local flag first, else ServiceThread could wake up and miss
// the request.
- AtomicAccess::release_store(&_needs_cleanup, true);
- AtomicAccess::release_store_fence(&needs_cleanup_requested, true);
+ _needs_cleanup.release_store(true);
+ needs_cleanup_requested.release_store_fence(true);
}
bool OopStorage::delete_empty_blocks() {
// ServiceThread might have oopstorage work, but not for this object.
// But check for deferred updates, which might provide cleanup work.
- if (!AtomicAccess::load_acquire(&_needs_cleanup) &&
- (AtomicAccess::load_acquire(&_deferred_updates) == nullptr)) {
+ if (!_needs_cleanup.load_acquire() &&
+ (_deferred_updates.load_acquire() == nullptr)) {
return false;
}
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Clear the request before processing.
- AtomicAccess::release_store_fence(&_needs_cleanup, false);
+ _needs_cleanup.release_store_fence(false);
// Other threads could be adding to the empty block count or the
// deferred update list while we're working. Set an upper bound on
@@ -977,7 +979,7 @@ bool OopStorage::delete_empty_blocks() {
// but don't re-notify, to avoid useless spinning of the
// ServiceThread. Instead, iteration completion notifies.
if (_concurrent_iteration_count > 0) return true;
- _active_array->remove(block);
+ _active_array.load_relaxed()->remove(block);
}
// Remove block from _allocation_list and delete it.
_allocation_list.unlink(*block);
@@ -1001,8 +1003,9 @@ OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Block could be a false positive, so get index carefully.
size_t index = Block::active_index_safe(block);
- if ((index < _active_array->block_count()) &&
- (block == _active_array->at(index)) &&
+ ActiveArray* array = _active_array.load_relaxed();
+ if ((index < array->block_count()) &&
+ (block == array->at(index)) &&
block->contains(ptr)) {
if ((block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0) {
return ALLOCATED_ENTRY;
@@ -1015,7 +1018,7 @@ OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
}
size_t OopStorage::allocation_count() const {
- return _allocation_count;
+ return _allocation_count.load_relaxed();
}
size_t OopStorage::block_count() const {
@@ -1084,7 +1087,7 @@ void OopStorage::BasicParState::update_concurrent_iteration_count(int value) {
bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
data->_processed += data->_segment_end - data->_segment_start;
- size_t start = AtomicAccess::load_acquire(&_next_block);
+ size_t start = _next_block.load_acquire();
if (start >= _block_count) {
return finish_iteration(data); // No more blocks available.
}
@@ -1097,11 +1100,11 @@ bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
size_t max_step = 10;
size_t remaining = _block_count - start;
size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
- // AtomicAccess::add with possible overshoot. This can perform better
+ // Atomic add with possible overshoot. This can perform better
// than a CAS loop on some platforms when there is contention.
// We can cope with the uncertainty by recomputing start/end from
// the result of the add, and dealing with potential overshoot.
- size_t end = AtomicAccess::add(&_next_block, step);
+ size_t end = _next_block.add_then_fetch(step);
// _next_block may have changed, so recompute start from result of add.
start = end - step;
// _next_block may have changed so much that end has overshot.
@@ -1128,15 +1131,15 @@ bool OopStorage::BasicParState::finish_iteration(const IterationData* data) cons
}
size_t OopStorage::BasicParState::num_dead() const {
- return AtomicAccess::load(&_num_dead);
+ return _num_dead.load_relaxed();
}
void OopStorage::BasicParState::increment_num_dead(size_t num_dead) {
- AtomicAccess::add(&_num_dead, num_dead);
+ _num_dead.add_then_fetch(num_dead);
}
void OopStorage::BasicParState::report_num_dead() const {
- _storage->report_num_dead(AtomicAccess::load(&_num_dead));
+ _storage->report_num_dead(_num_dead.load_relaxed());
}
const char* OopStorage::name() const { return _name; }
@@ -1164,8 +1167,8 @@ bool OopStorage::Block::print_containing(const oop* addr, outputStream* st) {
#ifndef PRODUCT
void OopStorage::print_on(outputStream* st) const {
- size_t allocations = _allocation_count;
- size_t blocks = _active_array->block_count();
+ size_t allocations = _allocation_count.load_relaxed();
+ size_t blocks = _active_array.load_relaxed()->block_count();
double data_size = section_size * section_count;
double alloc_percentage = percent_of((double)allocations, blocks * data_size);
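
The deferred-update handling above uses a compare_exchange that returns the previously observed value and retries with it, together with a self-loop that marks the end of the list. A minimal sketch of that push idiom follows, adapted to std::atomic (which reports success as a bool and updates the expected value in place); Node and DeferredList are illustrative types, not the OopStorage classes.

#include <atomic>
#include <cassert>

struct Node {
  std::atomic<Node*> next{nullptr};
};

struct DeferredList {
  std::atomic<Node*> head{nullptr};

  void push(Node* node) {
    Node* observed = head.load(std::memory_order_relaxed);
    while (true) {
      // A self-loop marks end-of-list, mirroring the convention above.
      node->next.store(observed == nullptr ? node : observed,
                       std::memory_order_relaxed);
      if (head.compare_exchange_weak(observed, node,
                                     std::memory_order_release,
                                     std::memory_order_relaxed)) {
        break;                        // successful update
      }
      // 'observed' now holds the current head; retry with it.
    }
  }
};

int main() {
  DeferredList list;
  Node a, b;
  list.push(&a);
  assert(a.next.load() == &a);        // self-loop end marker
  list.push(&b);
  assert(list.head.load() == &b && b.next.load() == &a);
  return 0;
}
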
diff --git a/src/hotspot/share/gc/shared/oopStorage.hpp b/src/hotspot/share/gc/shared/oopStorage.hpp
index 34c980a0586..6097eeaa4f4 100644
--- a/src/hotspot/share/gc/shared/oopStorage.hpp
+++ b/src/hotspot/share/gc/shared/oopStorage.hpp
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/singleWriterSynchronizer.hpp"
@@ -258,15 +259,15 @@ private:
private:
const char* _name;
- ActiveArray* _active_array;
+ Atomic<ActiveArray*> _active_array;
AllocationList _allocation_list;
- Block* volatile _deferred_updates;
+ Atomic<Block*> _deferred_updates;
Mutex* _allocation_mutex;
Mutex* _active_mutex;
NumDeadCallback _num_dead_callback;
- // Volatile for racy unlocked accesses.
- volatile size_t _allocation_count;
+ // Atomic for racy unlocked accesses.
+ Atomic<size_t> _allocation_count;
// Protection for _active_array.
mutable SingleWriterSynchronizer _protect_active;
@@ -278,7 +279,7 @@ private:
MemTag _mem_tag;
// Flag indicating this storage object is a candidate for empty block deletion.
- volatile bool _needs_cleanup;
+ Atomic<bool> _needs_cleanup;
// Clients construct via "create" factory function.
OopStorage(const char* name, MemTag mem_tag);
diff --git a/src/hotspot/share/gc/shared/oopStorage.inline.hpp b/src/hotspot/share/gc/shared/oopStorage.inline.hpp
index 4fb1d8fcaf1..c2747781a6b 100644
--- a/src/hotspot/share/gc/shared/oopStorage.inline.hpp
+++ b/src/hotspot/share/gc/shared/oopStorage.inline.hpp
@@ -30,6 +30,7 @@
#include "cppstdlib/type_traits.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/count_trailing_zeros.hpp"
@@ -42,8 +43,8 @@ class OopStorage::ActiveArray {
friend class OopStorage::TestAccess;
size_t _size;
- volatile size_t _block_count;
- mutable volatile int _refcount;
+ Atomic<size_t> _block_count;
+ mutable Atomic<int> _refcount;
// Block* _blocks[1]; // Pseudo flexible array member.
ActiveArray(size_t size);
@@ -104,7 +105,7 @@ inline OopStorage::Block** OopStorage::ActiveArray::block_ptr(size_t index) {
}
inline OopStorage::Block* OopStorage::ActiveArray::at(size_t index) const {
- assert(index < _block_count, "precondition");
+ assert(index < _block_count.load_relaxed(), "precondition");
return *block_ptr(index);
}
@@ -135,16 +136,16 @@ class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
oop _data[BitsPerWord];
static const unsigned _data_pos = 0; // Position of _data.
- volatile uintx _allocated_bitmask; // One bit per _data element.
+ Atomic<uintx> _allocated_bitmask; // One bit per _data element.
intptr_t _owner_address;
void* _memory; // Unaligned storage containing block.
size_t _active_index;
AllocationListEntry _allocation_list_entry;
- Block* volatile _deferred_updates_next;
- volatile uintx _release_refcount;
+ Atomic<Block*> _deferred_updates_next;
+ Atomic<uintx> _release_refcount;
Block(const OopStorage* owner, void* memory);
- ~Block();
+ ~Block() NOT_DEBUG(= default);
void check_index(unsigned index) const;
unsigned get_index(const oop* ptr) const;
@@ -322,7 +323,7 @@ inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
}
inline uintx OopStorage::Block::allocated_bitmask() const {
- return _allocated_bitmask;
+ return _allocated_bitmask.load_relaxed();
}
inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const {
@@ -366,7 +367,7 @@ inline bool OopStorage::iterate_impl(F f, Storage* storage) {
// Propagate const/non-const iteration to the block layer, by using
// const or non-const blocks as corresponding to Storage.
using BlockPtr = std::conditional_t<std::is_const<Storage>::value, const Block*, Block*>;
- ActiveArray* blocks = storage->_active_array;
+ ActiveArray* blocks = storage->_active_array.load_relaxed();
size_t limit = blocks->block_count();
for (size_t i = 0; i < limit; ++i) {
BlockPtr block = blocks->at(i);
diff --git a/src/hotspot/share/gc/shared/oopStorageParState.hpp b/src/hotspot/share/gc/shared/oopStorageParState.hpp
index 046bf9de8c2..cad1a1f0cf6 100644
--- a/src/hotspot/share/gc/shared/oopStorageParState.hpp
+++ b/src/hotspot/share/gc/shared/oopStorageParState.hpp
@@ -27,6 +27,7 @@
#include "cppstdlib/type_traits.hpp"
#include "gc/shared/oopStorage.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
//////////////////////////////////////////////////////////////////////////////
@@ -131,10 +132,10 @@ class OopStorage::BasicParState {
const OopStorage* _storage;
ActiveArray* _active_array;
size_t _block_count;
- volatile size_t _next_block;
+ Atomic<size_t> _next_block;
uint _estimated_thread_count;
bool _concurrent;
- volatile size_t _num_dead;
+ Atomic<size_t> _num_dead;
NONCOPYABLE(BasicParState);
diff --git a/src/hotspot/share/gc/shared/partialArrayState.cpp b/src/hotspot/share/gc/shared/partialArrayState.cpp
index f913f3db4ba..39c1fe4fc78 100644
--- a/src/hotspot/share/gc/shared/partialArrayState.cpp
+++ b/src/hotspot/share/gc/shared/partialArrayState.cpp
@@ -22,6 +22,7 @@
*
*/
+#include "cppstdlib/new.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
@@ -33,8 +34,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
-#include <new>
-
PartialArrayState::PartialArrayState(oop src, oop dst,
size_t index, size_t length,
size_t initial_refcount)
diff --git a/src/hotspot/share/gc/shared/taskqueue.hpp b/src/hotspot/share/gc/shared/taskqueue.hpp
index 1c36e18894a..3a751852ab6 100644
--- a/src/hotspot/share/gc/shared/taskqueue.hpp
+++ b/src/hotspot/share/gc/shared/taskqueue.hpp
@@ -25,13 +25,16 @@
#ifndef SHARE_GC_SHARED_TASKQUEUE_HPP
#define SHARE_GC_SHARED_TASKQUEUE_HPP
+#include "cppstdlib/type_traits.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.hpp"
+#include "metaprogramming/primitiveConversions.hpp"
#include "oops/oopsHierarchy.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
+#include "utilities/powerOfTwo.hpp"
#include "utilities/stack.hpp"
#if TASKQUEUE_STATS
@@ -100,76 +103,92 @@ void TaskQueueStats::reset() {
}
#endif // TASKQUEUE_STATS
+// Helper for TaskQueueSuper, encoding a {queue index, tag} pair in a form that
+// supports atomic access to the pair.
+class TaskQueueAge {
+ friend struct PrimitiveConversions::Translate<TaskQueueAge>;
+
+public:
+ // Internal type used for indexing the queue, and for the tag.
+ using idx_t = NOT_LP64(uint16_t) LP64_ONLY(uint32_t);
+
+ explicit TaskQueueAge(size_t data = 0) : _data{data} {}
+ TaskQueueAge(idx_t top, idx_t tag) : _fields{top, tag} {}
+
+ idx_t top() const { return _fields._top; }
+ idx_t tag() const { return _fields._tag; }
+
+ bool operator==(const TaskQueueAge& other) const { return _data == other._data; }
+
+private:
+ struct Fields {
+ idx_t _top;
+ idx_t _tag;
+ };
+ union {
+ size_t _data; // Provides access to _fields as a single integral value.
+ Fields _fields;
+ };
+ // _data must be able to hold the combined _fields. The sizes must be equal to
+ // ensure there is no padding that the 2-arg ctor could leave uninitialized.
+ static_assert(sizeof(_data) == sizeof(_fields));
+};
+
+// Support for Atomic.
+template<>
+struct PrimitiveConversions::Translate<TaskQueueAge> : public std::true_type {
+ using Value = TaskQueueAge;
+ using Decayed = decltype(TaskQueueAge::_data);
+
+ static Decayed decay(Value x) { return x._data; }
+ static Value recover(Decayed x) { return Value(x); }
+};
+
// TaskQueueSuper collects functionality common to all GenericTaskQueue instances.
template <unsigned int N, MemTag MT>
class TaskQueueSuper: public CHeapObj<MT> {
protected:
- // Internal type for indexing the queue; also used for the tag.
- typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;
- STATIC_ASSERT(N == idx_t(N)); // Ensure N fits in an idx_t.
+ using Age = TaskQueueAge;
+ using idx_t = Age::idx_t;
+ static_assert(N == idx_t(N)); // Ensure N fits in an idx_t.
// N must be a power of 2 for computing modulo via masking.
// N must be >= 2 for the algorithm to work at all, though larger is better.
- STATIC_ASSERT(N >= 2);
- STATIC_ASSERT(is_power_of_2(N));
+ static_assert(N >= 2);
+ static_assert(is_power_of_2(N));
static const uint MOD_N_MASK = N - 1;
- class Age {
- friend class TaskQueueSuper;
-
- public:
- explicit Age(size_t data = 0) : _data(data) {}
- Age(idx_t top, idx_t tag) { _fields._top = top; _fields._tag = tag; }
-
- idx_t top() const { return _fields._top; }
- idx_t tag() const { return _fields._tag; }
-
- bool operator ==(const Age& other) const { return _data == other._data; }
-
- private:
- struct fields {
- idx_t _top;
- idx_t _tag;
- };
- union {
- size_t _data;
- fields _fields;
- };
- STATIC_ASSERT(sizeof(size_t) >= sizeof(fields));
- };
-
uint bottom_relaxed() const {
- return AtomicAccess::load(&_bottom);
+ return _bottom.load_relaxed();
}
uint bottom_acquire() const {
- return AtomicAccess::load_acquire(&_bottom);
+ return _bottom.load_acquire();
}
void set_bottom_relaxed(uint new_bottom) {
- AtomicAccess::store(&_bottom, new_bottom);
+ _bottom.store_relaxed(new_bottom);
}
void release_set_bottom(uint new_bottom) {
- AtomicAccess::release_store(&_bottom, new_bottom);
+ _bottom.release_store(new_bottom);
}
Age age_relaxed() const {
- return Age(AtomicAccess::load(&_age._data));
+ return _age.load_relaxed();
}
void set_age_relaxed(Age new_age) {
- AtomicAccess::store(&_age._data, new_age._data);
+ _age.store_relaxed(new_age);
}
Age cmpxchg_age(Age old_age, Age new_age) {
- return Age(AtomicAccess::cmpxchg(&_age._data, old_age._data, new_age._data));
+ return _age.compare_exchange(old_age, new_age);
}
idx_t age_top_relaxed() const {
- // Atomically accessing a subfield of an "atomic" member.
- return AtomicAccess::load(&_age._fields._top);
+ return _age.load_relaxed().top();
}
// These both operate mod N.
@@ -222,16 +241,16 @@ private:
DEFINE_PAD_MINUS_SIZE(0, DEFAULT_PADDING_SIZE, 0);
// Index of the first free element after the last one pushed (mod N).
- volatile uint _bottom;
- DEFINE_PAD_MINUS_SIZE(1, DEFAULT_PADDING_SIZE, sizeof(uint));
+ Atomic<uint> _bottom;
+ DEFINE_PAD_MINUS_SIZE(1, DEFAULT_PADDING_SIZE, sizeof(_bottom));
// top() is the index of the oldest pushed element (mod N), and tag()
// is the associated epoch, to distinguish different modifications of
// the age. There is no available element if top() == _bottom or
// (_bottom - top()) mod N == N-1; the latter indicates underflow
// during concurrent pop_local/pop_global.
- volatile Age _age;
- DEFINE_PAD_MINUS_SIZE(2, DEFAULT_PADDING_SIZE, sizeof(Age));
+ Atomic<Age> _age;
+ DEFINE_PAD_MINUS_SIZE(2, DEFAULT_PADDING_SIZE, sizeof(_age));
NONCOPYABLE(TaskQueueSuper);
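
TaskQueueAge packs the {top, tag} pair into a single word so the pair can be loaded, stored and compare-exchanged atomically, and the PrimitiveConversions::Translate specialization is what lets Atomic<TaskQueueAge> treat it as one integral value. The sketch below shows the same packing in isolation, using std::atomic<uint64_t> directly; PackedAge is an illustrative name, and the union type-punning simply mirrors the original layout.

#include <atomic>
#include <cassert>
#include <cstdint>

struct PackedAge {
  using idx_t = uint32_t;
  struct Fields { idx_t top; idx_t tag; };
  union {
    uint64_t data;   // single integral view, suitable for atomic operations
    Fields   fields; // reading the other union member mirrors the original
  };
  explicit PackedAge(uint64_t d = 0) : data(d) {}
  PackedAge(idx_t top, idx_t tag) : fields{top, tag} {}
  bool operator==(const PackedAge& o) const { return data == o.data; }
};
// Equal sizes guarantee the two-argument constructor leaves no padding.
static_assert(sizeof(uint64_t) == sizeof(PackedAge::Fields), "no padding");

int main() {
  std::atomic<uint64_t> age{PackedAge(/*top=*/3, /*tag=*/7).data};

  // CAS the whole pair in one shot: bump top and tag together.
  PackedAge expected(age.load());
  PackedAge desired(expected.fields.top + 1, expected.fields.tag + 1);
  uint64_t expected_bits = expected.data;
  bool ok = age.compare_exchange_strong(expected_bits, desired.data);
  assert(ok);
  assert(PackedAge(age.load()).fields.top == 4);
  assert(PackedAge(age.load()).fields.tag == 8);
  return 0;
}
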
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp b/src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp
index 78ae78f4c24..05ecfb254a2 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp
@@ -31,15 +31,37 @@
class ShenandoahAllocRequest : StackObj {
public:
- enum Type {
- _alloc_shared, // Allocate common, outside of TLAB
- _alloc_shared_gc, // Allocate common, outside of GCLAB/PLAB
- _alloc_cds, // Allocate for CDS
- _alloc_tlab, // Allocate TLAB
- _alloc_gclab, // Allocate GCLAB
- _alloc_plab, // Allocate PLAB
- _ALLOC_LIMIT
- };
+ // Alloc type is an int value whose bits are encoded as follows:
+ // [x|xx|xx|xx]
+ // ^---- Requester:
+ // 00 -- mutator
+ // 10 -- mutator (CDS)
+ // 01 -- GC
+ // ^------- Purpose:
+ // 00 -- shared
+ // 01 -- TLAB/GCLAB
+ // 11 -- PLAB
+ // ^---------- Affiliation:
+ // 00 -- YOUNG
+ // 01 -- OLD
+ // 11 -- OLD, promotion
+ typedef int Type;
+
+ static constexpr int bit_gc_alloc = 1 << 0;
+ static constexpr int bit_cds_alloc = 1 << 1;
+ static constexpr int bit_lab_alloc = 1 << 2;
+ static constexpr int bit_plab_alloc = 1 << 3;
+ static constexpr int bit_old_alloc = 1 << 4;
+ static constexpr int bit_promotion_alloc = 1 << 5;
+
+ static constexpr Type _alloc_shared = 0;
+ static constexpr Type _alloc_tlab = bit_lab_alloc;
+ static constexpr Type _alloc_cds = bit_cds_alloc;
+ static constexpr Type _alloc_shared_gc = bit_gc_alloc;
+ static constexpr Type _alloc_shared_gc_old = bit_gc_alloc | bit_old_alloc;
+ static constexpr Type _alloc_shared_gc_promotion = bit_gc_alloc | bit_old_alloc | bit_promotion_alloc;
+ static constexpr Type _alloc_gclab = bit_gc_alloc | bit_lab_alloc;
+ static constexpr Type _alloc_plab = bit_gc_alloc | bit_lab_alloc | bit_plab_alloc | bit_old_alloc;
static const char* alloc_type_to_string(Type type) {
switch (type) {
@@ -47,6 +69,10 @@ public:
return "Shared";
case _alloc_shared_gc:
return "Shared GC";
+ case _alloc_shared_gc_old:
+ return "Shared GC Old";
+ case _alloc_shared_gc_promotion:
+ return "Shared GC Promotion";
case _alloc_cds:
return "CDS";
case _alloc_tlab:
@@ -80,20 +106,14 @@ private:
// This is the type of the request.
Type _alloc_type;
- // This is the generation which the request is targeting.
- ShenandoahAffiliation const _affiliation;
-
- // True if this request is trying to copy any object from young to old (promote).
- bool _is_promotion;
-
#ifdef ASSERT
// Check that this is set before being read.
bool _actual_size_set;
#endif
- ShenandoahAllocRequest(size_t _min_size, size_t _requested_size, Type _alloc_type, ShenandoahAffiliation affiliation, bool is_promotion = false) :
+ ShenandoahAllocRequest(size_t _min_size, size_t _requested_size, Type _alloc_type) :
_min_size(_min_size), _requested_size(_requested_size),
- _actual_size(0), _waste(0), _alloc_type(_alloc_type), _affiliation(affiliation), _is_promotion(is_promotion)
+ _actual_size(0), _waste(0), _alloc_type(_alloc_type)
#ifdef ASSERT
, _actual_size_set(false)
#endif
@@ -101,31 +121,34 @@ private:
public:
static inline ShenandoahAllocRequest for_tlab(size_t min_size, size_t requested_size) {
- return ShenandoahAllocRequest(min_size, requested_size, _alloc_tlab, ShenandoahAffiliation::YOUNG_GENERATION);
+ return ShenandoahAllocRequest(min_size, requested_size, _alloc_tlab);
}
static inline ShenandoahAllocRequest for_gclab(size_t min_size, size_t requested_size) {
- return ShenandoahAllocRequest(min_size, requested_size, _alloc_gclab, ShenandoahAffiliation::YOUNG_GENERATION);
+ return ShenandoahAllocRequest(min_size, requested_size, _alloc_gclab);
}
static inline ShenandoahAllocRequest for_plab(size_t min_size, size_t requested_size) {
- return ShenandoahAllocRequest(min_size, requested_size, _alloc_plab, ShenandoahAffiliation::OLD_GENERATION);
+ return ShenandoahAllocRequest(min_size, requested_size, _alloc_plab);
}
static inline ShenandoahAllocRequest for_shared_gc(size_t requested_size, ShenandoahAffiliation affiliation, bool is_promotion = false) {
if (is_promotion) {
- assert(affiliation == ShenandoahAffiliation::OLD_GENERATION, "Should only promote to old generation");
- return ShenandoahAllocRequest(0, requested_size, _alloc_shared_gc, affiliation, true);
+ assert(affiliation == OLD_GENERATION, "Should only promote to old generation");
+ return ShenandoahAllocRequest(0, requested_size, _alloc_shared_gc_promotion);
}
- return ShenandoahAllocRequest(0, requested_size, _alloc_shared_gc, affiliation);
+ if (affiliation == OLD_GENERATION) {
+ return ShenandoahAllocRequest(0, requested_size, _alloc_shared_gc_old);
+ }
+ return ShenandoahAllocRequest(0, requested_size, _alloc_shared_gc);
}
static inline ShenandoahAllocRequest for_shared(size_t requested_size) {
- return ShenandoahAllocRequest(0, requested_size, _alloc_shared, ShenandoahAffiliation::YOUNG_GENERATION);
+ return ShenandoahAllocRequest(0, requested_size, _alloc_shared);
}
static inline ShenandoahAllocRequest for_cds(size_t requested_size) {
- return ShenandoahAllocRequest(0, requested_size, _alloc_cds, ShenandoahAffiliation::YOUNG_GENERATION);
+ return ShenandoahAllocRequest(0, requested_size, _alloc_cds);
}
inline size_t size() const {
@@ -167,71 +190,35 @@ public:
}
inline bool is_mutator_alloc() const {
- switch (_alloc_type) {
- case _alloc_tlab:
- case _alloc_shared:
- case _alloc_cds:
- return true;
- case _alloc_gclab:
- case _alloc_plab:
- case _alloc_shared_gc:
- return false;
- default:
- ShouldNotReachHere();
- return false;
- }
+ return (_alloc_type & bit_gc_alloc) == 0;
}
inline bool is_gc_alloc() const {
- switch (_alloc_type) {
- case _alloc_tlab:
- case _alloc_shared:
- case _alloc_cds:
- return false;
- case _alloc_gclab:
- case _alloc_plab:
- case _alloc_shared_gc:
- return true;
- default:
- ShouldNotReachHere();
- return false;
- }
+ return (_alloc_type & bit_gc_alloc) != 0;
}
inline bool is_lab_alloc() const {
- switch (_alloc_type) {
- case _alloc_tlab:
- case _alloc_gclab:
- case _alloc_plab:
- return true;
- case _alloc_shared:
- case _alloc_shared_gc:
- case _alloc_cds:
- return false;
- default:
- ShouldNotReachHere();
- return false;
- }
+ return (_alloc_type & bit_lab_alloc) != 0;
}
- bool is_old() const {
- return _affiliation == OLD_GENERATION;
+ inline bool is_old() const {
+ return (_alloc_type & bit_old_alloc) != 0;
}
- bool is_young() const {
- return _affiliation == YOUNG_GENERATION;
+ inline bool is_young() const {
+ return (_alloc_type & bit_old_alloc) == 0;
}
- ShenandoahAffiliation affiliation() const {
- return _affiliation;
+ inline ShenandoahAffiliation affiliation() const {
+ return (_alloc_type & bit_old_alloc) == 0 ? YOUNG_GENERATION : OLD_GENERATION;
}
const char* affiliation_name() const {
- return shenandoah_affiliation_name(_affiliation);
+ return shenandoah_affiliation_name(affiliation());
}
- bool is_promotion() const {
- return _is_promotion;
+ inline bool is_promotion() const {
+ return (_alloc_type & bit_promotion_alloc) != 0;
}
};
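
The bit-encoded Type above replaces the old enum plus the separate affiliation and promotion fields. In the sketch below the bit constants are copied from the hunk (with the leading underscore dropped from the Type names for this standalone example), and a few compile-time checks show how, for example, the PLAB type decodes as a GC-side, LAB, old-generation allocation; the free-function decode helpers are illustrative stand-ins for the member predicates.

#include <cstdio>

namespace alloc_bits {   // illustrative namespace; constants copied from above

using Type = int;

constexpr int bit_gc_alloc        = 1 << 0;
constexpr int bit_cds_alloc       = 1 << 1;
constexpr int bit_lab_alloc       = 1 << 2;
constexpr int bit_plab_alloc      = 1 << 3;
constexpr int bit_old_alloc       = 1 << 4;
constexpr int bit_promotion_alloc = 1 << 5;

constexpr Type alloc_shared              = 0;
constexpr Type alloc_tlab                = bit_lab_alloc;
constexpr Type alloc_gclab               = bit_gc_alloc | bit_lab_alloc;
constexpr Type alloc_plab                = bit_gc_alloc | bit_lab_alloc | bit_plab_alloc | bit_old_alloc;
constexpr Type alloc_shared_gc_promotion = bit_gc_alloc | bit_old_alloc | bit_promotion_alloc;

constexpr bool is_gc_alloc(Type t)  { return (t & bit_gc_alloc) != 0; }
constexpr bool is_lab_alloc(Type t) { return (t & bit_lab_alloc) != 0; }
constexpr bool is_old(Type t)       { return (t & bit_old_alloc) != 0; }
constexpr bool is_promotion(Type t) { return (t & bit_promotion_alloc) != 0; }

// A PLAB is a GC-side LAB allocation in the old generation, not a promotion.
static_assert(is_gc_alloc(alloc_plab) && is_lab_alloc(alloc_plab) && is_old(alloc_plab), "");
static_assert(!is_promotion(alloc_plab), "");
// A mutator TLAB is neither a GC allocation nor old.
static_assert(!is_gc_alloc(alloc_tlab) && !is_old(alloc_tlab), "");
// Shared GC promotion targets old and is flagged as a promotion.
static_assert(is_old(alloc_shared_gc_promotion) && is_promotion(alloc_shared_gc_promotion), "");

} // namespace alloc_bits

int main() { std::puts("bit scheme checks passed"); return 0; }
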
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp
index 0d38cc757f4..2b5bc766a46 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp
@@ -129,7 +129,7 @@ public:
private:
template <class T>
- inline void arraycopy_marking(T* src, T* dst, size_t count, bool is_old_marking);
+ inline void arraycopy_marking(T* dst, size_t count);
template <class T>
inline void arraycopy_evacuation(T* src, size_t count);
template <class T>
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
index b176446452a..adeea8ebf96 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
@@ -387,13 +387,11 @@ template
void ShenandoahBarrierSet::arraycopy_work(T* src, size_t count) {
// Young cycles are allowed to run when old marking is in progress. When old marking is in progress,
// this barrier will be called with ENQUEUE=true and HAS_FWD=false, even though the young generation
- // may have forwarded objects. In this case, the `arraycopy_work` is first called with HAS_FWD=true and
- // ENQUEUE=false.
- assert(HAS_FWD == _heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(),
- "Forwarded object status is sane");
+ // may have forwarded objects.
+ assert(HAS_FWD == _heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded object status is sane");
// This function cannot be called to handle marking and evacuation at the same time (they operate on
// different sides of the copy).
- assert((HAS_FWD || EVAC) != ENQUEUE, "Cannot evacuate and mark both sides of copy.");
+ static_assert((HAS_FWD || EVAC) != ENQUEUE, "Cannot evacuate and mark both sides of copy.");
Thread* thread = Thread::current();
SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
@@ -412,7 +410,7 @@ void ShenandoahBarrierSet::arraycopy_work(T* src, size_t count) {
shenandoah_assert_forwarded_except(elem_ptr, obj, _heap->cancelled_gc());
ShenandoahHeap::atomic_update_oop(fwd, elem_ptr, o);
}
- if (ENQUEUE && !ctx->is_marked_strong_or_old(obj)) {
+ if (ENQUEUE && !ctx->is_marked_strong(obj)) {
_satb_mark_queue_set.enqueue_known_active(queue, obj);
}
}
@@ -426,68 +424,29 @@ void ShenandoahBarrierSet::arraycopy_barrier(T* src, T* dst, size_t count) {
return;
}
- char gc_state = ShenandoahThreadLocalData::gc_state(Thread::current());
- if ((gc_state & ShenandoahHeap::EVACUATION) != 0) {
- arraycopy_evacuation(src, count);
- } else if ((gc_state & ShenandoahHeap::UPDATE_REFS) != 0) {
- arraycopy_update(src, count);
+ const char gc_state = ShenandoahThreadLocalData::gc_state(Thread::current());
+ if ((gc_state & ShenandoahHeap::MARKING) != 0) {
+ // If marking old or young, we must evaluate the SATB barrier. This will be the only
+ // action if we are not marking old. If we are marking old, we must still evaluate the
+ // load reference barrier for a young collection.
+ arraycopy_marking(dst, count);
}
- if (_heap->mode()->is_generational()) {
- assert(ShenandoahSATBBarrier, "Generational mode assumes SATB mode");
- if ((gc_state & ShenandoahHeap::YOUNG_MARKING) != 0) {
- arraycopy_marking(src, dst, count, false);
- }
- if ((gc_state & ShenandoahHeap::OLD_MARKING) != 0) {
- arraycopy_marking(src, dst, count, true);
- }
- } else if ((gc_state & ShenandoahHeap::MARKING) != 0) {
- arraycopy_marking(src, dst, count, false);
+ if ((gc_state & ShenandoahHeap::EVACUATION) != 0) {
+ assert((gc_state & ShenandoahHeap::YOUNG_MARKING) == 0, "Cannot be marking young during evacuation");
+ arraycopy_evacuation(src, count);
+ } else if ((gc_state & ShenandoahHeap::UPDATE_REFS) != 0) {
+ assert((gc_state & ShenandoahHeap::YOUNG_MARKING) == 0, "Cannot be marking young during update-refs");
+ arraycopy_update(src, count);
}
}
template <class T>
-void ShenandoahBarrierSet::arraycopy_marking(T* src, T* dst, size_t count, bool is_old_marking) {
+void ShenandoahBarrierSet::arraycopy_marking(T* dst, size_t count) {
assert(_heap->is_concurrent_mark_in_progress(), "only during marking");
- /*
- * Note that an old-gen object is considered live if it is live at the start of OLD marking or if it is promoted
- * following the start of OLD marking.
- *
- * 1. Every object promoted following the start of OLD marking will be above TAMS within its old-gen region
- * 2. Every object live at the start of OLD marking will be referenced from a "root" or it will be referenced from
- * another live OLD-gen object. With regards to old-gen, roots include stack locations and all of live young-gen.
- * All root references to old-gen are identified during a bootstrap young collection. All references from other
- * old-gen objects will be marked during the traversal of all old objects, or will be marked by the SATB barrier.
- *
- * During old-gen marking (which is interleaved with young-gen collections), call arraycopy_work() if:
- *
- * 1. The overwritten array resides in old-gen and it is below TAMS within its old-gen region
- * 2. Do not call arraycopy_work for any array residing in young-gen because young-gen collection is idle at this time
- *
- * During young-gen marking, call arraycopy_work() if:
- *
- * 1. The overwritten array resides in young-gen and is below TAMS within its young-gen region
- * 2. Additionally, if array resides in old-gen, regardless of its relationship to TAMS because this old-gen array
- * may hold references to young-gen
- */
if (ShenandoahSATBBarrier) {
- T* array = dst;
- HeapWord* array_addr = reinterpret_cast<HeapWord*>(array);
- ShenandoahHeapRegion* r = _heap->heap_region_containing(array_addr);
- if (is_old_marking) {
- // Generational, old marking
- assert(_heap->mode()->is_generational(), "Invariant");
- if (r->is_old() && (array_addr < _heap->marking_context()->top_at_mark_start(r))) {
- arraycopy_work(array, count);
- }
- } else if (_heap->mode()->is_generational()) {
- // Generational, young marking
- if (r->is_old() || (array_addr < _heap->marking_context()->top_at_mark_start(r))) {
- arraycopy_work(array, count);
- }
- } else if (array_addr < _heap->marking_context()->top_at_mark_start(r)) {
- // Non-generational, marking
- arraycopy_work(array, count);
+ if (!_heap->marking_context()->allocated_after_mark_start(reinterpret_cast<HeapWord*>(dst))) {
+ arraycopy_work(dst, count);
}
}
}
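
The rewritten arraycopy_marking above only applies the SATB barrier when the destination array
was allocated before the mark started. A minimal sketch of the top-at-mark-start (TAMS)
reasoning behind allocated_after_mark_start(); the helper name and signature are illustrative,
not the real marking-context API.

  // Objects allocated at or above a region's TAMS are implicitly live in the current cycle,
  // so overwriting their elements needs no SATB enqueue; only pre-existing arrays (below
  // TAMS) must have their previous values recorded.
  static bool example_needs_satb_enqueue(HeapWord* array_addr, HeapWord* region_tams) {
    return array_addr < region_tams;
  }
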
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
index 0deb3b5ba4c..ab7985b3d34 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
@@ -1311,19 +1311,11 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
// Overwrite with non-zero (non-null) values only if necessary for allocation bookkeeping.
- switch (req.type()) {
- case ShenandoahAllocRequest::_alloc_tlab:
- case ShenandoahAllocRequest::_alloc_shared:
- case ShenandoahAllocRequest::_alloc_cds:
- return allocate_for_mutator(req, in_new_region);
- case ShenandoahAllocRequest::_alloc_gclab:
- case ShenandoahAllocRequest::_alloc_plab:
- case ShenandoahAllocRequest::_alloc_shared_gc:
- return allocate_for_collector(req, in_new_region);
- default:
- ShouldNotReachHere();
+ if (req.is_mutator_alloc()) {
+ return allocate_for_mutator(req, in_new_region);
+ } else {
+ return allocate_for_collector(req, in_new_region);
}
- return nullptr;
}
HeapWord* ShenandoahFreeSet::allocate_for_mutator(ShenandoahAllocRequest &req, bool &in_new_region) {
@@ -1619,21 +1611,13 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
if (req.is_mutator_alloc()) {
request_generation = _heap->mode()->is_generational()? _heap->young_generation(): _heap->global_generation();
orig_partition = ShenandoahFreeSetPartitionId::Mutator;
- } else if (req.type() == ShenandoahAllocRequest::_alloc_gclab) {
- request_generation = _heap->mode()->is_generational()? _heap->young_generation(): _heap->global_generation();
- orig_partition = ShenandoahFreeSetPartitionId::Collector;
- } else if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
+ } else if (req.is_old()) {
request_generation = _heap->old_generation();
orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
} else {
- assert(req.type() == ShenandoahAllocRequest::_alloc_shared_gc, "Unexpected allocation type");
- if (req.is_old()) {
- request_generation = _heap->old_generation();
- orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
- } else {
- request_generation = _heap->mode()->is_generational()? _heap->young_generation(): _heap->global_generation();
- orig_partition = ShenandoahFreeSetPartitionId::Collector;
- }
+ // Not old collector alloc, so this is a young collector gclab or shared allocation
+ request_generation = _heap->mode()->is_generational()? _heap->young_generation(): _heap->global_generation();
+ orig_partition = ShenandoahFreeSetPartitionId::Collector;
}
if (alloc_capacity(r) < PLAB::min_size() * HeapWordSize) {
// Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB:min_size(), retire this region.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
index cad9dc0e932..636f65e2553 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
@@ -129,6 +129,8 @@ inline void ShenandoahHeapRegion::adjust_alloc_metadata(ShenandoahAllocRequest::
switch (type) {
case ShenandoahAllocRequest::_alloc_shared:
case ShenandoahAllocRequest::_alloc_shared_gc:
+ case ShenandoahAllocRequest::_alloc_shared_gc_old:
+ case ShenandoahAllocRequest::_alloc_shared_gc_promotion:
case ShenandoahAllocRequest::_alloc_cds:
// Counted implicitly by tlab/gclab allocs
break;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
index 3a99023eca4..44064dbd1a9 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
@@ -250,6 +250,8 @@ HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, con
HeapWord* right = MIN2(region->top(), end_range_of_interest);
HeapWord* end_of_search_next = MIN2(right, tams);
+ // Since end_range_of_interest may not align on a card boundary, last_relevant_card_index is conservative. Not all of the
+ // memory within the last relevant card's span is < right.
size_t last_relevant_card_index;
if (end_range_of_interest == _end_of_heap) {
last_relevant_card_index = _rs->card_index_for_addr(end_range_of_interest - 1);
@@ -352,9 +354,8 @@ HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, con
return nullptr;
}
} while (!starts_object(following_card_index));
- assert(_rs->addr_for_card_index(following_card_index) + get_first_start(following_card_index),
- "Result must precede right");
- return _rs->addr_for_card_index(following_card_index) + get_first_start(following_card_index);
+ HeapWord* result_candidate = _rs->addr_for_card_index(following_card_index) + get_first_start(following_card_index);
+ return (result_candidate >= right)? nullptr: result_candidate;
}
}
}
@@ -378,24 +379,20 @@ HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, con
// evacuation phase) of young collections. This is never called
// during global collections during marking or update refs..
// 4. Every allocation under TAMS updates the object start array.
+#ifdef ASSERT
oop obj = cast_to_oop(p);
assert(oopDesc::is_oop(obj), "Should be an object");
-#ifdef ASSERT
-#define WALK_FORWARD_IN_BLOCK_START true
-#else
-#define WALK_FORWARD_IN_BLOCK_START false
-#endif // ASSERT
- while (WALK_FORWARD_IN_BLOCK_START && p + obj->size() < left) {
+ while (p + obj->size() < left) {
p += obj->size();
obj = cast_to_oop(p);
assert(oopDesc::is_oop(obj), "Should be an object");
assert(Klass::is_valid(obj->klass()), "Not a valid klass ptr");
// Check assumptions in previous block comment if this assert fires
- guarantee(false, "Should never need forward walk in block start");
+ fatal("Should never need forward walk in block start");
}
-#undef WALK_FORWARD_IN_BLOCK_START
assert(p <= left, "p should start at or before left end of card");
assert(p + obj->size() > left, "obj should end after left end of card");
+#endif // ASSERT
return p;
}
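
The new comment and the clamped return path above both stem from the card geometry: the last
relevant card is found by rounding an address down to a card boundary, so its span can extend
past the right end of the range of interest. A hedged sketch of that arithmetic; the helper
names mirror the remembered-set calls in the hunk but are illustrative, not the real API.

  // Card i covers [base + i * card_words, base + (i + 1) * card_words), so an address found
  // inside the last relevant card may still be >= right and must be filtered out, which is
  // what the (result_candidate >= right) ? nullptr : result_candidate return above does.
  static size_t example_card_index_for_addr(HeapWord* addr, HeapWord* base, size_t card_words) {
    return (size_t)(addr - base) / card_words;
  }
  static HeapWord* example_addr_for_card_index(size_t idx, HeapWord* base, size_t card_words) {
    return base + idx * card_words;
  }
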
diff --git a/src/hotspot/share/gc/z/zBarrier.inline.hpp b/src/hotspot/share/gc/z/zBarrier.inline.hpp
index b5923f01628..766a6eb8e4c 100644
--- a/src/hotspot/share/gc/z/zBarrier.inline.hpp
+++ b/src/hotspot/share/gc/z/zBarrier.inline.hpp
@@ -86,10 +86,6 @@ inline void ZBarrier::self_heal(ZBarrierFastPath fast_path, volatile zpointer* p
assert(ZPointer::is_remapped(heal_ptr), "invariant");
for (;;) {
- if (ptr == zpointer::null) {
- assert(!ZVerifyOops || !ZHeap::heap()->is_in(uintptr_t(p)) || !ZHeap::heap()->is_old(p), "No raw null in old");
- }
-
assert_transition_monotonicity(ptr, heal_ptr);
// Heal
diff --git a/src/hotspot/share/gc/z/zBarrierSet.cpp b/src/hotspot/share/gc/z/zBarrierSet.cpp
index 87f93043bdf..643eba1947e 100644
--- a/src/hotspot/share/gc/z/zBarrierSet.cpp
+++ b/src/hotspot/share/gc/z/zBarrierSet.cpp
@@ -223,27 +223,7 @@ void ZBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
// breaks that promise. Take a few steps in the interpreter instead, which has
// no such assumptions about where an object resides.
deoptimize_allocation(thread);
- return;
}
-
- if (!ZGeneration::young()->is_phase_mark_complete()) {
- return;
- }
-
- if (!page->is_relocatable()) {
- return;
- }
-
- if (ZRelocate::compute_to_age(age) != ZPageAge::old) {
- return;
- }
-
- // If the object is young, we have to still be careful that it isn't racingly
- // about to get promoted to the old generation. That causes issues when null
- // pointers are supposed to be coloured, but the JIT is a bit sloppy and
- // reinitializes memory with raw nulls. We detect this situation and detune
- // rather than relying on the JIT to never be sloppy with redundant initialization.
- deoptimize_allocation(thread);
}
void ZBarrierSet::print_on(outputStream* st) const {
diff --git a/src/hotspot/share/gc/z/zDeferredConstructed.inline.hpp b/src/hotspot/share/gc/z/zDeferredConstructed.inline.hpp
index d6d35ecddcd..f686bc78d15 100644
--- a/src/hotspot/share/gc/z/zDeferredConstructed.inline.hpp
+++ b/src/hotspot/share/gc/z/zDeferredConstructed.inline.hpp
@@ -27,10 +27,9 @@
#include "gc/z/zDeferredConstructed.hpp"
+#include "cppstdlib/new.hpp"
#include "cppstdlib/type_traits.hpp"
-#include <new>
-
template <typename T>
inline ZDeferredConstructed<T>::ZDeferredConstructed()
DEBUG_ONLY(: _initialized(false)) {
diff --git a/src/hotspot/share/gc/z/zGeneration.cpp b/src/hotspot/share/gc/z/zGeneration.cpp
index d1680b6c336..2b632ef29a9 100644
--- a/src/hotspot/share/gc/z/zGeneration.cpp
+++ b/src/hotspot/share/gc/z/zGeneration.cpp
@@ -111,6 +111,16 @@ static const ZStatSampler ZSamplerJavaThreads("System", "Java Threads", ZStatUni
ZGenerationYoung* ZGeneration::_young;
ZGenerationOld* ZGeneration::_old;
+class ZRendezvousHandshakeClosure : public HandshakeClosure {
+public:
+ ZRendezvousHandshakeClosure()
+ : HandshakeClosure("ZRendezvous") {}
+
+ void do_thread(Thread* thread) {
+ // Does nothing
+ }
+};
+
ZGeneration::ZGeneration(ZGenerationId id, ZPageTable* page_table, ZPageAllocator* page_allocator)
: _id(id),
_page_allocator(page_allocator),
@@ -168,11 +178,19 @@ void ZGeneration::free_empty_pages(ZRelocationSetSelector* selector, int bulk) {
}
void ZGeneration::flip_age_pages(const ZRelocationSetSelector* selector) {
- if (is_young()) {
- _relocate.flip_age_pages(selector->not_selected_small());
- _relocate.flip_age_pages(selector->not_selected_medium());
- _relocate.flip_age_pages(selector->not_selected_large());
- }
+ _relocate.flip_age_pages(selector->not_selected_small());
+ _relocate.flip_age_pages(selector->not_selected_medium());
+ _relocate.flip_age_pages(selector->not_selected_large());
+
+ // Perform a handshake between flip promotion and running the promotion barrier. A thread in
+ // ZBarrierSet::on_slowpath_allocation_exit() that observed a young page which was then racingly
+ // flip promoted will run its barrier-less stores to completion before responding to the handshake
+ // at its next safepoint poll. The flip promotion barriers therefore always run after such compiled
+ // code missing barriers, but before relocate start.
+ ZRendezvousHandshakeClosure cl;
+ Handshake::execute(&cl);
+
+ _relocate.barrier_flip_promoted_pages(_relocation_set.flip_promoted_pages());
}
static double fragmentation_limit(ZGenerationId generation) {
@@ -235,7 +253,9 @@ void ZGeneration::select_relocation_set(bool promote_all) {
_relocation_set.install(&selector);
// Flip age young pages that were not selected
- flip_age_pages(&selector);
+ if (is_young()) {
+ flip_age_pages(&selector);
+ }
// Setup forwarding table
ZRelocationSetIterator rs_iter(&_relocation_set);
@@ -1280,16 +1300,6 @@ bool ZGenerationOld::uses_clear_all_soft_reference_policy() const {
return _reference_processor.uses_clear_all_soft_reference_policy();
}
-class ZRendezvousHandshakeClosure : public HandshakeClosure {
-public:
- ZRendezvousHandshakeClosure()
- : HandshakeClosure("ZRendezvous") {}
-
- void do_thread(Thread* thread) {
- // Does nothing
- }
-};
-
class ZRendezvousGCThreads: public VM_Operation {
public:
VMOp_Type type() const { return VMOp_ZRendezvousGCThreads; }
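
The comment added in flip_age_pages() relies on a property of handshakes: Handshake::execute()
does not return until every JavaThread has passed a safepoint poll and run the closure, which
is what orders the barrier-less stores of racing compiled code before the flip-promotion
barriers. A minimal sketch of that rendezvous idiom; the closure name below is invented, and
ZRendezvousHandshakeClosure in the patch plays this role.

  // The closure body is intentionally empty; the ordering guarantee comes purely from every
  // JavaThread having to acknowledge the handshake at a safepoint poll.
  class ExampleRendezvousClosure : public HandshakeClosure {
  public:
    ExampleRendezvousClosure() : HandshakeClosure("ExampleRendezvous") {}
    void do_thread(Thread* thread) {
      // Does nothing.
    }
  };

  static void example_rendezvous_all_java_threads() {
    ExampleRendezvousClosure cl;
    Handshake::execute(&cl);  // returns only after all JavaThreads have processed the closure
  }
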
diff --git a/src/hotspot/share/gc/z/zRelocate.cpp b/src/hotspot/share/gc/z/zRelocate.cpp
index 69233da6f54..180ce22b041 100644
--- a/src/hotspot/share/gc/z/zRelocate.cpp
+++ b/src/hotspot/share/gc/z/zRelocate.cpp
@@ -1322,7 +1322,7 @@ private:
public:
ZFlipAgePagesTask(const ZArray<ZPage*>* pages)
- : ZTask("ZPromotePagesTask"),
+ : ZTask("ZFlipAgePagesTask"),
_iter(pages) {}
virtual void work() {
@@ -1337,16 +1337,6 @@ public:
// Figure out if this is proper promotion
const bool promotion = to_age == ZPageAge::old;
- if (promotion) {
- // Before promoting an object (and before relocate start), we must ensure that all
- // contained zpointers are store good. The marking code ensures that for non-null
- // pointers, but null pointers are ignored. This code ensures that even null pointers
- // are made store good, for the promoted objects.
- prev_page->object_iterate([&](oop obj) {
- ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
- });
- }
-
// Logging
prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)");
@@ -1360,7 +1350,7 @@ public:
if (promotion) {
ZGeneration::young()->flip_promote(prev_page, new_page);
- // Defer promoted page registration times the lock is taken
+ // Defer promoted page registration
promoted_pages.push(prev_page);
}
@@ -1371,11 +1361,42 @@ public:
}
};
+class ZPromoteBarrierTask : public ZTask {
+private:
+ ZArrayParallelIterator<ZPage*> _iter;
+
+public:
+ ZPromoteBarrierTask(const ZArray<ZPage*>* pages)
+ : ZTask("ZPromoteBarrierTask"),
+ _iter(pages) {}
+
+ virtual void work() {
+ SuspendibleThreadSetJoiner sts_joiner;
+
+ for (ZPage* page; _iter.next(&page);) {
+ // When promoting an object (and before relocate start), we must ensure that all
+ // contained zpointers are store good. The marking code ensures that for non-null
+ // pointers, but null pointers are ignored. This code ensures that even null pointers
+ // are made store good, for the promoted objects.
+ page->object_iterate([&](oop obj) {
+ ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
+ });
+
+ SuspendibleThreadSet::yield();
+ }
+ }
+};
+
void ZRelocate::flip_age_pages(const ZArray<ZPage*>* pages) {
ZFlipAgePagesTask flip_age_task(pages);
workers()->run(&flip_age_task);
}
+void ZRelocate::barrier_flip_promoted_pages(const ZArray<ZPage*>* pages) {
+ ZPromoteBarrierTask promote_barrier_task(pages);
+ workers()->run(&promote_barrier_task);
+}
+
void ZRelocate::synchronize() {
_queue.synchronize();
}
diff --git a/src/hotspot/share/gc/z/zRelocate.hpp b/src/hotspot/share/gc/z/zRelocate.hpp
index d0ddf7deecf..50111f24ee5 100644
--- a/src/hotspot/share/gc/z/zRelocate.hpp
+++ b/src/hotspot/share/gc/z/zRelocate.hpp
@@ -119,6 +119,7 @@ public:
void relocate(ZRelocationSet* relocation_set);
void flip_age_pages(const ZArray<ZPage*>* pages);
+ void barrier_flip_promoted_pages(const ZArray<ZPage*>* pages);
void synchronize();
void desynchronize();
diff --git a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampling.cpp b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampling.cpp
index f7a725fce6d..b5720351bdf 100644
--- a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampling.cpp
+++ b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampling.cpp
@@ -217,7 +217,7 @@ static bool compute_top_frame(const JfrSampleRequest& request, frame& top_frame,
const PcDesc* const pc_desc = get_pc_desc(sampled_nm, sampled_pc);
if (is_valid(pc_desc)) {
intptr_t* const synthetic_sp = sender_sp - sampled_nm->frame_size();
- top_frame = frame(synthetic_sp, synthetic_sp, sender_sp, pc_desc->real_pc(sampled_nm), sampled_nm);
+ top_frame = frame(synthetic_sp, synthetic_sp, sender_sp - 2, pc_desc->real_pc(sampled_nm), sampled_nm);
in_continuation = is_in_continuation(top_frame, jt);
return true;
}
diff --git a/src/hotspot/share/memory/allocation.hpp b/src/hotspot/share/memory/allocation.hpp
index 35180fdba5e..963ca04aadf 100644
--- a/src/hotspot/share/memory/allocation.hpp
+++ b/src/hotspot/share/memory/allocation.hpp
@@ -25,14 +25,13 @@
#ifndef SHARE_MEMORY_ALLOCATION_HPP
#define SHARE_MEMORY_ALLOCATION_HPP
+#include "cppstdlib/new.hpp"
#include "memory/allStatic.hpp"
#include "nmt/memTag.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
-#include <new>
-
class outputStream;
class Thread;
class JavaThread;
diff --git a/src/hotspot/share/memory/arena.cpp b/src/hotspot/share/memory/arena.cpp
index b9968083e0e..2de3f837c00 100644
--- a/src/hotspot/share/memory/arena.cpp
+++ b/src/hotspot/share/memory/arena.cpp
@@ -24,6 +24,7 @@
*/
#include "compiler/compilationMemoryStatistic.hpp"
+#include "cppstdlib/new.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
#include "memory/resourceArea.hpp"
diff --git a/src/hotspot/share/memory/arena.hpp b/src/hotspot/share/memory/arena.hpp
index b4a0546babf..a8450b5543a 100644
--- a/src/hotspot/share/memory/arena.hpp
+++ b/src/hotspot/share/memory/arena.hpp
@@ -31,8 +31,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
-#include <new>
-
// The byte alignment to be used by Arena::Amalloc.
#define ARENA_AMALLOC_ALIGNMENT BytesPerLong
#define ARENA_ALIGN(x) (align_up((x), ARENA_AMALLOC_ALIGNMENT))
diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp
index d389fe81806..4d2897be5eb 100644
--- a/src/hotspot/share/memory/universe.cpp
+++ b/src/hotspot/share/memory/universe.cpp
@@ -182,7 +182,6 @@ int Universe::_base_vtable_size = 0;
bool Universe::_bootstrapping = false;
bool Universe::_module_initialized = false;
bool Universe::_fully_initialized = false;
-volatile bool Universe::_is_shutting_down = false;
OopStorage* Universe::_vm_weak = nullptr;
OopStorage* Universe::_vm_global = nullptr;
@@ -1374,15 +1373,14 @@ static void log_cpu_time() {
}
void Universe::before_exit() {
- {
- // Acquire the Heap_lock to synchronize with VM_Heap_Sync_Operations,
- // which may depend on the value of _is_shutting_down flag.
- MutexLocker hl(Heap_lock);
- log_cpu_time();
- AtomicAccess::release_store(&_is_shutting_down, true);
- }
+ // Tell the GC that it is time to shut down and to block requests for new GC pauses.
+ heap()->initiate_shutdown();
- heap()->before_exit();
+ // Log CPU time statistics before stopping the GC threads.
+ log_cpu_time();
+
+ // Stop the GC threads.
+ heap()->stop();
// Print GC/heap related information.
Log(gc, exit) log;
diff --git a/src/hotspot/share/memory/universe.hpp b/src/hotspot/share/memory/universe.hpp
index df2c1d66d3c..b2325c67ca0 100644
--- a/src/hotspot/share/memory/universe.hpp
+++ b/src/hotspot/share/memory/universe.hpp
@@ -128,9 +128,6 @@ class Universe: AllStatic {
static bool _module_initialized; // true after call_initPhase2 called
static bool _fully_initialized; // true after universe_init and initialize_vtables called
- // Shutdown
- static volatile bool _is_shutting_down;
-
// the array of preallocated errors with backtraces
static objArrayOop preallocated_out_of_memory_errors();
@@ -328,8 +325,6 @@ class Universe: AllStatic {
static bool is_module_initialized() { return _module_initialized; }
static bool is_fully_initialized() { return _fully_initialized; }
- static bool is_shutting_down() { return AtomicAccess::load_acquire(&_is_shutting_down); }
-
static bool on_page_boundary(void* addr);
static bool should_fill_in_stack_trace(Handle throwable);
static void check_alignment(uintx size, uintx alignment, const char* name);
diff --git a/src/hotspot/share/oops/bsmAttribute.hpp b/src/hotspot/share/oops/bsmAttribute.hpp
new file mode 100644
index 00000000000..a28d2757fb0
--- /dev/null
+++ b/src/hotspot/share/oops/bsmAttribute.hpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_OOPS_BSMATTRIBUTE_HPP
+#define SHARE_OOPS_BSMATTRIBUTE_HPP
+
+#include "oops/array.hpp"
+#include "utilities/checkedCast.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class ClassLoaderData;
+
+class BSMAttributeEntry {
+ friend class ConstantPool;
+ friend class BSMAttributeEntries;
+
+ u2 _bootstrap_method_index;
+ u2 _argument_count;
+
+ // The argument indexes are stored right after the object, in a contiguous array.
+ // [ bsmi_0 argc_0 arg_00 arg_01 ... arg_0N bsmi_1 argc_1 arg_10 ... arg_1N ... ]
+ // So in order to find the argument array, jump over ourselves.
+ const u2* argument_indexes() const {
+ return reinterpret_cast<const u2*>(this + 1);
+ }
+ u2* argument_indexes() {
+ return reinterpret_cast<u2*>(this + 1);
+ }
+ // These are overlays on top of the BSMAttributeEntries data array; do not construct.
+ BSMAttributeEntry() = delete;
+ NONCOPYABLE(BSMAttributeEntry);
+
+ void copy_args_into(BSMAttributeEntry* entry) const;
+
+public:
+ // Offsets for SA
+ enum {
+ _bsmi_offset = 0,
+ _argc_offset = 1,
+ _argv_offset = 2
+ };
+
+ int bootstrap_method_index() const {
+ return _bootstrap_method_index;
+ }
+ int argument_count() const {
+ return _argument_count;
+ }
+ int argument(int n) const {
+ assert(checked_cast<u2>(n) < _argument_count, "oob");
+ return argument_indexes()[n];
+ }
+
+ void set_argument(int index, u2 value) {
+ assert(index >= 0 && index < argument_count(), "invariant");
+ argument_indexes()[index] = value;
+ }
+
+ // How many u2s are required to store a BSM entry with argc arguments?
+ static int u2s_required(u2 argc) {
+ return 1 /* index */ + 1 /* argc */ + argc /* argv */;
+ }
+};
+
+// The BSMAttributeEntries stores the state of the BootstrapMethods attribute.
+class BSMAttributeEntries {
+ friend class VMStructs;
+ friend class JVMCIVMStructs;
+
+public:
+ class InsertionIterator {
+ friend BSMAttributeEntries;
+ BSMAttributeEntries* _insert_into;
+ // Index of the next unused slot in the offsets array.
+ int _cur_offset;
+ // Index of the next unused u2 in the bootstrap methods data array.
+ int _cur_array;
+ public:
+ InsertionIterator() : _insert_into(nullptr), _cur_offset(-1), _cur_array(-1) {}
+ InsertionIterator(BSMAttributeEntries* insert_into, int cur_offset, int cur_array)
+ : _insert_into(insert_into),
+ _cur_offset(cur_offset),
+ _cur_array(cur_array) {}
+ InsertionIterator(const InsertionIterator&) = default;
+ InsertionIterator& operator=(const InsertionIterator&) = default;
+
+ int current_offset() const { return _cur_offset; }
+ // Add a new BSMAE, reserving the necessary memory for filling the argument vector.
+ // Returns null if there isn't enough space.
+ inline BSMAttributeEntry* reserve_new_entry(u2 bsmi, u2 argc);
+ };
+
+private:
+ // Each bootstrap method has a variable-sized array associated with it.
+ // We want constant-time lookup of the Nth BSM. Therefore, we use an offset table,
+ // such that the Nth BSM is located at _bootstrap_methods[_offsets[N]].
+ Array* _offsets;
+ Array* _bootstrap_methods;
+
+ // Copy the first num_entries into iter.
+ void copy_into(InsertionIterator& iter, int num_entries) const;
+
+public:
+ BSMAttributeEntries() : _offsets(nullptr), _bootstrap_methods(nullptr) {}
+ BSMAttributeEntries(Array* offsets, Array* bootstrap_methods)
+ : _offsets(offsets),
+ _bootstrap_methods(bootstrap_methods) {}
+
+ bool is_empty() const {
+ return _offsets == nullptr && _bootstrap_methods == nullptr;
+ }
+
+ Array*& offsets() { return _offsets; }
+ const Array* const& offsets() const { return _offsets; }
+ Array*& bootstrap_methods() { return _bootstrap_methods; }
+ const Array* const& bootstrap_methods() const { return _bootstrap_methods; }
+
+ BSMAttributeEntry* entry(int bsms_attribute_index) {
+ return reinterpret_cast<BSMAttributeEntry*>(_bootstrap_methods->adr_at(_offsets->at(bsms_attribute_index)));
+ }
+ const BSMAttributeEntry* entry(int bsms_attribute_index) const {
+ return reinterpret_cast<const BSMAttributeEntry*>(_bootstrap_methods->adr_at(_offsets->at(bsms_attribute_index)));
+ }
+
+ int number_of_entries() const {
+ return _offsets == nullptr ? 0 : _offsets->length();
+ }
+
+ // The number of U2s the BSM data consists of.
+ int array_length() const {
+ return _bootstrap_methods == nullptr ? 0 : _bootstrap_methods->length();
+ }
+
+ void deallocate_contents(ClassLoaderData* loader_data);
+
+ // Extend to have space for both this BSMAttributeEntries and other's entries.
+ // Does not copy in other's entries; that must be done via the InsertionIterator.
+ // This starts an insertion iteration. Any call to start_extension must have a matching end_extension call.
+ InsertionIterator start_extension(const BSMAttributeEntries& other, ClassLoaderData* loader_data, TRAPS);
+ // Extend the BSMAttributeEntries with an additional number_of_entries entries totaling data_size u2s.
+ InsertionIterator start_extension(int number_of_entries, int data_size, ClassLoaderData* loader_data, TRAPS);
+ // Reallocates the underlying memory to fit the limits of the InsertionIterator precisely.
+ // This ends an insertion iteration. The memory is truncated to fit exactly the data used.
+ void end_extension(InsertionIterator& iter, ClassLoaderData* loader_data, TRAPS);
+ // Append all of the entries in other into this.
+ void append(const BSMAttributeEntries& other, ClassLoaderData* loader_data, TRAPS);
+};
+
+#endif // SHARE_OOPS_BSMATTRIBUTE_HPP
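
Because each entry's argument indexes live directly behind it in one contiguous u2 array, the
offsets table is what provides constant-time access to the Nth bootstrap method. A small worked
example with made-up values; only BSMAttributeEntry::u2s_required() comes from the header above.

  // Two bootstrap methods, the first with two static arguments and the second with one:
  //   bootstrap_methods: [ bsmi_0, argc_0=2, arg_00, arg_01, bsmi_1, argc_1=1, arg_10 ]
  //   u2 index:              0        1        2       3       4        5        6
  //   offsets:           [ 0, 4 ]
  // entry(1) overlays a BSMAttributeEntry at &bootstrap_methods[offsets[1]], i.e. index 4,
  // and entries pack end to end, each occupying u2s_required(argc) == 2 + argc u2s:
  static int example_offset_of_next_entry(int entry_offset, u2 entry_argc) {
    return entry_offset + BSMAttributeEntry::u2s_required(entry_argc);
  }
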
diff --git a/src/hotspot/share/oops/bsmAttribute.inline.hpp b/src/hotspot/share/oops/bsmAttribute.inline.hpp
new file mode 100644
index 00000000000..e678c280c26
--- /dev/null
+++ b/src/hotspot/share/oops/bsmAttribute.inline.hpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_OOPS_BSMATTRIBUTE_INLINE_HPP
+#define SHARE_OOPS_BSMATTRIBUTE_INLINE_HPP
+
+#include "oops/bsmAttribute.hpp"
+
+inline BSMAttributeEntry* BSMAttributeEntries::InsertionIterator::reserve_new_entry(u2 bsmi, u2 argc) {
+ assert(_insert_into->offsets() != nullptr, "must");
+ assert(_insert_into->bootstrap_methods() != nullptr, "must");
+
+ if (_cur_offset + 1 > _insert_into->offsets()->length() ||
+ _cur_array + BSMAttributeEntry::u2s_required(argc) > _insert_into->bootstrap_methods()->length()) {
+ return nullptr;
+ }
+ _insert_into->offsets()->at_put(_cur_offset, _cur_array);
+ BSMAttributeEntry* e = _insert_into->entry(_cur_offset);
+ e->_bootstrap_method_index = bsmi;
+ e->_argument_count = argc;
+
+ _cur_array += 1 + 1 + argc;
+ _cur_offset += 1;
+ return e;
+}
+
+inline void BSMAttributeEntry::copy_args_into(BSMAttributeEntry* entry) const {
+ assert(entry->argument_count() == this->argument_count(), "must be same");
+ for (int i = 0; i < argument_count(); i++) {
+ entry->set_argument(i, this->argument(i));
+ }
+}
+
+#endif // SHARE_OOPS_BSMATTRIBUTE_INLINE_HPP
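
A hedged usage sketch of the insertion protocol defined above, mirroring what
BSMAttributeEntries::append() does in constantPool.cpp; the wrapper function and its
arguments are invented for illustration, and error handling beyond TRAPS is omitted.

  // Append a single bootstrap-method entry with two static arguments.
  static void example_add_bsm_entry(BSMAttributeEntries& entries, u2 bsm_index, u2 arg0, u2 arg1,
                                    ClassLoaderData* loader_data, TRAPS) {
    const u2 argc = 2;
    // 1. Grow the backing arrays to hold one more entry of u2s_required(argc) u2s.
    BSMAttributeEntries::InsertionIterator iter =
        entries.start_extension(1, BSMAttributeEntry::u2s_required(argc), loader_data, CHECK);
    // 2. Reserve the entry and fill in its argument vector.
    BSMAttributeEntry* e = iter.reserve_new_entry(bsm_index, argc);
    assert(e != nullptr, "space was reserved above");
    e->set_argument(0, arg0);
    e->set_argument(1, arg1);
    // 3. Truncate the arrays back to exactly the space that was used.
    entries.end_extension(iter, loader_data, CHECK);
  }
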
diff --git a/src/hotspot/share/oops/constantPool.cpp b/src/hotspot/share/oops/constantPool.cpp
index 95a43b07bd7..640b2f2460f 100644
--- a/src/hotspot/share/oops/constantPool.cpp
+++ b/src/hotspot/share/oops/constantPool.cpp
@@ -131,8 +131,7 @@ void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) {
MetadataFactory::free_array(loader_data, resolved_klasses());
set_resolved_klasses(nullptr);
- MetadataFactory::free_array(loader_data, operands());
- set_operands(nullptr);
+ bsm_entries().deallocate_contents(loader_data);
release_C_heap_structures();
@@ -152,7 +151,8 @@ void ConstantPool::metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_tags, MetaspaceClosure::_writable);
it->push(&_cache);
it->push(&_pool_holder);
- it->push(&_operands);
+ it->push(&bsm_entries().offsets());
+ it->push(&bsm_entries().bootstrap_methods());
it->push(&_resolved_klasses, MetaspaceClosure::_writable);
for (int i = 0; i < length(); i++) {
@@ -761,7 +761,7 @@ Method* ConstantPool::method_at_if_loaded(const constantPoolHandle& cpool,
if (cpool->cache() == nullptr) return nullptr; // nothing to load yet
if (!(which >= 0 && which < cpool->resolved_method_entries_length())) {
// FIXME: should be an assert
- log_debug(class, resolve)("bad operand %d in:", which); cpool->print();
+ log_debug(class, resolve)("bad BSM %d in:", which); cpool->print();
return nullptr;
}
return cpool->cache()->method_if_resolved(which);
@@ -1562,8 +1562,8 @@ bool ConstantPool::compare_entry_to(int index1, const constantPoolHandle& cp2,
int i1 = bootstrap_methods_attribute_index(index1);
int i2 = cp2->bootstrap_methods_attribute_index(index2);
bool match_entry = compare_entry_to(k1, cp2, k2);
- bool match_operand = compare_operand_to(i1, cp2, i2);
- return (match_entry && match_operand);
+ bool match_bsm = compare_bootstrap_entry_to(i1, cp2, i2);
+ return (match_entry && match_bsm);
} break;
case JVM_CONSTANT_InvokeDynamic:
@@ -1573,8 +1573,8 @@ bool ConstantPool::compare_entry_to(int index1, const constantPoolHandle& cp2,
int i1 = bootstrap_methods_attribute_index(index1);
int i2 = cp2->bootstrap_methods_attribute_index(index2);
bool match_entry = compare_entry_to(k1, cp2, k2);
- bool match_operand = compare_operand_to(i1, cp2, i2);
- return (match_entry && match_operand);
+ bool match_bsm = compare_bootstrap_entry_to(i1, cp2, i2);
+ return (match_entry && match_bsm);
} break;
case JVM_CONSTANT_String:
@@ -1608,140 +1608,29 @@ bool ConstantPool::compare_entry_to(int index1, const constantPoolHandle& cp2,
return false;
} // end compare_entry_to()
-
-// Resize the operands array with delta_len and delta_size.
+// Extend the BSMAttributeEntries with the length and size of the ext_cp BSMAttributeEntries.
// Used in RedefineClasses for CP merge.
-void ConstantPool::resize_operands(int delta_len, int delta_size, TRAPS) {
- int old_len = operand_array_length(operands());
- int new_len = old_len + delta_len;
- int min_len = (delta_len > 0) ? old_len : new_len;
-
- int old_size = operands()->length();
- int new_size = old_size + delta_size;
- int min_size = (delta_size > 0) ? old_size : new_size;
-
- ClassLoaderData* loader_data = pool_holder()->class_loader_data();
- Array<u2>* new_ops = MetadataFactory::new_array<u2>(loader_data, new_size, CHECK);
-
- // Set index in the resized array for existing elements only
- for (int idx = 0; idx < min_len; idx++) {
- int offset = operand_offset_at(idx); // offset in original array
- operand_offset_at_put(new_ops, idx, offset + 2*delta_len); // offset in resized array
- }
- // Copy the bootstrap specifiers only
- Copy::conjoint_memory_atomic(operands()->adr_at(2*old_len),
- new_ops->adr_at(2*new_len),
- (min_size - 2*min_len) * sizeof(u2));
- // Explicitly deallocate old operands array.
- // Note, it is not needed for 7u backport.
- if ( operands() != nullptr) { // the safety check
- MetadataFactory::free_array(loader_data, operands());
- }
- set_operands(new_ops);
-} // end resize_operands()
+BSMAttributeEntries::InsertionIterator
+ConstantPool::start_extension(const constantPoolHandle& ext_cp, TRAPS) {
+ BSMAttributeEntries::InsertionIterator iter =
+ bsm_entries().start_extension(ext_cp->bsm_entries(), pool_holder()->class_loader_data(),
+ CHECK_(BSMAttributeEntries::InsertionIterator()));
+ return iter;
+}
-// Extend the operands array with the length and size of the ext_cp operands.
-// Used in RedefineClasses for CP merge.
-void ConstantPool::extend_operands(const constantPoolHandle& ext_cp, TRAPS) {
- int delta_len = operand_array_length(ext_cp->operands());
- if (delta_len == 0) {
- return; // nothing to do
- }
- int delta_size = ext_cp->operands()->length();
-
- assert(delta_len > 0 && delta_size > 0, "extended operands array must be bigger");
-
- if (operand_array_length(operands()) == 0) {
- ClassLoaderData* loader_data = pool_holder()->class_loader_data();
- Array<u2>* new_ops = MetadataFactory::new_array<u2>(loader_data, delta_size, CHECK);
- // The first element index defines the offset of second part
- operand_offset_at_put(new_ops, 0, 2*delta_len); // offset in new array
- set_operands(new_ops);
- } else {
- resize_operands(delta_len, delta_size, CHECK);
- }
-
-} // end extend_operands()
+void ConstantPool::end_extension(BSMAttributeEntries::InsertionIterator iter, TRAPS) {
+ bsm_entries().end_extension(iter, pool_holder()->class_loader_data(), THREAD);
+}
-// Shrink the operands array to a smaller array with new_len length.
-// Used in RedefineClasses for CP merge.
-void ConstantPool::shrink_operands(int new_len, TRAPS) {
- int old_len = operand_array_length(operands());
- if (new_len == old_len) {
- return; // nothing to do
- }
- assert(new_len < old_len, "shrunken operands array must be smaller");
-
- int free_base = operand_next_offset_at(new_len - 1);
- int delta_len = new_len - old_len;
- int delta_size = 2*delta_len + free_base - operands()->length();
-
- resize_operands(delta_len, delta_size, CHECK);
-
-} // end shrink_operands()
-
-
-void ConstantPool::copy_operands(const constantPoolHandle& from_cp,
- const constantPoolHandle& to_cp,
- TRAPS) {
-
- int from_oplen = operand_array_length(from_cp->operands());
- int old_oplen = operand_array_length(to_cp->operands());
- if (from_oplen != 0) {
- ClassLoaderData* loader_data = to_cp->pool_holder()->class_loader_data();
- // append my operands to the target's operands array
- if (old_oplen == 0) {
- // Can't just reuse from_cp's operand list because of deallocation issues
- int len = from_cp->operands()->length();
- Array<u2>* new_ops = MetadataFactory::new_array<u2>(loader_data, len, CHECK);
- Copy::conjoint_memory_atomic(
- from_cp->operands()->adr_at(0), new_ops->adr_at(0), len * sizeof(u2));
- to_cp->set_operands(new_ops);
- } else {
- int old_len = to_cp->operands()->length();
- int from_len = from_cp->operands()->length();
- int old_off = old_oplen * sizeof(u2);
- int from_off = from_oplen * sizeof(u2);
- // Use the metaspace for the destination constant pool
- Array<u2>* new_operands = MetadataFactory::new_array<u2>(loader_data, old_len + from_len, CHECK);
- int fillp = 0, len = 0;
- // first part of dest
- Copy::conjoint_memory_atomic(to_cp->operands()->adr_at(0),
- new_operands->adr_at(fillp),
- (len = old_off) * sizeof(u2));
- fillp += len;
- // first part of src
- Copy::conjoint_memory_atomic(from_cp->operands()->adr_at(0),
- new_operands->adr_at(fillp),
- (len = from_off) * sizeof(u2));
- fillp += len;
- // second part of dest
- Copy::conjoint_memory_atomic(to_cp->operands()->adr_at(old_off),
- new_operands->adr_at(fillp),
- (len = old_len - old_off) * sizeof(u2));
- fillp += len;
- // second part of src
- Copy::conjoint_memory_atomic(from_cp->operands()->adr_at(from_off),
- new_operands->adr_at(fillp),
- (len = from_len - from_off) * sizeof(u2));
- fillp += len;
- assert(fillp == new_operands->length(), "");
-
- // Adjust indexes in the first part of the copied operands array.
- for (int j = 0; j < from_oplen; j++) {
- int offset = operand_offset_at(new_operands, old_oplen + j);
- assert(offset == operand_offset_at(from_cp->operands(), j), "correct copy");
- offset += old_len; // every new tuple is preceded by old_len extra u2's
- operand_offset_at_put(new_operands, old_oplen + j, offset);
- }
-
- // replace target operands array with combined array
- to_cp->set_operands(new_operands);
- }
- }
-} // end copy_operands()
+void ConstantPool::copy_bsm_entries(const constantPoolHandle& from_cp,
+ const constantPoolHandle& to_cp,
+ TRAPS) {
+ to_cp->bsm_entries().append(from_cp->bsm_entries(),
+ to_cp->pool_holder()->class_loader_data(),
+ THREAD);
+}
// Copy this constant pool's entries at start_i to end_i (inclusive)
@@ -1771,7 +1660,7 @@ void ConstantPool::copy_cp_to_impl(const constantPoolHandle& from_cp, int start_
break;
}
}
- copy_operands(from_cp, to_cp, CHECK);
+ copy_bsm_entries(from_cp, to_cp, THREAD);
} // end copy_cp_to_impl()
@@ -1895,7 +1784,7 @@ void ConstantPool::copy_entry_to(const constantPoolHandle& from_cp, int from_i,
{
int k1 = from_cp->bootstrap_methods_attribute_index(from_i);
int k2 = from_cp->bootstrap_name_and_type_ref_index_at(from_i);
- k1 += operand_array_length(to_cp->operands()); // to_cp might already have operands
+ k1 += to_cp->bsm_entries().number_of_entries(); // to_cp might already have a BSM attribute
to_cp->dynamic_constant_at_put(to_i, k1, k2);
} break;
@@ -1903,7 +1792,7 @@ void ConstantPool::copy_entry_to(const constantPoolHandle& from_cp, int from_i,
{
int k1 = from_cp->bootstrap_methods_attribute_index(from_i);
int k2 = from_cp->bootstrap_name_and_type_ref_index_at(from_i);
- k1 += operand_array_length(to_cp->operands()); // to_cp might already have operands
+ k1 += to_cp->bsm_entries().number_of_entries(); // to_cp might already have a BSM attribute
to_cp->invoke_dynamic_at_put(to_i, k1, k2);
} break;
@@ -1939,9 +1828,9 @@ int ConstantPool::find_matching_entry(int pattern_i,
// Compare this constant pool's bootstrap specifier at idx1 to the constant pool
// cp2's bootstrap specifier at idx2.
-bool ConstantPool::compare_operand_to(int idx1, const constantPoolHandle& cp2, int idx2) {
- BSMAttributeEntry* e1 = bsm_attribute_entry(idx1);
- BSMAttributeEntry* e2 = cp2->bsm_attribute_entry(idx2);
+bool ConstantPool::compare_bootstrap_entry_to(int idx1, const constantPoolHandle& cp2, int idx2) {
+ const BSMAttributeEntry* const e1 = bsm_attribute_entry(idx1);
+ const BSMAttributeEntry* const e2 = cp2->bsm_attribute_entry(idx2);
int k1 = e1->bootstrap_method_index();
int k2 = e2->bootstrap_method_index();
bool match = compare_entry_to(k1, cp2, k2);
@@ -1949,34 +1838,37 @@ bool ConstantPool::compare_operand_to(int idx1, const constantPoolHandle& cp2, i
if (!match) {
return false;
}
- int argc = e1->argument_count();
- if (argc == e2->argument_count()) {
- for (int j = 0; j < argc; j++) {
- k1 = e1->argument_index(j);
- k2 = e2->argument_index(j);
- match = compare_entry_to(k1, cp2, k2);
- if (!match) {
- return false;
- }
- }
- return true; // got through loop; all elements equal
+
+ const int argc = e1->argument_count();
+ if (argc != e2->argument_count()) {
+ return false;
}
- return false;
-} // end compare_operand_to()
+
+ for (int j = 0; j < argc; j++) {
+ k1 = e1->argument(j);
+ k2 = e2->argument(j);
+ match = compare_entry_to(k1, cp2, k2);
+ if (!match) {
+ return false;
+ }
+ }
+
+ return true; // got through loop; all elements equal
+} // end compare_bootstrap_entry_to()
// Search constant pool search_cp for a bootstrap specifier that matches
// this constant pool's bootstrap specifier data at pattern_i index.
// Return the index of a matching bootstrap attribute record or (-1) if there is no match.
-int ConstantPool::find_matching_operand(int pattern_i,
- const constantPoolHandle& search_cp, int search_len) {
- for (int i = 0; i < search_len; i++) {
- bool found = compare_operand_to(pattern_i, search_cp, i);
+int ConstantPool::find_matching_bsm_entry(int pattern_i,
+ const constantPoolHandle& search_cp, int offset_limit) {
+ for (int i = 0; i < offset_limit; i++) {
+ bool found = compare_bootstrap_entry_to(pattern_i, search_cp, i);
if (found) {
return i;
}
}
return -1; // bootstrap specifier data not found; return unused index (-1)
-} // end find_matching_operand()
+} // end find_matching_bsm_entry()
#ifndef PRODUCT
@@ -2411,7 +2303,7 @@ void ConstantPool::print_value_on(outputStream* st) const {
assert(is_constantPool(), "must be constantPool");
st->print("constant pool [%d]", length());
if (has_preresolution()) st->print("/preresolution");
- if (operands() != nullptr) st->print("/operands[%d]", operands()->length());
+ if (!bsm_entries().is_empty()) st->print("/BSMs[%d]", bsm_entries().bootstrap_methods()->length());
print_address_on(st);
if (pool_holder() != nullptr) {
st->print(" for ");
@@ -2446,3 +2338,87 @@ void ConstantPool::verify_on(outputStream* st) {
guarantee(pool_holder()->is_klass(), "should be klass");
}
}
+
+void BSMAttributeEntries::deallocate_contents(ClassLoaderData* loader_data) {
+ MetadataFactory::free_array(loader_data, this->_offsets);
+ MetadataFactory::free_array(loader_data, this->_bootstrap_methods);
+ this->_offsets = nullptr;
+ this->_bootstrap_methods = nullptr;
+}
+
+void BSMAttributeEntries::copy_into(InsertionIterator& iter, int num_entries) const {
+ assert(num_entries + iter._cur_offset <= iter._insert_into->_offsets->length(), "must");
+ for (int i = 0; i < num_entries; i++) {
+ const BSMAttributeEntry* e = entry(i);
+ BSMAttributeEntry* e_new = iter.reserve_new_entry(e->bootstrap_method_index(), e->argument_count());
+ assert(e_new != nullptr, "must be");
+ e->copy_args_into(e_new);
+ }
+}
+
+BSMAttributeEntries::InsertionIterator
+BSMAttributeEntries::start_extension(const BSMAttributeEntries& other, ClassLoaderData* loader_data, TRAPS) {
+ InsertionIterator iter = start_extension(other.number_of_entries(), other.array_length(),
+ loader_data, CHECK_(BSMAttributeEntries::InsertionIterator()));
+ return iter;
+}
+
+BSMAttributeEntries::InsertionIterator
+BSMAttributeEntries::start_extension(int number_of_entries, int array_length,
+ ClassLoaderData* loader_data, TRAPS) {
+ InsertionIterator extension_iterator(this, this->number_of_entries(), this->array_length());
+ int new_number_of_entries = this->number_of_entries() + number_of_entries;
+ int new_array_length = this->array_length() + array_length;
+ int invalid_index = new_array_length;
+
+ Array* new_offsets =
+ MetadataFactory::new_array(loader_data, new_number_of_entries, invalid_index, CHECK_(InsertionIterator()));
+ Array* new_array = MetadataFactory::new_array(loader_data, new_array_length, CHECK_(InsertionIterator()));
+ { // Copy over all the old entries and their respective offsets
+ BSMAttributeEntries carrier(new_offsets, new_array);
+ InsertionIterator copy_iter(&carrier, 0, 0);
+ copy_into(copy_iter, this->number_of_entries());
+ }
+ // Replace content
+ deallocate_contents(loader_data);
+ _offsets = new_offsets;
+ _bootstrap_methods = new_array;
+ return extension_iterator;
+}
+
+
+void BSMAttributeEntries::append(const BSMAttributeEntries& other, ClassLoaderData* loader_data, TRAPS) {
+ if (other.number_of_entries() == 0) {
+ return; // Done!
+ }
+ InsertionIterator iter = start_extension(other, loader_data, CHECK);
+ other.copy_into(iter, other.number_of_entries());
+ end_extension(iter, loader_data, THREAD);
+}
+
+void BSMAttributeEntries::end_extension(InsertionIterator& iter, ClassLoaderData* loader_data, TRAPS) {
+ assert(iter._insert_into == this, "must be");
+ assert(iter._cur_offset <= this->_offsets->length(), "must be");
+ assert(iter._cur_array <= this->_bootstrap_methods->length(), "must be");
+
+ // Did we fill up all of the available space? If so, do nothing.
+ if (iter._cur_offset == this->_offsets->length() &&
+ iter._cur_array == this->_bootstrap_methods->length()) {
+ return;
+ }
+
+ // We used less, truncate by allocating new arrays
+ Array* new_offsets =
+ MetadataFactory::new_array(loader_data, iter._cur_offset, 0, CHECK);
+ Array* new_array =
+ MetadataFactory::new_array(loader_data, iter._cur_array, CHECK);
+ { // Copy over the constructed entries
+ BSMAttributeEntries carrier(new_offsets, new_array);
+ InsertionIterator copy_iter(&carrier, 0, 0);
+ copy_into(copy_iter, iter._cur_offset);
+ }
+
+ deallocate_contents(loader_data);
+ _offsets = new_offsets;
+ _bootstrap_methods = new_array;
+}
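
The index adjustment in copy_entry_to() shifts a copied bootstrap_methods_attribute_index by
the destination's entry count, because append() places the source entries after the entries
to_cp already has. A small worked example with made-up numbers.

  // to_cp already holds 3 BSM entries that happen to occupy 17 u2s of data. After append(),
  // from_cp's entry j becomes merged entry 3 + j, so the shift uses the destination's entry
  // count (3), not its u2 data length (17).
  static int example_shift_bsm_index(int from_index, const BSMAttributeEntries& to_entries) {
    return from_index + to_entries.number_of_entries();
  }
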
diff --git a/src/hotspot/share/oops/constantPool.hpp b/src/hotspot/share/oops/constantPool.hpp
index 9cbeb1245be..6c519945f4d 100644
--- a/src/hotspot/share/oops/constantPool.hpp
+++ b/src/hotspot/share/oops/constantPool.hpp
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "oops/arrayOop.hpp"
+#include "oops/bsmAttribute.inline.hpp"
#include "oops/cpCache.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oopHandle.hpp"
@@ -77,43 +78,6 @@ public:
}
};
-class BSMAttributeEntry {
- friend class ConstantPool;
- u2 _bootstrap_method_index;
- u2 _argument_count;
-
- // The argument indexes are stored right after the object, in a contiguous array.
- // [ bsmi_0 argc_0 arg_00 arg_01 ... arg_0N bsmi_1 argc_1 arg_10 ... arg_1N ... ]
- // So in order to find the argument array, jump over ourselves.
- const u2* argument_indexes() const {
- return reinterpret_cast<const u2*>(this + 1);
- }
- u2* argument_indexes() {
- return reinterpret_cast<u2*>(this + 1);
- }
- // These are overlays on top of the operands array. Do not construct.
- BSMAttributeEntry() = delete;
-
-public:
- // Offsets for SA
- enum {
- _bsmi_offset = 0,
- _argc_offset = 1,
- _argv_offset = 2
- };
-
- int bootstrap_method_index() const {
- return _bootstrap_method_index;
- }
- int argument_count() const {
- return _argument_count;
- }
- int argument_index(int n) const {
- assert(checked_cast<u2>(n) < _argument_count, "oob");
- return argument_indexes()[n];
- }
-};
-
class ConstantPool : public Metadata {
friend class VMStructs;
friend class JVMCIVMStructs;
@@ -126,7 +90,8 @@ class ConstantPool : public Metadata {
Array* _tags; // the tag array describing the constant pool's contents
ConstantPoolCache* _cache; // the cache holding interpreter runtime information
InstanceKlass* _pool_holder; // the corresponding class
- Array<u2>* _operands; // for variable-sized (InvokeDynamic) nodes, usually empty
+
+ BSMAttributeEntries _bsm_entries;
// Consider using an array of compressed klass pointers to
// save space on 64-bit platforms.
@@ -167,8 +132,6 @@ class ConstantPool : public Metadata {
u1* tag_addr_at(int cp_index) const { return tags()->adr_at(cp_index); }
- void set_operands(Array<u2>* operands) { _operands = operands; }
-
u2 flags() const { return _flags; }
void set_flags(u2 f) { _flags = f; }
@@ -208,7 +171,13 @@ class ConstantPool : public Metadata {
virtual bool is_constantPool() const { return true; }
Array* tags() const { return _tags; }
- Array<u2>* operands() const { return _operands; }
+
+ BSMAttributeEntries& bsm_entries() {
+ return _bsm_entries;
+ }
+ const BSMAttributeEntries& bsm_entries() const {
+ return _bsm_entries;
+ }
bool has_preresolution() const { return (_flags & _has_preresolution) != 0; }
void set_has_preresolution() {
@@ -556,76 +525,21 @@ class ConstantPool : public Metadata {
assert(tag_at(cp_index).has_bootstrap(), "Corrupted constant pool");
return extract_low_short_from_int(*int_at_addr(cp_index));
}
- // The first part of the operands array consists of an index into the second part.
- // Extract a 32-bit index value from the first part.
- static int operand_offset_at(Array<u2>* operands, int bsms_attribute_index) {
- int n = (bsms_attribute_index * 2);
- assert(n >= 0 && n+2 <= operands->length(), "oob");
- // The first 32-bit index points to the beginning of the second part
- // of the operands array. Make sure this index is in the first part.
- DEBUG_ONLY(int second_part = build_int_from_shorts(operands->at(0),
- operands->at(1)));
- assert(second_part == 0 || n+2 <= second_part, "oob (2)");
- int offset = build_int_from_shorts(operands->at(n+0),
- operands->at(n+1));
- // The offset itself must point into the second part of the array.
- assert(offset == 0 || (offset >= second_part && offset <= operands->length()), "oob (3)");
- return offset;
- }
- static void operand_offset_at_put(Array<u2>* operands, int bsms_attribute_index, int offset) {
- int n = bsms_attribute_index * 2;
- assert(n >= 0 && n+2 <= operands->length(), "oob");
- operands->at_put(n+0, extract_low_short_from_int(offset));
- operands->at_put(n+1, extract_high_short_from_int(offset));
- }
- static int operand_array_length(Array<u2>* operands) {
- if (operands == nullptr || operands->length() == 0) return 0;
- int second_part = operand_offset_at(operands, 0);
- return (second_part / 2);
- }
-
-#ifdef ASSERT
- // operand tuples fit together exactly, end to end
- static int operand_limit_at(Array<u2>* operands, int bsms_attribute_index) {
- int nextidx = bsms_attribute_index + 1;
- if (nextidx == operand_array_length(operands))
- return operands->length();
- else
- return operand_offset_at(operands, nextidx);
- }
-#endif //ASSERT
-
- // These functions are used in RedefineClasses for CP merge
- int operand_offset_at(int bsms_attribute_index) {
- assert(0 <= bsms_attribute_index &&
- bsms_attribute_index < operand_array_length(operands()),
- "Corrupted CP operands");
- return operand_offset_at(operands(), bsms_attribute_index);
- }
BSMAttributeEntry* bsm_attribute_entry(int bsms_attribute_index) {
- int offset = operand_offset_at(bsms_attribute_index);
- return reinterpret_cast<BSMAttributeEntry*>(operands()->adr_at(offset));
+ return _bsm_entries.entry(bsms_attribute_index);
}
- int operand_next_offset_at(int bsms_attribute_index) {
- BSMAttributeEntry* bsme = bsm_attribute_entry(bsms_attribute_index);
- u2* argv_start = bsme->argument_indexes();
- int offset = argv_start - operands()->data();
- return offset + bsme->argument_count();
- }
- // Compare a bootstrap specifier data in the operands arrays
- bool compare_operand_to(int bsms_attribute_index1, const constantPoolHandle& cp2,
- int bsms_attribute_index2);
- // Find a bootstrap specifier data in the operands array
- int find_matching_operand(int bsms_attribute_index, const constantPoolHandle& search_cp,
- int operands_cur_len);
- // Resize the operands array with delta_len and delta_size
- void resize_operands(int delta_len, int delta_size, TRAPS);
- // Extend the operands array with the length and size of the ext_cp operands
- void extend_operands(const constantPoolHandle& ext_cp, TRAPS);
- // Shrink the operands array to a smaller array with new_len length
- void shrink_operands(int new_len, TRAPS);
+ bool compare_bootstrap_entry_to(int bsms_attribute_index1, const constantPoolHandle& cp2,
+ int bsms_attribute_index2);
+ // Find a BSM entry in search_cp that matches the BSM at bsm_attribute_index.
+ // Return -1 if not found.
+ int find_matching_bsm_entry(int bsms_attribute_index, const constantPoolHandle& search_cp,
+ int offset_limit);
+ // Extend the BSM attribute storage to fit both the current data and the BSM data in ext_cp.
+ // Use the returned InsertionIterator to fill out the newly allocated space.
+ BSMAttributeEntries::InsertionIterator start_extension(const constantPoolHandle& ext_cp, TRAPS);
+ void end_extension(BSMAttributeEntries::InsertionIterator iter, TRAPS);
u2 bootstrap_method_ref_index_at(int cp_index) {
assert(tag_at(cp_index).has_bootstrap(), "Corrupted constant pool");
@@ -641,7 +555,7 @@ class ConstantPool : public Metadata {
int bsmai = bootstrap_methods_attribute_index(cp_index);
BSMAttributeEntry* bsme = bsm_attribute_entry(bsmai);
assert((uint)j < (uint)bsme->argument_count(), "oob");
- return bsm_attribute_entry(bsmai)->argument_index(j);
+ return bsm_attribute_entry(bsmai)->argument(j);
}
// The following methods (name/signature/klass_ref_at, klass_ref_at_noresolve,
@@ -848,7 +762,7 @@ private:
}
static void copy_cp_to_impl(const constantPoolHandle& from_cp, int start_cpi, int end_cpi, const constantPoolHandle& to_cp, int to_cpi, TRAPS);
static void copy_entry_to(const constantPoolHandle& from_cp, int from_cpi, const constantPoolHandle& to_cp, int to_cpi);
- static void copy_operands(const constantPoolHandle& from_cp, const constantPoolHandle& to_cp, TRAPS);
+ static void copy_bsm_entries(const constantPoolHandle& from_cp, const constantPoolHandle& to_cp, TRAPS);
int find_matching_entry(int pattern_i, const constantPoolHandle& search_cp);
int version() const { return _saved._version; }
void set_version(int version) { _saved._version = version; }
diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp
index 2d03b69ee92..24358f662bc 100644
--- a/src/hotspot/share/oops/instanceKlass.cpp
+++ b/src/hotspot/share/oops/instanceKlass.cpp
@@ -2870,7 +2870,7 @@ void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handl
}
bool InstanceKlass::can_be_verified_at_dumptime() const {
- if (AOTMetaspace::in_aot_cache(this)) {
+ if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(this)) {
// This is a class that was dumped into the base archive, so we know
// it was verified at dump time.
return true;
diff --git a/src/hotspot/share/oops/resolvedFieldEntry.cpp b/src/hotspot/share/oops/resolvedFieldEntry.cpp
index dd0a81ce0f3..83f1a6919a6 100644
--- a/src/hotspot/share/oops/resolvedFieldEntry.cpp
+++ b/src/hotspot/share/oops/resolvedFieldEntry.cpp
@@ -23,8 +23,17 @@
*/
#include "cds/archiveBuilder.hpp"
+#include "cppstdlib/type_traits.hpp"
#include "oops/resolvedFieldEntry.hpp"
+static_assert(std::is_trivially_copyable_v<ResolvedFieldEntry>);
+
+// Detect inadvertently introduced trailing padding.
+class ResolvedFieldEntryWithExtra : public ResolvedFieldEntry {
+ u1 _extra_field;
+};
+static_assert(sizeof(ResolvedFieldEntryWithExtra) > sizeof(ResolvedFieldEntry));
+
void ResolvedFieldEntry::print_on(outputStream* st) const {
st->print_cr("Field Entry:");
@@ -45,9 +54,7 @@ void ResolvedFieldEntry::print_on(outputStream* st) const {
#if INCLUDE_CDS
void ResolvedFieldEntry::remove_unshareable_info() {
- u2 saved_cpool_index = _cpool_index;
- memset(this, 0, sizeof(*this));
- _cpool_index = saved_cpool_index;
+ *this = ResolvedFieldEntry(_cpool_index);
}
void ResolvedFieldEntry::mark_and_relocate() {
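The ResolvedFieldEntryWithExtra probe added above catches trailing padding because, on ABIs that can reuse a base class's tail padding (such as the Itanium C++ ABI for non-POD bases), an extra one-byte member would be folded into that padding and the size would not grow. Asserting that the size does grow therefore fails as soon as tail padding sneaks in. A standalone sketch of the same probe with hypothetical types:

    #include <cstdint>

    struct NoTailPadding {            // 8 bytes of data, no tail padding
      uint32_t a;
      uint32_t b;
      NoTailPadding() : a(0), b(0) {}
    };

    struct NoTailPaddingProbe : NoTailPadding {
      uint8_t extra;                  // cannot be absorbed: there is no padding to reuse
    };

    // This is the property the patch asserts for ResolvedFieldEntry/ResolvedMethodEntry:
    // adding one byte must enlarge the object, i.e. the base has no trailing padding.
    static_assert(sizeof(NoTailPaddingProbe) > sizeof(NoTailPadding), "no tail padding to reuse");

    int main() { return 0; }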
diff --git a/src/hotspot/share/oops/resolvedFieldEntry.hpp b/src/hotspot/share/oops/resolvedFieldEntry.hpp
index 1df4ae8d956..77ad4815730 100644
--- a/src/hotspot/share/oops/resolvedFieldEntry.hpp
+++ b/src/hotspot/share/oops/resolvedFieldEntry.hpp
@@ -43,6 +43,9 @@
// Field bytecodes start with a constant pool index as their operand, which is then rewritten to
// a "field index", which is an index into the array of ResolvedFieldEntry.
+// The explicit paddings are necessary for generating deterministic CDS archives. They prevent
+// the C++ compiler from leaving unused gaps (padding) with unspecified, potentially random contents.
+
//class InstanceKlass;
class ResolvedFieldEntry {
friend class VMStructs;
@@ -54,17 +57,9 @@ class ResolvedFieldEntry {
u1 _tos_state; // TOS state
u1 _flags; // Flags: [0000|00|is_final|is_volatile]
u1 _get_code, _put_code; // Get and Put bytecodes of the field
-
- void copy_from(const ResolvedFieldEntry& other) {
- _field_holder = other._field_holder;
- _field_offset = other._field_offset;
- _field_index = other._field_index;
- _cpool_index = other._cpool_index;
- _tos_state = other._tos_state;
- _flags = other._flags;
- _get_code = other._get_code;
- _put_code = other._put_code;
- }
+#ifdef _LP64
+ u4 _padding;
+#endif
public:
ResolvedFieldEntry(u2 cpi) :
@@ -75,48 +70,15 @@ public:
_tos_state(0),
_flags(0),
_get_code(0),
- _put_code(0) {}
+ _put_code(0)
+#ifdef _LP64
+ , _padding(0)
+#endif
+ {}
ResolvedFieldEntry() :
ResolvedFieldEntry(0) {}
- // Notes on copy constructor, copy assignment operator, and copy_from().
- // These are necessary for generating deterministic CDS archives.
- //
- // We have some unused padding on 64-bit platforms (4 bytes at the tail end).
- //
- // When ResolvedFieldEntries in a ConstantPoolCache are allocated from the metaspace,
- // their entire content (including the padding) is filled with zeros. They are
- // then initialized with initialize_resolved_entries_array() in cpCache.cpp from a
- // GrowableArray.
- //
- // The GrowableArray is initialized in rewriter.cpp, using ResolvedFieldEntries that
- // are originally allocated from the C++ stack. Functions like GrowableArray::expand_to()
- // will also allocate ResolvedFieldEntries from the stack. These may have random bits
- // in the padding as the C++ compiler is allowed to leave the padding in uninitialized
- // states.
- //
- // If we use the default copy constructor and/or default copy assignment operator,
- // the random padding will be copied into the GrowableArray, from there
- // to the ConstantPoolCache, and eventually to the CDS archive. As a result, the
- // CDS archive will contain random bits, causing failures in
- // test/hotspot/jtreg/runtime/cds/DeterministicDump.java (usually on Windows).
- //
- // By using copy_from(), we can prevent the random padding from being copied,
- // ensuring that the ResolvedFieldEntries in a ConstantPoolCache (and thus the
- // CDS archive) will have all zeros in the padding.
-
- // Copy constructor
- ResolvedFieldEntry(const ResolvedFieldEntry& other) {
- copy_from(other);
- }
-
- // Copy assignment operator
- ResolvedFieldEntry& operator=(const ResolvedFieldEntry& other) {
- copy_from(other);
- return *this;
- }
-
// Bit shift to get flags
// Note: Only two flags exists at the moment but more could be added
enum {
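With the explicit zero-initialized _padding member, ResolvedFieldEntry no longer needs a hand-written copy path: the default (trivial) copy reproduces a fully defined object representation, which is what keeps the archived bytes deterministic. A minimal standalone sketch of that idea, where Entry and its fields are hypothetical stand-ins for the resolved-entry layout on LP64:

    #include <cstdint>
    #include <cstring>
    #include <type_traits>

    struct Entry {
      uint64_t ptr;
      uint16_t idx;
      uint8_t  flags;
      uint8_t  code;
      uint32_t padding;   // would otherwise be unspecified tail padding on LP64

      explicit Entry(uint16_t i) : ptr(0), idx(i), flags(0), code(0), padding(0) {}
    };

    static_assert(std::is_trivially_copyable_v<Entry>, "plain assignment/memcpy is enough");

    int main() {
      // Every byte is explicitly initialized, so two equal entries have identical
      // object representations and a byte-for-byte archive dump is stable.
      Entry a(42), b(42);
      return std::memcmp(&a, &b, sizeof(Entry)) == 0 ? 0 : 1;
    }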
diff --git a/src/hotspot/share/oops/resolvedMethodEntry.cpp b/src/hotspot/share/oops/resolvedMethodEntry.cpp
index 2dc533dbee0..bb96ca86012 100644
--- a/src/hotspot/share/oops/resolvedMethodEntry.cpp
+++ b/src/hotspot/share/oops/resolvedMethodEntry.cpp
@@ -23,9 +23,18 @@
*/
#include "cds/archiveBuilder.hpp"
+#include "cppstdlib/type_traits.hpp"
#include "oops/method.hpp"
#include "oops/resolvedMethodEntry.hpp"
+static_assert(std::is_trivially_copyable_v<ResolvedMethodEntry>);
+
+// Detect inadvertently introduced trailing padding.
+class ResolvedMethodEntryWithExtra : public ResolvedMethodEntry {
+ u1 _extra_field;
+};
+static_assert(sizeof(ResolvedMethodEntryWithExtra) > sizeof(ResolvedMethodEntry));
+
bool ResolvedMethodEntry::check_no_old_or_obsolete_entry() {
// return false if m refers to a non-deleted old or obsolete method
if (_method != nullptr) {
@@ -39,14 +48,10 @@ bool ResolvedMethodEntry::check_no_old_or_obsolete_entry() {
void ResolvedMethodEntry::reset_entry() {
if (has_resolved_references_index()) {
u2 saved_resolved_references_index = _entry_specific._resolved_references_index;
- u2 saved_cpool_index = _cpool_index;
- memset(this, 0, sizeof(*this));
+ *this = ResolvedMethodEntry(_cpool_index);
set_resolved_references_index(saved_resolved_references_index);
- _cpool_index = saved_cpool_index;
} else {
- u2 saved_cpool_index = _cpool_index;
- memset(this, 0, sizeof(*this));
- _cpool_index = saved_cpool_index;
+ *this = ResolvedMethodEntry(_cpool_index);
}
}
diff --git a/src/hotspot/share/oops/resolvedMethodEntry.hpp b/src/hotspot/share/oops/resolvedMethodEntry.hpp
index c95efb751e9..802cf252a6d 100644
--- a/src/hotspot/share/oops/resolvedMethodEntry.hpp
+++ b/src/hotspot/share/oops/resolvedMethodEntry.hpp
@@ -61,6 +61,9 @@
// pool entry and thus the same resolved method entry.
// The is_vfinal flag indicates method pointer for a final method or an index.
+// The explicit paddings are necessary for generating deterministic CDS archives. They prevent
+// the C++ compiler from leaving unused gaps (padding) with unspecified, potentially random contents.
+
class InstanceKlass;
class ResolvedMethodEntry {
friend class VMStructs;
@@ -70,6 +73,7 @@ class ResolvedMethodEntry {
InstanceKlass* _interface_klass; // for interface and static
u2 _resolved_references_index; // Index of resolved references array that holds the appendix oop for invokehandle
u2 _table_index; // vtable/itable index for virtual and interface calls
+ // No explicit padding field is needed here, as the enclosing constructor zeroes the whole union.
} _entry_specific;
u2 _cpool_index; // Constant pool index
@@ -80,51 +84,36 @@ class ResolvedMethodEntry {
#ifdef ASSERT
bool _has_interface_klass;
bool _has_table_index;
+# ifdef _LP64
+ u2 _padding1;
+ u4 _padding2;
+# else
+ u1 _padding1;
+ u1 _padding2;
+# endif
#endif
- // See comments in resolvedFieldEntry.hpp about copy_from and padding.
- // We have unused padding on debug builds.
- void copy_from(const ResolvedMethodEntry& other) {
- _method = other._method;
- _entry_specific = other._entry_specific;
- _cpool_index = other._cpool_index;
- _number_of_parameters = other._number_of_parameters;
- _tos_state = other._tos_state;
- _flags = other._flags;
- _bytecode1 = other._bytecode1;
- _bytecode2 = other._bytecode2;
-#ifdef ASSERT
- _has_interface_klass = other._has_interface_klass;
- _has_table_index = other._has_table_index;
-#endif
- }
-
// Constructors
public:
ResolvedMethodEntry(u2 cpi) :
_method(nullptr),
+ _entry_specific{nullptr},
_cpool_index(cpi),
_number_of_parameters(0),
_tos_state(0),
_flags(0),
_bytecode1(0),
- _bytecode2(0) {
- _entry_specific._interface_klass = nullptr;
- DEBUG_ONLY(_has_interface_klass = false;)
- DEBUG_ONLY(_has_table_index = false;)
- }
+ _bytecode2(0)
+#ifdef ASSERT
+ , _has_interface_klass(false),
+ _has_table_index(false),
+ _padding1(0),
+ _padding2(0)
+#endif
+ {}
ResolvedMethodEntry() :
ResolvedMethodEntry(0) {}
- ResolvedMethodEntry(const ResolvedMethodEntry& other) {
- copy_from(other);
- }
-
- ResolvedMethodEntry& operator=(const ResolvedMethodEntry& other) {
- copy_from(other);
- return *this;
- }
-
// Bit shift to get flags
enum {
diff --git a/src/hotspot/share/opto/c2_globals.hpp b/src/hotspot/share/opto/c2_globals.hpp
index 0a4f231c49b..2b2b4db47b1 100644
--- a/src/hotspot/share/opto/c2_globals.hpp
+++ b/src/hotspot/share/opto/c2_globals.hpp
@@ -428,7 +428,7 @@
"0=print nothing except PhasePrintLevel directives, " \
"6=all details printed. " \
"Level of detail of printouts can be set on a per-method level " \
- "as well by using CompileCommand=PrintPhaseLevel.") \
+ "as well by using CompileCommand=PhasePrintLevel.") \
range(-1, 6) \
\
develop(bool, PrintIdealGraph, false, \
diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp
index 6babc13e1b3..89b5e36b120 100644
--- a/src/hotspot/share/opto/compile.cpp
+++ b/src/hotspot/share/opto/compile.cpp
@@ -5233,7 +5233,7 @@ void Compile::end_method() {
#ifndef PRODUCT
bool Compile::should_print_phase(const int level) const {
- return PrintPhaseLevel > 0 && directive()->PhasePrintLevelOption >= level &&
+ return PrintPhaseLevel >= 0 && directive()->PhasePrintLevelOption >= level &&
_method != nullptr; // Do not print phases for stubs.
}
diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp
index 754b0fa8d1c..91bb743618b 100644
--- a/src/hotspot/share/opto/doCall.cpp
+++ b/src/hotspot/share/opto/doCall.cpp
@@ -97,10 +97,9 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
ciMethod* orig_callee = caller->get_method_at_bci(bci);
- const bool is_virtual_or_interface = (bytecode == Bytecodes::_invokevirtual) ||
- (bytecode == Bytecodes::_invokeinterface) ||
- (orig_callee->intrinsic_id() == vmIntrinsics::_linkToVirtual) ||
- (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface);
+ const bool is_virtual = (bytecode == Bytecodes::_invokevirtual) || (orig_callee->intrinsic_id() == vmIntrinsics::_linkToVirtual);
+ const bool is_interface = (bytecode == Bytecodes::_invokeinterface) || (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface);
+ const bool is_virtual_or_interface = is_virtual || is_interface;
const bool check_access = !orig_callee->is_method_handle_intrinsic(); // method handle intrinsics don't perform access checks
@@ -339,17 +338,25 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// number of implementors for decl_interface is 0 or 1. If
// it's 0 then no class implements decl_interface and there's
// no point in inlining.
- if (call_does_dispatch && bytecode == Bytecodes::_invokeinterface) {
- ciInstanceKlass* declared_interface =
- caller->get_declared_method_holder_at_bci(bci)->as_instance_klass();
+ if (call_does_dispatch && is_interface) {
+ ciInstanceKlass* declared_interface = nullptr;
+ if (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface) {
+ // MemberName doesn't keep information about resolved interface class (REFC) once
+ // resolution is over, but resolved method holder (DECC) can be used as a
+ // conservative approximation.
+ declared_interface = callee->holder();
+ } else {
+ assert(!orig_callee->is_method_handle_intrinsic(), "not allowed");
+ declared_interface = caller->get_declared_method_holder_at_bci(bci)->as_instance_klass();
+ }
+ assert(declared_interface->is_interface(), "required");
ciInstanceKlass* singleton = declared_interface->unique_implementor();
if (singleton != nullptr) {
assert(singleton != declared_interface, "not a unique implementor");
- assert(check_access, "required");
ciMethod* cha_monomorphic_target =
- callee->find_monomorphic_target(caller->holder(), declared_interface, singleton);
+ callee->find_monomorphic_target(caller->holder(), declared_interface, singleton, check_access);
if (cha_monomorphic_target != nullptr &&
cha_monomorphic_target->holder() != env()->Object_klass()) { // subtype check against Object is useless
@@ -372,7 +379,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
}
}
}
- } // call_does_dispatch && bytecode == Bytecodes::_invokeinterface
+ } // call_does_dispatch && is_interface
// Nothing claimed the intrinsic, we go with straight-forward inlining
// for already discovered intrinsic.
diff --git a/src/hotspot/share/opto/loopTransform.cpp b/src/hotspot/share/opto/loopTransform.cpp
index 31d1cbe0443..5c65103677b 100644
--- a/src/hotspot/share/opto/loopTransform.cpp
+++ b/src/hotspot/share/opto/loopTransform.cpp
@@ -1411,7 +1411,6 @@ void PhaseIdealLoop::insert_pre_post_loops(IdealLoopTree *loop, Node_List &old_n
C->print_method(PHASE_BEFORE_PRE_MAIN_POST, 4, main_head);
- Node *pre_header= main_head->in(LoopNode::EntryControl);
Node *init = main_head->init_trip();
Node *incr = main_end ->incr();
Node *limit = main_end ->limit();
diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp
index dfff7ef96a5..03cc5cbcff6 100644
--- a/src/hotspot/share/opto/loopnode.cpp
+++ b/src/hotspot/share/opto/loopnode.cpp
@@ -1162,13 +1162,16 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
class CloneShortLoopPredicateVisitor : public PredicateVisitor {
ClonePredicateToTargetLoop _clone_predicate_to_loop;
PhaseIdealLoop* const _phase;
+ Node* const _new_init;
public:
CloneShortLoopPredicateVisitor(LoopNode* target_loop_head,
+ Node* new_init,
const NodeInSingleLoopBody &node_in_loop_body,
PhaseIdealLoop* phase)
: _clone_predicate_to_loop(target_loop_head, node_in_loop_body, phase),
- _phase(phase) {
+ _phase(phase),
+ _new_init(new_init) {
}
NONCOPYABLE(CloneShortLoopPredicateVisitor);
@@ -1180,11 +1183,32 @@ public:
}
void visit(const TemplateAssertionPredicate& template_assertion_predicate) override {
- _clone_predicate_to_loop.clone_template_assertion_predicate(template_assertion_predicate);
+ _clone_predicate_to_loop.clone_template_assertion_predicate_and_replace_init(template_assertion_predicate, _new_init);
template_assertion_predicate.kill(_phase->igvn());
}
};
+// For an int counted loop, try_make_short_running_loop() transforms the loop from:
+// for (int i = start; i < stop; i += stride) { ... }
+// to
+// for (int i = 0; i < stop - start; i += stride) { ... }
+// Template Assertion Predicates added so far were created with an init value of start. They need to be updated with the new
+// init value of 0 (otherwise when a template assertion predicate is turned into an initialized assertion predicate, it
+// performs an incorrect check):
+//                                        zero
+//      init                                |
+//       |              ===>         OpaqueLoopInit   init
+//  OpaqueLoopInit                            \        /
+//                                              AddI
+//
+Node* PhaseIdealLoop::new_assertion_predicate_opaque_init(Node* entry_control, Node* init, Node* int_zero) {
+ OpaqueLoopInitNode* new_opaque_init = new OpaqueLoopInitNode(C, int_zero);
+ register_new_node(new_opaque_init, entry_control);
+ Node* new_init = new AddINode(new_opaque_init, init);
+ register_new_node(new_init, entry_control);
+ return new_init;
+}
+
// If the loop is either statically known to run for a small enough number of iterations or if profile data indicates
// that, we don't want an outer loop because the overhead of having an outer loop whose backedge is never taken, has a
// measurable cost. Furthermore, creating the loop nest usually causes one iteration of the loop to be peeled so
@@ -1236,6 +1260,7 @@ bool PhaseIdealLoop::try_make_short_running_loop(IdealLoopTree* loop, jint strid
}
register_new_node(new_limit, entry_control);
+ Node* int_zero = intcon(0);
PhiNode* phi = head->phi()->as_Phi();
if (profile_short_running_loop) {
// Add a Short Running Long Loop Predicate. It's the first predicate in the predicate chain before entering a loop
@@ -1261,9 +1286,11 @@ bool PhaseIdealLoop::try_make_short_running_loop(IdealLoopTree* loop, jint strid
if (!short_running_long_loop_predicate_block->has_parse_predicate()) { // already trapped
return false;
}
+ Node* new_init = new_assertion_predicate_opaque_init(entry_control, init, int_zero);
+
PredicateIterator predicate_iterator(entry_control);
NodeInSingleLoopBody node_in_short_loop_body(this, loop);
- CloneShortLoopPredicateVisitor clone_short_loop_predicates_visitor(head, node_in_short_loop_body, this);
+ CloneShortLoopPredicateVisitor clone_short_loop_predicates_visitor(head, new_init, node_in_short_loop_body, this);
predicate_iterator.for_each(clone_short_loop_predicates_visitor);
entry_control = head->skip_strip_mined()->in(LoopNode::EntryControl);
@@ -1311,6 +1338,10 @@ bool PhaseIdealLoop::try_make_short_running_loop(IdealLoopTree* loop, jint strid
register_new_node(new_limit, predicates.entry());
} else {
assert(bt == T_INT && known_short_running_loop, "only CountedLoop statically known to be short running");
+ PredicateIterator predicate_iterator(entry_control);
+ Node* new_init = new_assertion_predicate_opaque_init(entry_control, init, int_zero);
+ UpdateInitForTemplateAssertionPredicates update_init_for_template_assertion_predicates(new_init, this);
+ predicate_iterator.for_each(update_init_for_template_assertion_predicates);
}
IfNode* exit_test = head->loopexit();
@@ -1320,7 +1351,6 @@ bool PhaseIdealLoop::try_make_short_running_loop(IdealLoopTree* loop, jint strid
register_new_node(new_limit, entry_control);
}
- Node* int_zero = intcon(0);
if (stride_con < 0) {
new_limit = new SubINode(int_zero, new_limit);
register_new_node(new_limit, entry_control);
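The comment on new_assertion_predicate_opaque_init() above describes re-basing the counted loop at zero. The sketch below illustrates, with plain integers standing in for the OpaqueLoopInit/AddI nodes, that the re-based loop visits the same indices as long as every use of the old induction variable is rebuilt as start + j, which is exactly why the template init has to be rewritten:

    #include <cassert>

    int main() {
      const int start = 5, stop = 17, stride = 3;

      int i = start;                              // old induction variable
      for (int j = 0; j < stop - start; j += stride) {
        assert(start + j == i);                   // AddI(OpaqueLoopInit(0) -> j, start)
        i += stride;
      }

      // A Template Assertion Predicate built with init == start must therefore be
      // re-anchored at AddI(OpaqueLoopInit(zero), start); keeping the old init would
      // make the later Initialized Assertion Predicate check the wrong iteration.
      return 0;
    }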
diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp
index 1e34331f213..3b97d76773f 100644
--- a/src/hotspot/share/opto/loopnode.hpp
+++ b/src/hotspot/share/opto/loopnode.hpp
@@ -1969,6 +1969,8 @@ public:
Node* ensure_node_and_inputs_are_above_pre_end(CountedLoopEndNode* pre_end, Node* node);
+ Node* new_assertion_predicate_opaque_init(Node* entry_control, Node* init, Node* int_zero);
+
bool try_make_short_running_loop(IdealLoopTree* loop, jint stride_con, const Node_List& range_checks, const uint iters_limit);
ConINode* intcon(jint i);
diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp
index 93ded36363e..2452677caf3 100644
--- a/src/hotspot/share/opto/node.cpp
+++ b/src/hotspot/share/opto/node.cpp
@@ -1209,9 +1209,12 @@ bool Node::has_special_unique_user() const {
if (this->is_Store()) {
// Condition for back-to-back stores folding.
return n->Opcode() == op && n->in(MemNode::Memory) == this;
- } else if (this->is_Load() || this->is_DecodeN() || this->is_Phi()) {
+ } else if ((this->is_Load() || this->is_DecodeN() || this->is_Phi()) && n->Opcode() == Op_MemBarAcquire) {
// Condition for removing an unused LoadNode or DecodeNNode from the MemBarAcquire precedence input
- return n->Opcode() == Op_MemBarAcquire;
+ return true;
+ } else if (this->is_Load() && n->is_Move()) {
+ // Condition for MoveX2Y (LoadX mem) => LoadY mem
+ return true;
} else if (op == Op_AddL) {
// Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
return n->Opcode() == Op_ConvL2I && n->in(1) == this;
diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp
index 6067bcbac8d..ec80fb6a0ab 100644
--- a/src/hotspot/share/opto/node.hpp
+++ b/src/hotspot/share/opto/node.hpp
@@ -2176,7 +2176,10 @@ class BFSActions : public StackObj {
virtual bool is_target_node(Node* node) const = 0;
// Defines an action that should be taken when we visit a target node in the BFS traversal.
- virtual void target_node_action(Node* target_node) = 0;
+ // To give more freedom, we pass the direct child of the target node together with the input
+ // index i such that child->in(i) == target_node. This also allows replacing the target node
+ // itself instead of only updating its inputs.
+ virtual void target_node_action(Node* child, uint i) = 0;
};
// Class to perform a BFS traversal on the data nodes from a given start node. The provided BFSActions guide which
@@ -2198,7 +2201,7 @@ class DataNodeBFS : public StackObj {
Node* input = next->in(j);
if (_bfs_actions.is_target_node(input)) {
assert(_bfs_actions.should_visit(input), "must also pass node filter");
- _bfs_actions.target_node_action(input);
+ _bfs_actions.target_node_action(next, j);
} else if (_bfs_actions.should_visit(input)) {
_nodes_to_visit.push(input);
}
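The widened target_node_action(Node* child, uint i) callback above hands the visitor the using node together with the input index, so an action can either keep editing the target's own inputs or splice in a replacement via child->set_req(i, ...). A minimal sketch of that shape, where the tiny Node/Actions types are hypothetical stand-ins rather than the C2 classes:

    #include <cstddef>
    #include <vector>

    struct Node {
      int op;
      std::vector<Node*> in;
      Node(int o, std::vector<Node*> ins = {}) : op(o), in(std::move(ins)) {}
      Node* input(std::size_t i) const { return in[i]; }
      void set_req(std::size_t i, Node* n) { in[i] = n; }
    };

    struct Actions {
      virtual ~Actions() = default;
      virtual bool is_target(Node* n) const = 0;
      // Called with child->input(i) being the target node.
      virtual void target_node_action(Node* child, std::size_t i) = 0;
    };

    struct ReplaceTarget : Actions {
      int target_op;
      Node* replacement;
      ReplaceTarget(int op, Node* r) : target_op(op), replacement(r) {}
      bool is_target(Node* n) const override { return n->op == target_op; }
      void target_node_action(Node* child, std::size_t i) override {
        child->set_req(i, replacement);   // replace the target itself, not just its inputs
      }
    };

    int main() {
      Node init(/*op=*/1);
      Node add(/*op=*/2, {&init});
      Node new_init(/*op=*/3);

      ReplaceTarget action(/*target_op=*/1, &new_init);
      // A real traversal would BFS over inputs; one step is enough for the illustration.
      for (std::size_t i = 0; i < add.in.size(); i++) {
        if (action.is_target(add.input(i))) {
          action.target_node_action(&add, i);
        }
      }
      return add.input(0) == &new_init ? 0 : 1;
    }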
diff --git a/src/hotspot/share/opto/output.cpp b/src/hotspot/share/opto/output.cpp
index 84c01c68e38..136fc8ac864 100644
--- a/src/hotspot/share/opto/output.cpp
+++ b/src/hotspot/share/opto/output.cpp
@@ -1347,20 +1347,18 @@ CodeBuffer* PhaseOutput::init_buffer() {
// nmethod and CodeBuffer count stubs & constants as part of method's code.
// class HandlerImpl is platform-specific and defined in the *.ad files.
- int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
int deopt_handler_req = HandlerImpl::size_deopt_handler() + MAX_stubs_size; // add marginal slop for handler
stub_req += MAX_stubs_size; // ensure per-stub margin
code_req += MAX_inst_size; // ensure per-instruction margin
if (StressCodeBuffers)
- code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
+ code_req = const_req = stub_req = deopt_handler_req = 0x10; // force expansion
int total_req =
const_req +
code_req +
pad_req +
stub_req +
- exception_handler_req +
deopt_handler_req; // deopt handler
CodeBuffer* cb = code_buffer();
@@ -1789,8 +1787,6 @@ void PhaseOutput::fill_buffer(C2_MacroAssembler* masm, uint* blk_starts) {
// Only java methods have exception handlers and deopt handlers
// class HandlerImpl is platform-specific and defined in the *.ad files.
if (C->method()) {
- // Emit the exception handler code.
- _code_offsets.set_value(CodeOffsets::Exceptions, HandlerImpl::emit_exception_handler(masm));
if (C->failing()) {
return; // CodeBuffer::expand failed
}
diff --git a/src/hotspot/share/opto/phaseX.cpp b/src/hotspot/share/opto/phaseX.cpp
index 1fe911aa7ac..4a0933b89f2 100644
--- a/src/hotspot/share/opto/phaseX.cpp
+++ b/src/hotspot/share/opto/phaseX.cpp
@@ -1132,7 +1132,7 @@ void PhaseIterGVN::verify_empty_worklist(Node* node) {
// (1) Integer "widen" changes, but the range is the same.
// (2) LoadNode performs deep traversals. Load is not notified for changes far away.
// (3) CmpPNode performs deep traversals if it compares oopptr. CmpP is not notified for changes far away.
-bool PhaseIterGVN::verify_Value_for(Node* n) {
+bool PhaseIterGVN::verify_Value_for(Node* n, bool strict) {
// If we assert inside type(n), because the type is still a null, then maybe
// the node never went through gvn.transform, which would be a bug.
const Type* told = type(n);
@@ -1152,7 +1152,7 @@ bool PhaseIterGVN::verify_Value_for(Node* n) {
}
// Exception (2)
// LoadNode performs deep traversals. Load is not notified for changes far away.
- if (n->is_Load() && !told->singleton()) {
+ if (!strict && n->is_Load() && !told->singleton()) {
// MemNode::can_see_stored_value looks up through many memory nodes,
// which means we would need to notify modifications from far up in
// the inputs all the way down to the LoadNode. We don't do that.
@@ -1160,7 +1160,7 @@ bool PhaseIterGVN::verify_Value_for(Node* n) {
}
// Exception (3)
// CmpPNode performs deep traversals if it compares oopptr. CmpP is not notified for changes far away.
- if (n->Opcode() == Op_CmpP && type(n->in(1))->isa_oopptr() && type(n->in(2))->isa_oopptr()) {
+ if (!strict && n->Opcode() == Op_CmpP && type(n->in(1))->isa_oopptr() && type(n->in(2))->isa_oopptr()) {
// SubNode::Value
// CmpPNode::sub
// MemNode::detect_ptr_independence
@@ -2799,6 +2799,7 @@ void PhaseCCP::analyze() {
// Compile is over. The local arena gets de-allocated at the end of its scope.
ResourceArea local_arena(mtCompiler);
Unique_Node_List worklist(&local_arena);
+ Unique_Node_List worklist_revisit(&local_arena);
DEBUG_ONLY(Unique_Node_List worklist_verify(&local_arena);)
// Push root onto worklist
@@ -2807,45 +2808,86 @@ void PhaseCCP::analyze() {
assert(_root_and_safepoints.size() == 0, "must be empty (unused)");
_root_and_safepoints.push(C->root());
- // Pull from worklist; compute new value; push changes out.
- // This loop is the meat of CCP.
+ // This is the meat of CCP: pull from worklist; compute new value; push changes out.
+
+ // Do the first round. Since all initial types are TOP, this will visit all alive nodes.
while (worklist.size() != 0) {
Node* n = fetch_next_node(worklist);
DEBUG_ONLY(worklist_verify.push(n);)
+ if (needs_revisit(n)) {
+ worklist_revisit.push(n);
+ }
if (n->is_SafePoint()) {
// Make sure safepoints are processed by PhaseCCP::transform even if they are
// not reachable from the bottom. Otherwise, infinite loops would be removed.
_root_and_safepoints.push(n);
}
- const Type* new_type = n->Value(this);
- if (new_type != type(n)) {
- DEBUG_ONLY(verify_type(n, new_type, type(n));)
- dump_type_and_node(n, new_type);
- set_type(n, new_type);
- push_child_nodes_to_worklist(worklist, n);
- }
- if (KillPathsReachableByDeadTypeNode && n->is_Type() && new_type == Type::TOP) {
- // Keep track of Type nodes to kill CFG paths that use Type
- // nodes that become dead.
- _maybe_top_type_nodes.push(n);
- }
+ analyze_step(worklist, n);
}
+
+ // Run more rounds to catch updates from far away in the graph.
+ // At the end of each round, revisit the nodes that might still be able to refine their types.
+ // If any of them change, the worklist is refilled and another round is started.
+ do {
+ while (worklist.size() != 0) {
+ Node* n = fetch_next_node(worklist);
+ analyze_step(worklist, n);
+ }
+ for (uint t = 0; t < worklist_revisit.size(); t++) {
+ Node* n = worklist_revisit.at(t);
+ analyze_step(worklist, n);
+ }
+ } while (worklist.size() != 0);
+
DEBUG_ONLY(verify_analyze(worklist_verify);)
}
+void PhaseCCP::analyze_step(Unique_Node_List& worklist, Node* n) {
+ const Type* new_type = n->Value(this);
+ if (new_type != type(n)) {
+ DEBUG_ONLY(verify_type(n, new_type, type(n));)
+ dump_type_and_node(n, new_type);
+ set_type(n, new_type);
+ push_child_nodes_to_worklist(worklist, n);
+ }
+ if (KillPathsReachableByDeadTypeNode && n->is_Type() && new_type == Type::TOP) {
+ // Keep track of Type nodes to kill CFG paths that use Type
+ // nodes that become dead.
+ _maybe_top_type_nodes.push(n);
+ }
+}
+
+// Some nodes can refine their types due to a type change somewhere deep
+// in the graph. We need to revisit them before claiming convergence.
+// Add nodes here if a particular *Node::Value does deep graph traversals
+// not handled by PhaseCCP::push_more_uses().
+bool PhaseCCP::needs_revisit(Node* n) const {
+ // LoadNode performs deep traversals. Load is not notified for changes far away.
+ if (n->is_Load()) {
+ return true;
+ }
+ // CmpPNode performs deep traversals if it compares oopptr. CmpP is not notified for changes far away.
+ if (n->Opcode() == Op_CmpP && type(n->in(1))->isa_oopptr() && type(n->in(2))->isa_oopptr()) {
+ return true;
+ }
+ return false;
+}
+
#ifdef ASSERT
// For every node n on verify list, check if type(n) == n->Value()
-// We have a list of exceptions, see comments in verify_Value_for.
+// Note that for CCP, non-convergence can lead to unsound analysis and mis-compilation.
+// Therefore, we verify Value convergence strictly.
void PhaseCCP::verify_analyze(Unique_Node_List& worklist_verify) {
bool failure = false;
while (worklist_verify.size()) {
Node* n = worklist_verify.pop();
- failure |= verify_Value_for(n);
+ failure |= verify_Value_for(n, /* strict = */ true);
}
// If we get this assert, check why the reported nodes were not processed again in CCP.
// We should either make sure that these nodes are properly added back to the CCP worklist
- // in PhaseCCP::push_child_nodes_to_worklist() to update their type or add an exception
- // in the verification code above if that is not possible for some reason (like Load nodes).
+ // in PhaseCCP::push_child_nodes_to_worklist() to update their type in the same round,
+ // or that they are covered by PhaseCCP::needs_revisit() so that the analysis revisits
+ // them at the end of the round.
assert(!failure, "PhaseCCP not at fixpoint: analysis result may be unsound.");
}
#endif
diff --git a/src/hotspot/share/opto/phaseX.hpp b/src/hotspot/share/opto/phaseX.hpp
index 083e77bf6d9..473231e6af5 100644
--- a/src/hotspot/share/opto/phaseX.hpp
+++ b/src/hotspot/share/opto/phaseX.hpp
@@ -490,7 +490,7 @@ public:
void optimize();
#ifdef ASSERT
void verify_optimize();
- bool verify_Value_for(Node* n);
+ bool verify_Value_for(Node* n, bool strict = false);
bool verify_Ideal_for(Node* n, bool can_reshape);
bool verify_Identity_for(Node* n);
void verify_empty_worklist(Node* n);
@@ -659,6 +659,8 @@ class PhaseCCP : public PhaseIterGVN {
// Worklist algorithm identifies constants
void analyze();
+ void analyze_step(Unique_Node_List& worklist, Node* n);
+ bool needs_revisit(Node* n) const;
#ifdef ASSERT
void verify_type(Node* n, const Type* tnew, const Type* told);
// For every node n on verify list, check if type(n) == n->Value()
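The reworked PhaseCCP::analyze() above runs one full pass and then revisit rounds for nodes whose Value() looks far beyond their direct inputs. The sketch below shows that scheme in isolation; Analysis, eval, users and needs_revisit are hypothetical stand-ins for Node::Value(), the out-edges and the new needs_revisit() filter, not the HotSpot implementation:

    #include <cstddef>
    #include <functional>
    #include <set>
    #include <vector>

    struct Analysis {
      std::vector<int> type;                          // lattice value per node id
      std::function<int(int)> eval;                   // Node::Value() stand-in
      std::function<std::vector<int>(int)> users;     // out-edges stand-in
      std::function<bool(int)> needs_revisit;         // nodes with deep dependencies

      void run(std::vector<int> worklist) {
        std::set<int> revisit;
        auto step = [&](int n) {
          int new_type = eval(n);
          if (new_type != type[n]) {
            type[n] = new_type;
            for (int u : users(n)) worklist.push_back(u);   // notify local uses only
          }
        };
        // First round: visit everything once, remember the deep-dependency nodes.
        for (std::size_t k = 0; k < worklist.size(); k++) {
          if (needs_revisit(worklist[k])) revisit.insert(worklist[k]);
          step(worklist[k]);
        }
        worklist.clear();
        // Further rounds: re-evaluate the revisit set; stop once a round changes nothing.
        do {
          for (std::size_t k = 0; k < worklist.size(); k++) step(worklist[k]);
          worklist.clear();
          for (int n : revisit) step(n);
        } while (!worklist.empty());
      }
    };

    int main() {
      // Node 1 reads node 0 through an edge; node 2 depends on node 0 without an edge,
      // so only the revisit rounds pick up its final value.
      Analysis a;
      a.type = {0, 0, 0};
      a.eval = [&](int n) { return n == 0 ? 7 : a.type[0]; };
      a.users = [](int n) { return n == 0 ? std::vector<int>{1} : std::vector<int>{}; };
      a.needs_revisit = [](int n) { return n == 2; };
      a.run({2, 0, 1});                 // node 2 is visited before node 0 changes
      return (a.type[2] == 7) ? 0 : 1;  // still converges thanks to the revisit round
    }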
diff --git a/src/hotspot/share/opto/predicates.cpp b/src/hotspot/share/opto/predicates.cpp
index 208bd6583c5..2489ff563a9 100644
--- a/src/hotspot/share/opto/predicates.cpp
+++ b/src/hotspot/share/opto/predicates.cpp
@@ -198,12 +198,21 @@ TemplateAssertionPredicate TemplateAssertionPredicate::clone_and_replace_opaque_
Node* new_opaque_input,
CountedLoopNode* new_loop_node,
PhaseIdealLoop* phase) const {
- DEBUG_ONLY(verify();)
OpaqueLoopInitNode* new_opaque_init = new OpaqueLoopInitNode(phase->C, new_opaque_input);
phase->register_new_node(new_opaque_init, new_control);
+ return clone_and_replace_init(new_control, new_opaque_init, new_loop_node, phase);
+}
+
+// Clone this Template Assertion Predicate and replace the old OpaqueLoopInit node with 'new_init'.
+// Note: 'new_init' could also have the 'OpaqueLoopInit' as a parent node further up.
+TemplateAssertionPredicate TemplateAssertionPredicate::clone_and_replace_init(Node* new_control,
+ Node* new_init,
+ CountedLoopNode* new_loop_node,
+ PhaseIdealLoop* phase) const {
+ DEBUG_ONLY(verify();)
TemplateAssertionExpression template_assertion_expression(opaque_node(), phase);
OpaqueTemplateAssertionPredicateNode* new_opaque_node =
- template_assertion_expression.clone_and_replace_init(new_control, new_opaque_init, new_loop_node);
+ template_assertion_expression.clone_and_replace_init(new_control, new_init, new_loop_node);
AssertionPredicateIfCreator assertion_predicate_if_creator(phase);
IfTrueNode* success_proj = assertion_predicate_if_creator.create_for_template(new_control, _if_node->Opcode(),
new_opaque_node,
@@ -238,8 +247,40 @@ class ReplaceOpaqueStrideInput : public BFSActions {
return node->is_OpaqueLoopStride();
}
- void target_node_action(Node* target_node) override {
- _igvn.replace_input_of(target_node, 1, _new_opaque_stride_input);
+ void target_node_action(Node* child, uint i) override {
+ assert(child->in(i)->is_OpaqueLoopStride(), "must be OpaqueLoopStride");
+ _igvn.replace_input_of(child->in(i), 1, _new_opaque_stride_input);
+ }
+};
+
+// This class is used to replace the OpaqueLoopInitNode with a new node while leaving the other nodes
+// unchanged.
+class ReplaceOpaqueInitNode : public BFSActions {
+ Node* _new_opaque_init_node;
+ PhaseIterGVN& _igvn;
+
+ public:
+ ReplaceOpaqueInitNode(Node* new_opaque_init_node, PhaseIterGVN& igvn)
+ : _new_opaque_init_node(new_opaque_init_node),
+ _igvn(igvn) {}
+ NONCOPYABLE(ReplaceOpaqueInitNode);
+
+ void replace_for(OpaqueTemplateAssertionPredicateNode* opaque_node) {
+ DataNodeBFS bfs(*this);
+ bfs.run(opaque_node);
+ }
+
+ bool should_visit(Node* node) const override {
+ return TemplateAssertionExpressionNode::is_maybe_in_expression(node);
+ }
+
+ bool is_target_node(Node* node) const override {
+ return node->is_OpaqueLoopInit();
+ }
+
+ void target_node_action(Node* child, uint i) override {
+ assert(child->in(i)->is_OpaqueLoopInit(), "must be old OpaqueLoopInit");
+ _igvn.replace_input_of(child, i, _new_opaque_init_node);
}
};
@@ -250,6 +291,13 @@ void TemplateAssertionPredicate::replace_opaque_stride_input(Node* new_stride, P
replace_opaque_stride_input.replace_for(opaque_node());
}
+// Replace the OpaqueLoopInitNode with 'new_init' and leave the other nodes unchanged.
+void TemplateAssertionPredicate::replace_opaque_init_node(Node* new_init, PhaseIterGVN& igvn) const {
+ DEBUG_ONLY(verify();)
+ ReplaceOpaqueInitNode replace_opaque_init_node(new_init, igvn);
+ replace_opaque_init_node.replace_for(opaque_node());
+}
+
// Create a new Initialized Assertion Predicate from this template at the template success projection.
InitializedAssertionPredicate TemplateAssertionPredicate::initialize(PhaseIdealLoop* phase) const {
DEBUG_ONLY(verify();)
@@ -308,7 +356,8 @@ class OpaqueLoopNodesVerifier : public BFSActions {
return node->is_Opaque1();
}
- void target_node_action(Node* target_node) override {
+ void target_node_action(Node* child, uint i) override {
+ Node* target_node = child->in(i);
if (target_node->is_OpaqueLoopInit()) {
assert(!_found_init, "should only find one OpaqueLoopInitNode");
_found_init = true;
@@ -1094,6 +1143,18 @@ void ClonePredicateToTargetLoop::clone_template_assertion_predicate(
_target_loop_predicate_chain.insert_predicate(cloned_template_assertion_predicate);
}
+// Clones the provided Template Assertion Predicate to the head of the current predicate chain at the target loop and
+// replaces the current OpaqueLoopInit with 'new_init'.
+// Note: 'new_init' could also have the 'OpaqueLoopInit' as a parent node further up.
+void ClonePredicateToTargetLoop::clone_template_assertion_predicate_and_replace_init(
+ const TemplateAssertionPredicate& template_assertion_predicate, Node* new_init) {
+ TemplateAssertionPredicate cloned_template_assertion_predicate =
+ template_assertion_predicate.clone_and_replace_init(_old_target_loop_entry, new_init, _target_loop_head->as_CountedLoop(), _phase);
+ template_assertion_predicate.rewire_loop_data_dependencies(cloned_template_assertion_predicate.tail(),
+ _node_in_loop_body, _phase);
+ _target_loop_predicate_chain.insert_predicate(cloned_template_assertion_predicate);
+}
+
CloneUnswitchedLoopPredicatesVisitor::CloneUnswitchedLoopPredicatesVisitor(
LoopNode* true_path_loop_head, LoopNode* false_path_loop_head,
const NodeInOriginalLoopBody& node_in_true_path_loop_body, const NodeInClonedLoopBody& node_in_false_path_loop_body,
@@ -1182,6 +1243,10 @@ void UpdateStrideForAssertionPredicates::connect_initialized_assertion_predicate
}
}
+void UpdateInitForTemplateAssertionPredicates::visit(const TemplateAssertionPredicate& template_assertion_predicate) {
+ template_assertion_predicate.replace_opaque_init_node(_new_init, _phase->igvn());
+}
+
// Do the following to find and eliminate useless Parse and Template Assertion Predicates:
// 1. Mark all Parse and Template Assertion Predicates "maybe useful".
// 2. Walk through the loop tree and iterate over all Predicates above each loop head. All found Parse and Template
diff --git a/src/hotspot/share/opto/predicates.hpp b/src/hotspot/share/opto/predicates.hpp
index 32b1c1cd3c4..cd0832cc062 100644
--- a/src/hotspot/share/opto/predicates.hpp
+++ b/src/hotspot/share/opto/predicates.hpp
@@ -438,7 +438,10 @@ class TemplateAssertionPredicate : public Predicate {
TemplateAssertionPredicate clone(Node* new_control, CountedLoopNode* new_loop_node, PhaseIdealLoop* phase) const;
TemplateAssertionPredicate clone_and_replace_opaque_input(Node* new_control, Node* new_opaque_input,
CountedLoopNode* new_loop_node, PhaseIdealLoop* phase) const;
+ TemplateAssertionPredicate clone_and_replace_init(Node* new_control, Node* new_input,
+ CountedLoopNode* new_loop_node, PhaseIdealLoop* phase) const;
void replace_opaque_stride_input(Node* new_stride, PhaseIterGVN& igvn) const;
+ void replace_opaque_init_node(Node* new_init, PhaseIterGVN& igvn) const;
InitializedAssertionPredicate initialize(PhaseIdealLoop* phase) const;
void rewire_loop_data_dependencies(IfTrueNode* target_predicate, const NodeInLoopBody& data_in_loop_body,
const PhaseIdealLoop* phase) const;
@@ -1228,6 +1231,7 @@ public:
}
void clone_template_assertion_predicate(const TemplateAssertionPredicate& template_assertion_predicate);
+ void clone_template_assertion_predicate_and_replace_init(const TemplateAssertionPredicate& template_assertion_predicate, Node* new_init);
};
// Visitor to clone Parse and Template Assertion Predicates from a loop to its unswitched true and false path loop.
@@ -1300,6 +1304,22 @@ class UpdateStrideForAssertionPredicates : public PredicateVisitor {
void visit(const InitializedAssertionPredicate& initialized_assertion_predicate) override;
};
+// This visitor replaces the OpaqueLoopInitNode for an Assertion Predicate with the expression passed as input.
+class UpdateInitForTemplateAssertionPredicates : public PredicateVisitor {
+ Node* const _new_init;
+ PhaseIdealLoop* const _phase;
+
+public:
+ UpdateInitForTemplateAssertionPredicates(Node* const new_init, PhaseIdealLoop* phase)
+ : _new_init(new_init),
+ _phase(phase) {}
+ NONCOPYABLE(UpdateInitForTemplateAssertionPredicates);
+
+ using PredicateVisitor::visit;
+
+ void visit(const TemplateAssertionPredicate& template_assertion_predicate) override;
+};
+
// Eliminate all useless Parse and Template Assertion Predicates. They become useless when they can no longer be found
// from a loop head. We mark these useless to clean them up later during IGVN. A Predicate that is marked useless will
// no longer be visited by a PredicateVisitor.
diff --git a/src/hotspot/share/opto/type.cpp b/src/hotspot/share/opto/type.cpp
index 96fee925e5d..ecb8c2c1cd8 100644
--- a/src/hotspot/share/opto/type.cpp
+++ b/src/hotspot/share/opto/type.cpp
@@ -45,6 +45,8 @@
#include "opto/type.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/ostream.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/stringUtils.hpp"
@@ -2979,15 +2981,22 @@ const char *const TypePtr::ptr_msg[TypePtr::lastPTR] = {
#ifndef PRODUCT
void TypePtr::dump2( Dict &d, uint depth, outputStream *st ) const {
- if( _ptr == Null ) st->print("null");
- else st->print("%s *", ptr_msg[_ptr]);
- if( _offset == OffsetTop ) st->print("+top");
- else if( _offset == OffsetBot ) st->print("+bot");
- else if( _offset ) st->print("+%d", _offset);
+ st->print("ptr:%s", ptr_msg[_ptr]);
+ dump_offset(st);
dump_inline_depth(st);
dump_speculative(st);
}
+void TypePtr::dump_offset(outputStream* st) const {
+ if (_offset == OffsetBot) {
+ st->print("+bot");
+ } else if (_offset == OffsetTop) {
+ st->print("+top");
+ } else {
+ st->print("+%d", _offset);
+ }
+}
+
/**
*dump the speculative part of the type
*/
@@ -3159,11 +3168,12 @@ uint TypeRawPtr::hash(void) const {
//------------------------------dump2------------------------------------------
#ifndef PRODUCT
-void TypeRawPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
- if( _ptr == Constant )
- st->print(INTPTR_FORMAT, p2i(_bits));
- else
+void TypeRawPtr::dump2(Dict& d, uint depth, outputStream* st) const {
+ if (_ptr == Constant) {
+ st->print("rawptr:Constant:" INTPTR_FORMAT, p2i(_bits));
+ } else {
st->print("rawptr:%s", ptr_msg[_ptr]);
+ }
}
#endif
@@ -3798,24 +3808,29 @@ uint TypeOopPtr::hash(void) const {
//------------------------------dump2------------------------------------------
#ifndef PRODUCT
-void TypeOopPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
+void TypeOopPtr::dump2(Dict& d, uint depth, outputStream* st) const {
st->print("oopptr:%s", ptr_msg[_ptr]);
- if( _klass_is_exact ) st->print(":exact");
- if( const_oop() ) st->print(INTPTR_FORMAT, p2i(const_oop()));
- switch( _offset ) {
- case OffsetTop: st->print("+top"); break;
- case OffsetBot: st->print("+any"); break;
- case 0: break;
- default: st->print("+%d",_offset); break;
+ if (_klass_is_exact) {
+ st->print(":exact");
}
- if (_instance_id == InstanceTop)
- st->print(",iid=top");
- else if (_instance_id != InstanceBot)
- st->print(",iid=%d",_instance_id);
-
+ if (const_oop() != nullptr) {
+ st->print(":" INTPTR_FORMAT, p2i(const_oop()));
+ }
+ dump_offset(st);
+ dump_instance_id(st);
dump_inline_depth(st);
dump_speculative(st);
}
+
+void TypeOopPtr::dump_instance_id(outputStream* st) const {
+ if (_instance_id == InstanceTop) {
+ st->print(",iid=top");
+ } else if (_instance_id == InstanceBot) {
+ st->print(",iid=bot");
+ } else {
+ st->print(",iid=%d", _instance_id);
+ }
+}
#endif
//------------------------------singleton--------------------------------------
@@ -4453,50 +4468,30 @@ bool TypeInstPtr::maybe_java_subtype_of_helper(const TypeOopPtr* other, bool thi
#ifndef PRODUCT
void TypeInstPtr::dump2(Dict &d, uint depth, outputStream* st) const {
// Print the name of the klass.
+ st->print("instptr:");
klass()->print_name_on(st);
_interfaces->dump(st);
- switch( _ptr ) {
- case Constant:
- if (WizardMode || Verbose) {
- ResourceMark rm;
- stringStream ss;
+ if (_ptr == Constant && (WizardMode || Verbose)) {
+ ResourceMark rm;
+ stringStream ss;
- st->print(" ");
- const_oop()->print_oop(&ss);
- // 'const_oop->print_oop()' may emit newlines('\n') into ss.
- // suppress newlines from it so -XX:+Verbose -XX:+PrintIdeal dumps one-liner for each node.
- char* buf = ss.as_string(/* c_heap= */false);
- StringUtils::replace_no_expand(buf, "\n", "");
- st->print_raw(buf);
- }
- case BotPTR:
- if (!WizardMode && !Verbose) {
- if( _klass_is_exact ) st->print(":exact");
- break;
- }
- case TopPTR:
- case AnyNull:
- case NotNull:
- st->print(":%s", ptr_msg[_ptr]);
- if( _klass_is_exact ) st->print(":exact");
- break;
- default:
- break;
+ st->print(" ");
+ const_oop()->print_oop(&ss);
+ // 'const_oop->print_oop()' may emit newlines('\n') into ss.
+ // suppress newlines from it so -XX:+Verbose -XX:+PrintIdeal dumps one-liner for each node.
+ char* buf = ss.as_string(/* c_heap= */false);
+ StringUtils::replace_no_expand(buf, "\n", "");
+ st->print_raw(buf);
}
- if( _offset ) { // Dump offset, if any
- if( _offset == OffsetBot ) st->print("+any");
- else if( _offset == OffsetTop ) st->print("+unknown");
- else st->print("+%d", _offset);
+ st->print(":%s", ptr_msg[_ptr]);
+ if (_klass_is_exact) {
+ st->print(":exact");
}
- st->print(" *");
- if (_instance_id == InstanceTop)
- st->print(",iid=top");
- else if (_instance_id != InstanceBot)
- st->print(",iid=%d",_instance_id);
-
+ dump_offset(st);
+ dump_instance_id(st);
dump_inline_depth(st);
dump_speculative(st);
}
@@ -5089,26 +5084,17 @@ const Type *TypeAryPtr::xdual() const {
//------------------------------dump2------------------------------------------
#ifndef PRODUCT
void TypeAryPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
- _ary->dump2(d,depth,st);
+ st->print("aryptr:");
+ _ary->dump2(d, depth, st);
_interfaces->dump(st);
- switch( _ptr ) {
- case Constant:
+ if (_ptr == Constant) {
const_oop()->print(st);
- break;
- case BotPTR:
- if (!WizardMode && !Verbose) {
- if( _klass_is_exact ) st->print(":exact");
- break;
- }
- case TopPTR:
- case AnyNull:
- case NotNull:
- st->print(":%s", ptr_msg[_ptr]);
- if( _klass_is_exact ) st->print(":exact");
- break;
- default:
- break;
+ }
+
+ st->print(":%s", ptr_msg[_ptr]);
+ if (_klass_is_exact) {
+ st->print(":exact");
}
if( _offset != 0 ) {
@@ -5126,12 +5112,8 @@ void TypeAryPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
}
}
}
- st->print(" *");
- if (_instance_id == InstanceTop)
- st->print(",iid=top");
- else if (_instance_id != InstanceBot)
- st->print(",iid=%d",_instance_id);
+ dump_instance_id(st);
dump_inline_depth(st);
dump_speculative(st);
}
@@ -5490,13 +5472,10 @@ const Type *TypeMetadataPtr::xdual() const {
#ifndef PRODUCT
void TypeMetadataPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
st->print("metadataptr:%s", ptr_msg[_ptr]);
- if( metadata() ) st->print(INTPTR_FORMAT, p2i(metadata()));
- switch( _offset ) {
- case OffsetTop: st->print("+top"); break;
- case OffsetBot: st->print("+any"); break;
- case 0: break;
- default: st->print("+%d",_offset); break;
+ if (metadata() != nullptr) {
+ st->print(":" INTPTR_FORMAT, p2i(metadata()));
}
+ dump_offset(st);
}
#endif
@@ -5644,44 +5623,6 @@ intptr_t TypeKlassPtr::get_con() const {
return (intptr_t)k->constant_encoding();
}
-//------------------------------dump2------------------------------------------
-// Dump Klass Type
-#ifndef PRODUCT
-void TypeKlassPtr::dump2(Dict & d, uint depth, outputStream *st) const {
- switch(_ptr) {
- case Constant:
- st->print("precise ");
- case NotNull:
- {
- const char *name = klass()->name()->as_utf8();
- if (name) {
- st->print("%s: " INTPTR_FORMAT, name, p2i(klass()));
- } else {
- ShouldNotReachHere();
- }
- _interfaces->dump(st);
- }
- case BotPTR:
- if (!WizardMode && !Verbose && _ptr != Constant) break;
- case TopPTR:
- case AnyNull:
- st->print(":%s", ptr_msg[_ptr]);
- if (_ptr == Constant) st->print(":exact");
- break;
- default:
- break;
- }
-
- if (_offset) { // Dump offset, if any
- if (_offset == OffsetBot) { st->print("+any"); }
- else if (_offset == OffsetTop) { st->print("+unknown"); }
- else { st->print("+%d", _offset); }
- }
-
- st->print(" *");
-}
-#endif
-
//=============================================================================
// Convenience common pre-built types.
@@ -6036,6 +5977,15 @@ const TypeKlassPtr* TypeInstKlassPtr::try_improve() const {
return this;
}
+#ifndef PRODUCT
+void TypeInstKlassPtr::dump2(Dict& d, uint depth, outputStream* st) const {
+ st->print("instklassptr:");
+ klass()->print_name_on(st);
+ _interfaces->dump(st);
+ st->print(":%s", ptr_msg[_ptr]);
+ dump_offset(st);
+}
+#endif // PRODUCT
const TypeAryKlassPtr *TypeAryKlassPtr::make(PTR ptr, const Type* elem, ciKlass* k, int offset) {
return (TypeAryKlassPtr*)(new TypeAryKlassPtr(ptr, elem, k, offset))->hashcons();
@@ -6507,34 +6457,11 @@ ciKlass* TypeAryKlassPtr::klass() const {
// Dump Klass Type
#ifndef PRODUCT
void TypeAryKlassPtr::dump2( Dict & d, uint depth, outputStream *st ) const {
- switch( _ptr ) {
- case Constant:
- st->print("precise ");
- case NotNull:
- {
- st->print("[");
- _elem->dump2(d, depth, st);
- _interfaces->dump(st);
- st->print(": ");
- }
- case BotPTR:
- if( !WizardMode && !Verbose && _ptr != Constant ) break;
- case TopPTR:
- case AnyNull:
- st->print(":%s", ptr_msg[_ptr]);
- if( _ptr == Constant ) st->print(":exact");
- break;
- default:
- break;
- }
-
- if( _offset ) { // Dump offset, if any
- if( _offset == OffsetBot ) { st->print("+any"); }
- else if( _offset == OffsetTop ) { st->print("+unknown"); }
- else { st->print("+%d", _offset); }
- }
-
- st->print(" *");
+ st->print("aryklassptr:[");
+ _elem->dump2(d, depth, st);
+ _interfaces->dump(st);
+ st->print(":%s", ptr_msg[_ptr]);
+ dump_offset(st);
}
#endif
diff --git a/src/hotspot/share/opto/type.hpp b/src/hotspot/share/opto/type.hpp
index c61c2a64278..4666cfbcf2d 100644
--- a/src/hotspot/share/opto/type.hpp
+++ b/src/hotspot/share/opto/type.hpp
@@ -1176,15 +1176,15 @@ protected:
int hash_speculative() const;
const TypePtr* add_offset_speculative(intptr_t offset) const;
const TypePtr* with_offset_speculative(intptr_t offset) const;
-#ifndef PRODUCT
- void dump_speculative(outputStream *st) const;
-#endif
// utility methods to work on the inline depth of the type
int dual_inline_depth() const;
int meet_inline_depth(int depth) const;
+
#ifndef PRODUCT
- void dump_inline_depth(outputStream *st) const;
+ void dump_speculative(outputStream* st) const;
+ void dump_inline_depth(outputStream* st) const;
+ void dump_offset(outputStream* st) const;
#endif
// TypeInstPtr (TypeAryPtr resp.) and TypeInstKlassPtr (TypeAryKlassPtr resp.) implement very similar meet logic.
@@ -1364,6 +1364,10 @@ protected:
virtual ciKlass* exact_klass_helper() const { return nullptr; }
virtual ciKlass* klass() const { return _klass; }
+#ifndef PRODUCT
+ void dump_instance_id(outputStream* st) const;
+#endif // PRODUCT
+
public:
bool is_java_subtype_of(const TypeOopPtr* other) const {
@@ -1832,9 +1836,6 @@ public:
virtual const TypeKlassPtr* try_improve() const { return this; }
-#ifndef PRODUCT
- virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping
-#endif
private:
virtual bool is_meet_subtype_of(const TypePtr* other) const {
return is_meet_subtype_of_helper(other->is_klassptr(), klass_is_exact(), other->is_klassptr()->klass_is_exact());
@@ -1914,6 +1915,11 @@ public:
// Convenience common pre-built types.
static const TypeInstKlassPtr* OBJECT; // Not-null object klass or below
static const TypeInstKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same
+
+#ifndef PRODUCT
+ virtual void dump2(Dict& d, uint depth, outputStream* st) const;
+#endif // PRODUCT
+
private:
virtual bool is_meet_subtype_of_helper(const TypeKlassPtr* other, bool this_xk, bool other_xk) const;
};
diff --git a/src/hotspot/share/prims/jvmtiClassFileReconstituter.cpp b/src/hotspot/share/prims/jvmtiClassFileReconstituter.cpp
index a441d405f8d..5077a1743b9 100644
--- a/src/hotspot/share/prims/jvmtiClassFileReconstituter.cpp
+++ b/src/hotspot/share/prims/jvmtiClassFileReconstituter.cpp
@@ -25,6 +25,7 @@
#include "classfile/symbolTable.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "memory/universe.hpp"
+#include "oops/bsmAttribute.inline.hpp"
#include "oops/constantPool.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
@@ -389,20 +390,13 @@ void JvmtiClassFileReconstituter::write_annotations_attribute(const char* attr_n
// } bootstrap_methods[num_bootstrap_methods];
// }
void JvmtiClassFileReconstituter::write_bootstrapmethod_attribute() {
- Array<u2>* operands = cpool()->operands();
write_attribute_name_index("BootstrapMethods");
- int num_bootstrap_methods = ConstantPool::operand_array_length(operands);
-
- // calculate length of attribute
- u4 length = sizeof(u2); // num_bootstrap_methods
- for (int n = 0; n < num_bootstrap_methods; n++) {
- u2 num_bootstrap_arguments = cpool()->bsm_attribute_entry(n)->argument_count();
- length += sizeof(u2); // bootstrap_method_ref
- length += sizeof(u2); // num_bootstrap_arguments
- length += (u4)sizeof(u2) * num_bootstrap_arguments; // bootstrap_arguments[num_bootstrap_arguments]
- }
+ u4 length = sizeof(u2) + // Size of num_bootstrap_methods
+ // The rest of the data for the attribute is exactly the u2s in the data array.
+ sizeof(u2) * cpool()->bsm_entries().array_length();
write_u4(length);
+ int num_bootstrap_methods = cpool()->bsm_entries().number_of_entries();
// write attribute
write_u2(checked_cast<u2>(num_bootstrap_methods));
for (int n = 0; n < num_bootstrap_methods; n++) {
@@ -411,7 +405,7 @@ void JvmtiClassFileReconstituter::write_bootstrapmethod_attribute() {
write_u2(bsme->bootstrap_method_index());
write_u2(num_bootstrap_arguments);
for (int arg = 0; arg < num_bootstrap_arguments; arg++) {
- u2 bootstrap_argument = bsme->argument_index(arg);
+ u2 bootstrap_argument = bsme->argument(arg);
write_u2(bootstrap_argument);
}
}
@@ -798,7 +792,7 @@ void JvmtiClassFileReconstituter::write_class_attributes() {
if (type_anno != nullptr) {
++attr_count; // has RuntimeVisibleTypeAnnotations attribute
}
- if (cpool()->operands() != nullptr) {
+ if (!cpool()->bsm_entries().is_empty()) {
++attr_count;
}
if (ik()->nest_host_index() != 0) {
@@ -843,7 +837,7 @@ void JvmtiClassFileReconstituter::write_class_attributes() {
if (ik()->record_components() != nullptr) {
write_record_attribute();
}
- if (cpool()->operands() != nullptr) {
+ if (!cpool()->bsm_entries().is_empty()) {
write_bootstrapmethod_attribute();
}
if (inner_classes_length > 0) {
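The simplified length computation in write_bootstrapmethod_attribute() relies on the flattened BSM data array holding exactly the u2 values that the attribute body serializes: per entry a method reference, an argument count and the arguments. Under that assumption (stated by the in-line comment in the hunk above), the per-entry summation and the flat-array formula agree, as this small standalone check illustrates:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    int main() {
      // Two bootstrap method entries: one with 3 arguments, one with none.
      std::vector<std::vector<uint16_t>> args_per_entry = {{10, 11, 12}, {}};

      // Flattened u2 array length: [ref, argc, args...] per entry.
      std::size_t array_length = 0;
      for (const auto& args : args_per_entry) {
        array_length += 2 + args.size();
      }

      // Old-style computation: walk the entries and add up the attribute pieces.
      uint32_t old_style = sizeof(uint16_t);               // num_bootstrap_methods
      for (const auto& args : args_per_entry) {
        old_style += sizeof(uint16_t);                     // bootstrap_method_ref
        old_style += sizeof(uint16_t);                     // num_bootstrap_arguments
        old_style += sizeof(uint16_t) * args.size();       // bootstrap_arguments[]
      }

      // New-style computation from the patch, expressed over the flat array length.
      uint32_t new_style = sizeof(uint16_t) + sizeof(uint16_t) * array_length;

      assert(old_style == new_style);   // 2 + 2 * 7 = 16 bytes for this example
      return old_style == new_style ? 0 : 1;
    }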
diff --git a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp
index ef8875d582e..13b239b4df0 100644
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp
@@ -45,7 +45,8 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/annotations.hpp"
-#include "oops/constantPool.hpp"
+#include "oops/bsmAttribute.inline.hpp"
+#include "oops/constantPool.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassVtable.hpp"
@@ -573,9 +574,9 @@ void VM_RedefineClasses::append_entry(const constantPoolHandle& scratch_cp,
case JVM_CONSTANT_Dynamic: // fall through
case JVM_CONSTANT_InvokeDynamic:
{
- // Index of the bootstrap specifier in the operands array
+ // Index of the bootstrap specifier in the BSM array
int old_bs_i = scratch_cp->bootstrap_methods_attribute_index(scratch_i);
- int new_bs_i = find_or_append_operand(scratch_cp, old_bs_i, merge_cp_p,
+ int new_bs_i = find_or_append_bsm_entry(scratch_cp, old_bs_i, merge_cp_p,
merge_cp_length_p);
// The bootstrap method NameAndType_info index
int old_ref_i = scratch_cp->bootstrap_name_and_type_ref_index_at(scratch_i);
@@ -591,10 +592,11 @@ void VM_RedefineClasses::append_entry(const constantPoolHandle& scratch_cp,
("Dynamic entry@%d name_and_type_index change: %d to %d", *merge_cp_length_p, old_ref_i, new_ref_i);
}
- if (scratch_cp->tag_at(scratch_i).is_dynamic_constant())
+ if (scratch_cp->tag_at(scratch_i).is_dynamic_constant()) {
(*merge_cp_p)->dynamic_constant_at_put(*merge_cp_length_p, new_bs_i, new_ref_i);
- else
+ } else {
(*merge_cp_p)->invoke_dynamic_at_put(*merge_cp_length_p, new_bs_i, new_ref_i);
+ }
if (scratch_i != *merge_cp_length_p) {
// The new entry in *merge_cp_p is at a different index than
// the new entry in scratch_cp so we need to map the index values.
@@ -660,10 +662,10 @@ u2 VM_RedefineClasses::find_or_append_indirect_entry(const constantPoolHandle& s
} // end find_or_append_indirect_entry()
-// Append a bootstrap specifier into the merge_cp operands that is semantically equal
-// to the scratch_cp operands bootstrap specifier passed by the old_bs_i index.
+// Append a bootstrap specifier into the merge_cp BSM entries that is semantically equal
+// to the scratch_cp BSM entries' bootstrap specifier passed by the old_bs_i index.
// Recursively append new merge_cp entries referenced by the new bootstrap specifier.
-void VM_RedefineClasses::append_operand(const constantPoolHandle& scratch_cp, const int old_bs_i,
+int VM_RedefineClasses::append_bsm_entry(const constantPoolHandle& scratch_cp, const int old_bs_i,
constantPoolHandle *merge_cp_p, int *merge_cp_length_p) {
BSMAttributeEntry* old_bsme = scratch_cp->bsm_attribute_entry(old_bs_i);
@@ -672,90 +674,82 @@ void VM_RedefineClasses::append_operand(const constantPoolHandle& scratch_cp, co
merge_cp_length_p);
if (new_ref_i != old_ref_i) {
log_trace(redefine, class, constantpool)
- ("operands entry@%d bootstrap method ref_index change: %d to %d", _operands_cur_length, old_ref_i, new_ref_i);
+ ("BSM attribute entry@%d bootstrap method ref_index change: %d to %d", _bsmae_iter.current_offset() - 1, old_ref_i, new_ref_i);
}
- Array<u2>* merge_ops = (*merge_cp_p)->operands();
- int new_bs_i = _operands_cur_length;
- // We have _operands_cur_length == 0 when the merge_cp operands is empty yet.
- // However, the operand_offset_at(0) was set in the extend_operands() call.
- int new_base = (new_bs_i == 0) ? (*merge_cp_p)->operand_offset_at(0)
- : (*merge_cp_p)->operand_next_offset_at(new_bs_i - 1);
- u2 argc = old_bsme->argument_count();
-
- ConstantPool::operand_offset_at_put(merge_ops, _operands_cur_length, new_base);
- merge_ops->at_put(new_base++, new_ref_i);
- merge_ops->at_put(new_base++, argc);
-
- for (int i = 0; i < argc; i++) {
- u2 old_arg_ref_i = old_bsme->argument_index(i);
+ const int new_bs_i = _bsmae_iter.current_offset();
+ BSMAttributeEntry* new_bsme =
+ _bsmae_iter.reserve_new_entry(new_ref_i, old_bsme->argument_count());
+ assert(new_bsme != nullptr, "must be");
+ for (int i = 0; i < new_bsme->argument_count(); i++) {
+ u2 old_arg_ref_i = old_bsme->argument(i);
u2 new_arg_ref_i = find_or_append_indirect_entry(scratch_cp, old_arg_ref_i, merge_cp_p,
merge_cp_length_p);
- merge_ops->at_put(new_base++, new_arg_ref_i);
+ new_bsme->set_argument(i, new_arg_ref_i);
+
if (new_arg_ref_i != old_arg_ref_i) {
log_trace(redefine, class, constantpool)
- ("operands entry@%d bootstrap method argument ref_index change: %d to %d",
- _operands_cur_length, old_arg_ref_i, new_arg_ref_i);
+ ("BSM attribute entry@%d bootstrap method argument ref_index change: %d to %d",
+ _bsmae_iter.current_offset() - 1, old_arg_ref_i, new_arg_ref_i);
}
}
- if (old_bs_i != _operands_cur_length) {
- // The bootstrap specifier in *merge_cp_p is at a different index than
- // that in scratch_cp so we need to map the index values.
- map_operand_index(old_bs_i, new_bs_i);
- }
- _operands_cur_length++;
-} // end append_operand()
+ // This mapping is only used for logging
+ map_bsm_index(old_bs_i, new_bs_i);
+ return new_bs_i;
+} // end append_bsm_entry()
-int VM_RedefineClasses::find_or_append_operand(const constantPoolHandle& scratch_cp,
+int VM_RedefineClasses::find_or_append_bsm_entry(const constantPoolHandle& scratch_cp,
int old_bs_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p) {
+ const int max_offset_in_merge = _bsmae_iter.current_offset();
int new_bs_i = old_bs_i; // bootstrap specifier index
- bool match = (old_bs_i < _operands_cur_length) &&
- scratch_cp->compare_operand_to(old_bs_i, *merge_cp_p, old_bs_i);
+ // Has the old_bs_i index been used already? Check if it's the same so we know
+ // whether or not a remapping is required.
+ bool match = (old_bs_i < max_offset_in_merge) &&
+ scratch_cp->compare_bootstrap_entry_to(old_bs_i, *merge_cp_p, old_bs_i);
if (!match) {
// forward reference in *merge_cp_p or not a direct match
- int found_i = scratch_cp->find_matching_operand(old_bs_i, *merge_cp_p,
- _operands_cur_length);
+ int found_i = scratch_cp->find_matching_bsm_entry(old_bs_i, *merge_cp_p,
+ max_offset_in_merge);
if (found_i != -1) {
- guarantee(found_i != old_bs_i, "compare_operand_to() and find_matching_operand() disagree");
- // found a matching operand somewhere else in *merge_cp_p so just need a mapping
+ guarantee(found_i != old_bs_i, "compare_bootstrap_entry_to() and find_matching_bsm_entry() disagree");
+ // found a matching BSM entry somewhere else in *merge_cp_p so just need a mapping
new_bs_i = found_i;
- map_operand_index(old_bs_i, found_i);
+ map_bsm_index(old_bs_i, found_i);
} else {
// no match found so we have to append this bootstrap specifier to *merge_cp_p
- append_operand(scratch_cp, old_bs_i, merge_cp_p, merge_cp_length_p);
- new_bs_i = _operands_cur_length - 1;
+ new_bs_i = append_bsm_entry(scratch_cp, old_bs_i, merge_cp_p, merge_cp_length_p);
}
}
return new_bs_i;
-} // end find_or_append_operand()
+} // end find_or_append_bsm_entry()
-void VM_RedefineClasses::finalize_operands_merge(const constantPoolHandle& merge_cp, TRAPS) {
- if (merge_cp->operands() == nullptr) {
+void VM_RedefineClasses::finalize_bsm_entries_merge(const constantPoolHandle& merge_cp, TRAPS) {
+ if (merge_cp->bsm_entries().number_of_entries() == 0) {
return;
}
- // Shrink the merge_cp operands
- merge_cp->shrink_operands(_operands_cur_length, CHECK);
+ // Finished extending the BSMAEs
+ merge_cp->end_extension(_bsmae_iter, CHECK);
if (log_is_enabled(Trace, redefine, class, constantpool)) {
// don't want to loop unless we are tracing
int count = 0;
- for (int i = 1; i < _operands_index_map_p->length(); i++) {
- int value = _operands_index_map_p->at(i);
+ for (int i = 1; i < _bsm_index_map_p->length(); i++) {
+ int value = _bsm_index_map_p->at(i);
if (value != -1) {
- log_trace(redefine, class, constantpool)("operands_index_map[%d]: old=%d new=%d", count, i, value);
+ log_trace(redefine, class, constantpool)("bsm_index_map[%d]: old=%d new=%d", count, i, value);
count++;
}
}
}
// Clean-up
- _operands_index_map_p = nullptr;
- _operands_cur_length = 0;
- _operands_index_map_count = 0;
-} // end finalize_operands_merge()
+ _bsm_index_map_p = nullptr;
+ _bsm_index_map_count = 0;
+ _bsmae_iter = BSMAttributeEntries::InsertionIterator();
+} // end finalize_bsm_entries_merge()
// Symbol* comparator for qsort
// The caller must have an active ResourceMark.
@@ -1272,26 +1266,26 @@ u2 VM_RedefineClasses::find_new_index(int old_index) {
// Find new bootstrap specifier index value for old bootstrap specifier index
// value by searching the index map. Returns unused index (-1) if there is
// no mapped value for the old bootstrap specifier index.
-int VM_RedefineClasses::find_new_operand_index(int old_index) {
- if (_operands_index_map_count == 0) {
+int VM_RedefineClasses::find_new_bsm_index(int old_index) {
+ if (_bsm_index_map_count == 0) {
// map is empty so nothing can be found
return -1;
}
- if (old_index == -1 || old_index >= _operands_index_map_p->length()) {
+ if (old_index == -1 || old_index >= _bsm_index_map_p->length()) {
// The old_index is out of range so it is not mapped.
// This should not happen in regular constant pool merging use.
return -1;
}
- int value = _operands_index_map_p->at(old_index);
+ int value = _bsm_index_map_p->at(old_index);
if (value == -1) {
// the old_index is not mapped
return -1;
}
return value;
-} // end find_new_operand_index()
+} // end find_new_bsm_index()
// The bug 6214132 caused the verification to fail.
@@ -1560,22 +1554,15 @@ void VM_RedefineClasses::map_index(const constantPoolHandle& scratch_cp,
// Map old_index to new_index as needed.
-void VM_RedefineClasses::map_operand_index(int old_index, int new_index) {
- if (find_new_operand_index(old_index) != -1) {
- // old_index is already mapped
- return;
- }
-
+void VM_RedefineClasses::map_bsm_index(int old_index, int new_index) {
if (old_index == new_index) {
// no mapping is needed
return;
}
-
- _operands_index_map_p->at_put(old_index, new_index);
- _operands_index_map_count++;
-
+ _bsm_index_map_p->at_put(old_index, new_index);
+ _bsm_index_map_count++;
log_trace(redefine, class, constantpool)("mapped bootstrap specifier at index %d to %d", old_index, new_index);
-} // end map_index()
+} // end map_bsm_index()
// Merge old_cp and scratch_cp and return the results of the merge via
@@ -1639,8 +1626,8 @@ bool VM_RedefineClasses::merge_constant_pools(const constantPoolHandle& old_cp,
}
} // end for each old_cp entry
- ConstantPool::copy_operands(old_cp, merge_cp_p, CHECK_false);
- merge_cp_p->extend_operands(scratch_cp, CHECK_false);
+ ConstantPool::copy_bsm_entries(old_cp, merge_cp_p, CHECK_false);
+ _bsmae_iter = merge_cp_p->start_extension(scratch_cp, CHECK_false);
// We don't need to sanity check that *merge_cp_length_p is within
// *merge_cp_p bounds since we have the minimum on-entry check above.
@@ -1737,7 +1724,7 @@ bool VM_RedefineClasses::merge_constant_pools(const constantPoolHandle& old_cp,
("after pass 1b: merge_cp_len=%d, scratch_i=%d, index_map_len=%d",
merge_cp_length_p, scratch_i, _index_map_count);
}
- finalize_operands_merge(merge_cp_p, CHECK_false);
+ finalize_bsm_entries_merge(merge_cp_p, CHECK_false);
return true;
} // end merge_constant_pools()
@@ -1807,12 +1794,11 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
_index_map_count = 0;
_index_map_p = new intArray(scratch_cp->length(), scratch_cp->length(), -1);
- _operands_cur_length = ConstantPool::operand_array_length(old_cp->operands());
- _operands_index_map_count = 0;
- int operands_index_map_len = ConstantPool::operand_array_length(scratch_cp->operands());
- _operands_index_map_p = new intArray(operands_index_map_len, operands_index_map_len, -1);
+ _bsm_index_map_count = 0;
+ int bsm_data_len = scratch_cp->bsm_entries().array_length();
+ _bsm_index_map_p = new intArray(bsm_data_len, bsm_data_len, -1);
- // reference to the cp holder is needed for copy_operands()
+ // reference to the cp holder is needed for reallocating the BSM attribute
merge_cp->set_pool_holder(scratch_class);
bool result = merge_constant_pools(old_cp, scratch_cp, merge_cp,
merge_cp_length, THREAD);
@@ -3500,7 +3486,7 @@ void VM_RedefineClasses::set_new_constant_pool(
smaller_cp->set_version(version);
// attach klass to new constant pool
- // reference to the cp holder is needed for copy_operands()
+ // reference to the cp holder is needed for reallocating the BSM attribute
smaller_cp->set_pool_holder(scratch_class);
smaller_cp->copy_fields(scratch_cp());
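
For orientation, the bootstrap-specifier remapping used above mirrors the existing constant-pool index map: an array indexed by the old BSM index that records where the specifier landed in the merged pool, with -1 meaning "not remapped". A minimal standalone sketch of that bookkeeping, using hypothetical names and std::vector instead of the VM_RedefineClasses members and intArray:

#include <vector>

// Sketch only: mirrors the _bsm_index_map_p / _bsm_index_map_count semantics.
struct BsmIndexMapSketch {
  std::vector<int> _map;   // old BSM index -> new BSM index, -1 if unmapped
  int _mapped_count = 0;

  explicit BsmIndexMapSketch(int scratch_bsm_count) : _map(scratch_bsm_count, -1) {}

  void map_index(int old_i, int new_i) {
    if (old_i == new_i) return;          // identity mapping needs no entry
    _map[old_i] = new_i;
    _mapped_count++;
  }

  int find_new_index(int old_i) const {
    if (_mapped_count == 0) return -1;   // nothing mapped yet
    if (old_i < 0 || old_i >= (int)_map.size()) return -1;
    return _map[old_i];
  }
};

map_bsm_index() and find_new_bsm_index() in the patch implement the same contract on a resource-allocated intArray.
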
diff --git a/src/hotspot/share/prims/jvmtiRedefineClasses.hpp b/src/hotspot/share/prims/jvmtiRedefineClasses.hpp
index d2eda1f3eed..3f1b555b175 100644
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.hpp
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.hpp
@@ -363,11 +363,16 @@ class VM_RedefineClasses: public VM_Operation {
int _index_map_count;
intArray * _index_map_p;
- // _operands_index_map_count is just an optimization for knowing if
- // _operands_index_map_p contains any entries.
- int _operands_cur_length;
- int _operands_index_map_count;
- intArray * _operands_index_map_p;
+ // _bsm_index_map_count is just an optimization for knowing if
+ // _bsm_index_map_p contains any entries.
+ int _bsm_index_map_count;
+ intArray * _bsm_index_map_p;
+
+ // After merge_constant_pools "Pass 0", the BSM attribute entries of merge_cp_p
+ // will have been expanded to also hold scratch_cp's BSM attribute entries.
+ // However, the newly acquired space will not have been filled in yet;
+ // this iterator is used to append entries into that space.
+ BSMAttributeEntries::InsertionIterator _bsmae_iter;
// ptr to _class_count scratch_classes
InstanceKlass** _scratch_classes;
@@ -429,17 +434,18 @@ class VM_RedefineClasses: public VM_Operation {
// Support for constant pool merging (these routines are in alpha order):
void append_entry(const constantPoolHandle& scratch_cp, int scratch_i,
constantPoolHandle *merge_cp_p, int *merge_cp_length_p);
- void append_operand(const constantPoolHandle& scratch_cp, int scratch_bootstrap_spec_index,
+ // Returns the index of the appended BSM
+ int append_bsm_entry(const constantPoolHandle& scratch_cp, int scratch_bootstrap_spec_index,
constantPoolHandle *merge_cp_p, int *merge_cp_length_p);
- void finalize_operands_merge(const constantPoolHandle& merge_cp, TRAPS);
+ void finalize_bsm_entries_merge(const constantPoolHandle& merge_cp, TRAPS);
u2 find_or_append_indirect_entry(const constantPoolHandle& scratch_cp, int scratch_i,
constantPoolHandle *merge_cp_p, int *merge_cp_length_p);
- int find_or_append_operand(const constantPoolHandle& scratch_cp, int scratch_bootstrap_spec_index,
+ int find_or_append_bsm_entry(const constantPoolHandle& scratch_cp, int scratch_bootstrap_spec_index,
constantPoolHandle *merge_cp_p, int *merge_cp_length_p);
u2 find_new_index(int old_index);
- int find_new_operand_index(int old_bootstrap_spec_index);
+ int find_new_bsm_index(int old_bootstrap_spec_index);
void map_index(const constantPoolHandle& scratch_cp, int old_index, int new_index);
- void map_operand_index(int old_bootstrap_spec_index, int new_bootstrap_spec_index);
+ void map_bsm_index(int old_bootstrap_spec_index, int new_bootstrap_spec_index);
bool merge_constant_pools(const constantPoolHandle& old_cp,
const constantPoolHandle& scratch_cp, constantPoolHandle& merge_cp_p,
int& merge_cp_length_p, TRAPS);
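
Schematically, the insertion iterator declared above follows a start / reserve / fill / end life cycle. The sketch below strings together only the calls that appear in this patch (start_extension, current_offset, reserve_new_entry, set_argument, end_extension); the variables new_ref_i, argc, and new_arg_refs are placeholders, and error handling is omitted:

// Pass 0: grow merge_cp's BSM attribute to make room for scratch_cp's entries.
BSMAttributeEntries::InsertionIterator iter =
    merge_cp->start_extension(scratch_cp, CHECK);

// For each bootstrap specifier that has to be appended:
int new_bs_i = iter.current_offset();
BSMAttributeEntry* entry = iter.reserve_new_entry(new_ref_i, argc);
for (int i = 0; i < argc; i++) {
  entry->set_argument(i, new_arg_refs[i]);
}

// After the merge: finish the extension (this replaces the old shrink_operands step).
merge_cp->end_extension(iter, CHECK);
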
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index 1ef2ee9de0d..55ee7641a5f 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -2483,6 +2483,9 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, JVMFlagOrigin
}
} else if (match_option(option, "-Xmaxjitcodesize", &tail) ||
match_option(option, "-XX:ReservedCodeCacheSize=", &tail)) {
+ if (match_option(option, "-Xmaxjitcodesize", &tail)) {
+ warning("Option -Xmaxjitcodesize was deprecated in JDK 26 and will likely be removed in a future release.");
+ }
julong long_ReservedCodeCacheSize = 0;
ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize, 1);
diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp
index 0aa7b392b17..e2029a26d37 100644
--- a/src/hotspot/share/runtime/deoptimization.cpp
+++ b/src/hotspot/share/runtime/deoptimization.cpp
@@ -498,6 +498,9 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
RegisterMap::WalkContinuation::skip);
// Now get the deoptee with a valid map
frame deoptee = stub_frame.sender(&map);
+ if (exec_mode == Unpack_deopt) {
+ assert(deoptee.is_deoptimized_frame(), "frame is not marked for deoptimization");
+ }
// Set the deoptee nmethod
assert(current->deopt_compiled_method() == nullptr, "Pending deopt!");
nmethod* nm = deoptee.cb()->as_nmethod_or_null();
diff --git a/src/hotspot/share/runtime/frame.cpp b/src/hotspot/share/runtime/frame.cpp
index b5cd4acc75d..8f969600ba8 100644
--- a/src/hotspot/share/runtime/frame.cpp
+++ b/src/hotspot/share/runtime/frame.cpp
@@ -206,7 +206,7 @@ address frame::raw_pc() const {
if (is_deoptimized_frame()) {
nmethod* nm = cb()->as_nmethod_or_null();
assert(nm != nullptr, "only nmethod is expected here");
- return nm->deopt_handler_begin() - pc_return_offset;
+ return nm->deopt_handler_entry() - pc_return_offset;
} else {
return (pc() - pc_return_offset);
}
@@ -355,7 +355,7 @@ void frame::deoptimize(JavaThread* thread) {
// If the call site is a MethodHandle call site use the MH deopt handler.
nmethod* nm = _cb->as_nmethod();
- address deopt = nm->deopt_handler_begin();
+ address deopt = nm->deopt_handler_entry();
NativePostCallNop* inst = nativePostCallNop_at(pc());
diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp
index e008f29eecc..b65bf643cbf 100644
--- a/src/hotspot/share/runtime/os.hpp
+++ b/src/hotspot/share/runtime/os.hpp
@@ -534,6 +534,7 @@ class os: AllStatic {
static void realign_memory(char *addr, size_t bytes, size_t alignment_hint);
// NUMA-specific interface
+ static void numa_set_thread_affinity(Thread* thread, int node);
static void numa_make_local(char *addr, size_t bytes, int lgrp_hint);
static void numa_make_global(char *addr, size_t bytes);
static size_t numa_get_groups_num();
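
The hunk above only adds the declaration of numa_set_thread_affinity; its call sites are not part of this excerpt. As an illustrative sketch (the helper and its round-robin policy are hypothetical), a NUMA-aware subsystem could spread its worker threads across nodes like this:

// Hypothetical helper: pin worker threads round-robin across NUMA nodes,
// using only the os:: NUMA queries visible in this header.
static void bind_worker_to_node(Thread* worker, uint worker_id) {
  size_t node_count = os::numa_get_groups_num();
  if (node_count > 1) {
    int node = (int)(worker_id % node_count);
    os::numa_set_thread_affinity(worker, node);
  }
}
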
diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp
index 79c7c0b32b4..e277e1fb569 100644
--- a/src/hotspot/share/runtime/sharedRuntime.cpp
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp
@@ -87,6 +87,9 @@
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
+#ifdef COMPILER2
+#include "opto/runtime.hpp"
+#endif
#if INCLUDE_JFR
#include "jfr/jfr.inline.hpp"
#endif
@@ -601,6 +604,11 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* curr
// The deferred StackWatermarkSet::after_unwind check will be performed in
// * OptoRuntime::handle_exception_C_helper for C2 code
// * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
+#ifdef COMPILER2
+ if (nm->compiler_type() == compiler_c2) {
+ return OptoRuntime::exception_blob()->entry_point();
+ }
+#endif // COMPILER2
return nm->exception_begin();
}
}
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
index a75e67e9b56..25a99c2d758 100644
--- a/src/hotspot/share/runtime/vmStructs.cpp
+++ b/src/hotspot/share/runtime/vmStructs.cpp
@@ -54,6 +54,7 @@
#include "oops/array.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
+#include "oops/bsmAttribute.hpp"
#include "oops/constantPool.hpp"
#include "oops/constMethod.hpp"
#include "oops/cpCache.hpp"
@@ -166,10 +167,12 @@
nonstatic_field(ArrayKlass, _dimension, int) \
volatile_nonstatic_field(ArrayKlass, _higher_dimension, ObjArrayKlass*) \
volatile_nonstatic_field(ArrayKlass, _lower_dimension, ArrayKlass*) \
+ nonstatic_field(BSMAttributeEntries, _offsets, Array<u4>*) \
+ nonstatic_field(BSMAttributeEntries, _bootstrap_methods, Array<u2>*) \
+ nonstatic_field(ConstantPool, _bsm_entries, BSMAttributeEntries) \
nonstatic_field(ConstantPool, _tags, Array<u1>*) \
nonstatic_field(ConstantPool, _cache, ConstantPoolCache*) \
nonstatic_field(ConstantPool, _pool_holder, InstanceKlass*) \
- nonstatic_field(ConstantPool, _operands, Array<u2>*) \
nonstatic_field(ConstantPool, _resolved_klasses, Array<Klass*>*) \
nonstatic_field(ConstantPool, _length, int) \
nonstatic_field(ConstantPool, _minor_version, u2) \
@@ -534,7 +537,7 @@
nonstatic_field(nmethod, _osr_link, nmethod*) \
nonstatic_field(nmethod, _state, volatile signed char) \
nonstatic_field(nmethod, _exception_offset, int) \
- nonstatic_field(nmethod, _deopt_handler_offset, int) \
+ nonstatic_field(nmethod, _deopt_handler_entry_offset, int) \
nonstatic_field(nmethod, _orig_pc_offset, int) \
nonstatic_field(nmethod, _stub_offset, int) \
nonstatic_field(nmethod, _immutable_data_ref_count_offset, int) \
@@ -733,6 +736,7 @@
unchecked_nonstatic_field(Array<int>, _data, sizeof(int)) \
unchecked_nonstatic_field(Array<u1>, _data, sizeof(u1)) \
unchecked_nonstatic_field(Array<u2>, _data, sizeof(u2)) \
+ unchecked_nonstatic_field(Array<u4>, _data, sizeof(u4)) \
unchecked_nonstatic_field(Array<Method*>, _data, sizeof(Method*)) \
unchecked_nonstatic_field(Array<Klass*>, _data, sizeof(Klass*)) \
unchecked_nonstatic_field(Array<ResolvedFieldEntry>, _data, sizeof(ResolvedFieldEntry)) \
@@ -964,6 +968,7 @@
declare_toplevel_type(volatile Metadata*) \
\
declare_toplevel_type(DataLayout) \
+ declare_toplevel_type(BSMAttributeEntries) \
\
/********/ \
/* Oops */ \
diff --git a/src/hotspot/share/services/cpuTimeUsage.cpp b/src/hotspot/share/services/cpuTimeUsage.cpp
index 0c7ecfdb655..27b5e90fbaf 100644
--- a/src/hotspot/share/services/cpuTimeUsage.cpp
+++ b/src/hotspot/share/services/cpuTimeUsage.cpp
@@ -36,7 +36,6 @@
volatile bool CPUTimeUsage::Error::_has_error = false;
static inline jlong thread_cpu_time_or_zero(Thread* thread) {
- assert(!Universe::is_shutting_down(), "Should not query during shutdown");
jlong cpu_time = os::thread_cpu_time(thread);
if (cpu_time == -1) {
CPUTimeUsage::Error::mark_error();
diff --git a/src/hotspot/share/utilities/debug.cpp b/src/hotspot/share/utilities/debug.cpp
index 89c0a1ebc08..de39fe32dc1 100644
--- a/src/hotspot/share/utilities/debug.cpp
+++ b/src/hotspot/share/utilities/debug.cpp
@@ -29,6 +29,7 @@
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
+#include "cppstdlib/new.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
@@ -63,7 +64,6 @@
#include "utilities/unsigned5.hpp"
#include "utilities/vmError.hpp"
-#include <new>
#include
#include
diff --git a/src/hotspot/share/utilities/deferredStatic.hpp b/src/hotspot/share/utilities/deferredStatic.hpp
index 56bdb9b8e6b..3a32f920fe8 100644
--- a/src/hotspot/share/utilities/deferredStatic.hpp
+++ b/src/hotspot/share/utilities/deferredStatic.hpp
@@ -25,11 +25,10 @@
#ifndef SHARE_UTILITIES_DEFERREDSTATIC_HPP
#define SHARE_UTILITIES_DEFERREDSTATIC_HPP
+#include "cppstdlib/new.hpp"
#include "cppstdlib/type_traits.hpp"
#include "utilities/globalDefinitions.hpp"
-#include <new>
-
// The purpose of this class is to provide control over the initialization
// time for an object of type T with static storage duration. An instance of
// this class provides storage for an object, sized and aligned for T. The
diff --git a/src/hotspot/share/utilities/elfFile.cpp b/src/hotspot/share/utilities/elfFile.cpp
index 9ea19b38276..0b7713e9ca9 100644
--- a/src/hotspot/share/utilities/elfFile.cpp
+++ b/src/hotspot/share/utilities/elfFile.cpp
@@ -25,6 +25,7 @@
#if !defined(_WINDOWS) && !defined(__APPLE__)
+#include "cppstdlib/new.hpp"
#include "jvm_io.h"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
@@ -37,7 +38,6 @@
#include "utilities/ostream.hpp"
#include
-#include <new>
#include
#include
diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
index 1910759b434..3284fd3bd15 100644
--- a/src/hotspot/share/utilities/globalDefinitions.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp
@@ -1386,4 +1386,25 @@ template<typename T> inline constexpr bool DependentAlwaysFalse = false;
// handled.
bool IEEE_subnormal_handling_OK();
+//----------------------------------------------------------------------------------------------------
+// Forbid using the global allocator by HotSpot code.
+//
+// This is a subset of allocator and deallocator functions. These are
+// implicitly declared in all translation units, without needing to include
+// <new>; see C++17 6.7.4. This isn't even the full set of those; implicit
+// declarations involving std::align_val_t are not covered here, since that
+// type is defined in <new>. A translation unit that doesn't include <new> is
+// still likely to include this file. See cppstdlib/new.hpp for more details.
+#ifndef HOTSPOT_GTEST
+
+[[deprecated]] void* operator new(std::size_t);
+[[deprecated]] void operator delete(void*) noexcept;
+[[deprecated]] void operator delete(void*, std::size_t) noexcept;
+
+[[deprecated]] void* operator new[](std::size_t);
+[[deprecated]] void operator delete[](void*) noexcept;
+[[deprecated]] void operator delete[](void*, std::size_t) noexcept;
+
+#endif // HOTSPOT_GTEST
+
#endif // SHARE_UTILITIES_GLOBALDEFINITIONS_HPP
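
The intended effect of the block above: any HotSpot translation unit (outside of gtest builds) that reaches the replaceable global allocator directly now gets a deprecation diagnostic, which warnings-as-errors builds turn into a build failure, while class-specific allocation is untouched. A small illustration (not part of the patch):

void global_new_examples() {
  int* p = new int(42);   // warning: 'operator new' is deprecated
  delete p;               // warning: 'operator delete' is deprecated
}

// Types that declare their own operator new (for example CHeapObj<> or
// ResourceObj subclasses) select the class-specific allocator and are
// therefore not affected by the deprecated global declarations.
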
diff --git a/src/hotspot/share/utilities/lockFreeStack.hpp b/src/hotspot/share/utilities/lockFreeStack.hpp
index 43bc58fbc44..3f63482a268 100644
--- a/src/hotspot/share/utilities/lockFreeStack.hpp
+++ b/src/hotspot/share/utilities/lockFreeStack.hpp
@@ -25,6 +25,7 @@
#ifndef SHARE_UTILITIES_LOCKFREESTACK_HPP
#define SHARE_UTILITIES_LOCKFREESTACK_HPP
+#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -34,11 +35,14 @@
// a result, there is no allocation involved in adding objects to the stack
// or removing them from the stack.
//
-// To be used in a LockFreeStack of objects of type T, an object of
-// type T must have a list entry member of type T* volatile, with an
-// non-member accessor function returning a pointer to that member. A
-// LockFreeStack is associated with the class of its elements and an
-// entry member from that class.
+// To be used in a LockFreeStack of objects of type T, an object of type T
+// must have a list entry member. A list entry member is a data member whose
+// type is either (1) Atomic<T*>, or (2) T* volatile. There must be a
+// non-member or static member function returning a pointer to that member,
+// which is used to provide access to it by a LockFreeStack. A LockFreeStack
+// is associated with the class of its elements and an entry member from that
+// class by being specialized on the element class and a pointer to the
+// function for accessing that entry member.
//
// An object can be in multiple stacks at the same time, so long as
// each stack uses a different entry member. That is, the class of the
@@ -52,12 +56,12 @@
//
// \tparam T is the class of the elements in the stack.
//
-// \tparam next_ptr is a function pointer. Applying this function to
+// \tparam next_accessor is a function pointer. Applying this function to
// an object of type T must return a pointer to the list entry member
// of the object associated with the LockFreeStack type.
-template<typename T, T* volatile* (*next_ptr)(T&)>
+template<typename T, auto next_accessor>
class LockFreeStack {
- T* volatile _top;
+ Atomic<T*> _top;
void prepend_impl(T* first, T* last) {
T* cur = top();
@@ -65,12 +69,21 @@ class LockFreeStack {
do {
old = cur;
set_next(*last, cur);
- cur = AtomicAccess::cmpxchg(&_top, cur, first);
+ cur = _top.compare_exchange(cur, first);
} while (old != cur);
}
NONCOPYABLE(LockFreeStack);
+ template<typename NextAccessor>
+ static constexpr void use_atomic_access_impl(NextAccessor) {
+   static_assert(DependentAlwaysFalse<NextAccessor>, "Invalid next accessor");
+ }
+ static constexpr bool use_atomic_access_impl(T* volatile* (*)(T&)) { return true; }
+ static constexpr bool use_atomic_access_impl(Atomic<T*>* (*)(T&)) { return false; }
+
+ static constexpr bool use_atomic_access = use_atomic_access_impl(next_accessor);
+
public:
LockFreeStack() : _top(nullptr) {}
~LockFreeStack() { assert(empty(), "stack not empty"); }
@@ -89,7 +102,7 @@ public:
new_top = next(*result);
}
// CAS even on empty pop, for consistent membar behavior.
- result = AtomicAccess::cmpxchg(&_top, result, new_top);
+ result = _top.compare_exchange(result, new_top);
} while (result != old);
if (result != nullptr) {
set_next(*result, nullptr);
@@ -101,7 +114,7 @@ public:
// list of elements. Acts as a full memory barrier.
// postcondition: empty()
T* pop_all() {
- return AtomicAccess::xchg(&_top, (T*)nullptr);
+ return _top.exchange(nullptr);
}
// Atomically adds value to the top of this stack. Acts as a full
@@ -143,9 +156,9 @@ public:
// Return true if the stack is empty.
bool empty() const { return top() == nullptr; }
- // Return the most recently pushed element, or nullptr if the stack is empty.
+ // Return the most recently pushed element, or null if the stack is empty.
// The returned element is not removed from the stack.
- T* top() const { return AtomicAccess::load(&_top); }
+ T* top() const { return _top.load_relaxed(); }
// Return the number of objects in the stack. There must be no concurrent
// pops while the length is being determined.
@@ -160,7 +173,11 @@ public:
// Return the entry following value in the list used by the
// specialized LockFreeStack class.
static T* next(const T& value) {
- return AtomicAccess::load(next_ptr(const_cast<T&>(value)));
+ if constexpr (use_atomic_access) {
+   return AtomicAccess::load(next_accessor(const_cast<T&>(value)));
+ } else {
+   return next_accessor(const_cast<T&>(value))->load_relaxed();
+ }
}
// Set the entry following value to new_next in the list used by the
@@ -168,7 +185,11 @@ public:
// if value is in an instance of this specialization of LockFreeStack,
// there must be no concurrent push or pop operations on that stack.
static void set_next(T& value, T* new_next) {
- AtomicAccess::store(next_ptr(value), new_next);
+ if constexpr (use_atomic_access) {
+ AtomicAccess::store(next_accessor(value), new_next);
+ } else {
+ next_accessor(value)->store_relaxed(new_next);
+ }
}
};
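
A usage sketch of the updated contract (the element type, accessor, and template-argument syntax below are illustrative assumptions, not code from this patch): an element class now typically carries an Atomic<T*> entry member plus a static accessor, and the stack type is specialized on that accessor. The legacy T* volatile form remains supported through the second use_atomic_access_impl overload.

// Hypothetical element type with an Atomic<T*> entry member and its accessor.
class WorkItem {
  Atomic<WorkItem*> _next{nullptr};
 public:
  int _payload = 0;
  static Atomic<WorkItem*>* next_accessor(WorkItem& item) { return &item._next; }
};

using WorkItemStack = LockFreeStack<WorkItem, &WorkItem::next_accessor>;

// WorkItemStack stack;
// stack.push(item);                // acts as a full memory barrier
// WorkItem* latest = stack.pop();  // null when the stack is empty
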
diff --git a/src/java.base/share/classes/com/sun/crypto/provider/DHKEM.java b/src/java.base/share/classes/com/sun/crypto/provider/DHKEM.java
index b27320ed24b..c7372a4c2c8 100644
--- a/src/java.base/share/classes/com/sun/crypto/provider/DHKEM.java
+++ b/src/java.base/share/classes/com/sun/crypto/provider/DHKEM.java
@@ -26,26 +26,51 @@ package com.sun.crypto.provider;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
+import java.io.Serial;
import java.math.BigInteger;
-import java.security.*;
-import java.security.interfaces.ECKey;
+import java.security.AsymmetricKey;
+import java.security.InvalidAlgorithmParameterException;
+import java.security.InvalidKeyException;
+import java.security.KeyFactory;
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.security.NoSuchAlgorithmException;
+import java.security.PrivateKey;
+import java.security.ProviderException;
+import java.security.PublicKey;
+import java.security.SecureRandom;
import java.security.interfaces.ECPublicKey;
-import java.security.interfaces.XECKey;
import java.security.interfaces.XECPublicKey;
-import java.security.spec.*;
+import java.security.spec.AlgorithmParameterSpec;
+import java.security.spec.ECParameterSpec;
+import java.security.spec.ECPoint;
+import java.security.spec.ECPrivateKeySpec;
+import java.security.spec.ECPublicKeySpec;
+import java.security.spec.InvalidKeySpecException;
+import java.security.spec.KeySpec;
+import java.security.spec.NamedParameterSpec;
+import java.security.spec.XECPrivateKeySpec;
+import java.security.spec.XECPublicKeySpec;
import java.util.Arrays;
import java.util.Objects;
-import javax.crypto.*;
-import javax.crypto.spec.SecretKeySpec;
+import javax.crypto.DecapsulateException;
+import javax.crypto.KDF;
+import javax.crypto.KEM;
+import javax.crypto.KEMSpi;
+import javax.crypto.KeyAgreement;
+import javax.crypto.SecretKey;
import javax.crypto.spec.HKDFParameterSpec;
+import javax.crypto.spec.SecretKeySpec;
import sun.security.jca.JCAUtil;
-import sun.security.util.*;
-
-import jdk.internal.access.SharedSecrets;
+import sun.security.util.ArrayUtil;
+import sun.security.util.CurveDB;
+import sun.security.util.ECUtil;
+import sun.security.util.InternalPrivateKey;
+import sun.security.util.NamedCurve;
+import sun.security.util.SliceableSecretKey;
// Implementing DHKEM defined inside https://www.rfc-editor.org/rfc/rfc9180.html,
-// without the AuthEncap and AuthDecap functions
public class DHKEM implements KEMSpi {
private static final byte[] KEM = new byte[]
@@ -65,80 +90,86 @@ public class DHKEM implements KEMSpi {
private static final byte[] EMPTY = new byte[0];
private record Handler(Params params, SecureRandom secureRandom,
- PrivateKey skR, PublicKey pkR)
+ PrivateKey skS, PublicKey pkS, // sender keys
+ PrivateKey skR, PublicKey pkR) // receiver keys
implements EncapsulatorSpi, DecapsulatorSpi {
@Override
public KEM.Encapsulated engineEncapsulate(int from, int to, String algorithm) {
- Objects.checkFromToIndex(from, to, params.Nsecret);
+ Objects.checkFromToIndex(from, to, params.nsecret);
Objects.requireNonNull(algorithm, "null algorithm");
KeyPair kpE = params.generateKeyPair(secureRandom);
PrivateKey skE = kpE.getPrivate();
PublicKey pkE = kpE.getPublic();
- byte[] pkEm = params.SerializePublicKey(pkE);
- byte[] pkRm = params.SerializePublicKey(pkR);
- byte[] kem_context = concat(pkEm, pkRm);
- byte[] key = null;
+ byte[] pkEm = params.serializePublicKey(pkE);
+ byte[] pkRm = params.serializePublicKey(pkR);
try {
- byte[] dh = params.DH(skE, pkR);
- key = params.ExtractAndExpand(dh, kem_context);
- return new KEM.Encapsulated(
- new SecretKeySpec(key, from, to - from, algorithm),
- pkEm, null);
+ SecretKey key;
+ if (skS == null) {
+ byte[] kem_context = concat(pkEm, pkRm);
+ key = params.deriveKey(algorithm, from, to, kem_context,
+ params.dh(skE, pkR));
+ } else {
+ byte[] pkSm = params.serializePublicKey(pkS);
+ byte[] kem_context = concat(pkEm, pkRm, pkSm);
+ key = params.deriveKey(algorithm, from, to, kem_context,
+ params.dh(skE, pkR), params.dh(skS, pkR));
+ }
+ return new KEM.Encapsulated(key, pkEm, null);
+ } catch (UnsupportedOperationException e) {
+ throw e;
} catch (Exception e) {
throw new ProviderException("internal error", e);
- } finally {
- // `key` has been cloned into the `SecretKeySpec` within the
- // returned `KEM.Encapsulated`, so it can now be cleared.
- if (key != null) {
- Arrays.fill(key, (byte)0);
- }
}
}
@Override
public SecretKey engineDecapsulate(byte[] encapsulation,
int from, int to, String algorithm) throws DecapsulateException {
- Objects.checkFromToIndex(from, to, params.Nsecret);
+ Objects.checkFromToIndex(from, to, params.nsecret);
Objects.requireNonNull(algorithm, "null algorithm");
Objects.requireNonNull(encapsulation, "null encapsulation");
- if (encapsulation.length != params.Npk) {
+ if (encapsulation.length != params.npk) {
throw new DecapsulateException("incorrect encapsulation size");
}
- byte[] key = null;
try {
- PublicKey pkE = params.DeserializePublicKey(encapsulation);
- byte[] dh = params.DH(skR, pkE);
- byte[] pkRm = params.SerializePublicKey(pkR);
- byte[] kem_context = concat(encapsulation, pkRm);
- key = params.ExtractAndExpand(dh, kem_context);
- return new SecretKeySpec(key, from, to - from, algorithm);
+ PublicKey pkE = params.deserializePublicKey(encapsulation);
+ byte[] pkRm = params.serializePublicKey(pkR);
+ if (pkS == null) {
+ byte[] kem_context = concat(encapsulation, pkRm);
+ return params.deriveKey(algorithm, from, to, kem_context,
+ params.dh(skR, pkE));
+ } else {
+ byte[] pkSm = params.serializePublicKey(pkS);
+ byte[] kem_context = concat(encapsulation, pkRm, pkSm);
+ return params.deriveKey(algorithm, from, to, kem_context,
+ params.dh(skR, pkE), params.dh(skR, pkS));
+ }
+ } catch (UnsupportedOperationException e) {
+ throw e;
} catch (IOException | InvalidKeyException e) {
throw new DecapsulateException("Cannot decapsulate", e);
} catch (Exception e) {
throw new ProviderException("internal error", e);
- } finally {
- if (key != null) {
- Arrays.fill(key, (byte)0);
- }
}
}
@Override
public int engineSecretSize() {
- return params.Nsecret;
+ return params.nsecret;
}
@Override
public int engineEncapsulationSize() {
- return params.Npk;
+ return params.npk;
}
}
// Not really a random. For KAT test only. It generates key pair from ikm.
public static class RFC9180DeriveKeyPairSR extends SecureRandom {
- static final long serialVersionUID = 0L;
+ @Serial
+ private static final long serialVersionUID = 0L;
private final byte[] ikm;
@@ -147,7 +178,7 @@ public class DHKEM implements KEMSpi {
this.ikm = ikm;
}
- public KeyPair derive(Params params) {
+ private KeyPair derive(Params params) {
try {
return params.deriveKeyPair(ikm);
} catch (Exception e) {
@@ -183,9 +214,9 @@ public class DHKEM implements KEMSpi {
;
private final int kem_id;
- private final int Nsecret;
- private final int Nsk;
- private final int Npk;
+ private final int nsecret;
+ private final int nsk;
+ private final int npk;
private final String kaAlgorithm;
private final String keyAlgorithm;
private final AlgorithmParameterSpec spec;
@@ -193,18 +224,18 @@ public class DHKEM implements KEMSpi {
private final byte[] suiteId;
- Params(int kem_id, int Nsecret, int Nsk, int Npk,
+ Params(int kem_id, int nsecret, int nsk, int npk,
String kaAlgorithm, String keyAlgorithm, AlgorithmParameterSpec spec,
String hkdfAlgorithm) {
this.kem_id = kem_id;
this.spec = spec;
- this.Nsecret = Nsecret;
- this.Nsk = Nsk;
- this.Npk = Npk;
+ this.nsecret = nsecret;
+ this.nsk = nsk;
+ this.npk = npk;
this.kaAlgorithm = kaAlgorithm;
this.keyAlgorithm = keyAlgorithm;
this.hkdfAlgorithm = hkdfAlgorithm;
- suiteId = concat(KEM, I2OSP(kem_id, 2));
+ suiteId = concat(KEM, i2OSP(kem_id, 2));
}
private boolean isEC() {
@@ -224,18 +255,18 @@ public class DHKEM implements KEMSpi {
}
}
- private byte[] SerializePublicKey(PublicKey k) {
+ private byte[] serializePublicKey(PublicKey k) {
if (isEC()) {
ECPoint w = ((ECPublicKey) k).getW();
return ECUtil.encodePoint(w, ((NamedCurve) spec).getCurve());
} else {
byte[] uArray = ((XECPublicKey) k).getU().toByteArray();
ArrayUtil.reverse(uArray);
- return Arrays.copyOf(uArray, Npk);
+ return Arrays.copyOf(uArray, npk);
}
}
- private PublicKey DeserializePublicKey(byte[] data)
+ private PublicKey deserializePublicKey(byte[] data)
throws IOException, NoSuchAlgorithmException, InvalidKeySpecException {
KeySpec keySpec;
if (isEC()) {
@@ -251,29 +282,59 @@ public class DHKEM implements KEMSpi {
return KeyFactory.getInstance(keyAlgorithm).generatePublic(keySpec);
}
- private byte[] DH(PrivateKey skE, PublicKey pkR)
+ private SecretKey dh(PrivateKey skE, PublicKey pkR)
throws NoSuchAlgorithmException, InvalidKeyException {
KeyAgreement ka = KeyAgreement.getInstance(kaAlgorithm);
ka.init(skE);
ka.doPhase(pkR, true);
- return ka.generateSecret();
+ return ka.generateSecret("Generic");
}
- private byte[] ExtractAndExpand(byte[] dh, byte[] kem_context)
- throws NoSuchAlgorithmException, InvalidKeyException {
- KDF hkdf = KDF.getInstance(hkdfAlgorithm);
- SecretKey eae_prk = LabeledExtract(hkdf, suiteId, EAE_PRK, dh);
- try {
- return LabeledExpand(hkdf, suiteId, eae_prk, SHARED_SECRET,
- kem_context, Nsecret);
- } finally {
- if (eae_prk instanceof SecretKeySpec s) {
- SharedSecrets.getJavaxCryptoSpecAccess()
- .clearSecretKeySpec(s);
+        // The final shared-secret derivation for either the encapsulator
+        // or the decapsulator. Key slicing is handled internally.
+        // Throws UOE if a slice of the key cannot be obtained.
+ private SecretKey deriveKey(String alg, int from, int to,
+ byte[] kem_context, SecretKey... dhs)
+ throws NoSuchAlgorithmException {
+ if (from == 0 && to == nsecret) {
+ return extractAndExpand(kem_context, alg, dhs);
+ } else {
+ // First get shared secrets in "Generic" and then get a slice
+ // of it in the requested algorithm.
+ var fullKey = extractAndExpand(kem_context, "Generic", dhs);
+ if ("RAW".equalsIgnoreCase(fullKey.getFormat())) {
+ byte[] km = fullKey.getEncoded();
+ if (km == null) {
+ // Should not happen if format is "RAW"
+ throw new UnsupportedOperationException("Key extract failed");
+ } else {
+ try {
+ return new SecretKeySpec(km, from, to - from, alg);
+ } finally {
+ Arrays.fill(km, (byte)0);
+ }
+ }
+ } else if (fullKey instanceof SliceableSecretKey ssk) {
+ return ssk.slice(alg, from, to);
+ } else {
+ throw new UnsupportedOperationException("Cannot extract key");
}
}
}
+ private SecretKey extractAndExpand(byte[] kem_context, String alg, SecretKey... dhs)
+ throws NoSuchAlgorithmException {
+ var kdf = KDF.getInstance(hkdfAlgorithm);
+ var builder = labeledExtract(suiteId, EAE_PRK);
+ for (var dh : dhs) builder.addIKM(dh);
+ try {
+ return kdf.deriveKey(alg,
+ labeledExpand(builder, suiteId, SHARED_SECRET, kem_context, nsecret));
+ } catch (InvalidAlgorithmParameterException e) {
+ throw new ProviderException(e);
+ }
+ }
+
private PublicKey getPublicKey(PrivateKey sk)
throws InvalidKeyException {
if (!(sk instanceof InternalPrivateKey)) {
@@ -298,45 +359,37 @@ public class DHKEM implements KEMSpi {
// For KAT tests only. See RFC9180DeriveKeyPairSR.
public KeyPair deriveKeyPair(byte[] ikm) throws Exception {
- KDF hkdf = KDF.getInstance(hkdfAlgorithm);
- SecretKey dkp_prk = LabeledExtract(hkdf, suiteId, DKP_PRK, ikm);
- try {
- if (isEC()) {
- NamedCurve curve = (NamedCurve) spec;
- BigInteger sk = BigInteger.ZERO;
- int counter = 0;
- while (sk.signum() == 0 ||
- sk.compareTo(curve.getOrder()) >= 0) {
- if (counter > 255) {
- throw new RuntimeException();
- }
- byte[] bytes = LabeledExpand(hkdf, suiteId, dkp_prk,
- CANDIDATE, I2OSP(counter, 1), Nsk);
- // bitmask is defined to be 0xFF for P-256 and P-384,
- // and 0x01 for P-521
- if (this == Params.P521) {
- bytes[0] = (byte) (bytes[0] & 0x01);
- }
- sk = new BigInteger(1, (bytes));
- counter = counter + 1;
+ var kdf = KDF.getInstance(hkdfAlgorithm);
+ var builder = labeledExtract(suiteId, DKP_PRK).addIKM(ikm);
+ if (isEC()) {
+ NamedCurve curve = (NamedCurve) spec;
+ BigInteger sk = BigInteger.ZERO;
+ int counter = 0;
+ while (sk.signum() == 0 || sk.compareTo(curve.getOrder()) >= 0) {
+ if (counter > 255) {
+                    // Extremely unlikely; should not happen in practice
+ throw new ProviderException("DeriveKeyPairError");
}
- PrivateKey k = DeserializePrivateKey(sk.toByteArray());
- return new KeyPair(getPublicKey(k), k);
- } else {
- byte[] sk = LabeledExpand(hkdf, suiteId, dkp_prk, SK, EMPTY,
- Nsk);
- PrivateKey k = DeserializePrivateKey(sk);
- return new KeyPair(getPublicKey(k), k);
- }
- } finally {
- if (dkp_prk instanceof SecretKeySpec s) {
- SharedSecrets.getJavaxCryptoSpecAccess()
- .clearSecretKeySpec(s);
+ byte[] bytes = kdf.deriveData(labeledExpand(builder,
+ suiteId, CANDIDATE, i2OSP(counter, 1), nsk));
+ // bitmask is defined to be 0xFF for P-256 and P-384, and 0x01 for P-521
+ if (this == Params.P521) {
+ bytes[0] = (byte) (bytes[0] & 0x01);
+ }
+ sk = new BigInteger(1, (bytes));
+ counter = counter + 1;
}
+ PrivateKey k = deserializePrivateKey(sk.toByteArray());
+ return new KeyPair(getPublicKey(k), k);
+ } else {
+ byte[] sk = kdf.deriveData(labeledExpand(builder,
+ suiteId, SK, EMPTY, nsk));
+ PrivateKey k = deserializePrivateKey(sk);
+ return new KeyPair(getPublicKey(k), k);
}
}
- private PrivateKey DeserializePrivateKey(byte[] data) throws Exception {
+ private PrivateKey deserializePrivateKey(byte[] data) throws Exception {
KeySpec keySpec = isEC()
? new ECPrivateKeySpec(new BigInteger(1, (data)), (NamedCurve) spec)
: new XECPrivateKeySpec(spec, data);
@@ -359,7 +412,22 @@ public class DHKEM implements KEMSpi {
throw new InvalidAlgorithmParameterException("no spec needed");
}
Params params = paramsFromKey(pk);
- return new Handler(params, getSecureRandom(secureRandom), null, pk);
+ return new Handler(params, getSecureRandom(secureRandom), null, null, null, pk);
+ }
+
+ // AuthEncap is not public KEM API
+ public EncapsulatorSpi engineNewAuthEncapsulator(PublicKey pkR, PrivateKey skS,
+ AlgorithmParameterSpec spec, SecureRandom secureRandom)
+ throws InvalidAlgorithmParameterException, InvalidKeyException {
+ if (pkR == null || skS == null) {
+ throw new InvalidKeyException("input key is null");
+ }
+ if (spec != null) {
+ throw new InvalidAlgorithmParameterException("no spec needed");
+ }
+ Params params = paramsFromKey(pkR);
+ return new Handler(params, getSecureRandom(secureRandom),
+ skS, params.getPublicKey(skS), null, pkR);
}
@Override
@@ -372,20 +440,34 @@ public class DHKEM implements KEMSpi {
throw new InvalidAlgorithmParameterException("no spec needed");
}
Params params = paramsFromKey(sk);
- return new Handler(params, null, sk, params.getPublicKey(sk));
+ return new Handler(params, null, null, null, sk, params.getPublicKey(sk));
}
- private Params paramsFromKey(Key k) throws InvalidKeyException {
- if (k instanceof ECKey eckey) {
- if (ECUtil.equals(eckey.getParams(), CurveDB.P_256)) {
+ // AuthDecap is not public KEM API
+ public DecapsulatorSpi engineNewAuthDecapsulator(
+ PrivateKey skR, PublicKey pkS, AlgorithmParameterSpec spec)
+ throws InvalidAlgorithmParameterException, InvalidKeyException {
+ if (skR == null || pkS == null) {
+ throw new InvalidKeyException("input key is null");
+ }
+ if (spec != null) {
+ throw new InvalidAlgorithmParameterException("no spec needed");
+ }
+ Params params = paramsFromKey(skR);
+ return new Handler(params, null, null, pkS, skR, params.getPublicKey(skR));
+ }
+
+ private Params paramsFromKey(AsymmetricKey k) throws InvalidKeyException {
+ var p = k.getParams();
+ if (p instanceof ECParameterSpec ecp) {
+ if (ECUtil.equals(ecp, CurveDB.P_256)) {
return Params.P256;
- } else if (ECUtil.equals(eckey.getParams(), CurveDB.P_384)) {
+ } else if (ECUtil.equals(ecp, CurveDB.P_384)) {
return Params.P384;
- } else if (ECUtil.equals(eckey.getParams(), CurveDB.P_521)) {
+ } else if (ECUtil.equals(ecp, CurveDB.P_521)) {
return Params.P521;
}
- } else if (k instanceof XECKey xkey
- && xkey.getParams() instanceof NamedParameterSpec ns) {
+ } else if (p instanceof NamedParameterSpec ns) {
if (ns.getName().equalsIgnoreCase("X25519")) {
return Params.X25519;
} else if (ns.getName().equalsIgnoreCase("X448")) {
@@ -401,8 +483,11 @@ public class DHKEM implements KEMSpi {
return o.toByteArray();
}
- private static byte[] I2OSP(int n, int w) {
- assert n < 256;
+ // I2OSP(n, w) as defined in RFC 9180 Section 3.
+ // In DHKEM and HPKE, number is always <65536
+ // and converted to at most 2 bytes.
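+    // For example, i2OSP(0x0102, 2) yields {0x01, 0x02} and i2OSP(7, 1) yields {0x07}.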
+ public static byte[] i2OSP(int n, int w) {
+ assert n < 65536;
assert w == 1 || w == 2;
if (w == 1) {
return new byte[] { (byte) n };
@@ -411,32 +496,32 @@ public class DHKEM implements KEMSpi {
}
}
- private static SecretKey LabeledExtract(KDF hkdf, byte[] suite_id,
- byte[] label, byte[] ikm) throws InvalidKeyException {
- SecretKeySpec s = new SecretKeySpec(concat(HPKE_V1, suite_id, label,
- ikm), "IKM");
- try {
- HKDFParameterSpec spec =
- HKDFParameterSpec.ofExtract().addIKM(s).extractOnly();
- return hkdf.deriveKey("Generic", spec);
- } catch (InvalidAlgorithmParameterException |
- NoSuchAlgorithmException e) {
- throw new InvalidKeyException(e.getMessage(), e);
- } finally {
- SharedSecrets.getJavaxCryptoSpecAccess().clearSecretKeySpec(s);
- }
+ // Create a LabeledExtract builder with labels.
+    // More IKM and salt can be added to the returned builder.
+ public static HKDFParameterSpec.Builder labeledExtract(
+ byte[] suiteId, byte[] label) {
+ return HKDFParameterSpec.ofExtract()
+ .addIKM(HPKE_V1).addIKM(suiteId).addIKM(label);
}
- private static byte[] LabeledExpand(KDF hkdf, byte[] suite_id,
- SecretKey prk, byte[] label, byte[] info, int L)
- throws InvalidKeyException {
- byte[] labeled_info = concat(I2OSP(L, 2), HPKE_V1, suite_id, label,
- info);
- try {
- return hkdf.deriveData(HKDFParameterSpec.expandOnly(
- prk, labeled_info, L));
- } catch (InvalidAlgorithmParameterException iape) {
- throw new InvalidKeyException(iape.getMessage(), iape);
- }
+ // Create a labeled info from info and labels
+ private static byte[] labeledInfo(
+ byte[] suiteId, byte[] label, byte[] info, int length) {
+ return concat(i2OSP(length, 2), HPKE_V1, suiteId, label, info);
+ }
+
+ // LabeledExpand from a builder
+ public static HKDFParameterSpec labeledExpand(
+ HKDFParameterSpec.Builder builder,
+ byte[] suiteId, byte[] label, byte[] info, int length) {
+ return builder.thenExpand(
+ labeledInfo(suiteId, label, info, length), length);
+ }
+
+ // LabeledExpand from a prk
+ public static HKDFParameterSpec labeledExpand(
+ SecretKey prk, byte[] suiteId, byte[] label, byte[] info, int length) {
+ return HKDFParameterSpec.expandOnly(
+ prk, labeledInfo(suiteId, label, info, length), length);
}
}
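
For reference, the labeling that labeledExtract/labeledExpand implement (RFC 9180 Section 4) is: Extract over "HPKE-v1" || suite_id || label || ikm, and Expand with the info replaced by I2OSP(L, 2) || "HPKE-v1" || suite_id || label || info. The standalone sketch below restates LabeledExpand using only API already used above; the class and method names are invented for the example:

import java.io.ByteArrayOutputStream;
import javax.crypto.KDF;
import javax.crypto.SecretKey;
import javax.crypto.spec.HKDFParameterSpec;

// Hypothetical standalone sketch of RFC 9180 LabeledExpand.
class LabeledHkdfSketch {
    private static final byte[] HPKE_V1 = {'H', 'P', 'K', 'E', '-', 'v', '1'};

    private static byte[] concat(byte[]... inputs) {
        var out = new ByteArrayOutputStream();
        for (byte[] b : inputs) {
            out.writeBytes(b);
        }
        return out.toByteArray();
    }

    // LabeledExpand(prk, label, info, L):
    //   labeled_info = I2OSP(L, 2) || "HPKE-v1" || suite_id || label || info
    //   return Expand(prk, labeled_info, L)
    static byte[] labeledExpand(String hkdfAlgorithm, SecretKey prk,
            byte[] suiteId, byte[] label, byte[] info, int length)
            throws Exception {
        byte[] labeledInfo = concat(
                new byte[] {(byte) (length >> 8), (byte) length},  // I2OSP(L, 2)
                HPKE_V1, suiteId, label, info);
        return KDF.getInstance(hkdfAlgorithm)
                .deriveData(HKDFParameterSpec.expandOnly(prk, labeledInfo, length));
    }
}
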
diff --git a/src/java.base/share/classes/com/sun/crypto/provider/HPKE.java b/src/java.base/share/classes/com/sun/crypto/provider/HPKE.java
new file mode 100644
index 00000000000..eee5f59cc75
--- /dev/null
+++ b/src/java.base/share/classes/com/sun/crypto/provider/HPKE.java
@@ -0,0 +1,588 @@
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.sun.crypto.provider;
+
+import sun.security.util.CurveDB;
+import sun.security.util.ECUtil;
+
+import javax.crypto.BadPaddingException;
+import javax.crypto.Cipher;
+import javax.crypto.CipherSpi;
+import javax.crypto.DecapsulateException;
+import javax.crypto.IllegalBlockSizeException;
+import javax.crypto.KDF;
+import javax.crypto.KEM;
+import javax.crypto.NoSuchPaddingException;
+import javax.crypto.SecretKey;
+import javax.crypto.ShortBufferException;
+import javax.crypto.spec.GCMParameterSpec;
+import javax.crypto.spec.HPKEParameterSpec;
+import javax.crypto.spec.IvParameterSpec;
+import java.io.ByteArrayOutputStream;
+import java.nio.ByteBuffer;
+import java.security.AlgorithmParameters;
+import java.security.AsymmetricKey;
+import java.security.InvalidAlgorithmParameterException;
+import java.security.InvalidKeyException;
+import java.security.Key;
+import java.security.NoSuchAlgorithmException;
+import java.security.PrivateKey;
+import java.security.ProviderException;
+import java.security.PublicKey;
+import java.security.SecureRandom;
+import java.security.spec.AlgorithmParameterSpec;
+import java.security.spec.ECParameterSpec;
+import java.security.spec.NamedParameterSpec;
+import java.util.Arrays;
+
+public class HPKE extends CipherSpi {
+
+ private static final byte[] HPKE = new byte[]
+ {'H', 'P', 'K', 'E'};
+ private static final byte[] SEC = new byte[]
+ {'s', 'e', 'c'};
+ private static final byte[] PSK_ID_HASH = new byte[]
+ {'p', 's', 'k', '_', 'i', 'd', '_', 'h', 'a', 's', 'h'};
+ private static final byte[] INFO_HASH = new byte[]
+ {'i', 'n', 'f', 'o', '_', 'h', 'a', 's', 'h'};
+ private static final byte[] SECRET = new byte[]
+ {'s', 'e', 'c', 'r', 'e', 't'};
+ private static final byte[] EXP = new byte[]
+ {'e', 'x', 'p'};
+ private static final byte[] KEY = new byte[]
+ {'k', 'e', 'y'};
+ private static final byte[] BASE_NONCE = new byte[]
+ {'b', 'a', 's', 'e', '_', 'n', 'o', 'n', 'c', 'e'};
+
+ private static final int BEGIN = 1;
+ private static final int EXPORT_ONLY = 2; // init done with aead_id == 65535
+    private static final int ENCRYPT_AND_EXPORT = 3; // init done with an AEAD
+ private static final int AFTER_FINAL = 4; // after doFinal, need reinit internal cipher
+
+ private int state = BEGIN;
+ private Impl impl;
+
+ @Override
+ protected void engineSetMode(String mode) throws NoSuchAlgorithmException {
+ throw new NoSuchAlgorithmException(mode);
+ }
+
+ @Override
+ protected void engineSetPadding(String padding) throws NoSuchPaddingException {
+ throw new NoSuchPaddingException(padding);
+ }
+
+ @Override
+ protected int engineGetBlockSize() {
+ if (state == ENCRYPT_AND_EXPORT || state == AFTER_FINAL) {
+ return impl.aead.cipher.getBlockSize();
+ } else {
+ return 0;
+ }
+ }
+
+ @Override
+ protected int engineGetOutputSize(int inputLen) {
+ if (state == ENCRYPT_AND_EXPORT || state == AFTER_FINAL) {
+ return impl.aead.cipher.getOutputSize(inputLen);
+ } else {
+ return 0;
+ }
+ }
+
+ @Override
+ protected byte[] engineGetIV() {
+ return (state == BEGIN || impl.kemEncaps == null)
+ ? null : impl.kemEncaps.clone();
+ }
+
+ @Override
+ protected AlgorithmParameters engineGetParameters() {
+ return null;
+ }
+
+ @Override
+ protected void engineInit(int opmode, Key key, SecureRandom random)
+ throws InvalidKeyException {
+ throw new InvalidKeyException("HPKEParameterSpec must be provided");
+ }
+
+ @Override
+ protected void engineInit(int opmode, Key key,
+ AlgorithmParameterSpec params, SecureRandom random)
+ throws InvalidKeyException, InvalidAlgorithmParameterException {
+ impl = new Impl(opmode);
+ if (!(key instanceof AsymmetricKey ak)) {
+ throw new InvalidKeyException("Not an asymmetric key");
+ }
+ if (params == null) {
+ throw new InvalidAlgorithmParameterException(
+ "HPKEParameterSpec must be provided");
+ } else if (params instanceof HPKEParameterSpec hps) {
+ impl.init(ak, hps, random);
+ } else {
+ throw new InvalidAlgorithmParameterException(
+ "Unsupported params type: " + params.getClass());
+ }
+ if (impl.hasEncrypt()) {
+ impl.aead.start(impl.opmode, impl.context.k, impl.context.computeNonce());
+ state = ENCRYPT_AND_EXPORT;
+ } else {
+ state = EXPORT_ONLY;
+ }
+ }
+
+ @Override
+ protected void engineInit(int opmode, Key key,
+ AlgorithmParameters params, SecureRandom random)
+ throws InvalidKeyException, InvalidAlgorithmParameterException {
+ throw new InvalidKeyException("HPKEParameterSpec must be provided");
+ }
+
+ // state is ENCRYPT_AND_EXPORT after this call succeeds
+ private void maybeReinitInternalCipher() {
+ if (state == BEGIN) {
+ throw new IllegalStateException("Illegal state: " + state);
+ }
+ if (state == EXPORT_ONLY) {
+ throw new UnsupportedOperationException();
+ }
+ if (state == AFTER_FINAL) {
+ impl.aead.start(impl.opmode, impl.context.k, impl.context.computeNonce());
+ state = ENCRYPT_AND_EXPORT;
+ }
+ }
+
+ @Override
+ protected byte[] engineUpdate(byte[] input, int inputOffset, int inputLen) {
+ maybeReinitInternalCipher();
+ return impl.aead.cipher.update(input, inputOffset, inputLen);
+ }
+
+ @Override
+ protected int engineUpdate(byte[] input, int inputOffset, int inputLen,
+ byte[] output, int outputOffset) throws ShortBufferException {
+ maybeReinitInternalCipher();
+ return impl.aead.cipher.update(
+ input, inputOffset, inputLen, output, outputOffset);
+ }
+
+ @Override
+ protected void engineUpdateAAD(byte[] src, int offset, int len) {
+ maybeReinitInternalCipher();
+ impl.aead.cipher.updateAAD(src, offset, len);
+ }
+
+ @Override
+ protected void engineUpdateAAD(ByteBuffer src) {
+ maybeReinitInternalCipher();
+ impl.aead.cipher.updateAAD(src);
+ }
+
+ @Override
+ protected byte[] engineDoFinal(byte[] input, int inputOffset, int inputLen)
+ throws IllegalBlockSizeException, BadPaddingException {
+ maybeReinitInternalCipher();
+ impl.context.IncrementSeq();
+ state = AFTER_FINAL;
+        if (input == null) { // work around a bug in doFinal(null, ?, ?)
+ return impl.aead.cipher.doFinal();
+ } else {
+ return impl.aead.cipher.doFinal(input, inputOffset, inputLen);
+ }
+ }
+
+ @Override
+ protected int engineDoFinal(byte[] input, int inputOffset, int inputLen,
+ byte[] output, int outputOffset) throws ShortBufferException,
+ IllegalBlockSizeException, BadPaddingException {
+ maybeReinitInternalCipher();
+ impl.context.IncrementSeq();
+ state = AFTER_FINAL;
+ return impl.aead.cipher.doFinal(
+ input, inputOffset, inputLen, output, outputOffset);
+ }
+
+ //@Override
+ protected SecretKey engineExportKey(String algorithm, byte[] context, int length) {
+ if (state == BEGIN) {
+ throw new IllegalStateException("State: " + state);
+ } else {
+ return impl.context.exportKey(algorithm, context, length);
+ }
+ }
+
+ //@Override
+ protected byte[] engineExportData(byte[] context, int length) {
+ if (state == BEGIN) {
+ throw new IllegalStateException("State: " + state);
+ } else {
+ return impl.context.exportData(context, length);
+ }
+ }
+
+ private static class AEAD {
+ final Cipher cipher;
+ final int nk, nn, nt;
+ final int id;
+ public AEAD(int id) throws InvalidAlgorithmParameterException {
+ this.id = id;
+ try {
+ switch (id) {
+ case HPKEParameterSpec.AEAD_AES_128_GCM -> {
+ cipher = Cipher.getInstance("AES/GCM/NoPadding");
+ nk = 16;
+ }
+ case HPKEParameterSpec.AEAD_AES_256_GCM -> {
+ cipher = Cipher.getInstance("AES/GCM/NoPadding");
+ nk = 32;
+ }
+ case HPKEParameterSpec.AEAD_CHACHA20_POLY1305 -> {
+ cipher = Cipher.getInstance("ChaCha20-Poly1305");
+ nk = 32;
+ }
+ case HPKEParameterSpec.EXPORT_ONLY -> {
+ cipher = null;
+ nk = -1;
+ }
+ default -> throw new InvalidAlgorithmParameterException(
+ "Unknown aead_id: " + id);
+ }
+ } catch (NoSuchAlgorithmException | NoSuchPaddingException e) {
+ throw new ProviderException("Internal error", e);
+ }
+ nn = 12; nt = 16;
+ }
+
+ void start(int opmode, SecretKey key, byte[] nonce) {
+ try {
+ if (id == HPKEParameterSpec.AEAD_CHACHA20_POLY1305) {
+ cipher.init(opmode, key, new IvParameterSpec(nonce));
+ } else {
+ cipher.init(opmode, key, new GCMParameterSpec(nt * 8, nonce));
+ }
+ } catch (InvalidAlgorithmParameterException | InvalidKeyException e) {
+ throw new ProviderException("Internal error", e);
+ }
+ }
+ }
+
+ private static class Impl {
+
+ final int opmode;
+
+ HPKEParameterSpec params;
+ Context context;
+ AEAD aead;
+
+ byte[] suite_id;
+ String kdfAlg;
+ int kdfNh;
+
+ // only used on sender side
+ byte[] kemEncaps;
+
+ class Context {
+ final SecretKey k; // null if only export
+ final byte[] base_nonce;
+ final SecretKey exporter_secret;
+
+ byte[] seq = new byte[aead.nn];
+
+ public Context(SecretKey sk, byte[] base_nonce,
+ SecretKey exporter_secret) {
+ this.k = sk;
+ this.base_nonce = base_nonce;
+ this.exporter_secret = exporter_secret;
+ }
+
+ SecretKey exportKey(String algorithm, byte[] exporter_context, int length) {
+ if (exporter_context == null) {
+ throw new IllegalArgumentException("Null exporter_context");
+ }
+ try {
+ var kdf = KDF.getInstance(kdfAlg);
+ return kdf.deriveKey(algorithm, DHKEM.labeledExpand(
+ exporter_secret, suite_id, SEC, exporter_context, length));
+ } catch (InvalidAlgorithmParameterException | NoSuchAlgorithmException e) {
+ // algorithm not accepted by HKDF, length too big or too small
+ throw new IllegalArgumentException("Invalid input", e);
+ }
+ }
+
+ byte[] exportData(byte[] exporter_context, int length) {
+ if (exporter_context == null) {
+ throw new IllegalArgumentException("Null exporter_context");
+ }
+ try {
+ var kdf = KDF.getInstance(kdfAlg);
+ return kdf.deriveData(DHKEM.labeledExpand(
+ exporter_secret, suite_id, SEC, exporter_context, length));
+ } catch (InvalidAlgorithmParameterException | NoSuchAlgorithmException e) {
+ // algorithm not accepted by HKDF, length too big or too small
+ throw new IllegalArgumentException("Invalid input", e);
+ }
+ }
+
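+            // RFC 9180 Section 5.2: nonce = base_nonce XOR I2OSP(seq, Nn)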
+ private byte[] computeNonce() {
+ var result = new byte[aead.nn];
+ for (var i = 0; i < result.length; i++) {
+ result[i] = (byte)(seq[i] ^ base_nonce[i]);
+ }
+ return result;
+ }
+
+ private void IncrementSeq() {
+ for (var i = seq.length - 1; i >= 0; i--) {
+ if ((seq[i] & 0xff) == 0xff) {
+ seq[i] = 0;
+ } else {
+ seq[i]++;
+ return;
+ }
+ }
+                // seq was already at 2^(8 * aead.nn) - 1 when this point is reached
+ throw new ProviderException("MessageLimitReachedError");
+ }
+ }
+
+ public Impl(int opmode) {
+ this.opmode = opmode;
+ }
+
+ public boolean hasEncrypt() {
+ return params.aead_id() != 65535;
+ }
+
+ // Section 7.2.1 of RFC 9180 has restrictions on size of psk, psk_id,
+ // info, and exporter_context (~2^61 for HMAC-SHA256 and ~2^125 for
+        // HMAC-SHA384 and HMAC-SHA512). This method does not enforce
+        // those limits.
+ public void init(AsymmetricKey key, HPKEParameterSpec p, SecureRandom rand)
+ throws InvalidKeyException, InvalidAlgorithmParameterException {
+ if (opmode != Cipher.ENCRYPT_MODE && opmode != Cipher.DECRYPT_MODE) {
+ throw new UnsupportedOperationException(
+ "Can only be used for encryption and decryption");
+ }
+ setParams(p);
+ SecretKey shared_secret;
+ if (opmode == Cipher.ENCRYPT_MODE) {
+ if (!(key instanceof PublicKey pk)) {
+ throw new InvalidKeyException(
+ "Cannot encrypt with private key");
+ }
+ if (p.encapsulation() != null) {
+ throw new InvalidAlgorithmParameterException(
+ "Must not provide key encapsulation message on sender side");
+ }
+ checkMatch(false, pk, params.kem_id());
+ KEM.Encapsulated enc;
+ switch (p.authKey()) {
+ case null -> {
+ var e = kem().newEncapsulator(pk, rand);
+ enc = e.encapsulate();
+ }
+ case PrivateKey skS -> {
+ checkMatch(true, skS, params.kem_id());
+ // AuthEncap is not part of the public KEM API, but it is supported internally
+ var e = new DHKEM().engineNewAuthEncapsulator(pk, skS, null, rand);
+ enc = e.engineEncapsulate(0, e.engineSecretSize(), "Generic");
+ }
+ default -> throw new InvalidAlgorithmParameterException(
+ "Cannot auth with public key");
+ }
+ kemEncaps = enc.encapsulation();
+ shared_secret = enc.key();
+ } else {
+ if (!(key instanceof PrivateKey sk)) {
+ throw new InvalidKeyException("Cannot decrypt with public key");
+ }
+ checkMatch(false, sk, params.kem_id());
+ try {
+ var encap = p.encapsulation();
+ if (encap == null) {
+ throw new InvalidAlgorithmParameterException(
+ "Must provide key encapsulation message on recipient side");
+ }
+ switch (p.authKey()) {
+ case null -> {
+ var d = kem().newDecapsulator(sk);
+ shared_secret = d.decapsulate(encap);
+ }
+ case PublicKey pkS -> {
+ checkMatch(true, pkS, params.kem_id());
+ // AuthDecap is not part of the public KEM API, but it is supported internally
+ var d = new DHKEM().engineNewAuthDecapsulator(sk, pkS, null);
+ shared_secret = d.engineDecapsulate(
+ encap, 0, d.engineSecretSize(), "Generic");
+ }
+ default -> throw new InvalidAlgorithmParameterException(
+ "Cannot auth with private key");
+ }
+ } catch (DecapsulateException e) {
+ throw new InvalidAlgorithmParameterException(e);
+ }
+ }
+
+ var usePSK = usePSK(params.psk());
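+ // RFC 9180, Section 5.1: mode_base = 0, mode_psk = 1, mode_auth = 2, mode_auth_psk = 3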
+ int mode = params.authKey() == null ? (usePSK ? 1 : 0) : (usePSK ? 3 : 2);
+ context = keySchedule(mode, shared_secret,
+ params.info(),
+ params.psk(),
+ params.psk_id());
+ }
+
+ private static void checkMatch(boolean inSpec, AsymmetricKey k, int kem_id)
+ throws InvalidKeyException, InvalidAlgorithmParameterException {
+ var p = k.getParams();
+ switch (p) {
+ case ECParameterSpec ecp -> {
+ if ((!ECUtil.equals(ecp, CurveDB.P_256)
+ || kem_id != HPKEParameterSpec.KEM_DHKEM_P_256_HKDF_SHA256)
+ && (!ECUtil.equals(ecp, CurveDB.P_384)
+ || kem_id != HPKEParameterSpec.KEM_DHKEM_P_384_HKDF_SHA384)
+ && (!ECUtil.equals(ecp, CurveDB.P_521)
+ || kem_id != HPKEParameterSpec.KEM_DHKEM_P_521_HKDF_SHA512)) {
+ var name = ECUtil.getCurveName(ecp);
+ throw new InvalidAlgorithmParameterException(
+ name + " does not match " + kem_id);
+ }
+ }
+ case NamedParameterSpec ns -> {
+ var name = ns.getName();
+ if ((!name.equalsIgnoreCase("x25519")
+ || kem_id != HPKEParameterSpec.KEM_DHKEM_X25519_HKDF_SHA256)
+ && (!name.equalsIgnoreCase("x448")
+ || kem_id != HPKEParameterSpec.KEM_DHKEM_X448_HKDF_SHA512)) {
+ throw new InvalidAlgorithmParameterException(
+ name + " does not match " + kem_id);
+ }
+ }
+ case null, default -> {
+ var msg = k.getClass() + " does not match " + kem_id;
+ if (inSpec) {
+ throw new InvalidAlgorithmParameterException(msg);
+ } else {
+ throw new InvalidKeyException(msg);
+ }
+ }
+ }
+ }
+
+ private KEM kem() {
+ try {
+ return KEM.getInstance("DHKEM");
+ } catch (NoSuchAlgorithmException e) {
+ throw new ProviderException("Internal error", e);
+ }
+ }
+
+ private void setParams(HPKEParameterSpec p)
+ throws InvalidAlgorithmParameterException {
+ params = p;
+ suite_id = concat(
+ HPKE,
+ DHKEM.i2OSP(params.kem_id(), 2),
+ DHKEM.i2OSP(params.kdf_id(), 2),
+ DHKEM.i2OSP(params.aead_id(), 2));
+ switch (params.kdf_id()) {
+ case HPKEParameterSpec.KDF_HKDF_SHA256 -> {
+ kdfAlg = "HKDF-SHA256";
+ kdfNh = 32;
+ }
+ case HPKEParameterSpec.KDF_HKDF_SHA384 -> {
+ kdfAlg = "HKDF-SHA384";
+ kdfNh = 48;
+ }
+ case HPKEParameterSpec.KDF_HKDF_SHA512 -> {
+ kdfAlg = "HKDF-SHA512";
+ kdfNh = 64;
+ }
+ default -> throw new InvalidAlgorithmParameterException(
+ "Unsupported kdf_id: " + params.kdf_id());
+ }
+ aead = new AEAD(params.aead_id());
+ }
+
+ private Context keySchedule(int mode,
+ SecretKey shared_secret,
+ byte[] info,
+ SecretKey psk,
+ byte[] psk_id) {
+ try {
+ var psk_id_hash_x = DHKEM.labeledExtract(suite_id, PSK_ID_HASH)
+ .addIKM(psk_id).extractOnly();
+ var info_hash_x = DHKEM.labeledExtract(suite_id, INFO_HASH)
+ .addIKM(info).extractOnly();
+
+ // deriveData must and can be called because all inputs to the
+ // builder are just byte arrays. Any KDF implementation can handle this.
+ var kdf = KDF.getInstance(kdfAlg);
+ var key_schedule_context = concat(new byte[]{(byte) mode},
+ kdf.deriveData(psk_id_hash_x),
+ kdf.deriveData(info_hash_x));
+
+ var secret_x_builder = DHKEM.labeledExtract(suite_id, SECRET);
+ if (psk != null) {
+ secret_x_builder.addIKM(psk);
+ }
+ secret_x_builder.addSalt(shared_secret);
+ var secret_x = kdf.deriveKey("Generic", secret_x_builder.extractOnly());
+
+ // A new KDF object must be created because secret_x_builder
+ // might contain provider-specific keys that the previously
+ // chosen KDF provider cannot handle.
+ kdf = KDF.getInstance(kdfAlg);
+ var exporter_secret = kdf.deriveKey("Generic", DHKEM.labeledExpand(
+ secret_x, suite_id, EXP, key_schedule_context, kdfNh));
+
+ if (hasEncrypt()) {
+ // ChaCha20-Poly1305 does not check the key's algorithm name, so "AES" works here too
+ var key = kdf.deriveKey("AES", DHKEM.labeledExpand(secret_x,
+ suite_id, KEY, key_schedule_context, aead.nk));
+ // deriveData must be called because the raw base_nonce bytes are needed to compute per-message nonces
+ var base_nonce = kdf.deriveData(DHKEM.labeledExpand(secret_x,
+ suite_id, BASE_NONCE, key_schedule_context, aead.nn));
+ return new Context(key, base_nonce, exporter_secret);
+ } else {
+ return new Context(null, null, exporter_secret);
+ }
+ } catch (InvalidAlgorithmParameterException
+ | NoSuchAlgorithmException | UnsupportedOperationException e) {
+ throw new ProviderException("Internal error", e);
+ }
+ }
+ }
+
+ private static boolean usePSK(SecretKey psk) {
+ return psk != null;
+ }
+
+ private static byte[] concat(byte[]... inputs) {
+ var o = new ByteArrayOutputStream();
+ Arrays.stream(inputs).forEach(o::writeBytes);
+ return o.toByteArray();
+ }
+}
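
An illustrative sender-side sketch of the new "HPKE" cipher transformation added by this patch. Only the constant names (KEM_DHKEM_X25519_HKDF_SHA256, KDF_HKDF_SHA256, AEAD_AES_128_GCM) and the ENCRYPT_MODE/DECRYPT_MODE behaviour come from the hunks above; the HPKEParameterSpec.of(...) factory and its package are assumptions and may not match the final API.

    // Hypothetical usage sketch; HPKEParameterSpec.of(...) is an assumption,
    // not confirmed by this patch.
    import javax.crypto.Cipher;
    import java.security.KeyPairGenerator;
    // import of HPKEParameterSpec omitted: its package is not visible in this section

    public class HpkeSenderSketch {
        public static void main(String[] args) throws Exception {
            var kp = KeyPairGenerator.getInstance("X25519").generateKeyPair();

            // Assumed factory combining the id constants referenced in Impl.setParams()
            var spec = HPKEParameterSpec.of(
                    HPKEParameterSpec.KEM_DHKEM_X25519_HKDF_SHA256,
                    HPKEParameterSpec.KDF_HKDF_SHA256,
                    HPKEParameterSpec.AEAD_AES_128_GCM);

            var sender = Cipher.getInstance("HPKE");
            sender.init(Cipher.ENCRYPT_MODE, kp.getPublic(), spec);
            byte[] ct = sender.doFinal("hello".getBytes());

            // A recipient would call init(DECRYPT_MODE, privateKey, spec) with a
            // spec that carries the sender's key encapsulation message
            // (see the encapsulation() checks in Impl.init() above).
        }
    }
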
diff --git a/src/java.base/share/classes/com/sun/crypto/provider/JceKeyStore.java b/src/java.base/share/classes/com/sun/crypto/provider/JceKeyStore.java
index ec8e0f3757d..ad98653b9c2 100644
--- a/src/java.base/share/classes/com/sun/crypto/provider/JceKeyStore.java
+++ b/src/java.base/share/classes/com/sun/crypto/provider/JceKeyStore.java
@@ -661,6 +661,10 @@ public final class JceKeyStore extends KeyStoreSpi {
dos.close();
}
}
+
+ if (debug != null) {
+ emitWeakKeyStoreWarning();
+ }
}
}
@@ -862,6 +866,10 @@ public final class JceKeyStore extends KeyStoreSpi {
secretKeyCount);
}
+ if (debug != null) {
+ emitWeakKeyStoreWarning();
+ }
+
/*
* If a password has been provided, we check the keyed digest
* at the end. If this check fails, the store has been tampered
@@ -978,4 +986,12 @@ public final class JceKeyStore extends KeyStoreSpi {
return Status.UNDECIDED;
}
}
+
+ private void emitWeakKeyStoreWarning() {
+ debug.println("WARNING: JCEKS uses outdated cryptographic "
+ + "algorithms and will be removed in a future "
+ + "release. Migrate to PKCS12 using:");
+ debug.println("keytool -importkeystore -srckeystore "
+ + "-destkeystore -deststoretype pkcs12");
+ }
}
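
The keytool command in the warning above has a programmatic equivalent using only the standard KeyStore API. A minimal sketch, assuming a password-protected keystore.jceks and ignoring per-entry passwords:

    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.security.KeyStore;
    import java.util.Collections;

    public class MigrateToPkcs12 {
        public static void main(String[] args) throws Exception {
            char[] password = "changeit".toCharArray();   // assumed store password

            // Load the legacy JCEKS store
            KeyStore src = KeyStore.getInstance("JCEKS");
            try (var in = new FileInputStream("keystore.jceks")) {
                src.load(in, password);
            }

            // Create an empty PKCS12 store
            KeyStore dst = KeyStore.getInstance("PKCS12");
            dst.load(null, null);

            // Copy every entry across
            var protection = new KeyStore.PasswordProtection(password);
            for (String alias : Collections.list(src.aliases())) {
                if (src.isCertificateEntry(alias)) {
                    dst.setCertificateEntry(alias, src.getCertificate(alias));
                } else {
                    dst.setEntry(alias, src.getEntry(alias, protection), protection);
                }
            }

            try (var out = new FileOutputStream("keystore.p12")) {
                dst.store(out, password);
            }
        }
    }
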
diff --git a/src/java.base/share/classes/com/sun/crypto/provider/SunJCE.java b/src/java.base/share/classes/com/sun/crypto/provider/SunJCE.java
index 22d5f17c6e0..4b38bd55809 100644
--- a/src/java.base/share/classes/com/sun/crypto/provider/SunJCE.java
+++ b/src/java.base/share/classes/com/sun/crypto/provider/SunJCE.java
@@ -371,6 +371,8 @@ public final class SunJCE extends Provider {
ps("Cipher", "PBEWithHmacSHA512/256AndAES_256",
"com.sun.crypto.provider.PBES2Core$HmacSHA512_256AndAES_256");
+ ps("Cipher", "HPKE", "com.sun.crypto.provider.HPKE");
+
/*
* Key(pair) Generator engines
*/
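
With the registration above in place, the new transformation should be discoverable through the provider's service table; a quick check, using only names that appear in this patch:

    import java.security.Security;

    public class CheckHpkeRegistration {
        public static void main(String[] args) {
            // Prints the Cipher.HPKE service entry, or null if it is not registered
            var service = Security.getProvider("SunJCE").getService("Cipher", "HPKE");
            System.out.println(service);
        }
    }
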
diff --git a/src/java.base/share/classes/java/lang/Character.java b/src/java.base/share/classes/java/lang/Character.java
index 72ff33651f9..b71849eaee7 100644
--- a/src/java.base/share/classes/java/lang/Character.java
+++ b/src/java.base/share/classes/java/lang/Character.java
@@ -63,7 +63,7 @@ import static java.lang.constant.ConstantDescs.DEFAULT_NAME;
* from the Unicode Consortium at
* http://www.unicode.org.
*
- * Character information is based on the Unicode Standard, version 16.0.
+ * Character information is based on the Unicode Standard, version 17.0.
*
* The Java platform has supported different versions of the Unicode
* Standard over time. Upgrades to newer versions of the Unicode Standard
@@ -75,6 +75,8 @@ import static java.lang.constant.ConstantDescs.DEFAULT_NAME;
*
Unicode version |
*
*
+ * | Java SE 26 |
+ * Unicode 17.0 |
* | Java SE 24 |
* Unicode 16.0 |
* | Java SE 22 |
@@ -741,11 +743,12 @@ class Character implements java.io.Serializable, Comparable, Constabl
*/
public static final class UnicodeBlock extends Subset {
/**
- * NUM_ENTITIES should match the total number of UnicodeBlocks.
+ * NUM_ENTITIES should match the total number of UnicodeBlock identifier
+ * names plus their aliases.
* It should be adjusted whenever the Unicode Character Database
* is upgraded.
*/
- private static final int NUM_ENTITIES = 782;
+ private static final int NUM_ENTITIES = 804;
private static Map map = HashMap.newHashMap(NUM_ENTITIES);
/**
@@ -3715,6 +3718,85 @@ class Character implements java.io.Serializable, Comparable, Constabl
"OL ONAL",
"OLONAL");
+ /**
+ * Constant for the "Sidetic" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock SIDETIC =
+ new UnicodeBlock("SIDETIC");
+
+ /**
+ * Constant for the "Sharada Supplement" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock SHARADA_SUPPLEMENT =
+ new UnicodeBlock("SHARADA_SUPPLEMENT",
+ "SHARADA SUPPLEMENT",
+ "SHARADASUPPLEMENT");
+
+ /**
+ * Constant for the "Tolong Siki" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock TOLONG_SIKI =
+ new UnicodeBlock("TOLONG_SIKI",
+ "TOLONG SIKI",
+ "TOLONGSIKI");
+
+ /**
+ * Constant for the "Beria Erfe" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock BERIA_ERFE =
+ new UnicodeBlock("BERIA_ERFE",
+ "BERIA ERFE",
+ "BERIAERFE");
+
+ /**
+ * Constant for the "Tangut Components Supplement" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock TANGUT_COMPONENTS_SUPPLEMENT =
+ new UnicodeBlock("TANGUT_COMPONENTS_SUPPLEMENT",
+ "TANGUT COMPONENTS SUPPLEMENT",
+ "TANGUTCOMPONENTSSUPPLEMENT");
+
+ /**
+ * Constant for the "Miscellaneous Symbols Supplement" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock MISCELLANEOUS_SYMBOLS_SUPPLEMENT =
+ new UnicodeBlock("MISCELLANEOUS_SYMBOLS_SUPPLEMENT",
+ "MISCELLANEOUS SYMBOLS SUPPLEMENT",
+ "MISCELLANEOUSSYMBOLSSUPPLEMENT");
+
+ /**
+ * Constant for the "Tai Yo" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock TAI_YO =
+ new UnicodeBlock("TAI_YO",
+ "TAI YO",
+ "TAIYO");
+
+ /**
+ * Constant for the "CJK Unified Ideographs Extension J" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock CJK_UNIFIED_IDEOGRAPHS_EXTENSION_J =
+ new UnicodeBlock("CJK_UNIFIED_IDEOGRAPHS_EXTENSION_J",
+ "CJK UNIFIED IDEOGRAPHS EXTENSION J",
+ "CJKUNIFIEDIDEOGRAPHSEXTENSIONJ");
+
private static final int[] blockStarts = {
0x0000, // 0000..007F; Basic Latin
0x0080, // 0080..00FF; Latin-1 Supplement
@@ -3916,7 +3998,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x108E0, // 108E0..108FF; Hatran
0x10900, // 10900..1091F; Phoenician
0x10920, // 10920..1093F; Lydian
- 0x10940, // unassigned
+ 0x10940, // 10940..1095F; Sidetic
+ 0x10960, // unassigned
0x10980, // 10980..1099F; Meroitic Hieroglyphs
0x109A0, // 109A0..109FF; Meroitic Cursive
0x10A00, // 10A00..10A5F; Kharoshthi
@@ -3977,14 +4060,16 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x11AB0, // 11AB0..11ABF; Unified Canadian Aboriginal Syllabics Extended-A
0x11AC0, // 11AC0..11AFF; Pau Cin Hau
0x11B00, // 11B00..11B5F; Devanagari Extended-A
- 0x11B60, // unassigned
+ 0x11B60, // 11B60..11B7F; Sharada Supplement
+ 0x11B80, // unassigned
0x11BC0, // 11BC0..11BFF; Sunuwar
0x11C00, // 11C00..11C6F; Bhaiksuki
0x11C70, // 11C70..11CBF; Marchen
0x11CC0, // unassigned
0x11D00, // 11D00..11D5F; Masaram Gondi
0x11D60, // 11D60..11DAF; Gunjala Gondi
- 0x11DB0, // unassigned
+ 0x11DB0, // 11DB0..11DEF; Tolong Siki
+ 0x11DF0, // unassigned
0x11EE0, // 11EE0..11EFF; Makasar
0x11F00, // 11F00..11F5F; Kawi
0x11F60, // unassigned
@@ -4011,7 +4096,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x16D40, // 16D40..16D7F; Kirat Rai
0x16D80, // unassigned
0x16E40, // 16E40..16E9F; Medefaidrin
- 0x16EA0, // unassigned
+ 0x16EA0, // 16EA0..16EDF; Beria Erfe
+ 0x16EE0, // unassigned
0x16F00, // 16F00..16F9F; Miao
0x16FA0, // unassigned
0x16FE0, // 16FE0..16FFF; Ideographic Symbols and Punctuation
@@ -4019,7 +4105,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x18800, // 18800..18AFF; Tangut Components
0x18B00, // 18B00..18CFF; Khitan Small Script
0x18D00, // 18D00..18D7F; Tangut Supplement
- 0x18D80, // unassigned
+ 0x18D80, // 18D80..18DFF; Tangut Components Supplement
+ 0x18E00, // unassigned
0x1AFF0, // 1AFF0..1AFFF; Kana Extended-B
0x1B000, // 1B000..1B0FF; Kana Supplement
0x1B100, // 1B100..1B12F; Kana Extended-A
@@ -4030,7 +4117,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x1BCA0, // 1BCA0..1BCAF; Shorthand Format Controls
0x1BCB0, // unassigned
0x1CC00, // 1CC00..1CEBF; Symbols for Legacy Computing Supplement
- 0x1CEC0, // unassigned
+ 0x1CEC0, // 1CEC0..1CEFF; Miscellaneous Symbols Supplement
0x1CF00, // 1CF00..1CFCF; Znamenny Musical Notation
0x1CFD0, // unassigned
0x1D000, // 1D000..1D0FF; Byzantine Musical Symbols
@@ -4058,6 +4145,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x1E500, // unassigned
0x1E5D0, // 1E5D0..1E5FF; Ol Onal
0x1E600, // unassigned
+ 0x1E6C0, // 1E6C0..1E6FF; Tai Yo
+ 0x1E700, // unassigned
0x1E7E0, // 1E7E0..1E7FF; Ethiopic Extended-B
0x1E800, // 1E800..1E8DF; Mende Kikakui
0x1E8E0, // unassigned
@@ -4098,7 +4187,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x2FA20, // unassigned
0x30000, // 30000..3134F; CJK Unified Ideographs Extension G
0x31350, // 31350..323AF; CJK Unified Ideographs Extension H
- 0x323B0, // unassigned
+ 0x323B0, // 323B0..3347F; CJK Unified Ideographs Extension J
+ 0x33480, // unassigned
0xE0000, // E0000..E007F; Tags
0xE0080, // unassigned
0xE0100, // E0100..E01EF; Variation Selectors Supplement
@@ -4308,6 +4398,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
HATRAN,
PHOENICIAN,
LYDIAN,
+ SIDETIC,
null,
MEROITIC_HIEROGLYPHS,
MEROITIC_CURSIVE,
@@ -4369,6 +4460,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS_EXTENDED_A,
PAU_CIN_HAU,
DEVANAGARI_EXTENDED_A,
+ SHARADA_SUPPLEMENT,
null,
SUNUWAR,
BHAIKSUKI,
@@ -4376,6 +4468,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
null,
MASARAM_GONDI,
GUNJALA_GONDI,
+ TOLONG_SIKI,
null,
MAKASAR,
KAWI,
@@ -4403,6 +4496,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
KIRAT_RAI,
null,
MEDEFAIDRIN,
+ BERIA_ERFE,
null,
MIAO,
null,
@@ -4411,6 +4505,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
TANGUT_COMPONENTS,
KHITAN_SMALL_SCRIPT,
TANGUT_SUPPLEMENT,
+ TANGUT_COMPONENTS_SUPPLEMENT,
null,
KANA_EXTENDED_B,
KANA_SUPPLEMENT,
@@ -4422,7 +4517,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
SHORTHAND_FORMAT_CONTROLS,
null,
SYMBOLS_FOR_LEGACY_COMPUTING_SUPPLEMENT,
- null,
+ MISCELLANEOUS_SYMBOLS_SUPPLEMENT,
ZNAMENNY_MUSICAL_NOTATION,
null,
BYZANTINE_MUSICAL_SYMBOLS,
@@ -4450,6 +4545,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
null,
OL_ONAL,
null,
+ TAI_YO,
+ null,
ETHIOPIC_EXTENDED_B,
MENDE_KIKAKUI,
null,
@@ -4490,6 +4587,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
null,
CJK_UNIFIED_IDEOGRAPHS_EXTENSION_G,
CJK_UNIFIED_IDEOGRAPHS_EXTENSION_H,
+ CJK_UNIFIED_IDEOGRAPHS_EXTENSION_J,
null,
TAGS,
null,
@@ -5547,6 +5645,30 @@ class Character implements java.io.Serializable, Comparable, Constabl
*/
OL_ONAL,
+ /**
+ * Unicode script "Sidetic".
+ * @since 26
+ */
+ SIDETIC,
+
+ /**
+ * Unicode script "Tolong Siki".
+ * @since 26
+ */
+ TOLONG_SIKI,
+
+ /**
+ * Unicode script "Beria Erfe".
+ * @since 26
+ */
+ BERIA_ERFE,
+
+ /**
+ * Unicode script "Tai Yo".
+ * @since 26
+ */
+ TAI_YO,
+
/**
* Unicode script "Unknown".
*/
@@ -5648,9 +5770,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x085F, // 085F ; UNKNOWN
0x0860, // 0860..086A; SYRIAC
0x086B, // 086B..086F; UNKNOWN
- 0x0870, // 0870..088E; ARABIC
- 0x088F, // 088F ; UNKNOWN
- 0x0890, // 0890..0891; ARABIC
+ 0x0870, // 0870..0891; ARABIC
0x0892, // 0892..0896; UNKNOWN
0x0897, // 0897..08E1; ARABIC
0x08E2, // 08E2 ; COMMON
@@ -5825,8 +5945,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x0C55, // 0C55..0C56; TELUGU
0x0C57, // 0C57 ; UNKNOWN
0x0C58, // 0C58..0C5A; TELUGU
- 0x0C5B, // 0C5B..0C5C; UNKNOWN
- 0x0C5D, // 0C5D ; TELUGU
+ 0x0C5B, // 0C5B ; UNKNOWN
+ 0x0C5C, // 0C5C..0C5D; TELUGU
0x0C5E, // 0C5E..0C5F; UNKNOWN
0x0C60, // 0C60..0C63; TELUGU
0x0C64, // 0C64..0C65; UNKNOWN
@@ -5850,8 +5970,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x0CCA, // 0CCA..0CCD; KANNADA
0x0CCE, // 0CCE..0CD4; UNKNOWN
0x0CD5, // 0CD5..0CD6; KANNADA
- 0x0CD7, // 0CD7..0CDC; UNKNOWN
- 0x0CDD, // 0CDD..0CDE; KANNADA
+ 0x0CD7, // 0CD7..0CDB; UNKNOWN
+ 0x0CDC, // 0CDC..0CDE; KANNADA
0x0CDF, // 0CDF ; UNKNOWN
0x0CE0, // 0CE0..0CE3; KANNADA
0x0CE4, // 0CE4..0CE5; UNKNOWN
@@ -6062,8 +6182,10 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x1A9A, // 1A9A..1A9F; UNKNOWN
0x1AA0, // 1AA0..1AAD; TAI_THAM
0x1AAE, // 1AAE..1AAF; UNKNOWN
- 0x1AB0, // 1AB0..1ACE; INHERITED
- 0x1ACF, // 1ACF..1AFF; UNKNOWN
+ 0x1AB0, // 1AB0..1ADD; INHERITED
+ 0x1ADE, // 1ADE..1ADF; UNKNOWN
+ 0x1AE0, // 1AE0..1AEB; INHERITED
+ 0x1AEC, // 1AEC..1AFF; UNKNOWN
0x1B00, // 1B00..1B4C; BALINESE
0x1B4D, // 1B4D ; UNKNOWN
0x1B4E, // 1B4E..1B7F; BALINESE
@@ -6155,8 +6277,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x208F, // 208F ; UNKNOWN
0x2090, // 2090..209C; LATIN
0x209D, // 209D..209F; UNKNOWN
- 0x20A0, // 20A0..20C0; COMMON
- 0x20C1, // 20C1..20CF; UNKNOWN
+ 0x20A0, // 20A0..20C1; COMMON
+ 0x20C2, // 20C2..20CF; UNKNOWN
0x20D0, // 20D0..20F0; INHERITED
0x20F1, // 20F1..20FF; UNKNOWN
0x2100, // 2100..2125; COMMON
@@ -6179,9 +6301,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x2800, // 2800..28FF; BRAILLE
0x2900, // 2900..2B73; COMMON
0x2B74, // 2B74..2B75; UNKNOWN
- 0x2B76, // 2B76..2B95; COMMON
- 0x2B96, // 2B96 ; UNKNOWN
- 0x2B97, // 2B97..2BFF; COMMON
+ 0x2B76, // 2B76..2BFF; COMMON
0x2C00, // 2C00..2C5F; GLAGOLITIC
0x2C60, // 2C60..2C7F; LATIN
0x2C80, // 2C80..2CF3; COPTIC
@@ -6282,15 +6402,9 @@ class Character implements java.io.Serializable, Comparable, Constabl
0xA700, // A700..A721; COMMON
0xA722, // A722..A787; LATIN
0xA788, // A788..A78A; COMMON
- 0xA78B, // A78B..A7CD; LATIN
- 0xA7CE, // A7CE..A7CF; UNKNOWN
- 0xA7D0, // A7D0..A7D1; LATIN
- 0xA7D2, // A7D2 ; UNKNOWN
- 0xA7D3, // A7D3 ; LATIN
- 0xA7D4, // A7D4 ; UNKNOWN
- 0xA7D5, // A7D5..A7DC; LATIN
- 0xA7DD, // A7DD..A7F1; UNKNOWN
- 0xA7F2, // A7F2..A7FF; LATIN
+ 0xA78B, // A78B..A7DC; LATIN
+ 0xA7DD, // A7DD..A7F0; UNKNOWN
+ 0xA7F1, // A7F1..A7FF; LATIN
0xA800, // A800..A82C; SYLOTI_NAGRI
0xA82D, // A82D..A82F; UNKNOWN
0xA830, // A830..A839; COMMON
@@ -6378,15 +6492,9 @@ class Character implements java.io.Serializable, Comparable, Constabl
0xFB43, // FB43..FB44; HEBREW
0xFB45, // FB45 ; UNKNOWN
0xFB46, // FB46..FB4F; HEBREW
- 0xFB50, // FB50..FBC2; ARABIC
- 0xFBC3, // FBC3..FBD2; UNKNOWN
- 0xFBD3, // FBD3..FD3D; ARABIC
+ 0xFB50, // FB50..FD3D; ARABIC
0xFD3E, // FD3E..FD3F; COMMON
- 0xFD40, // FD40..FD8F; ARABIC
- 0xFD90, // FD90..FD91; UNKNOWN
- 0xFD92, // FD92..FDC7; ARABIC
- 0xFDC8, // FDC8..FDCE; UNKNOWN
- 0xFDCF, // FDCF ; ARABIC
+ 0xFD40, // FD40..FDCF; ARABIC
0xFDD0, // FDD0..FDEF; UNKNOWN
0xFDF0, // FDF0..FDFF; ARABIC
0xFE00, // FE00..FE0F; INHERITED
@@ -6555,7 +6663,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x10920, // 10920..10939; LYDIAN
0x1093A, // 1093A..1093E; UNKNOWN
0x1093F, // 1093F ; LYDIAN
- 0x10940, // 10940..1097F; UNKNOWN
+ 0x10940, // 10940..10959; SIDETIC
+ 0x1095A, // 1095A..1097F; UNKNOWN
0x10980, // 10980..1099F; MEROITIC_HIEROGLYPHS
0x109A0, // 109A0..109B7; MEROITIC_CURSIVE
0x109B8, // 109B8..109BB; UNKNOWN
@@ -6625,9 +6734,11 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x10EAE, // 10EAE..10EAF; UNKNOWN
0x10EB0, // 10EB0..10EB1; YEZIDI
0x10EB2, // 10EB2..10EC1; UNKNOWN
- 0x10EC2, // 10EC2..10EC4; ARABIC
- 0x10EC5, // 10EC5..10EFB; UNKNOWN
- 0x10EFC, // 10EFC..10EFF; ARABIC
+ 0x10EC2, // 10EC2..10EC7; ARABIC
+ 0x10EC8, // 10EC8..10ECF; UNKNOWN
+ 0x10ED0, // 10ED0..10ED8; ARABIC
+ 0x10ED9, // 10ED9..10EF9; UNKNOWN
+ 0x10EFA, // 10EFA..10EFF; ARABIC
0x10F00, // 10F00..10F27; OLD_SOGDIAN
0x10F28, // 10F28..10F2F; UNKNOWN
0x10F30, // 10F30..10F59; SOGDIAN
@@ -6797,7 +6908,9 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x11AC0, // 11AC0..11AF8; PAU_CIN_HAU
0x11AF9, // 11AF9..11AFF; UNKNOWN
0x11B00, // 11B00..11B09; DEVANAGARI
- 0x11B0A, // 11B0A..11BBF; UNKNOWN
+ 0x11B0A, // 11B0A..11B5F; UNKNOWN
+ 0x11B60, // 11B60..11B67; SHARADA
+ 0x11B68, // 11B68..11BBF; UNKNOWN
0x11BC0, // 11BC0..11BE1; SUNUWAR
0x11BE2, // 11BE2..11BEF; UNKNOWN
0x11BF0, // 11BF0..11BF9; SUNUWAR
@@ -6841,7 +6954,11 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x11D93, // 11D93..11D98; GUNJALA_GONDI
0x11D99, // 11D99..11D9F; UNKNOWN
0x11DA0, // 11DA0..11DA9; GUNJALA_GONDI
- 0x11DAA, // 11DAA..11EDF; UNKNOWN
+ 0x11DAA, // 11DAA..11DAF; UNKNOWN
+ 0x11DB0, // 11DB0..11DDB; TOLONG_SIKI
+ 0x11DDC, // 11DDC..11DDF; UNKNOWN
+ 0x11DE0, // 11DE0..11DE9; TOLONG_SIKI
+ 0x11DEA, // 11DEA..11EDF; UNKNOWN
0x11EE0, // 11EE0..11EF8; MAKASAR
0x11EF9, // 11EF9..11EFF; UNKNOWN
0x11F00, // 11F00..11F10; KAWI
@@ -6901,7 +7018,11 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x16D40, // 16D40..16D79; KIRAT_RAI
0x16D7A, // 16D7A..16E3F; UNKNOWN
0x16E40, // 16E40..16E9A; MEDEFAIDRIN
- 0x16E9B, // 16E9B..16EFF; UNKNOWN
+ 0x16E9B, // 16E9B..16E9F; UNKNOWN
+ 0x16EA0, // 16EA0..16EB8; BERIA_ERFE
+ 0x16EB9, // 16EB9..16EBA; UNKNOWN
+ 0x16EBB, // 16EBB..16ED3; BERIA_ERFE
+ 0x16ED4, // 16ED4..16EFF; UNKNOWN
0x16F00, // 16F00..16F4A; MIAO
0x16F4B, // 16F4B..16F4E; UNKNOWN
0x16F4F, // 16F4F..16F87; MIAO
@@ -6913,16 +7034,16 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x16FE2, // 16FE2..16FE3; HAN
0x16FE4, // 16FE4 ; KHITAN_SMALL_SCRIPT
0x16FE5, // 16FE5..16FEF; UNKNOWN
- 0x16FF0, // 16FF0..16FF1; HAN
- 0x16FF2, // 16FF2..16FFF; UNKNOWN
- 0x17000, // 17000..187F7; TANGUT
- 0x187F8, // 187F8..187FF; UNKNOWN
- 0x18800, // 18800..18AFF; TANGUT
+ 0x16FF0, // 16FF0..16FF6; HAN
+ 0x16FF7, // 16FF7..16FFF; UNKNOWN
+ 0x17000, // 17000..18AFF; TANGUT
0x18B00, // 18B00..18CD5; KHITAN_SMALL_SCRIPT
0x18CD6, // 18CD6..18CFE; UNKNOWN
0x18CFF, // 18CFF ; KHITAN_SMALL_SCRIPT
- 0x18D00, // 18D00..18D08; TANGUT
- 0x18D09, // 18D09..1AFEF; UNKNOWN
+ 0x18D00, // 18D00..18D1E; TANGUT
+ 0x18D1F, // 18D1F..18D7F; UNKNOWN
+ 0x18D80, // 18D80..18DF2; TANGUT
+ 0x18DF3, // 18DF3..1AFEF; UNKNOWN
0x1AFF0, // 1AFF0..1AFF3; KATAKANA
0x1AFF4, // 1AFF4 ; UNKNOWN
0x1AFF5, // 1AFF5..1AFFB; KATAKANA
@@ -6954,10 +7075,14 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x1BC9C, // 1BC9C..1BC9F; DUPLOYAN
0x1BCA0, // 1BCA0..1BCA3; COMMON
0x1BCA4, // 1BCA4..1CBFF; UNKNOWN
- 0x1CC00, // 1CC00..1CCF9; COMMON
- 0x1CCFA, // 1CCFA..1CCFF; UNKNOWN
+ 0x1CC00, // 1CC00..1CCFC; COMMON
+ 0x1CCFD, // 1CCFD..1CCFF; UNKNOWN
0x1CD00, // 1CD00..1CEB3; COMMON
- 0x1CEB4, // 1CEB4..1CEFF; UNKNOWN
+ 0x1CEB4, // 1CEB4..1CEB9; UNKNOWN
+ 0x1CEBA, // 1CEBA..1CED0; COMMON
+ 0x1CED1, // 1CED1..1CEDF; UNKNOWN
+ 0x1CEE0, // 1CEE0..1CEF0; COMMON
+ 0x1CEF1, // 1CEF1..1CEFF; UNKNOWN
0x1CF00, // 1CF00..1CF2D; INHERITED
0x1CF2E, // 1CF2E..1CF2F; UNKNOWN
0x1CF30, // 1CF30..1CF46; INHERITED
@@ -7072,7 +7197,13 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x1E5D0, // 1E5D0..1E5FA; OL_ONAL
0x1E5FB, // 1E5FB..1E5FE; UNKNOWN
0x1E5FF, // 1E5FF ; OL_ONAL
- 0x1E600, // 1E600..1E7DF; UNKNOWN
+ 0x1E600, // 1E600..1E6BF; UNKNOWN
+ 0x1E6C0, // 1E6C0..1E6DE; TAI_YO
+ 0x1E6DF, // 1E6DF ; UNKNOWN
+ 0x1E6E0, // 1E6E0..1E6F5; TAI_YO
+ 0x1E6F6, // 1E6F6..1E6FD; UNKNOWN
+ 0x1E6FE, // 1E6FE..1E6FF; TAI_YO
+ 0x1E700, // 1E700..1E7DF; UNKNOWN
0x1E7E0, // 1E7E0..1E7E6; ETHIOPIC
0x1E7E7, // 1E7E7 ; UNKNOWN
0x1E7E8, // 1E7E8..1E7EB; ETHIOPIC
@@ -7189,15 +7320,13 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x1F252, // 1F252..1F25F; UNKNOWN
0x1F260, // 1F260..1F265; COMMON
0x1F266, // 1F266..1F2FF; UNKNOWN
- 0x1F300, // 1F300..1F6D7; COMMON
- 0x1F6D8, // 1F6D8..1F6DB; UNKNOWN
+ 0x1F300, // 1F300..1F6D8; COMMON
+ 0x1F6D9, // 1F6D9..1F6DB; UNKNOWN
0x1F6DC, // 1F6DC..1F6EC; COMMON
0x1F6ED, // 1F6ED..1F6EF; UNKNOWN
0x1F6F0, // 1F6F0..1F6FC; COMMON
0x1F6FD, // 1F6FD..1F6FF; UNKNOWN
- 0x1F700, // 1F700..1F776; COMMON
- 0x1F777, // 1F777..1F77A; UNKNOWN
- 0x1F77B, // 1F77B..1F7D9; COMMON
+ 0x1F700, // 1F700..1F7D9; COMMON
0x1F7DA, // 1F7DA..1F7DF; UNKNOWN
0x1F7E0, // 1F7E0..1F7EB; COMMON
0x1F7EC, // 1F7EC..1F7EF; UNKNOWN
@@ -7216,35 +7345,37 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x1F8B0, // 1F8B0..1F8BB; COMMON
0x1F8BC, // 1F8BC..1F8BF; UNKNOWN
0x1F8C0, // 1F8C0..1F8C1; COMMON
- 0x1F8C2, // 1F8C2..1F8FF; UNKNOWN
- 0x1F900, // 1F900..1FA53; COMMON
- 0x1FA54, // 1FA54..1FA5F; UNKNOWN
+ 0x1F8C2, // 1F8C2..1F8CF; UNKNOWN
+ 0x1F8D0, // 1F8D0..1F8D8; COMMON
+ 0x1F8D9, // 1F8D9..1F8FF; UNKNOWN
+ 0x1F900, // 1F900..1FA57; COMMON
+ 0x1FA58, // 1FA58..1FA5F; UNKNOWN
0x1FA60, // 1FA60..1FA6D; COMMON
0x1FA6E, // 1FA6E..1FA6F; UNKNOWN
0x1FA70, // 1FA70..1FA7C; COMMON
0x1FA7D, // 1FA7D..1FA7F; UNKNOWN
- 0x1FA80, // 1FA80..1FA89; COMMON
- 0x1FA8A, // 1FA8A..1FA8E; UNKNOWN
- 0x1FA8F, // 1FA8F..1FAC6; COMMON
- 0x1FAC7, // 1FAC7..1FACD; UNKNOWN
- 0x1FACE, // 1FACE..1FADC; COMMON
+ 0x1FA80, // 1FA80..1FA8A; COMMON
+ 0x1FA8B, // 1FA8B..1FA8D; UNKNOWN
+ 0x1FA8E, // 1FA8E..1FAC6; COMMON
+ 0x1FAC7, // 1FAC7 ; UNKNOWN
+ 0x1FAC8, // 1FAC8 ; COMMON
+ 0x1FAC9, // 1FAC9..1FACC; UNKNOWN
+ 0x1FACD, // 1FACD..1FADC; COMMON
0x1FADD, // 1FADD..1FADE; UNKNOWN
- 0x1FADF, // 1FADF..1FAE9; COMMON
- 0x1FAEA, // 1FAEA..1FAEF; UNKNOWN
- 0x1FAF0, // 1FAF0..1FAF8; COMMON
+ 0x1FADF, // 1FADF..1FAEA; COMMON
+ 0x1FAEB, // 1FAEB..1FAEE; UNKNOWN
+ 0x1FAEF, // 1FAEF..1FAF8; COMMON
0x1FAF9, // 1FAF9..1FAFF; UNKNOWN
0x1FB00, // 1FB00..1FB92; COMMON
0x1FB93, // 1FB93 ; UNKNOWN
- 0x1FB94, // 1FB94..1FBF9; COMMON
- 0x1FBFA, // 1FBFA..1FFFF; UNKNOWN
+ 0x1FB94, // 1FB94..1FBFA; COMMON
+ 0x1FBFB, // 1FBFB..1FFFF; UNKNOWN
0x20000, // 20000..2A6DF; HAN
0x2A6E0, // 2A6E0..2A6FF; UNKNOWN
- 0x2A700, // 2A700..2B739; HAN
- 0x2B73A, // 2B73A..2B73F; UNKNOWN
- 0x2B740, // 2B740..2B81D; HAN
+ 0x2A700, // 2A700..2B81D; HAN
0x2B81E, // 2B81E..2B81F; UNKNOWN
- 0x2B820, // 2B820..2CEA1; HAN
- 0x2CEA2, // 2CEA2..2CEAF; UNKNOWN
+ 0x2B820, // 2B820..2CEAD; HAN
+ 0x2CEAE, // 2CEAE..2CEAF; UNKNOWN
0x2CEB0, // 2CEB0..2EBE0; HAN
0x2EBE1, // 2EBE1..2EBEF; UNKNOWN
0x2EBF0, // 2EBF0..2EE5D; HAN
@@ -7253,8 +7384,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x2FA1E, // 2FA1E..2FFFF; UNKNOWN
0x30000, // 30000..3134A; HAN
0x3134B, // 3134B..3134F; UNKNOWN
- 0x31350, // 31350..323AF; HAN
- 0x323B0, // 323B0..E0000; UNKNOWN
+ 0x31350, // 31350..33479; HAN
+ 0x3347A, // 3347A..E0000; UNKNOWN
0xE0001, // E0001 ; COMMON
0xE0002, // E0002..E001F; UNKNOWN
0xE0020, // E0020..E007F; COMMON
@@ -7359,9 +7490,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
UNKNOWN, // 085F
SYRIAC, // 0860..086A
UNKNOWN, // 086B..086F
- ARABIC, // 0870..088E
- UNKNOWN, // 088F
- ARABIC, // 0890..0891
+ ARABIC, // 0870..0891
UNKNOWN, // 0892..0896
ARABIC, // 0897..08E1
COMMON, // 08E2
@@ -7536,8 +7665,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
TELUGU, // 0C55..0C56
UNKNOWN, // 0C57
TELUGU, // 0C58..0C5A
- UNKNOWN, // 0C5B..0C5C
- TELUGU, // 0C5D
+ UNKNOWN, // 0C5B
+ TELUGU, // 0C5C..0C5D
UNKNOWN, // 0C5E..0C5F
TELUGU, // 0C60..0C63
UNKNOWN, // 0C64..0C65
@@ -7561,8 +7690,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
KANNADA, // 0CCA..0CCD
UNKNOWN, // 0CCE..0CD4
KANNADA, // 0CD5..0CD6
- UNKNOWN, // 0CD7..0CDC
- KANNADA, // 0CDD..0CDE
+ UNKNOWN, // 0CD7..0CDB
+ KANNADA, // 0CDC..0CDE
UNKNOWN, // 0CDF
KANNADA, // 0CE0..0CE3
UNKNOWN, // 0CE4..0CE5
@@ -7773,8 +7902,10 @@ class Character implements java.io.Serializable, Comparable, Constabl
UNKNOWN, // 1A9A..1A9F
TAI_THAM, // 1AA0..1AAD
UNKNOWN, // 1AAE..1AAF
- INHERITED, // 1AB0..1ACE
- UNKNOWN, // 1ACF..1AFF
+ INHERITED, // 1AB0..1ADD
+ UNKNOWN, // 1ADE..1ADF
+ INHERITED, // 1AE0..1AEB
+ UNKNOWN, // 1AEC..1AFF
BALINESE, // 1B00..1B4C
UNKNOWN, // 1B4D
BALINESE, // 1B4E..1B7F
@@ -7866,8 +7997,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
UNKNOWN, // 208F
LATIN, // 2090..209C
UNKNOWN, // 209D..209F
- COMMON, // 20A0..20C0
- UNKNOWN, // 20C1..20CF
+ COMMON, // 20A0..20C1
+ UNKNOWN, // 20C2..20CF
INHERITED, // 20D0..20F0
UNKNOWN, // 20F1..20FF
COMMON, // 2100..2125
@@ -7890,9 +8021,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
BRAILLE, // 2800..28FF
COMMON, // 2900..2B73
UNKNOWN, // 2B74..2B75
- COMMON, // 2B76..2B95
- UNKNOWN, // 2B96
- COMMON, // 2B97..2BFF
+ COMMON, // 2B76..2BFF
GLAGOLITIC, // 2C00..2C5F
LATIN, // 2C60..2C7F
COPTIC, // 2C80..2CF3
@@ -7993,15 +8122,9 @@ class Character implements java.io.Serializable, Comparable, Constabl
COMMON, // A700..A721
LATIN, // A722..A787
COMMON, // A788..A78A
- LATIN, // A78B..A7CD
- UNKNOWN, // A7CE..A7CF
- LATIN, // A7D0..A7D1
- UNKNOWN, // A7D2
- LATIN, // A7D3
- UNKNOWN, // A7D4
- LATIN, // A7D5..A7DC
- UNKNOWN, // A7DD..A7F1
- LATIN, // A7F2..A7FF
+ LATIN, // A78B..A7DC
+ UNKNOWN, // A7DD..A7F0
+ LATIN, // A7F1..A7FF
SYLOTI_NAGRI, // A800..A82C
UNKNOWN, // A82D..A82F
COMMON, // A830..A839
@@ -8089,15 +8212,9 @@ class Character implements java.io.Serializable, Comparable, Constabl
HEBREW, // FB43..FB44
UNKNOWN, // FB45
HEBREW, // FB46..FB4F
- ARABIC, // FB50..FBC2
- UNKNOWN, // FBC3..FBD2
- ARABIC, // FBD3..FD3D
+ ARABIC, // FB50..FD3D
COMMON, // FD3E..FD3F
- ARABIC, // FD40..FD8F
- UNKNOWN, // FD90..FD91
- ARABIC, // FD92..FDC7
- UNKNOWN, // FDC8..FDCE
- ARABIC, // FDCF
+ ARABIC, // FD40..FDCF
UNKNOWN, // FDD0..FDEF
ARABIC, // FDF0..FDFF
INHERITED, // FE00..FE0F
@@ -8266,7 +8383,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
LYDIAN, // 10920..10939
UNKNOWN, // 1093A..1093E
LYDIAN, // 1093F
- UNKNOWN, // 10940..1097F
+ SIDETIC, // 10940..10959
+ UNKNOWN, // 1095A..1097F
MEROITIC_HIEROGLYPHS, // 10980..1099F
MEROITIC_CURSIVE, // 109A0..109B7
UNKNOWN, // 109B8..109BB
@@ -8336,9 +8454,11 @@ class Character implements java.io.Serializable, Comparable, Constabl
UNKNOWN, // 10EAE..10EAF
YEZIDI, // 10EB0..10EB1
UNKNOWN, // 10EB2..10EC1
- ARABIC, // 10EC2..10EC4
- UNKNOWN, // 10EC5..10EFB
- ARABIC, // 10EFC..10EFF
+ ARABIC, // 10EC2..10EC7
+ UNKNOWN, // 10EC8..10ECF
+ ARABIC, // 10ED0..10ED8
+ UNKNOWN, // 10ED9..10EF9
+ ARABIC, // 10EFA..10EFF
OLD_SOGDIAN, // 10F00..10F27
UNKNOWN, // 10F28..10F2F
SOGDIAN, // 10F30..10F59
@@ -8508,7 +8628,9 @@ class Character implements java.io.Serializable, Comparable, Constabl
PAU_CIN_HAU, // 11AC0..11AF8
UNKNOWN, // 11AF9..11AFF
DEVANAGARI, // 11B00..11B09
- UNKNOWN, // 11B0A..11BBF
+ UNKNOWN, // 11B0A..11B5F
+ SHARADA, // 11B60..11B67
+ UNKNOWN, // 11B68..11BBF
SUNUWAR, // 11BC0..11BE1
UNKNOWN, // 11BE2..11BEF
SUNUWAR, // 11BF0..11BF9
@@ -8552,7 +8674,11 @@ class Character implements java.io.Serializable, Comparable, Constabl
GUNJALA_GONDI, // 11D93..11D98
UNKNOWN, // 11D99..11D9F
GUNJALA_GONDI, // 11DA0..11DA9
- UNKNOWN, // 11DAA..11EDF
+ UNKNOWN, // 11DAA..11DAF
+ TOLONG_SIKI, // 11DB0..11DDB
+ UNKNOWN, // 11DDC..11DDF
+ TOLONG_SIKI, // 11DE0..11DE9
+ UNKNOWN, // 11DEA..11EDF
MAKASAR, // 11EE0..11EF8
UNKNOWN, // 11EF9..11EFF
KAWI, // 11F00..11F10
@@ -8612,7 +8738,11 @@ class Character implements java.io.Serializable, Comparable, Constabl
KIRAT_RAI, // 16D40..16D79
UNKNOWN, // 16D7A..16E3F
MEDEFAIDRIN, // 16E40..16E9A
- UNKNOWN, // 16E9B..16EFF
+ UNKNOWN, // 16E9B..16E9F
+ BERIA_ERFE, // 16EA0..16EB8
+ UNKNOWN, // 16EB9..16EBA
+ BERIA_ERFE, // 16EBB..16ED3
+ UNKNOWN, // 16ED4..16EFF
MIAO, // 16F00..16F4A
UNKNOWN, // 16F4B..16F4E
MIAO, // 16F4F..16F87
@@ -8624,16 +8754,16 @@ class Character implements java.io.Serializable, Comparable, Constabl
HAN, // 16FE2..16FE3
KHITAN_SMALL_SCRIPT, // 16FE4
UNKNOWN, // 16FE5..16FEF
- HAN, // 16FF0..16FF1
- UNKNOWN, // 16FF2..16FFF
- TANGUT, // 17000..187F7
- UNKNOWN, // 187F8..187FF
- TANGUT, // 18800..18AFF
+ HAN, // 16FF0..16FF6
+ UNKNOWN, // 16FF7..16FFF
+ TANGUT, // 17000..18AFF
KHITAN_SMALL_SCRIPT, // 18B00..18CD5
UNKNOWN, // 18CD6..18CFE
KHITAN_SMALL_SCRIPT, // 18CFF
- TANGUT, // 18D00..18D08
- UNKNOWN, // 18D09..1AFEF
+ TANGUT, // 18D00..18D1E
+ UNKNOWN, // 18D1F..18D7F
+ TANGUT, // 18D80..18DF2
+ UNKNOWN, // 18DF3..1AFEF
KATAKANA, // 1AFF0..1AFF3
UNKNOWN, // 1AFF4
KATAKANA, // 1AFF5..1AFFB
@@ -8665,10 +8795,14 @@ class Character implements java.io.Serializable, Comparable, Constabl
DUPLOYAN, // 1BC9C..1BC9F
COMMON, // 1BCA0..1BCA3
UNKNOWN, // 1BCA4..1CBFF
- COMMON, // 1CC00..1CCF9
- UNKNOWN, // 1CCFA..1CCFF
+ COMMON, // 1CC00..1CCFC
+ UNKNOWN, // 1CCFD..1CCFF
COMMON, // 1CD00..1CEB3
- UNKNOWN, // 1CEB4..1CEFF
+ UNKNOWN, // 1CEB4..1CEB9
+ COMMON, // 1CEBA..1CED0
+ UNKNOWN, // 1CED1..1CEDF
+ COMMON, // 1CEE0..1CEF0
+ UNKNOWN, // 1CEF1..1CEFF
INHERITED, // 1CF00..1CF2D
UNKNOWN, // 1CF2E..1CF2F
INHERITED, // 1CF30..1CF46
@@ -8783,7 +8917,13 @@ class Character implements java.io.Serializable, Comparable, Constabl
OL_ONAL, // 1E5D0..1E5FA
UNKNOWN, // 1E5FB..1E5FE
OL_ONAL, // 1E5FF
- UNKNOWN, // 1E600..1E7DF
+ UNKNOWN, // 1E600..1E6BF
+ TAI_YO, // 1E6C0..1E6DE
+ UNKNOWN, // 1E6DF
+ TAI_YO, // 1E6E0..1E6F5
+ UNKNOWN, // 1E6F6..1E6FD
+ TAI_YO, // 1E6FE..1E6FF
+ UNKNOWN, // 1E700..1E7DF
ETHIOPIC, // 1E7E0..1E7E6
UNKNOWN, // 1E7E7
ETHIOPIC, // 1E7E8..1E7EB
@@ -8900,15 +9040,13 @@ class Character implements java.io.Serializable, Comparable, Constabl
UNKNOWN, // 1F252..1F25F
COMMON, // 1F260..1F265
UNKNOWN, // 1F266..1F2FF
- COMMON, // 1F300..1F6D7
- UNKNOWN, // 1F6D8..1F6DB
+ COMMON, // 1F300..1F6D8
+ UNKNOWN, // 1F6D9..1F6DB
COMMON, // 1F6DC..1F6EC
UNKNOWN, // 1F6ED..1F6EF
COMMON, // 1F6F0..1F6FC
UNKNOWN, // 1F6FD..1F6FF
- COMMON, // 1F700..1F776
- UNKNOWN, // 1F777..1F77A
- COMMON, // 1F77B..1F7D9
+ COMMON, // 1F700..1F7D9
UNKNOWN, // 1F7DA..1F7DF
COMMON, // 1F7E0..1F7EB
UNKNOWN, // 1F7EC..1F7EF
@@ -8927,35 +9065,37 @@ class Character implements java.io.Serializable, Comparable, Constabl
COMMON, // 1F8B0..1F8BB
UNKNOWN, // 1F8BC..1F8BF
COMMON, // 1F8C0..1F8C1
- UNKNOWN, // 1F8C2..1F8FF
- COMMON, // 1F900..1FA53
- UNKNOWN, // 1FA54..1FA5F
+ UNKNOWN, // 1F8C2..1F8CF
+ COMMON, // 1F8D0..1F8D8
+ UNKNOWN, // 1F8D9..1F8FF
+ COMMON, // 1F900..1FA57
+ UNKNOWN, // 1FA58..1FA5F
COMMON, // 1FA60..1FA6D
UNKNOWN, // 1FA6E..1FA6F
COMMON, // 1FA70..1FA7C
UNKNOWN, // 1FA7D..1FA7F
- COMMON, // 1FA80..1FA89
- UNKNOWN, // 1FA8A..1FA8E
- COMMON, // 1FA8F..1FAC6
- UNKNOWN, // 1FAC7..1FACD
- COMMON, // 1FACE..1FADC
+ COMMON, // 1FA80..1FA8A
+ UNKNOWN, // 1FA8B..1FA8D
+ COMMON, // 1FA8E..1FAC6
+ UNKNOWN, // 1FAC7
+ COMMON, // 1FAC8
+ UNKNOWN, // 1FAC9..1FACC
+ COMMON, // 1FACD..1FADC
UNKNOWN, // 1FADD..1FADE
- COMMON, // 1FADF..1FAE9
- UNKNOWN, // 1FAEA..1FAEF
- COMMON, // 1FAF0..1FAF8
+ COMMON, // 1FADF..1FAEA
+ UNKNOWN, // 1FAEB..1FAEE
+ COMMON, // 1FAEF..1FAF8
UNKNOWN, // 1FAF9..1FAFF
COMMON, // 1FB00..1FB92
UNKNOWN, // 1FB93
- COMMON, // 1FB94..1FBF9
- UNKNOWN, // 1FBFA..1FFFF
+ COMMON, // 1FB94..1FBFA
+ UNKNOWN, // 1FBFB..1FFFF
HAN, // 20000..2A6DF
UNKNOWN, // 2A6E0..2A6FF
- HAN, // 2A700..2B739
- UNKNOWN, // 2B73A..2B73F
- HAN, // 2B740..2B81D
+ HAN, // 2A700..2B81D
UNKNOWN, // 2B81E..2B81F
- HAN, // 2B820..2CEA1
- UNKNOWN, // 2CEA2..2CEAF
+ HAN, // 2B820..2CEAD
+ UNKNOWN, // 2CEAE..2CEAF
HAN, // 2CEB0..2EBE0
UNKNOWN, // 2EBE1..2EBEF
HAN, // 2EBF0..2EE5D
@@ -8964,8 +9104,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
UNKNOWN, // 2FA1E..2FFFF
HAN, // 30000..3134A
UNKNOWN, // 3134B..3134F
- HAN, // 31350..323AF
- UNKNOWN, // 323B0..E0000
+ HAN, // 31350..33479
+ UNKNOWN, // 3347A..E0000
COMMON, // E0001
UNKNOWN, // E0002..E001F
COMMON, // E0020..E007F
@@ -8989,6 +9129,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
aliases.put("BASS", BASSA_VAH);
aliases.put("BATK", BATAK);
aliases.put("BENG", BENGALI);
+ aliases.put("BERF", BERIA_ERFE);
aliases.put("BHKS", BHAIKSUKI);
aliases.put("BOPO", BOPOMOFO);
aliases.put("BRAH", BRAHMI);
@@ -9107,6 +9248,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
aliases.put("SHAW", SHAVIAN);
aliases.put("SHRD", SHARADA);
aliases.put("SIDD", SIDDHAM);
+ aliases.put("SIDT", SIDETIC);
aliases.put("SIND", KHUDAWADI);
aliases.put("SINH", SINHALA);
aliases.put("SOGD", SOGDIAN);
@@ -9124,6 +9266,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
aliases.put("TAML", TAMIL);
aliases.put("TANG", TANGUT);
aliases.put("TAVT", TAI_VIET);
+ aliases.put("TAYO", TAI_YO);
aliases.put("TELU", TELUGU);
aliases.put("TFNG", TIFINAGH);
aliases.put("TGLG", TAGALOG);
@@ -9133,6 +9276,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
aliases.put("TIRH", TIRHUTA);
aliases.put("TNSA", TANGSA);
aliases.put("TODR", TODHRI);
+ aliases.put("TOLS", TOLONG_SIKI);
aliases.put("TOTO", TOTO);
aliases.put("TUTG", TULU_TIGALARI);
aliases.put("UGAR", UGARITIC);
diff --git a/src/java.base/share/classes/java/lang/Class.java b/src/java.base/share/classes/java/lang/Class.java
index cfd2fc82235..eab1993a2b4 100644
--- a/src/java.base/share/classes/java/lang/Class.java
+++ b/src/java.base/share/classes/java/lang/Class.java
@@ -43,6 +43,7 @@ import java.lang.reflect.Executable;
import java.lang.reflect.Field;
import java.lang.reflect.GenericArrayType;
import java.lang.reflect.GenericDeclaration;
+import java.lang.reflect.GenericSignatureFormatError;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Member;
import java.lang.reflect.Method;
@@ -159,6 +160,10 @@ import sun.reflect.annotation.*;
* other members are the classes and interfaces whose declarations are
* enclosed within the top-level class declaration.
*
+ * Unless otherwise specified, methods in this class throw a
+ * {@link NullPointerException} when they are called with {@code null}
+ * or an array that contains {@code null} as an argument.
+ *
*
* A class or interface created by the invocation of
* {@link java.lang.invoke.MethodHandles.Lookup#defineHiddenClass(byte[], boolean, MethodHandles.Lookup.ClassOption...)
@@ -529,7 +534,8 @@ public final class Class implements java.io.Serializable,
* (which implies linking). See Section {@jls
* 12.4} of