diff --git a/doc/testing.html b/doc/testing.html
index 31f4fbd1778..195153c8612 100644
--- a/doc/testing.html
+++ b/doc/testing.html
@@ -72,6 +72,7 @@ id="toc-notes-for-specific-tests">Notes for Specific Tests
Non-US
locale
PKCS11 Tests
+SCTP Tests
Testing Ahead-of-time
Optimizations
@@ -621,6 +622,21 @@ element of the appropriate @Artifact class. (See
JTREG="JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs"
For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.
+SCTP Tests
+The SCTP tests require the SCTP runtime library, which is often not
+installed by default in popular Linux distributions. Without this
+library, the SCTP tests will be skipped. To enable the SCTP tests,
+install the SCTP library before running them.
+For distributions using the .deb packaging format and the apt tool
+(such as Debian and Ubuntu), try this:
+sudo apt install libsctp1
+sudo modprobe sctp
+lsmod | grep sctp
+For distributions using the .rpm packaging format and the dnf tool
+(such as Fedora and Red Hat), try this:
+sudo dnf install -y lksctp-tools
+sudo modprobe sctp
+lsmod | grep sctp
Testing Ahead-of-time
Optimizations
One way to improve test coverage of ahead-of-time (AOT) optimizations
diff --git a/doc/testing.md b/doc/testing.md
index b95f59de9fd..d0e54aab02b 100644
--- a/doc/testing.md
+++ b/doc/testing.md
@@ -640,6 +640,32 @@ $ make test TEST="jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java" \
For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.
+
+### SCTP Tests
+
+The SCTP tests require the SCTP runtime library, which is often not installed
+by default in popular Linux distributions. Without this library, the SCTP tests
+will be skipped. To enable the SCTP tests, install the SCTP library before
+running them.
+
+For distributions using the .deb packaging format and the apt tool
+(such as Debian and Ubuntu), try this:
+
+```
+sudo apt install libsctp1
+sudo modprobe sctp
+lsmod | grep sctp
+```
+
+For distributions using the .rpm packaging format and the dnf tool
+(such as Fedora and Red Hat), try this:
+
+```
+sudo dnf install -y lksctp-tools
+sudo modprobe sctp
+lsmod | grep sctp
+```
+
### Testing Ahead-of-time Optimizations
One way to improve test coverage of ahead-of-time (AOT) optimizations in
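To check that SCTP support is actually usable before running the suite, a small probe can be compiled against the `com.sun.nio.sctp` API that the tests exercise (a rough sketch; `SctpProbe` is a hypothetical helper, not part of the test suite):

```java
import com.sun.nio.sctp.SctpChannel;
import java.io.IOException;

public class SctpProbe {
    public static void main(String[] args) {
        // Opening an SctpChannel fails with UnsupportedOperationException
        // when native SCTP support (libsctp / sctp kernel module) is missing.
        try (SctpChannel channel = SctpChannel.open()) {
            System.out.println("SCTP is available; the tests should run.");
        } catch (UnsupportedOperationException e) {
            System.out.println("SCTP is not supported here: " + e.getMessage());
        } catch (IOException e) {
            System.out.println("I/O error while probing SCTP: " + e);
        }
    }
}
```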
diff --git a/make/autoconf/flags-cflags.m4 b/make/autoconf/flags-cflags.m4
index 5a9fdc57c74..639c3852212 100644
--- a/make/autoconf/flags-cflags.m4
+++ b/make/autoconf/flags-cflags.m4
@@ -69,22 +69,18 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
# Debug prefix mapping if supported by compiler
DEBUG_PREFIX_CFLAGS=
- UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: string,
- DEFAULT: "",
- RESULT: DEBUG_SYMBOLS_LEVEL,
+ UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: literal,
+ DEFAULT: [auto], VALID_VALUES: [auto 1 2 3],
+ CHECK_AVAILABLE: [
+ if test x$TOOLCHAIN_TYPE = xmicrosoft; then
+ AVAILABLE=false
+ fi
+ ],
DESC: [set the native debug symbol level (GCC and Clang only)],
- DEFAULT_DESC: [toolchain default])
- AC_SUBST(DEBUG_SYMBOLS_LEVEL)
-
- if test "x${TOOLCHAIN_TYPE}" = xgcc || \
- test "x${TOOLCHAIN_TYPE}" = xclang; then
- DEBUG_SYMBOLS_LEVEL_FLAGS="-g"
- if test "x${DEBUG_SYMBOLS_LEVEL}" != "x"; then
- DEBUG_SYMBOLS_LEVEL_FLAGS="-g${DEBUG_SYMBOLS_LEVEL}"
- FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${DEBUG_SYMBOLS_LEVEL_FLAGS}],
- IF_FALSE: AC_MSG_ERROR("Debug info level ${DEBUG_SYMBOLS_LEVEL} is not supported"))
- fi
- fi
+ DEFAULT_DESC: [toolchain default],
+ IF_AUTO: [
+ RESULT=""
+ ])
# Debug symbols
if test "x$TOOLCHAIN_TYPE" = xgcc; then
@@ -111,8 +107,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
fi
# Debug info level should follow the debug format to be effective.
- CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
- ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
+ CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
+ ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$ALLOW_ABSOLUTE_PATHS_IN_OUTPUT" = "xfalse"; then
# Check if compiler supports -fdebug-prefix-map. If so, use that to make
@@ -132,8 +128,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
IF_FALSE: [GDWARF_FLAGS=""])
# Debug info level should follow the debug format to be effective.
- CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
- ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
+ CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
+ ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CFLAGS_DEBUG_SYMBOLS="-Z7"
fi
diff --git a/make/hotspot/lib/CompileGtest.gmk b/make/hotspot/lib/CompileGtest.gmk
index 60912992134..327014b1e9d 100644
--- a/make/hotspot/lib/CompileGtest.gmk
+++ b/make/hotspot/lib/CompileGtest.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,8 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBGTEST, \
INCLUDE_FILES := gtest-all.cc gmock-all.cc, \
DISABLED_WARNINGS_gcc := format-nonliteral maybe-uninitialized undef \
unused-result zero-as-null-pointer-constant, \
- DISABLED_WARNINGS_clang := format-nonliteral undef unused-result, \
+ DISABLED_WARNINGS_clang := format-nonliteral undef unused-result \
+ zero-as-null-pointer-constant, \
DISABLED_WARNINGS_microsoft := 4530, \
DEFAULT_CFLAGS := false, \
CFLAGS := $(JVM_CFLAGS) \
diff --git a/make/jdk/src/classes/build/tools/taglet/JSpec.java b/make/jdk/src/classes/build/tools/taglet/JSpec.java
index 1c301e94a8a..7e1e0ca215e 100644
--- a/make/jdk/src/classes/build/tools/taglet/JSpec.java
+++ b/make/jdk/src/classes/build/tools/taglet/JSpec.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,13 +49,15 @@ import static com.sun.source.doctree.DocTree.Kind.*;
* The tags can be used as follows:
*
*
- * @jls section-number description
+ * @jls chapter.section description
+ * @jls preview-feature-chapter.section description
*
*
* For example:
*
*
* @jls 3.4 Line Terminators
+ * @jls primitive-types-in-patterns-instanceof-switch-5.7.1 Exact Testing Conversions
*
*
* will produce the following HTML, depending on the file containing
@@ -64,10 +66,24 @@ import static com.sun.source.doctree.DocTree.Kind.*;
* {@code
* See Java Language Specification:
* 3.4 Line terminators
+ *
+ * 5.7.1 Exact Testing Conversions
+ * PREVIEW
* }
*
- * Copies of JLS and JVMS are expected to have been placed in the {@code specs}
- * folder. These documents are not included in open-source repositories.
+ * In inline tags (note that the JLS/JVMS prefix must be written manually):
+ *
+ * JLS {@jls 3.4}
+ *
+ *
+ * produces (note the section sign and no trailing dot):
+ *
+ * JLS §3.4
+ *
+ *
+ * Copies of JLS, JVMS, and preview JLS and JVMS changes are expected to have
+ * been placed in the {@code specs} folder. These documents are not included
+ * in open-source repositories.
*/
public class JSpec implements Taglet {
@@ -87,9 +103,9 @@ public class JSpec implements Taglet {
}
}
- private String tagName;
- private String specTitle;
- private String idPrefix;
+ private final String tagName;
+ private final String specTitle;
+ private final String idPrefix;
JSpec(String tagName, String specTitle, String idPrefix) {
this.tagName = tagName;
@@ -98,7 +114,7 @@ public class JSpec implements Taglet {
}
// Note: Matches special cases like @jvms 6.5.checkcast
- private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?<chapter>[1-9][0-9]*)(?<section>[0-9a-z_.]*)( .*)?$");
+ private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?<preview>([a-z0-9]+-)+)?(?<chapter>[1-9][0-9]*)(?<section>[0-9a-z_.]*)( .*)?$");
/**
* Returns the set of locations in which the tag may be used.
@@ -157,19 +173,50 @@ public class JSpec implements Taglet {
.trim();
Matcher m = TAG_PATTERN.matcher(tagText);
if (m.find()) {
+ // preview-feature-4.6 parses as preview-feature-, 4, .6
+ String preview = m.group("preview"); // null if no preview feature
String chapter = m.group("chapter");
String section = m.group("section");
String rootParent = currentPath().replaceAll("[^/]+", "..");
- String url = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
- rootParent, idPrefix, chapter, section);
+ String url = preview == null ?
+ String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
+ rootParent, idPrefix, chapter, section) :
+ String.format("%1$s/specs/%5$s%2$s.html#%2$s-%3$s%4$s",
+ rootParent, idPrefix, chapter, section, preview);
+
+ var literal = expand(contents).trim();
+ var prefix = (preview == null ? "" : preview) + chapter + section;
+ if (literal.startsWith(prefix)) {
+ var hasFullTitle = literal.length() > prefix.length();
+ if (hasFullTitle) {
+ // Drop the preview identifier
+ literal = chapter + section + literal.substring(prefix.length());
+ } else {
+ // No section sign if the tag refers to a chapter, like {@jvms 4}
+ String sectionSign = section.isEmpty() ? "" : "§";
+ // Change whole text to "§chapter.x" in inline tags.
+ literal = sectionSign + chapter + section;
+ }
+ }
sb.append("")
- .append(expand(contents))
+ .append(literal)
.append("");
+ if (preview != null) {
+ // Add PREVIEW superscript that links to JLS/JVMS 1.5.1
+ // "Restrictions on the Use of Preview Features"
+ // Similar to how APIs link to the Preview info box warning
+ var sectionLink = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
+ rootParent, idPrefix, "1", ".5.1");
+ sb.append("PREVIEW");
+ }
+
if (tag.getKind() == DocTree.Kind.UNKNOWN_BLOCK_TAG) {
sb.append("
");
}
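The extended pattern can be exercised directly to see how a preview-prefixed reference splits into the three named groups (a scratch demo using the same regex as above):

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TagPatternDemo {
    private static final Pattern TAG_PATTERN = Pattern.compile(
            "(?s)(.+ )?(?<preview>([a-z0-9]+-)+)?(?<chapter>[1-9][0-9]*)(?<section>[0-9a-z_.]*)( .*)?$");

    public static void main(String[] args) {
        Matcher m = TAG_PATTERN.matcher(
                "primitive-types-in-patterns-instanceof-switch-5.7.1 Exact Testing Conversions");
        if (m.find()) {
            System.out.println(m.group("preview")); // primitive-types-in-patterns-instanceof-switch-
            System.out.println(m.group("chapter")); // 5
            System.out.println(m.group("section")); // .7.1
        }
    }
}
```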
diff --git a/make/modules/jdk.security.auth/Lib.gmk b/make/modules/jdk.security.auth/Lib.gmk
index 9ead32dbe12..96d609f08d6 100644
--- a/make/modules/jdk.security.auth/Lib.gmk
+++ b/make/modules/jdk.security.auth/Lib.gmk
@@ -31,13 +31,14 @@ include LibCommon.gmk
## Build libjaas
################################################################################
-$(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
- NAME := jaas, \
- OPTIMIZATION := LOW, \
- EXTRA_HEADER_DIRS := java.base:libjava, \
- LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
-))
-
-TARGETS += $(BUILD_LIBJAAS)
+ifeq ($(call isTargetOs, windows), true)
+ $(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
+ NAME := jaas, \
+ OPTIMIZATION := LOW, \
+ EXTRA_HEADER_DIRS := java.base:libjava, \
+ LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
+ ))
+ TARGETS += $(BUILD_LIBJAAS)
+endif
################################################################################
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index b8a9afc123f..27428a5c558 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -5782,6 +5782,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
// return false;
bind(A_IS_NOT_NULL);
ldrw(cnt1, Address(a1, length_offset));
+ ldrw(tmp5, Address(a2, length_offset));
+ cmp(cnt1, tmp5);
+ br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
lea(a1, Address(a1, start_offset));
@@ -5848,6 +5851,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
cbz(a1, DONE);
ldrw(cnt1, Address(a1, length_offset));
cbz(a2, DONE);
+ ldrw(tmp5, Address(a2, length_offset));
+ cmp(cnt1, tmp5);
+ br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
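In Java terms, the stub now returns false on a length mismatch before comparing any contents, which is the `Arrays.equals` contract; a sketch of the semantics being implemented (not the generated code itself):

```java
static boolean arraysEquals(byte[] a1, byte[] a2) {
    if (a1 == a2) {
        return true;                  // same array, or both null
    }
    if (a1 == null || a2 == null) {
        return false;                 // exactly one side is null
    }
    if (a1.length != a2.length) {
        return false;                 // the new early length check
    }
    for (int i = 0; i < a1.length; i++) {
        if (a1[i] != a2[i]) {
            return false;
        }
    }
    return true;
}
```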
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
index 89ae6bc10e0..73b631029a0 100644
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -722,22 +722,20 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
- __ andsw(zr, rscratch1, JVM_ACC_STATIC);
- __ br(Assembler::EQ, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
+ __ andsw(zr, rscratch1, JVM_ACC_STATIC);
+ __ br(Assembler::EQ, L_skip_barrier); // non-static
- __ load_method_holder(rscratch2, rmethod);
- __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
- __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+ __ load_method_holder(rscratch2, rmethod);
+ __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
+ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@@ -1508,7 +1506,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// SVC, HVC, or SMC. Make it a NOP.
__ nop();
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
__ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
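The barrier this code emits (here and in the ppc, riscv, s390, and x86 files below) guards a basic language rule: the first `invokestatic` must trigger class initialization exactly once, even when the caller is already compiled. A small illustration of the semantics being protected:

```java
class Holder {
    static int value;
    static {
        // Must run exactly once, before the first static member access.
        System.out.println("running <clinit>");
        value = 42;
    }
    static int get() {
        return value;
    }
}

public class ClinitDemo {
    public static void main(String[] args) {
        // Loading Holder does not initialize it; the first invokestatic does.
        // The clinit barrier keeps compiled callers honoring that rule.
        System.out.println(Holder.get()); // prints "running <clinit>", then 42
    }
}
```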
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index cde142b39ac..07b469650f0 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2290,7 +2290,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ subs(zr, temp, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
__ br(Assembler::NE, L_clinit_barrier_slow);
__ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
@@ -2340,8 +2341,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ subs(zr, temp, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ br(Assembler::NE, L_clinit_barrier_slow);
diff --git a/src/hotspot/cpu/arm/frame_arm.cpp b/src/hotspot/cpu/arm/frame_arm.cpp
index 7a23296a3d4..f791fae7bd7 100644
--- a/src/hotspot/cpu/arm/frame_arm.cpp
+++ b/src/hotspot/cpu/arm/frame_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -356,10 +356,10 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
assert(is_interpreted_frame(), "Not an interpreted frame");
// These are reasonable sanity checks
- if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
+ if (fp() == nullptr || (intptr_t(fp()) & (wordSize-1)) != 0) {
return false;
}
- if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
+ if (sp() == nullptr || (intptr_t(sp()) & (wordSize-1)) != 0) {
return false;
}
if (fp() + interpreter_frame_initial_sp_offset < sp()) {
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
index 232294b246a..df780ac31a6 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -172,7 +172,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {
address addr = oop_addr != nullptr ? (address)oop_addr : (address)metadata_addr;
- if(pc == 0) {
+ if (pc == nullptr) {
offset = addr - instruction_address() - 8;
} else {
offset = addr - pc - 8;
@@ -228,7 +228,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {
void NativeMovConstReg::set_pc_relative_offset(address addr, address pc) {
int offset;
- if (pc == 0) {
+ if (pc == nullptr) {
offset = addr - instruction_address() - 8;
} else {
offset = addr - pc - 8;
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
index 82385bf0244..2b52db89285 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -371,7 +371,7 @@ class NativeMovConstReg: public NativeInstruction {
public:
intptr_t data() const;
- void set_data(intptr_t x, address pc = 0);
+ void set_data(intptr_t x, address pc = nullptr);
bool is_pc_relative() {
return !is_movw();
}
diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
index fc865be015e..f7bf457f72c 100644
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
@@ -1109,11 +1109,11 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
lhz(R11_scratch1, in_bytes(DataLayout::bci_offset()), R28_mdx);
ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
- add(R11_scratch1, R12_scratch2, R12_scratch2);
+ add(R11_scratch1, R11_scratch1, R12_scratch2);
cmpd(CR0, R11_scratch1, R14_bcp);
beq(CR0, verify_continue);
- call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);
+ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), R19_method, R14_bcp, R28_mdx);
bind(verify_continue);
#endif
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
index 5649ead2ea8..809285afddb 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
@@ -4535,7 +4535,7 @@ void MacroAssembler::push_cont_fastpath() {
Label done;
ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
cmpld(CR0, R1_SP, R0);
- ble(CR0, done);
+ ble(CR0, done); // if (SP <= _cont_fastpath) goto done;
st_ptr(R1_SP, JavaThread::cont_fastpath_offset(), R16_thread);
bind(done);
}
@@ -4546,7 +4546,7 @@ void MacroAssembler::pop_cont_fastpath() {
Label done;
ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
cmpld(CR0, R1_SP, R0);
- ble(CR0, done);
+ blt(CR0, done); // if (SP < _cont_fastpath) goto done;
li(R0, 0);
st_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
bind(done);
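The `ble` to `blt` change makes the pop side clear the watermark when SP is at or above the recorded value, matching a push side that only records strictly higher SPs. A rough model of the intended invariant (names illustrative; the stack grows down, so a larger SP belongs to an older frame):

```java
final class ContFastpathModel {
    // Watermark: highest (oldest) SP that entered the continuation fast path.
    private long contFastpath = 0;

    void push(long sp) {
        if (sp <= contFastpath) {
            return;            // ble: keep the existing, older watermark
        }
        contFastpath = sp;
    }

    void pop(long sp) {
        if (sp < contFastpath) {
            return;            // blt (was ble): still inside the marked region
        }
        contFastpath = 0;      // popping at or past the watermark clears it
    }
}
```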
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index 4e427ace404..4eb2028f529 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1237,26 +1237,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
- __ andi_(R0, R0, JVM_ACC_STATIC);
- __ beq(CR0, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
+ __ andi_(R0, R0, JVM_ACC_STATIC);
+ __ beq(CR0, L_skip_barrier); // non-static
- Register klass = R11_scratch1;
- __ load_method_holder(klass, R19_method);
- __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
+ Register klass = R11_scratch1;
+ __ load_method_holder(klass, R19_method);
+ __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
- __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
- __ mtctr(klass);
- __ bctr();
+ __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
+ __ mtctr(klass);
+ __ bctr();
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);
@@ -2210,7 +2208,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// --------------------------------------------------------------------------
vep_start_pc = (intptr_t)__ pc();
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = r_temp_1;
// Notify OOP recorder (don't need the relocation)
diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
index 8d61ba1b2d7..8a3af748fa1 100644
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2199,7 +2199,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no, Register Rca
__ isync(); // Order load wrt. succeeding loads.
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = Rscratch;
const Register klass = Rscratch;
@@ -2244,8 +2245,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no, Register Rcac
__ isync(); // Order load wrt. succeeding loads.
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = R4_ARG2;
// InterpreterRuntime::resolve_get_put sets field_holder and finally release-stores put_code.
diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
index eeb6fad1b59..8c343f6ab2b 100644
--- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -213,7 +213,7 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
// Is vector's size (in bytes) bigger than a size saved by default?
// riscv does not overlay the floating-point registers on vector registers as aarch64 does.
bool SharedRuntime::is_wide_vector(int size) {
- return UseRVV;
+ return UseRVV && size > 0;
}
// ---------------------------------------------------------------------------
@@ -637,22 +637,20 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
- __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
- __ beqz(t1, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
+ __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
+ __ beqz(t1, L_skip_barrier); // non-static
- __ load_method_holder(t1, xmethod);
- __ clinit_barrier(t1, t0, &L_skip_barrier);
- __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+ __ load_method_holder(t1, xmethod);
+ __ clinit_barrier(t1, t0, &L_skip_barrier);
+ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@@ -1443,7 +1441,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ nop(); // 4 bytes
}
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
__ mov_metadata(t1, method->method_holder()); // InstanceKlass*
__ clinit_barrier(t1, t0, &L_skip_barrier);
diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
index ca41583e4bc..5a3644f70bb 100644
--- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -2192,7 +2192,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ mv(t0, (int) code);
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
__ bne(temp, t0, L_clinit_barrier_slow); // have we resolved this bytecode?
__ ld(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
@@ -2243,8 +2244,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ mv(t0, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ bne(temp, t0, L_clinit_barrier_slow);
diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
index 5b6f7dcd984..00a830a80cd 100644
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1567,7 +1567,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
//---------------------------------------------------------------------
wrapper_VEPStart = __ offset();
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = Z_R1_scratch;
// Notify OOP recorder (don't need the relocation)
@@ -2378,24 +2379,22 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
- __ z_bfalse(L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
+ __ z_bfalse(L_skip_barrier); // non-static
- Register klass = Z_R11;
- __ load_method_holder(klass, Z_method);
- __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
+ Register klass = Z_R11;
+ __ load_method_holder(klass, Z_method);
+ __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
- __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
- __ z_br(klass);
+ __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
+ __ z_br(klass);
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
return;
diff --git a/src/hotspot/cpu/s390/templateTable_s390.cpp b/src/hotspot/cpu/s390/templateTable_s390.cpp
index 4e8fdf275e4..647915ef4fa 100644
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2377,7 +2377,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ z_cli(Address(Rcache, bc_offset), code);
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = Z_R1_scratch;
const Register klass = Z_R1_scratch;
__ z_brne(L_clinit_barrier_slow);
@@ -2427,8 +2428,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ z_cli(Address(cache, code_offset), code);
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = index;
__ z_brne(L_clinit_barrier_slow);
diff --git a/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp b/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp
index 0951bda0d17..77a149addb5 100644
--- a/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp
+++ b/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2024, 2025, Intel Corporation. All rights reserved.
+ * Copyright (c) 2024, 2026, Intel Corporation. All rights reserved.
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1330,10 +1330,12 @@ static void big_case_loop_helper(bool sizeKnown, int size, Label &noMatch, Label
// Clarification: The BYTE_K compare above compares haystack[(n-32):(n-1)]. We need to
// compare haystack[(k-1):(k-1+31)]. Subtracting either index gives shift value of
// (k + 31 - n): x = (k-1+31)-(n-1) = k-1+31-n+1 = k+31-n.
+ // When isU is set, similarly, shift is from haystack[(n-32):(n-1)] to [(k-2):(k-2+31)]
+
if (sizeKnown) {
- __ movl(temp2, 31 + size);
+ __ movl(temp2, (isU ? 30 : 31) + size);
} else {
- __ movl(temp2, 31);
+ __ movl(temp2, isU ? 30 : 31);
__ addl(temp2, needleLen);
}
__ subl(temp2, hsLength);
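The `30` versus `31` constant follows directly from the window arithmetic in the comment; a throwaway check of the two formulas (values arbitrary):

```java
public class ShiftCheck {
    public static void main(String[] args) {
        int n = 100;                              // haystack length in bytes
        int k = 80;                               // candidate match position
        int latin1 = (k - 1 + 31) - (n - 1);      // window starts at k-1
        int utf16  = (k - 2 + 31) - (n - 1);      // window starts at k-2 when isU
        System.out.println(latin1 == k + 31 - n); // true
        System.out.println(utf16  == k + 30 - n); // true
    }
}
```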
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
index 5a4a5b1809e..bbd43c1a0e8 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1043,26 +1043,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
- Register method = rbx;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
+ Register method = rbx;
- { // Bypass the barrier for non-static methods
- Register flags = rscratch1;
- __ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
- __ testl(flags, JVM_ACC_STATIC);
- __ jcc(Assembler::zero, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ Register flags = rscratch1;
+ __ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
+ __ testl(flags, JVM_ACC_STATIC);
+ __ jcc(Assembler::zero, L_skip_barrier); // non-static
- Register klass = rscratch1;
- __ load_method_holder(klass, method);
- __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
+ Register klass = rscratch1;
+ __ load_method_holder(klass, method);
+ __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
- __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
+ __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@@ -1904,7 +1902,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
int vep_offset = ((intptr_t)__ pc()) - start;
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = r10;
__ mov_metadata(klass, method->method_holder()); // InstanceKlass*
@@ -3602,4 +3601,3 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
}
#endif // INCLUDE_JFR
-
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
index 3e5593322d5..7d5dee6a5df 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,39 @@ static address kyberAvx512ConstsAddr(int offset) {
const Register scratch = r10;
+ATTRIBUTE_ALIGNED(64) static const uint8_t kyberAvx512_12To16Dup[] = {
+// 0 - 63
+ 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15, 16,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23, 24, 25, 25, 26, 27, 28, 28, 29, 30,
+ 31, 31, 32, 33, 34, 34, 35, 36, 37, 37, 38, 39, 40, 40, 41, 42, 43, 43, 44,
+ 45, 46, 46, 47
+ };
+
+static address kyberAvx512_12To16DupAddr() {
+ return (address) kyberAvx512_12To16Dup;
+}
+
+ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512_12To16Shift[] = {
+// 0 - 31
+ 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0,
+ 4, 0, 4, 0, 4, 0, 4
+ };
+
+static address kyberAvx512_12To16ShiftAddr() {
+ return (address) kyberAvx512_12To16Shift;
+}
+
+ATTRIBUTE_ALIGNED(64) static const uint64_t kyberAvx512_12To16And[] = {
+// 0 - 7
+ 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
+ 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
+ 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF
+ };
+
+static address kyberAvx512_12To16AndAddr() {
+ return (address) kyberAvx512_12To16And;
+}
+
ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512NttPerms[] = {
// 0
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
@@ -822,10 +855,65 @@ address generate_kyber12To16_avx512(StubGenerator *stubgen,
const Register perms = r11;
- Label Loop;
+ Label Loop, VBMILoop;
__ addptr(condensed, condensedOffs);
+ if (VM_Version::supports_avx512_vbmi()) {
+ // mask load for the first 48 bytes of each vector
+ __ mov64(rax, 0x0000FFFFFFFFFFFF);
+ __ kmovql(k1, rax);
+
+ __ lea(perms, ExternalAddress(kyberAvx512_12To16DupAddr()));
+ __ evmovdqub(xmm20, Address(perms), Assembler::AVX_512bit);
+
+ __ lea(perms, ExternalAddress(kyberAvx512_12To16ShiftAddr()));
+ __ evmovdquw(xmm21, Address(perms), Assembler::AVX_512bit);
+
+ __ lea(perms, ExternalAddress(kyberAvx512_12To16AndAddr()));
+ __ evmovdquq(xmm22, Address(perms), Assembler::AVX_512bit);
+
+ __ align(OptoLoopAlignment);
+ __ BIND(VBMILoop);
+
+ __ evmovdqub(xmm0, k1, Address(condensed, 0), false,
+ Assembler::AVX_512bit);
+ __ evmovdqub(xmm1, k1, Address(condensed, 48), false,
+ Assembler::AVX_512bit);
+ __ evmovdqub(xmm2, k1, Address(condensed, 96), false,
+ Assembler::AVX_512bit);
+ __ evmovdqub(xmm3, k1, Address(condensed, 144), false,
+ Assembler::AVX_512bit);
+
+ __ evpermb(xmm4, k0, xmm20, xmm0, false, Assembler::AVX_512bit);
+ __ evpermb(xmm5, k0, xmm20, xmm1, false, Assembler::AVX_512bit);
+ __ evpermb(xmm6, k0, xmm20, xmm2, false, Assembler::AVX_512bit);
+ __ evpermb(xmm7, k0, xmm20, xmm3, false, Assembler::AVX_512bit);
+
+ __ evpsrlvw(xmm4, xmm4, xmm21, Assembler::AVX_512bit);
+ __ evpsrlvw(xmm5, xmm5, xmm21, Assembler::AVX_512bit);
+ __ evpsrlvw(xmm6, xmm6, xmm21, Assembler::AVX_512bit);
+ __ evpsrlvw(xmm7, xmm7, xmm21, Assembler::AVX_512bit);
+
+ __ evpandq(xmm0, xmm22, xmm4, Assembler::AVX_512bit);
+ __ evpandq(xmm1, xmm22, xmm5, Assembler::AVX_512bit);
+ __ evpandq(xmm2, xmm22, xmm6, Assembler::AVX_512bit);
+ __ evpandq(xmm3, xmm22, xmm7, Assembler::AVX_512bit);
+
+ store4regs(parsed, 0, xmm0_3, _masm);
+
+ __ addptr(condensed, 192);
+ __ addptr(parsed, 256);
+ __ subl(parsedLength, 128);
+ __ jcc(Assembler::greater, VBMILoop);
+
+ __ leave(); // required for proper stackwalking of RuntimeStub frame
+ __ mov64(rax, 0); // return 0
+ __ ret(0);
+
+ return start;
+ }
+
__ lea(perms, ExternalAddress(kyberAvx512_12To16PermsAddr()));
load4regs(xmm24_27, perms, 0, _masm);
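The VBMI loop unpacks two little-endian 12-bit coefficients out of every 3 condensed bytes: `evpermb` duplicates the shared middle byte (the `12To16Dup` table), `evpsrlvw` applies the alternating 0/4-bit shifts, and `evpandq` masks each lane to 12 bits. A scalar reference of the same transform, for clarity only:

```java
static short[] unpack12to16(byte[] condensed) {
    short[] parsed = new short[condensed.length / 3 * 2];
    for (int i = 0, j = 0; i + 2 < condensed.length; i += 3, j += 2) {
        int b0 = condensed[i] & 0xFF;
        int b1 = condensed[i + 1] & 0xFF;
        int b2 = condensed[i + 2] & 0xFF;
        // First value: bytes (b0, b1), shift by 0, keep the low 12 bits.
        parsed[j] = (short) ((b0 | (b1 << 8)) & 0x0FFF);
        // Second value: bytes (b1, b2), shift by 4, keep the low 12 bits.
        parsed[j + 1] = (short) (((b1 | (b2 << 8)) >>> 4) & 0x0FFF);
    }
    return parsed;
}
```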
diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp
index 42392b84833..db7749ec482 100644
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2216,7 +2216,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ cmpl(temp, code); // have we resolved this bytecode?
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = temp;
const Register klass = temp;
@@ -2264,8 +2265,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ cmpl(temp, code); // have we resolved this bytecode?
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ jcc(Assembler::notEqual, L_clinit_barrier_slow);
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index f88729cdc66..d7c1911a914 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -258,10 +258,18 @@ bool os::free_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+ return Aix::available_memory(value);
+}
+
bool os::available_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+ return Aix::available_memory(value);
+}
+
bool os::Aix::available_memory(physical_memory_size_type& value) {
os::Aix::meminfo_t mi;
if (os::Aix::get_meminfo(&mi)) {
@@ -273,6 +281,10 @@ bool os::Aix::available_memory(physical_memory_size_type& value) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
perfstat_memory_total_t memory_info;
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
return false;
@@ -282,6 +294,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
+ return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
perfstat_memory_total_t memory_info;
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
return false;
@@ -294,6 +310,10 @@ physical_memory_size_type os::physical_memory() {
return Aix::physical_memory();
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return Aix::physical_memory();
+}
+
size_t os::rss() { return (size_t)0; }
// Cpu architecture string
@@ -2264,6 +2284,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
return online_cpus;
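The new `os::Machine` entry points follow a simple delegation pattern: the existing process-level queries keep their override handling and forward to a machine-level view that always reports the raw OS answer. Roughly, in Java terms (names and the override source are illustrative):

```java
final class OsModel {
    static int activeProcessorCountOverride = -1; // stands in for the VM flag

    // Process-level view: a configured override wins.
    static int activeProcessorCount() {
        if (activeProcessorCountOverride > 0) {
            return activeProcessorCountOverride;
        }
        return Machine.activeProcessorCount();
    }

    // Machine-level view: always the raw hardware/OS value.
    static final class Machine {
        static int activeProcessorCount() {
            return Runtime.getRuntime().availableProcessors();
        }
    }
}
```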
diff --git a/src/hotspot/os/bsd/memMapPrinter_macosx.cpp b/src/hotspot/os/bsd/memMapPrinter_macosx.cpp
index 6fd08d63e85..30e258c9d2c 100644
--- a/src/hotspot/os/bsd/memMapPrinter_macosx.cpp
+++ b/src/hotspot/os/bsd/memMapPrinter_macosx.cpp
@@ -132,7 +132,7 @@ public:
static const char* tagToStr(uint32_t user_tag) {
switch (user_tag) {
case 0:
- return 0;
+ return nullptr;
X1(MALLOC, malloc);
X1(MALLOC_SMALL, malloc_small);
X1(MALLOC_LARGE, malloc_large);
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index 61de48bb7fa..0e21c2d1785 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -137,10 +137,18 @@ bool os::available_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+ return Bsd::available_memory(value);
+}
+
bool os::free_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+ return Bsd::available_memory(value);
+}
+
// Available here means free. Note that this number is of no much use. As an estimate
// for future memory pressure it is far too conservative, since MacOS will use a lot
// of unused memory for caches, and return it willingly in case of needs.
@@ -181,6 +189,10 @@ void os::Bsd::print_uptime_info(outputStream* st) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
#if defined(__APPLE__)
struct xsw_usage vmusage;
size_t size = sizeof(vmusage);
@@ -195,6 +207,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
+ return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
#if defined(__APPLE__)
struct xsw_usage vmusage;
size_t size = sizeof(vmusage);
@@ -212,6 +228,10 @@ physical_memory_size_type os::physical_memory() {
return Bsd::physical_memory();
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return Bsd::physical_memory();
+}
+
size_t os::rss() {
size_t rss = 0;
#ifdef __APPLE__
@@ -608,7 +628,7 @@ static void *thread_native_entry(Thread *thread) {
log_info(os, thread)("Thread finished (tid: %zu, pthread id: %zu).",
os::current_thread_id(), (uintx) pthread_self());
- return 0;
+ return nullptr;
}
bool os::create_thread(Thread* thread, ThreadType thr_type,
@@ -1400,7 +1420,7 @@ int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *pa
#elif defined(__APPLE__)
for (uint32_t i = 1; i < _dyld_image_count(); i++) {
// Value for top_address is returned as 0 since we don't have any information about module size
- if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), (address)0, param)) {
+ if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), nullptr, param)) {
return 1;
}
}
@@ -2189,6 +2209,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
return _processor_count;
}
diff --git a/src/hotspot/os/linux/cgroupSubsystem_linux.cpp b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
index f9c6f794ebd..e49d070890e 100644
--- a/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -631,22 +631,20 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
* return:
* true if there were no errors. false otherwise.
*/
-bool CgroupSubsystem::active_processor_count(int& value) {
- int cpu_count;
- int result = -1;
-
+bool CgroupSubsystem::active_processor_count(double& value) {
// We use a cache with a timeout to avoid performing expensive
// computations in the event this function is called frequently.
// [See 8227006].
- CachingCgroupController<CgroupCpuController>* contrl = cpu_controller();
- CachedMetric* cpu_limit = contrl->metrics_cache();
+ CachingCgroupController<CgroupCpuController, double>* contrl = cpu_controller();
+ CachedMetric<double>* cpu_limit = contrl->metrics_cache();
if (!cpu_limit->should_check_metric()) {
- value = (int)cpu_limit->value();
- log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", value);
+ value = cpu_limit->value();
+ log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %.2f", value);
return true;
}
- cpu_count = os::Linux::active_processor_count();
+ int cpu_count = os::Linux::active_processor_count();
+ double result = -1;
if (!CgroupUtil::processor_count(contrl->controller(), cpu_count, result)) {
return false;
}
@@ -671,8 +669,8 @@ bool CgroupSubsystem::active_processor_count(int& value) {
*/
bool CgroupSubsystem::memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& value) {
- CachingCgroupController<CgroupMemoryController>* contrl = memory_controller();
- CachedMetric* memory_limit = contrl->metrics_cache();
+ CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* contrl = memory_controller();
+ CachedMetric<physical_memory_size_type>* memory_limit = contrl->metrics_cache();
if (!memory_limit->should_check_metric()) {
value = memory_limit->value();
return true;
diff --git a/src/hotspot/os/linux/cgroupSubsystem_linux.hpp b/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
index 522b64f8816..d083a9985c2 100644
--- a/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -181,20 +181,21 @@ class CgroupController: public CHeapObj<mtInternal> {
static bool limit_from_str(char* limit_str, physical_memory_size_type& value);
};
+template <typename MetricType>
class CachedMetric : public CHeapObj<mtInternal>{
private:
- volatile physical_memory_size_type _metric;
+ volatile MetricType _metric;
volatile jlong _next_check_counter;
public:
CachedMetric() {
- _metric = value_unlimited;
+ _metric = static_cast<MetricType>(value_unlimited);
_next_check_counter = min_jlong;
}
bool should_check_metric() {
return os::elapsed_counter() > _next_check_counter;
}
- physical_memory_size_type value() { return _metric; }
- void set_value(physical_memory_size_type value, jlong timeout) {
+ MetricType value() { return _metric; }
+ void set_value(MetricType value, jlong timeout) {
_metric = value;
// Metric is unlikely to change, but we want to remain
// responsive to configuration changes. A very short grace time
@@ -205,19 +206,19 @@ class CachedMetric : public CHeapObj{
}
};
-template <typename T>
+template <typename T, typename MetricType>
class CachingCgroupController : public CHeapObj<mtInternal> {
private:
T* _controller;
- CachedMetric* _metrics_cache;
+ CachedMetric<MetricType>* _metrics_cache;
public:
CachingCgroupController(T* cont) {
_controller = cont;
- _metrics_cache = new CachedMetric();
+ _metrics_cache = new CachedMetric<MetricType>();
}
- CachedMetric* metrics_cache() { return _metrics_cache; }
+ CachedMetric<MetricType>* metrics_cache() { return _metrics_cache; }
T* controller() { return _controller; }
};
@@ -277,7 +278,7 @@ class CgroupMemoryController: public CHeapObj<mtInternal> {
class CgroupSubsystem: public CHeapObj<mtInternal> {
public:
bool memory_limit_in_bytes(physical_memory_size_type upper_bound, physical_memory_size_type& value);
- bool active_processor_count(int& value);
+ bool active_processor_count(double& value);
virtual bool pids_max(uint64_t& value) = 0;
virtual bool pids_current(uint64_t& value) = 0;
@@ -286,8 +287,8 @@ class CgroupSubsystem: public CHeapObj {
virtual char * cpu_cpuset_cpus() = 0;
virtual char * cpu_cpuset_memory_nodes() = 0;
virtual const char * container_type() = 0;
- virtual CachingCgroupController<CgroupMemoryController>* memory_controller() = 0;
- virtual CachingCgroupController<CgroupCpuController>* cpu_controller() = 0;
+ virtual CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() = 0;
+ virtual CachingCgroupController<CgroupCpuController, double>* cpu_controller() = 0;
virtual CgroupCpuacctController* cpuacct_controller() = 0;
bool cpu_quota(int& value);
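Parameterizing the cached value type lets the CPU cache hold a fractional `double` while the memory cache keeps holding byte counts. A Java generic analogue of the same shape (illustrative; the timeout policy is simplified):

```java
final class CachedMetricModel<T> {
    private volatile T metric;
    private volatile long nextCheckNanos = Long.MIN_VALUE;

    boolean shouldCheckMetric() {
        return System.nanoTime() > nextCheckNanos;
    }

    T value() {
        return metric;
    }

    void setValue(T value, long timeoutNanos) {
        metric = value;
        // Short grace time: stay responsive to configuration changes.
        nextCheckNanos = System.nanoTime() + timeoutNanos;
    }
}
```

Under this scheme the CPU controller would cache a `CachedMetricModel<Double>` and the memory controller a `CachedMetricModel<Long>`.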
diff --git a/src/hotspot/os/linux/cgroupUtil_linux.cpp b/src/hotspot/os/linux/cgroupUtil_linux.cpp
index 7aa07d53148..570b335940b 100644
--- a/src/hotspot/os/linux/cgroupUtil_linux.cpp
+++ b/src/hotspot/os/linux/cgroupUtil_linux.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2024, 2025, Red Hat, Inc.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,9 +26,8 @@
#include "cgroupUtil_linux.hpp"
#include "os_linux.hpp"
-bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, int& value) {
+bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, double& value) {
assert(upper_bound > 0, "upper bound of cpus must be positive");
- int limit_count = upper_bound;
int quota = -1;
int period = -1;
if (!cpu_ctrl->cpu_quota(quota)) {
@@ -37,20 +37,15 @@ bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound,
return false;
}
int quota_count = 0;
- int result = upper_bound;
+ double result = upper_bound;
- if (quota > -1 && period > 0) {
- quota_count = ceilf((float)quota / (float)period);
- log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count);
+ if (quota > 0 && period > 0) { // Use quotas
+ double cpu_quota = static_cast(quota) / period;
+ log_trace(os, container)("CPU Quota based on quota/period: %.2f", cpu_quota);
+ result = MIN2(result, cpu_quota);
}
- // Use quotas
- if (quota_count != 0) {
- limit_count = quota_count;
- }
-
- result = MIN2(upper_bound, limit_count);
- log_trace(os, container)("OSContainer::active_processor_count: %d", result);
+ log_trace(os, container)("OSContainer::active_processor_count: %.2f", result);
value = result;
return true;
}
@@ -73,11 +68,11 @@ physical_memory_size_type CgroupUtil::get_updated_mem_limit(CgroupMemoryControll
// Get an updated cpu limit. The return value is strictly less than or equal to the
// passed in 'lowest' value.
-int CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
+double CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
int lowest,
int upper_bound) {
assert(lowest > 0 && lowest <= upper_bound, "invariant");
- int cpu_limit_val = -1;
+ double cpu_limit_val = -1;
if (CgroupUtil::processor_count(cpu, upper_bound, cpu_limit_val) && cpu_limit_val != upper_bound) {
assert(cpu_limit_val <= upper_bound, "invariant");
if (lowest > cpu_limit_val) {
@@ -172,7 +167,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
assert(cg_path[0] == '/', "cgroup path must start with '/'");
int host_cpus = os::Linux::active_processor_count();
int lowest_limit = host_cpus;
- int cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
+ double cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
int orig_limit = lowest_limit != host_cpus ? lowest_limit : host_cpus;
char* limit_cg_path = nullptr;
while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
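
The conversion above is the heart of the change: quota/period is now kept as a fraction instead of being rounded up with ceilf. A self-contained comparison using hypothetical cgroup values:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main() {
      // Hypothetical cpu.max of "50000 100000", i.e. half a CPU, on an 8-CPU host.
      int quota = 50000, period = 100000, host_cpus = 8;
      double fractional = static_cast<double>(quota) / period;           // 0.50 (new)
      double limit = std::min(static_cast<double>(host_cpus), fractional);
      int rounded = (int)ceilf((float)quota / (float)period);            // 1 (old)
      printf("new limit: %.2f, old limit: %d\n", limit, rounded);
      return 0;
    }
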
diff --git a/src/hotspot/os/linux/cgroupUtil_linux.hpp b/src/hotspot/os/linux/cgroupUtil_linux.hpp
index d72bbd1cf1e..1fd2a7d872b 100644
--- a/src/hotspot/os/linux/cgroupUtil_linux.hpp
+++ b/src/hotspot/os/linux/cgroupUtil_linux.hpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2024, Red Hat, Inc.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +32,7 @@
class CgroupUtil: AllStatic {
public:
- static bool processor_count(CgroupCpuController* cpu, int upper_bound, int& value);
+ static bool processor_count(CgroupCpuController* cpu, int upper_bound, double& value);
// Given a memory controller, adjust its path to a point in the hierarchy
// that represents the closest memory limit.
static void adjust_controller(CgroupMemoryController* m);
@@ -42,9 +43,7 @@ class CgroupUtil: AllStatic {
static physical_memory_size_type get_updated_mem_limit(CgroupMemoryController* m,
physical_memory_size_type lowest,
physical_memory_size_type upper_bound);
- static int get_updated_cpu_limit(CgroupCpuController* c,
- int lowest,
- int upper_bound);
+ static double get_updated_cpu_limit(CgroupCpuController* c, int lowest, int upper_bound);
};
#endif // CGROUP_UTIL_LINUX_HPP
diff --git a/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
index 2df604083d2..c8f5a290c99 100644
--- a/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -328,8 +328,8 @@ CgroupV1Subsystem::CgroupV1Subsystem(CgroupV1Controller* cpuset,
_pids(pids) {
CgroupUtil::adjust_controller(memory);
CgroupUtil::adjust_controller(cpu);
- _memory = new CachingCgroupController<CgroupMemoryController>(memory);
- _cpu = new CachingCgroupController<CgroupCpuController>(cpu);
+ _memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
+ _cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
}
bool CgroupV1Subsystem::is_containerized() {
diff --git a/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
index f556bc57f26..af8d0efd378 100644
--- a/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -214,15 +214,15 @@ class CgroupV1Subsystem: public CgroupSubsystem {
const char * container_type() override {
return "cgroupv1";
}
- CachingCgroupController<CgroupMemoryController>* memory_controller() override { return _memory; }
- CachingCgroupController<CgroupCpuController>* cpu_controller() override { return _cpu; }
+ CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
+ CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; }
private:
/* controllers */
- CachingCgroupController<CgroupMemoryController>* _memory = nullptr;
+ CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
CgroupV1Controller* _cpuset = nullptr;
- CachingCgroupController<CgroupCpuController>* _cpu = nullptr;
+ CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
CgroupV1CpuacctController* _cpuacct = nullptr;
CgroupV1Controller* _pids = nullptr;
diff --git a/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp b/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
index c61d30e9236..30e1affc646 100644
--- a/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2025, Red Hat Inc.
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -156,8 +156,8 @@ CgroupV2Subsystem::CgroupV2Subsystem(CgroupV2MemoryController * memory,
_unified(unified) {
CgroupUtil::adjust_controller(memory);
CgroupUtil::adjust_controller(cpu);
- _memory = new CachingCgroupController<CgroupMemoryController>(memory);
- _cpu = new CachingCgroupController<CgroupCpuController>(cpu);
+ _memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
+ _cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
_cpuacct = cpuacct;
}
diff --git a/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp b/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
index 39a4fabe9f6..998145c0ff9 100644
--- a/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2024, Red Hat Inc.
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -152,8 +152,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
/* One unified controller */
CgroupV2Controller _unified;
/* Caching wrappers for cpu/memory metrics */
- CachingCgroupController<CgroupMemoryController>* _memory = nullptr;
- CachingCgroupController<CgroupCpuController>* _cpu = nullptr;
+ CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
+ CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
CgroupCpuacctController* _cpuacct = nullptr;
@@ -175,8 +175,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
const char * container_type() override {
return "cgroupv2";
}
- CachingCgroupController<CgroupMemoryController>* memory_controller() override { return _memory; }
- CachingCgroupController<CgroupCpuController>* cpu_controller() override { return _cpu; }
+ CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
+ CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; };
};
diff --git a/src/hotspot/os/linux/osContainer_linux.cpp b/src/hotspot/os/linux/osContainer_linux.cpp
index 15a6403d07f..b46263efd99 100644
--- a/src/hotspot/os/linux/osContainer_linux.cpp
+++ b/src/hotspot/os/linux/osContainer_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -86,8 +86,8 @@ void OSContainer::init() {
// 2.) On a physical Linux system with a limit enforced by other means (like systemd slice)
physical_memory_size_type mem_limit_val = value_unlimited;
(void)memory_limit_in_bytes(mem_limit_val); // discard error and use default
- int host_cpus = os::Linux::active_processor_count();
- int cpus = host_cpus;
+ double host_cpus = os::Linux::active_processor_count();
+ double cpus = host_cpus;
(void)active_processor_count(cpus); // discard error and use default
any_mem_cpu_limit_present = mem_limit_val != value_unlimited || host_cpus != cpus;
if (any_mem_cpu_limit_present) {
@@ -127,8 +127,7 @@ bool OSContainer::available_memory_in_bytes(physical_memory_size_type& value) {
return false;
}
-bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_swap,
- physical_memory_size_type& value) {
+bool OSContainer::available_swap_in_bytes(physical_memory_size_type& value) {
physical_memory_size_type mem_limit = 0;
physical_memory_size_type mem_swap_limit = 0;
if (memory_limit_in_bytes(mem_limit) &&
@@ -179,8 +178,7 @@ bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_sw
assert(num < 25, "buffer too small");
mem_limit_buf[num] = '\0';
log_trace(os,container)("OSContainer::available_swap_in_bytes: container_swap_limit=%s"
- " container_mem_limit=%s, host_free_swap: " PHYS_MEM_TYPE_FORMAT,
- mem_swap_buf, mem_limit_buf, host_free_swap);
+ " container_mem_limit=%s", mem_swap_buf, mem_limit_buf);
}
return false;
}
@@ -252,7 +250,7 @@ char * OSContainer::cpu_cpuset_memory_nodes() {
return cgroup_subsystem->cpu_cpuset_memory_nodes();
}
-bool OSContainer::active_processor_count(int& value) {
+bool OSContainer::active_processor_count(double& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->active_processor_count(value);
}
@@ -291,11 +289,13 @@ template <typename T> struct metric_fmt;
template<> struct metric_fmt<unsigned long long> { static constexpr const char* fmt = "%llu"; };
template<> struct metric_fmt<unsigned long> { static constexpr const char* fmt = "%lu"; };
template<> struct metric_fmt<int> { static constexpr const char* fmt = "%d"; };
+template<> struct metric_fmt<double> { static constexpr const char* fmt = "%.2f"; };
template<> struct metric_fmt<const char*> { static constexpr const char* fmt = "%s"; };
template void OSContainer::print_container_metric(outputStream*, const char*, unsigned long long int, const char*);
template void OSContainer::print_container_metric(outputStream*, const char*, unsigned long int, const char*);
template void OSContainer::print_container_metric(outputStream*, const char*, int, const char*);
+template void OSContainer::print_container_metric(outputStream*, const char*, double, const char*);
template void OSContainer::print_container_metric(outputStream*, const char*, const char*, const char*);
template <typename T>
@@ -304,12 +304,13 @@ void OSContainer::print_container_metric(outputStream* st, const char* metrics,
constexpr int longest_value = max_length - 11; // Max length - shortest "metric: " string ("cpu_quota: ")
char value_str[longest_value + 1] = {};
os::snprintf_checked(value_str, longest_value, metric_fmt<T>::fmt, value);
- st->print("%s: %*s", metrics, max_length - static_cast<int>(strlen(metrics)) - 2, value_str); // -2 for the ": "
- if (unit[0] != '\0') {
- st->print_cr(" %s", unit);
- } else {
- st->print_cr("");
- }
+
+ const int pad_width = max_length - static_cast<int>(strlen(metrics)) - 2; // -2 for the ": "
+ const char* unit_prefix = unit[0] != '\0' ? " " : "";
+
+ char line[128] = {};
+ os::snprintf_checked(line, sizeof(line), "%s: %*s%s%s", metrics, pad_width, value_str, unit_prefix, unit);
+ st->print_cr("%s", line);
}
void OSContainer::print_container_helper(outputStream* st, MetricResult& res, const char* metrics) {
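
The metric_fmt trait above maps a value type to its printf conversion so print_container_metric stays a single template; the added double specialization is what lets the fractional CPU count print as, say, 0.50. The same pattern in standalone form (illustrative re-creation, not the HotSpot code itself):

    #include <cstdio>

    template <typename T> struct fmt_of;
    template <> struct fmt_of<int>    { static constexpr const char* fmt = "%d"; };
    template <> struct fmt_of<double> { static constexpr const char* fmt = "%.2f"; };

    template <typename T>
    void print_metric(const char* name, T value) {
      printf("%s: ", name);
      printf(fmt_of<T>::fmt, value);   // trait selects the right conversion
      printf("\n");
    }

    int main() {
      print_metric("cpu_quota", 2);                 // cpu_quota: 2
      print_metric("active_processor_count", 0.5);  // active_processor_count: 0.50
      return 0;
    }
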
diff --git a/src/hotspot/os/linux/osContainer_linux.hpp b/src/hotspot/os/linux/osContainer_linux.hpp
index 11c3e086feb..96b59b98db8 100644
--- a/src/hotspot/os/linux/osContainer_linux.hpp
+++ b/src/hotspot/os/linux/osContainer_linux.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -72,8 +72,7 @@ class OSContainer: AllStatic {
static const char * container_type();
static bool available_memory_in_bytes(physical_memory_size_type& value);
- static bool available_swap_in_bytes(physical_memory_size_type host_free_swap,
- physical_memory_size_type& value);
+ static bool available_swap_in_bytes(physical_memory_size_type& value);
static bool memory_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_usage_in_bytes(physical_memory_size_type& value);
@@ -84,7 +83,7 @@ class OSContainer: AllStatic {
static bool rss_usage_in_bytes(physical_memory_size_type& value);
static bool cache_usage_in_bytes(physical_memory_size_type& value);
- static bool active_processor_count(int& value);
+ static bool active_processor_count(double& value);
static char * cpu_cpuset_cpus();
static char * cpu_cpuset_memory_nodes();
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index 6a2a3974a16..7190845a8ba 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -211,15 +211,58 @@ static bool suppress_primordial_thread_resolution = false;
// utility functions
+bool os::is_containerized() {
+ return OSContainer::is_containerized();
+}
+
+bool os::Container::memory_limit(physical_memory_size_type& value) {
+ physical_memory_size_type result = 0;
+ if (OSContainer::memory_limit_in_bytes(result) && result != value_unlimited) {
+ value = result;
+ return true;
+ }
+ return false;
+}
+
+bool os::Container::memory_soft_limit(physical_memory_size_type& value) {
+ physical_memory_size_type result = 0;
+ if (OSContainer::memory_soft_limit_in_bytes(result) && result != 0 && result != value_unlimited) {
+ value = result;
+ return true;
+ }
+ return false;
+}
+
+bool os::Container::memory_throttle_limit(physical_memory_size_type& value) {
+ physical_memory_size_type result = 0;
+ if (OSContainer::memory_throttle_limit_in_bytes(result) && result != value_unlimited) {
+ value = result;
+ return true;
+ }
+ return false;
+}
+
+bool os::Container::used_memory(physical_memory_size_type& value) {
+ return OSContainer::memory_usage_in_bytes(value);
+}
+
bool os::available_memory(physical_memory_size_type& value) {
- if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
+ if (is_containerized() && Container::available_memory(value)) {
log_trace(os)("available container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
+ return Machine::available_memory(value);
+}
+
+bool os::Machine::available_memory(physical_memory_size_type& value) {
return Linux::available_memory(value);
}
+bool os::Container::available_memory(physical_memory_size_type& value) {
+ return OSContainer::available_memory_in_bytes(value);
+}
+
bool os::Linux::available_memory(physical_memory_size_type& value) {
physical_memory_size_type avail_mem = 0;
@@ -251,11 +294,15 @@ bool os::Linux::available_memory(physical_memory_size_type& value) {
}
bool os::free_memory(physical_memory_size_type& value) {
- if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
+ if (is_containerized() && Container::available_memory(value)) {
log_trace(os)("free container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
+ return Machine::free_memory(value);
+}
+
+bool os::Machine::free_memory(physical_memory_size_type& value) {
return Linux::free_memory(value);
}
@@ -274,21 +321,30 @@ bool os::Linux::free_memory(physical_memory_size_type& value) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
- if (OSContainer::is_containerized()) {
- physical_memory_size_type mem_swap_limit = value_unlimited;
- physical_memory_size_type memory_limit = value_unlimited;
- if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
- OSContainer::memory_limit_in_bytes(memory_limit)) {
- if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
- mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
- value = mem_swap_limit - memory_limit;
- return true;
- }
- }
- } // fallback to the host swap space if the container returned unlimited
+ if (is_containerized() && Container::total_swap_space(value)) {
+ return true;
+ } // fall back to the host swap space if the container lookup fails
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
return Linux::host_swap(value);
}
+bool os::Container::total_swap_space(physical_memory_size_type& value) {
+ physical_memory_size_type mem_swap_limit = value_unlimited;
+ physical_memory_size_type memory_limit = value_unlimited;
+ if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
+ OSContainer::memory_limit_in_bytes(memory_limit)) {
+ if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
+ mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
+ value = mem_swap_limit - memory_limit;
+ return true;
+ }
+ }
+ return false;
+}
+
static bool host_free_swap_f(physical_memory_size_type& value) {
struct sysinfo si;
int ret = sysinfo(&si);
@@ -309,32 +365,45 @@ bool os::free_swap_space(physical_memory_size_type& value) {
return false;
}
physical_memory_size_type host_free_swap_val = MIN2(total_swap_space, host_free_swap);
- if (OSContainer::is_containerized()) {
- if (OSContainer::available_swap_in_bytes(host_free_swap_val, value)) {
+ if (is_containerized()) {
+ if (Container::free_swap_space(value)) {
return true;
}
// Fall through to use host value
log_trace(os,container)("os::free_swap_space: containerized value unavailable"
" returning host value: " PHYS_MEM_TYPE_FORMAT, host_free_swap_val);
}
+
value = host_free_swap_val;
return true;
}
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
+ return host_free_swap_f(value);
+}
+
+bool os::Container::free_swap_space(physical_memory_size_type& value) {
+ return OSContainer::available_swap_in_bytes(value);
+}
+
physical_memory_size_type os::physical_memory() {
- if (OSContainer::is_containerized()) {
+ if (is_containerized()) {
physical_memory_size_type mem_limit = value_unlimited;
- if (OSContainer::memory_limit_in_bytes(mem_limit) && mem_limit != value_unlimited) {
+ if (Container::memory_limit(mem_limit) && mem_limit != value_unlimited) {
log_trace(os)("total container memory: " PHYS_MEM_TYPE_FORMAT, mem_limit);
return mem_limit;
}
}
- physical_memory_size_type phys_mem = Linux::physical_memory();
+ physical_memory_size_type phys_mem = Machine::physical_memory();
log_trace(os)("total system memory: " PHYS_MEM_TYPE_FORMAT, phys_mem);
return phys_mem;
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return Linux::physical_memory();
+}
+
// Returns the resident set size (RSS) of the process.
// Falls back to using VmRSS from /proc/self/status if /proc/self/smaps_rollup is unavailable.
// Note: On kernels with memory cgroups or shared memory, VmRSS may underreport RSS.
@@ -2439,20 +2508,21 @@ bool os::Linux::print_container_info(outputStream* st) {
OSContainer::print_container_metric(st, "cpu_memory_nodes", p != nullptr ? p : "not supported");
free(p);
- int i = -1;
- bool supported = OSContainer::active_processor_count(i);
+ double cpus = -1;
+ bool supported = OSContainer::active_processor_count(cpus);
if (supported) {
- assert(i > 0, "must be");
+ assert(cpus > 0, "must be");
if (ActiveProcessorCount > 0) {
OSContainer::print_container_metric(st, "active_processor_count", ActiveProcessorCount, "(from -XX:ActiveProcessorCount)");
} else {
- OSContainer::print_container_metric(st, "active_processor_count", i);
+ OSContainer::print_container_metric(st, "active_processor_count", cpus);
}
} else {
OSContainer::print_container_metric(st, "active_processor_count", "not supported");
}
+ int i = -1;
supported = OSContainer::cpu_quota(i);
if (supported && i > 0) {
OSContainer::print_container_metric(st, "cpu_quota", i);
@@ -4737,15 +4807,26 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
- int active_cpus = -1;
- if (OSContainer::is_containerized() && OSContainer::active_processor_count(active_cpus)) {
- log_trace(os)("active_processor_count: determined by OSContainer: %d",
- active_cpus);
- } else {
- active_cpus = os::Linux::active_processor_count();
+ if (is_containerized()) {
+ double cpu_quota;
+ if (Container::processor_count(cpu_quota)) {
+ int active_cpus = ceilf(cpu_quota); // Round fractional CPU quota up.
+ assert(active_cpus <= Machine::active_processor_count(), "must be");
+ log_trace(os)("active_processor_count: determined by OSContainer: %d",
+ active_cpus);
+ return active_cpus;
+ }
}
- return active_cpus;
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
+ return os::Linux::active_processor_count();
+}
+
+bool os::Container::processor_count(double& value) {
+ return OSContainer::active_processor_count(value);
}
static bool should_warn_invalid_processor_id() {
@@ -4882,9 +4963,14 @@ int os::open(const char *path, int oflag, int mode) {
oflag |= O_CLOEXEC;
int fd = ::open(path, oflag, mode);
- if (fd == -1) return -1;
+ // No further checking is needed if open() returned an error or
+ // access mode is not read only.
+ if (fd == -1 || (oflag & O_ACCMODE) != O_RDONLY) {
+ return fd;
+ }
- //If the open succeeded, the file might still be a directory
+ // If the open succeeded and is read only, the file might be a directory
+ // which the JVM doesn't allow to be read.
{
struct stat buf;
int ret = ::fstat(fd, &buf);
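
The os_linux.cpp changes above consistently route queries through a container-first, machine-fallback pair. A reduced sketch of that lookup discipline (stand-in functions, not the JDK API):

    #include <cstdint>
    #include <cstdio>

    using mem_size_t = uint64_t;

    // Stand-ins for os::Container / os::Machine; both use bool + out-parameter.
    static bool is_containerized() { return true; }
    static bool container_available_memory(mem_size_t& v) { v = 512u << 20; return true; }
    static bool machine_available_memory(mem_size_t& v)   { v = 16ULL << 30; return true; }

    static bool available_memory(mem_size_t& v) {
      // Prefer the container limit; fall back to the host value.
      if (is_containerized() && container_available_memory(v)) {
        return true;
      }
      return machine_available_memory(v);
    }

    int main() {
      mem_size_t v = 0;
      if (available_memory(v)) printf("available: %llu bytes\n", (unsigned long long)v);
      return 0;
    }
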
diff --git a/src/hotspot/os/posix/perfMemory_posix.cpp b/src/hotspot/os/posix/perfMemory_posix.cpp
index 08a19270943..ce9c2a4f031 100644
--- a/src/hotspot/os/posix/perfMemory_posix.cpp
+++ b/src/hotspot/os/posix/perfMemory_posix.cpp
@@ -112,6 +112,10 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
+ } else {
+ if (!successful_write) {
+ remove(destfile);
+ }
}
}
FREE_C_HEAP_ARRAY(char, destfile);
@@ -949,6 +953,7 @@ static int create_sharedmem_file(const char* dirname, const char* filename, size
warning("Insufficient space for shared memory file: %s/%s\n", dirname, filename);
}
result = OS_ERR;
+ remove(filename);
break;
}
}
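
Both perfMemory hunks apply the same rule: a shared-memory file that could not be fully written is removed rather than left truncated. The pattern in isolation (hypothetical helper, POSIX):

    #include <cstdio>
    #include <fcntl.h>
    #include <unistd.h>

    // Write 'len' bytes to 'path'; on any failure, remove the partial file.
    static bool write_all_or_remove(const char* path, const char* buf, size_t len) {
      int fd = ::open(path, O_CREAT | O_TRUNC | O_WRONLY, 0600);
      if (fd == -1) return false;
      bool ok = true;
      for (size_t done = 0; done < len; ) {
        ssize_t n = ::write(fd, buf + done, len - done);
        if (n <= 0) { ok = false; break; }
        done += (size_t)n;
      }
      ::close(fd);
      if (!ok) ::remove(path);   // never leave a truncated file behind
      return ok;
    }
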
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index efbd1fe7c68..b0b7ae18106 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -839,10 +839,18 @@ bool os::available_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+ return win32::available_memory(value);
+}
+
bool os::free_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+ return win32::available_memory(value);
+}
+
bool os::win32::available_memory(physical_memory_size_type& value) {
// Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
// value if total memory is larger than 4GB
@@ -858,7 +866,11 @@ bool os::win32::available_memory(physical_memory_size_type& value) {
}
}
-bool os::total_swap_space(physical_memory_size_type& value) {
+bool os::total_swap_space(physical_memory_size_type& value) {
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@@ -872,6 +884,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
+ return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@@ -888,6 +904,10 @@ physical_memory_size_type os::physical_memory() {
return win32::physical_memory();
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return win32::physical_memory();
+}
+
size_t os::rss() {
size_t rss = 0;
PROCESS_MEMORY_COUNTERS_EX pmex;
@@ -911,6 +931,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
bool schedules_all_processor_groups = win32::is_windows_11_or_greater() || win32::is_windows_server_2022_or_greater();
if (UseAllWindowsProcessorGroups && !schedules_all_processor_groups && !win32::processor_group_warning_displayed()) {
win32::set_processor_group_warning_displayed(true);
diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
index afef21b091a..3ab81697280 100644
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -412,12 +412,8 @@ run_stub:
}
void os::Aix::init_thread_fpu_state(void) {
-#if !defined(USE_XLC_BUILTINS)
// Disable FP exceptions.
__asm__ __volatile__ ("mtfsfi 6,0");
-#else
- __mtfsfi(6, 0);
-#endif
}
////////////////////////////////////////////////////////////////////////////////
diff --git a/src/hotspot/os_cpu/aix_ppc/prefetch_aix_ppc.inline.hpp b/src/hotspot/os_cpu/aix_ppc/prefetch_aix_ppc.inline.hpp
index c741335b5f0..d9dac0e231f 100644
--- a/src/hotspot/os_cpu/aix_ppc/prefetch_aix_ppc.inline.hpp
+++ b/src/hotspot/os_cpu/aix_ppc/prefetch_aix_ppc.inline.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,29 +29,21 @@
// Included in runtime/prefetch.inline.hpp
inline void Prefetch::read(const void *loc, intx interval) {
-#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbt 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
-#else
- __dcbt(((address)loc) +((long)interval));
-#endif
}
inline void Prefetch::write(void *loc, intx interval) {
-#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbtst 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
-#else
- __dcbtst( ((address)loc) +((long)interval) );
-#endif
}
#endif // OS_CPU_AIX_PPC_PREFETCH_AIX_PPC_INLINE_HPP
diff --git a/src/hotspot/share/cds/aotGrowableArray.cpp b/src/hotspot/share/cds/aotGrowableArray.cpp
new file mode 100644
index 00000000000..ec63e7aa57f
--- /dev/null
+++ b/src/hotspot/share/cds/aotGrowableArray.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "cds/aotGrowableArray.hpp"
+#include "cds/aotMetaspace.hpp"
+#include "memory/allocation.inline.hpp"
+#include "utilities/growableArray.hpp"
+
+void AOTGrowableArrayHelper::deallocate(void* mem) {
+ if (!AOTMetaspace::in_aot_cache(mem)) {
+ GrowableArrayCHeapAllocator::deallocate(mem);
+ }
+}
diff --git a/src/hotspot/share/cds/aotGrowableArray.hpp b/src/hotspot/share/cds/aotGrowableArray.hpp
new file mode 100644
index 00000000000..0a0c137ed07
--- /dev/null
+++ b/src/hotspot/share/cds/aotGrowableArray.hpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_AOT_AOTGROWABLEARRAY_HPP
+#define SHARE_AOT_AOTGROWABLEARRAY_HPP
+
+#include "memory/metaspaceClosureType.hpp"
+#include "utilities/growableArray.hpp"
+
+class AOTGrowableArrayHelper {
+public:
+ static void deallocate(void* mem);
+};
+
+// An AOTGrowableArray provides the same functionality as a GrowableArray that
+// uses the C heap allocator. In addition, AOTGrowableArray can be iterated with
+// MetaspaceClosure. This type should be used for growable arrays that need to be
+// stored in the AOT cache. See ModuleEntry::_reads for an example.
+template <typename E>
+class AOTGrowableArray : public GrowableArrayWithAllocator<E, AOTGrowableArray<E>> {
+ friend class VMStructs;
+ friend class GrowableArrayWithAllocator<E, AOTGrowableArray<E>>;
+
+ static E* allocate(int max, MemTag mem_tag) {
+ return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), mem_tag);
+ }
+
+ E* allocate() {
+ return allocate(this->_capacity, mtClass);
+ }
+
+ void deallocate(E* mem) {
+#if INCLUDE_CDS
+ AOTGrowableArrayHelper::deallocate(mem);
+#else
+ GrowableArrayCHeapAllocator::deallocate(mem);
+#endif
+ }
+
+public:
+ AOTGrowableArray(int initial_capacity, MemTag mem_tag) :
+ GrowableArrayWithAllocator<E, AOTGrowableArray<E>>(
+ allocate(initial_capacity, mem_tag),
+ initial_capacity) {}
+
+ AOTGrowableArray() : AOTGrowableArray(0, mtClassShared) {}
+
+ // methods required by MetaspaceClosure
+ void metaspace_pointers_do(MetaspaceClosure* it);
+ int size_in_heapwords() const { return (int)heap_word_size(sizeof(*this)); }
+ MetaspaceClosureType type() const { return MetaspaceClosureType::GrowableArrayType; }
+ static bool is_read_only_by_default() { return false; }
+};
+
+#endif // SHARE_AOT_AOTGROWABLEARRAY_HPP
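
The deallocate guard is what makes this safe for archived arrays: backing storage that was materialized inside the mapped AOT cache must never be handed to the C-heap free. A standalone sketch of that guard, with a simple bounds check standing in for AOTMetaspace::in_aot_cache:

    #include <cstdlib>

    static char g_cache[4096];   // pretend this is the mmap'ed AOT cache image

    static bool in_aot_cache(const void* p) {
      const char* c = static_cast<const char*>(p);
      return c >= g_cache && c < g_cache + sizeof(g_cache);
    }

    // Free only what the C-heap allocator owns; cache-resident storage is a no-op.
    static void deallocate(void* mem) {
      if (!in_aot_cache(mem)) {
        free(mem);
      }
    }

    int main() {
      deallocate(g_cache + 16);   // no-op: points into the cache image
      deallocate(malloc(32));     // genuinely freed
      return 0;
    }
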
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp b/src/hotspot/share/cds/aotGrowableArray.inline.hpp
similarity index 62%
rename from src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp
rename to src/hotspot/share/cds/aotGrowableArray.inline.hpp
index f6d47acdc01..8c6e8cb6503 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp
+++ b/src/hotspot/share/cds/aotGrowableArray.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,17 +22,16 @@
*
*/
-#ifndef SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP
-#define SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP
+#ifndef SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
+#define SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
-#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
+#include "cds/aotGrowableArray.hpp"
-#include "gc/shared/gc_globals.hpp"
-#include "oops/oop.inline.hpp"
-#include "oops/oopsHierarchy.hpp"
+#include "memory/metaspaceClosure.hpp"
-inline bool G1CMObjArrayProcessor::should_be_sliced(oop obj) {
- return obj->is_objArray() && ((objArrayOop)obj)->size() >= 2 * ObjArrayMarkingStride;
+template <typename E>
+void AOTGrowableArray<E>::metaspace_pointers_do(MetaspaceClosure* it) {
+ it->push_c_array(AOTGrowableArray<E>::data_addr(), AOTGrowableArray<E>::capacity());
}
-#endif // SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_INLINE_HPP
+#endif // SHARE_CDS_AOTGROWABLEARRAY_INLINE_HPP
diff --git a/src/hotspot/share/cds/aotMapLogger.cpp b/src/hotspot/share/cds/aotMapLogger.cpp
index a252eae4b84..5e4e0956824 100644
--- a/src/hotspot/share/cds/aotMapLogger.cpp
+++ b/src/hotspot/share/cds/aotMapLogger.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,8 @@
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
+#include "classfile/moduleEntry.hpp"
+#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "logging/log.hpp"
@@ -141,7 +143,7 @@ public:
info._buffered_addr = ref->obj();
info._requested_addr = ref->obj();
info._bytes = ref->size() * BytesPerWord;
- info._type = ref->msotype();
+ info._type = ref->type();
_objs.append(info);
}
@@ -214,7 +216,7 @@ void AOTMapLogger::dumptime_log_metaspace_region(const char* name, DumpRegion* r
info._buffered_addr = src_info->buffered_addr();
info._requested_addr = info._buffered_addr + _buffer_to_requested_delta;
info._bytes = src_info->size_in_bytes();
- info._type = src_info->msotype();
+ info._type = src_info->type();
objs.append(info);
}
@@ -332,43 +334,52 @@ void AOTMapLogger::log_metaspace_objects_impl(address region_base, address regio
address buffered_addr = info._buffered_addr;
address requested_addr = info._requested_addr;
int bytes = info._bytes;
- MetaspaceObj::Type type = info._type;
- const char* type_name = MetaspaceObj::type_name(type);
+ MetaspaceClosureType type = info._type;
+ const char* type_name = MetaspaceClosure::type_name(type);
log_as_hex(last_obj_base, buffered_addr, last_obj_base + _buffer_to_requested_delta);
switch (type) {
- case MetaspaceObj::ClassType:
+ case MetaspaceClosureType::ClassType:
log_klass((Klass*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::ConstantPoolType:
+ case MetaspaceClosureType::ConstantPoolType:
log_constant_pool((ConstantPool*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::ConstantPoolCacheType:
+ case MetaspaceClosureType::ConstantPoolCacheType:
log_constant_pool_cache((ConstantPoolCache*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::ConstMethodType:
+ case MetaspaceClosureType::ConstMethodType:
log_const_method((ConstMethod*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::MethodType:
+ case MetaspaceClosureType::MethodType:
log_method((Method*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::MethodCountersType:
+ case MetaspaceClosureType::MethodCountersType:
log_method_counters((MethodCounters*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::MethodDataType:
+ case MetaspaceClosureType::MethodDataType:
log_method_data((MethodData*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::SymbolType:
+ case MetaspaceClosureType::ModuleEntryType:
+ log_module_entry((ModuleEntry*)src, requested_addr, type_name, bytes, current);
+ break;
+ case MetaspaceClosureType::PackageEntryType:
+ log_package_entry((PackageEntry*)src, requested_addr, type_name, bytes, current);
+ break;
+ case MetaspaceClosureType::GrowableArrayType:
+ log_growable_array((GrowableArrayBase*)src, requested_addr, type_name, bytes, current);
+ break;
+ case MetaspaceClosureType::SymbolType:
log_symbol((Symbol*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::KlassTrainingDataType:
+ case MetaspaceClosureType::KlassTrainingDataType:
log_klass_training_data((KlassTrainingData*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::MethodTrainingDataType:
+ case MetaspaceClosureType::MethodTrainingDataType:
log_method_training_data((MethodTrainingData*)src, requested_addr, type_name, bytes, current);
break;
- case MetaspaceObj::CompileTrainingDataType:
+ case MetaspaceClosureType::CompileTrainingDataType:
log_compile_training_data((CompileTrainingData*)src, requested_addr, type_name, bytes, current);
break;
default:
@@ -421,6 +432,27 @@ void AOTMapLogger::log_method_data(MethodData* md, address requested_addr, const
log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes, md->method()->external_name());
}
+void AOTMapLogger::log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name,
+ int bytes, Thread* current) {
+ ResourceMark rm(current);
+ log_debug(aot, map)(_LOG_PREFIX " %s", p2i(requested_addr), type_name, bytes,
+ mod->name_as_C_string());
+}
+
+void AOTMapLogger::log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name,
+ int bytes, Thread* current) {
+ ResourceMark rm(current);
+ log_debug(aot, map)(_LOG_PREFIX " %s - %s", p2i(requested_addr), type_name, bytes,
+ pkg->module()->name_as_C_string(), pkg->name_as_C_string());
+}
+
+void AOTMapLogger::log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name,
+ int bytes, Thread* current) {
+ ResourceMark rm(current);
+ log_debug(aot, map)(_LOG_PREFIX " %d (%d)", p2i(requested_addr), type_name, bytes,
+ arr->length(), arr->capacity());
+}
+
void AOTMapLogger::log_klass(Klass* k, address requested_addr, const char* type_name,
int bytes, Thread* current) {
ResourceMark rm(current);
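
The logger now switches on MetaspaceClosureType, with dedicated cases for the newly archivable ModuleEntry, PackageEntry, and GrowableArray objects. A reduced analogue of the dispatch, including the length/capacity printout that log_growable_array produces:

    #include <cstdio>

    enum class ClosureType { Class, ModuleEntry, PackageEntry, GrowableArray, Other };

    struct ArraySketch { int length; int capacity; };   // stand-in for GrowableArrayBase

    static void log_one(ClosureType t, const void* obj) {
      switch (t) {
        case ClosureType::GrowableArray: {
          const ArraySketch* a = static_cast<const ArraySketch*>(obj);
          printf("GrowableArray %d (%d)\n", a->length, a->capacity);
          break;
        }
        case ClosureType::ModuleEntry:
          printf("ModuleEntry\n");   // the real code also prints the module name
          break;
        default:
          printf("other\n");
          break;
      }
    }

    int main() {
      ArraySketch a{3, 8};
      log_one(ClosureType::GrowableArray, &a);   // prints: GrowableArray 3 (8)
      return 0;
    }
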
diff --git a/src/hotspot/share/cds/aotMapLogger.hpp b/src/hotspot/share/cds/aotMapLogger.hpp
index ba188514861..bf7ce0028b9 100644
--- a/src/hotspot/share/cds/aotMapLogger.hpp
+++ b/src/hotspot/share/cds/aotMapLogger.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "cds/archiveBuilder.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
+#include "memory/metaspaceClosureType.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
@@ -37,9 +38,13 @@ class ArchiveStreamedHeapInfo;
class CompileTrainingData;
class DumpRegion;
class FileMapInfo;
+class GrowableArrayBase;
class KlassTrainingData;
+class MethodCounters;
class MethodTrainingData;
+class ModuleEntry;
class outputStream;
+class PackageEntry;
// Write detailed info to a mapfile to analyze contents of the AOT cache/CDS archive.
// -Xlog:aot+map* can be used both when creating an AOT cache, or when using an AOT cache.
@@ -62,7 +67,7 @@ class AOTMapLogger : AllStatic {
address _buffered_addr;
address _requested_addr;
int _bytes;
- MetaspaceObj::Type _type;
+ MetaspaceClosureType _type;
};
public:
@@ -142,6 +147,9 @@ private:
Thread* current);
static void log_klass(Klass* k, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method(Method* m, address requested_addr, const char* type_name, int bytes, Thread* current);
+ static void log_module_entry(ModuleEntry* mod, address requested_addr, const char* type_name, int bytes, Thread* current);
+ static void log_package_entry(PackageEntry* pkg, address requested_addr, const char* type_name, int bytes, Thread* current);
+ static void log_growable_array(GrowableArrayBase* arr, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_symbol(Symbol* s, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_klass_training_data(KlassTrainingData* ktd, address requested_addr, const char* type_name, int bytes, Thread* current);
static void log_method_training_data(MethodTrainingData* mtd, address requested_addr, const char* type_name, int bytes, Thread* current);
diff --git a/src/hotspot/share/cds/aotMetaspace.cpp b/src/hotspot/share/cds/aotMetaspace.cpp
index 79d789e0c70..683c897d855 100644
--- a/src/hotspot/share/cds/aotMetaspace.cpp
+++ b/src/hotspot/share/cds/aotMetaspace.cpp
@@ -698,6 +698,9 @@ public:
Universe::metaspace_pointers_do(it);
vmSymbols::metaspace_pointers_do(it);
TrainingData::iterate_roots(it);
+ if (CDSConfig::is_dumping_full_module_graph()) {
+ ClassLoaderDataShared::iterate_roots(it);
+ }
// The above code should find all the symbols that are referenced by the
// archived classes. We just need to add the extra symbols which
@@ -795,6 +798,10 @@ void VM_PopulateDumpSharedSpace::doit() {
_builder.make_klasses_shareable();
AOTMetaspace::make_method_handle_intrinsics_shareable();
+ if (CDSConfig::is_dumping_full_module_graph()) {
+ ClassLoaderDataShared::remove_unshareable_info();
+ }
+
dump_java_heap_objects();
dump_shared_symbol_table(_builder.symbols());
@@ -1135,6 +1142,7 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
HeapShared::init_heap_writer();
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::ensure_module_entry_tables_exist();
+ ClassLoaderDataShared::build_tables(CHECK);
HeapShared::reset_archived_object_states(CHECK);
}
diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp
index 6bbefea5cd9..9161980c4be 100644
--- a/src/hotspot/share/cds/archiveBuilder.cpp
+++ b/src/hotspot/share/cds/archiveBuilder.cpp
@@ -243,7 +243,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
if (get_follow_mode(ref) != make_a_copy) {
return false;
}
- if (ref->msotype() == MetaspaceObj::ClassType) {
+ if (ref->type() == MetaspaceClosureType::ClassType) {
Klass* klass = (Klass*)ref->obj();
assert(klass->is_klass(), "must be");
if (!is_excluded(klass)) {
@@ -252,7 +252,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
assert(klass->is_instance_klass(), "must be");
}
}
- } else if (ref->msotype() == MetaspaceObj::SymbolType) {
+ } else if (ref->type() == MetaspaceClosureType::SymbolType) {
// Make sure the symbol won't be GC'ed while we are dumping the archive.
Symbol* sym = (Symbol*)ref->obj();
sym->increment_refcount();
@@ -271,11 +271,6 @@ void ArchiveBuilder::gather_klasses_and_symbols() {
aot_log_info(aot)("Gathering classes and symbols ... ");
GatherKlassesAndSymbols doit(this);
iterate_roots(&doit);
-#if INCLUDE_CDS_JAVA_HEAP
- if (CDSConfig::is_dumping_full_module_graph()) {
- ClassLoaderDataShared::iterate_symbols(&doit);
- }
-#endif
doit.finish();
if (CDSConfig::is_dumping_static_archive()) {
@@ -446,14 +441,14 @@ bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* ref, bool read
}
#ifdef ASSERT
- if (ref->msotype() == MetaspaceObj::MethodType) {
+ if (ref->type() == MetaspaceClosureType::MethodType) {
Method* m = (Method*)ref->obj();
assert(!RegeneratedClasses::has_been_regenerated((address)m->method_holder()),
"Should not archive methods in a class that has been regenerated");
}
#endif
- if (ref->msotype() == MetaspaceObj::MethodDataType) {
+ if (ref->type() == MetaspaceClosureType::MethodDataType) {
MethodData* md = (MethodData*)ref->obj();
md->clean_method_data(false /* always_clean */);
}
@@ -554,16 +549,16 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(obj)) {
// Don't dump existing shared metadata again.
return point_to_it;
- } else if (ref->msotype() == MetaspaceObj::MethodDataType ||
- ref->msotype() == MetaspaceObj::MethodCountersType ||
- ref->msotype() == MetaspaceObj::KlassTrainingDataType ||
- ref->msotype() == MetaspaceObj::MethodTrainingDataType ||
- ref->msotype() == MetaspaceObj::CompileTrainingDataType) {
+ } else if (ref->type() == MetaspaceClosureType::MethodDataType ||
+ ref->type() == MetaspaceClosureType::MethodCountersType ||
+ ref->type() == MetaspaceClosureType::KlassTrainingDataType ||
+ ref->type() == MetaspaceClosureType::MethodTrainingDataType ||
+ ref->type() == MetaspaceClosureType::CompileTrainingDataType) {
return (TrainingData::need_data() || TrainingData::assembling_data()) ? make_a_copy : set_to_null;
- } else if (ref->msotype() == MetaspaceObj::AdapterHandlerEntryType) {
+ } else if (ref->type() == MetaspaceClosureType::AdapterHandlerEntryType) {
return CDSConfig::is_dumping_adapters() ? make_a_copy : set_to_null;
} else {
- if (ref->msotype() == MetaspaceObj::ClassType) {
+ if (ref->type() == MetaspaceClosureType::ClassType) {
Klass* klass = (Klass*)ref->obj();
assert(klass->is_klass(), "must be");
if (RegeneratedClasses::has_been_regenerated(klass)) {
@@ -571,7 +566,12 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
}
if (is_excluded(klass)) {
ResourceMark rm;
- log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
+ aot_log_trace(aot)("pointer set to null: class (excluded): %s", klass->external_name());
+ return set_to_null;
+ }
+ if (klass->is_array_klass() && CDSConfig::is_dumping_dynamic_archive()) {
+ ResourceMark rm;
+ aot_log_trace(aot)("pointer set to null: array class not supported in dynamic region: %s", klass->external_name());
return set_to_null;
}
}
@@ -615,15 +615,6 @@ void ArchiveBuilder::dump_rw_metadata() {
ResourceMark rm;
aot_log_info(aot)("Allocating RW objects ... ");
make_shallow_copies(&_rw_region, &_rw_src_objs);
-
-#if INCLUDE_CDS_JAVA_HEAP
- if (CDSConfig::is_dumping_full_module_graph()) {
- // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
- char* start = rw_region()->top();
- ClassLoaderDataShared::allocate_archived_tables();
- alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
- }
-#endif
}
void ArchiveBuilder::dump_ro_metadata() {
@@ -632,15 +623,6 @@ void ArchiveBuilder::dump_ro_metadata() {
start_dump_region(&_ro_region);
make_shallow_copies(&_ro_region, &_ro_src_objs);
-
-#if INCLUDE_CDS_JAVA_HEAP
- if (CDSConfig::is_dumping_full_module_graph()) {
- char* start = ro_region()->top();
- ClassLoaderDataShared::init_archived_tables();
- alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
- }
-#endif
-
RegeneratedClasses::record_regenerated_objects();
}
@@ -658,7 +640,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer
char* oldtop = dump_region->top();
- if (src_info->msotype() == MetaspaceObj::ClassType) {
+ if (src_info->type() == MetaspaceClosureType::ClassType) {
// Allocate space for a pointer directly in front of the future InstanceKlass, so
// we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
// without building another hashtable. See RunTimeClassInfo::get_for()
@@ -674,7 +656,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
alignment = nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift());
}
#endif
- } else if (src_info->msotype() == MetaspaceObj::SymbolType) {
+ } else if (src_info->type() == MetaspaceClosureType::SymbolType) {
// Symbols may be allocated by using AllocateHeap, so their sizes
// may be less than size_in_bytes() indicates.
bytes = ((Symbol*)src)->byte_size();
@@ -684,7 +666,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
memcpy(dest, src, bytes);
// Update the hash of buffered sorted symbols for static dump so that the symbols have deterministic contents
- if (CDSConfig::is_dumping_static_archive() && (src_info->msotype() == MetaspaceObj::SymbolType)) {
+ if (CDSConfig::is_dumping_static_archive() && (src_info->type() == MetaspaceClosureType::SymbolType)) {
Symbol* buffered_symbol = (Symbol*)dest;
assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
buffered_symbol->update_identity_hash();
@@ -699,7 +681,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
}
}
- intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->msotype(), (address)dest);
+ intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->type(), (address)dest);
if (archived_vtable != nullptr) {
*(address*)dest = (address)archived_vtable;
ArchivePtrMarker::mark_pointer((address*)dest);
@@ -709,7 +691,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
src_info->set_buffered_addr((address)dest);
char* newtop = dump_region->top();
- _alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
+ _alloc_stats.record(src_info->type(), int(newtop - oldtop), src_info->read_only());
DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
}
@@ -992,15 +974,15 @@ void ArchiveBuilder::make_training_data_shareable() {
return;
}
- if (info.msotype() == MetaspaceObj::KlassTrainingDataType ||
- info.msotype() == MetaspaceObj::MethodTrainingDataType ||
- info.msotype() == MetaspaceObj::CompileTrainingDataType) {
+ if (info.type() == MetaspaceClosureType::KlassTrainingDataType ||
+ info.type() == MetaspaceClosureType::MethodTrainingDataType ||
+ info.type() == MetaspaceClosureType::CompileTrainingDataType) {
TrainingData* buffered_td = (TrainingData*)info.buffered_addr();
buffered_td->remove_unshareable_info();
- } else if (info.msotype() == MetaspaceObj::MethodDataType) {
+ } else if (info.type() == MetaspaceClosureType::MethodDataType) {
MethodData* buffered_mdo = (MethodData*)info.buffered_addr();
buffered_mdo->remove_unshareable_info();
- } else if (info.msotype() == MetaspaceObj::MethodCountersType) {
+ } else if (info.type() == MetaspaceClosureType::MethodCountersType) {
MethodCounters* buffered_mc = (MethodCounters*)info.buffered_addr();
buffered_mc->remove_unshareable_info();
}
diff --git a/src/hotspot/share/cds/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp
index 9a628439039..9de6c02edc5 100644
--- a/src/hotspot/share/cds/archiveBuilder.hpp
+++ b/src/hotspot/share/cds/archiveBuilder.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -134,13 +134,13 @@ private:
int _size_in_bytes;
int _id; // Each object has a unique serial ID, starting from zero. The ID is assigned
// when the object is added into _source_objs.
- MetaspaceObj::Type _msotype;
+ MetaspaceClosureType _type;
address _source_addr; // The source object to be copied.
address _buffered_addr; // The copy of this object insider the buffer.
public:
SourceObjInfo(MetaspaceClosure::Ref* ref, bool read_only, FollowMode follow_mode) :
_ptrmap_start(0), _ptrmap_end(0), _read_only(read_only), _has_embedded_pointer(false), _follow_mode(follow_mode),
- _size_in_bytes(ref->size() * BytesPerWord), _id(0), _msotype(ref->msotype()),
+ _size_in_bytes(ref->size() * BytesPerWord), _id(0), _type(ref->type()),
_source_addr(ref->obj()) {
if (follow_mode == point_to_it) {
_buffered_addr = ref->obj();
@@ -155,7 +155,7 @@ private:
SourceObjInfo(address src, SourceObjInfo* renegerated_obj_info) :
_ptrmap_start(0), _ptrmap_end(0), _read_only(false),
_follow_mode(renegerated_obj_info->_follow_mode),
- _size_in_bytes(0), _msotype(renegerated_obj_info->_msotype),
+ _size_in_bytes(0), _type(renegerated_obj_info->_type),
_source_addr(src), _buffered_addr(renegerated_obj_info->_buffered_addr) {}
bool should_copy() const { return _follow_mode == make_a_copy; }
@@ -182,7 +182,7 @@ private:
}
return _buffered_addr;
}
- MetaspaceObj::Type msotype() const { return _msotype; }
+ MetaspaceClosureType type() const { return _type; }
FollowMode follow_mode() const { return _follow_mode; }
};
diff --git a/src/hotspot/share/cds/cppVtables.cpp b/src/hotspot/share/cds/cppVtables.cpp
index f2862454286..da68fa70761 100644
--- a/src/hotspot/share/cds/cppVtables.cpp
+++ b/src/hotspot/share/cds/cppVtables.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,12 +22,14 @@
*
*/
+#include "cds/aotGrowableArray.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cppVtables.hpp"
#include "logging/log.hpp"
+#include "memory/resourceArea.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
@@ -53,6 +55,19 @@
// + at run time: we clone the actual contents of the vtables from libjvm.so
// into our own tables.
+
+#ifndef PRODUCT
+
+// AOTGrowableArray has a vtable only when in non-product builds (due to
+// the virtual printing functions in AnyObj).
+
+using GrowableArray_ModuleEntry_ptr = AOTGrowableArray<ModuleEntry*>;
+
+#define DEBUG_CPP_VTABLE_TYPES_DO(f) \
+ f(GrowableArray_ModuleEntry_ptr) \
+
+#endif
+
// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_TYPES_DO(f) \
f(ConstantPool) \
@@ -68,7 +83,8 @@
f(TypeArrayKlass) \
f(KlassTrainingData) \
f(MethodTrainingData) \
- f(CompileTrainingData)
+ f(CompileTrainingData) \
+ NOT_PRODUCT(DEBUG_CPP_VTABLE_TYPES_DO(f))
class CppVtableInfo {
intptr_t _vtable_size;
@@ -86,7 +102,7 @@ public:
}
};
-static inline intptr_t* vtable_of(const Metadata* m) {
+static inline intptr_t* vtable_of(const void* m) {
return *((intptr_t**)m);
}
@@ -116,6 +132,7 @@ CppVtableInfo* CppVtableCloner<T>::allocate_and_initialize(const char* name) {
template <class T>
void CppVtableCloner<T>::initialize(const char* name, CppVtableInfo* info) {
+ ResourceMark rm;
T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
int n = info->vtable_size();
intptr_t* srcvtable = vtable_of(&tmp);
@@ -268,7 +285,7 @@ void CppVtables::serialize(SerializeClosure* soc) {
}
}
-intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address obj) {
+intptr_t* CppVtables::get_archived_vtable(MetaspaceClosureType type, address obj) {
if (!_orig_cpp_vtptrs_inited) {
CPP_VTABLE_TYPES_DO(INIT_ORIG_CPP_VTPTRS);
_orig_cpp_vtptrs_inited = true;
@@ -276,19 +293,23 @@ intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address ob
assert(CDSConfig::is_dumping_archive(), "sanity");
int kind = -1;
- switch (msotype) {
- case MetaspaceObj::SymbolType:
- case MetaspaceObj::TypeArrayU1Type:
- case MetaspaceObj::TypeArrayU2Type:
- case MetaspaceObj::TypeArrayU4Type:
- case MetaspaceObj::TypeArrayU8Type:
- case MetaspaceObj::TypeArrayOtherType:
- case MetaspaceObj::ConstMethodType:
- case MetaspaceObj::ConstantPoolCacheType:
- case MetaspaceObj::AnnotationsType:
- case MetaspaceObj::RecordComponentType:
- case MetaspaceObj::AdapterHandlerEntryType:
- case MetaspaceObj::AdapterFingerPrintType:
+ switch (type) {
+ case MetaspaceClosureType::SymbolType:
+ case MetaspaceClosureType::TypeArrayU1Type:
+ case MetaspaceClosureType::TypeArrayU2Type:
+ case MetaspaceClosureType::TypeArrayU4Type:
+ case MetaspaceClosureType::TypeArrayU8Type:
+ case MetaspaceClosureType::TypeArrayOtherType:
+ case MetaspaceClosureType::CArrayType:
+ case MetaspaceClosureType::ConstMethodType:
+ case MetaspaceClosureType::ConstantPoolCacheType:
+ case MetaspaceClosureType::AnnotationsType:
+ case MetaspaceClosureType::ModuleEntryType:
+ case MetaspaceClosureType::PackageEntryType:
+ case MetaspaceClosureType::RecordComponentType:
+ case MetaspaceClosureType::AdapterHandlerEntryType:
+ case MetaspaceClosureType::AdapterFingerPrintType:
+ PRODUCT_ONLY(case MetaspaceClosureType::GrowableArrayType:)
// These have no vtables.
break;
default:
diff --git a/src/hotspot/share/cds/cppVtables.hpp b/src/hotspot/share/cds/cppVtables.hpp
index b40ca036023..9e28ba020ee 100644
--- a/src/hotspot/share/cds/cppVtables.hpp
+++ b/src/hotspot/share/cds/cppVtables.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
+#include "memory/metaspaceClosureType.hpp"
#include "utilities/globalDefinitions.hpp"
class ArchiveBuilder;
@@ -40,7 +41,7 @@ class CppVtables : AllStatic {
public:
static void dumptime_init(ArchiveBuilder* builder);
static void zero_archived_vtables();
- static intptr_t* get_archived_vtable(MetaspaceObj::Type msotype, address obj);
+ static intptr_t* get_archived_vtable(MetaspaceClosureType type, address obj);
static void serialize(SerializeClosure* sc);
static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false);
static char* vtables_serialized_base() { return _vtables_serialized_base; }
diff --git a/src/hotspot/share/cds/dumpAllocStats.hpp b/src/hotspot/share/cds/dumpAllocStats.hpp
index 7d651320e6f..4553f0f6a01 100644
--- a/src/hotspot/share/cds/dumpAllocStats.hpp
+++ b/src/hotspot/share/cds/dumpAllocStats.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,32 +27,34 @@
#include "classfile/compactHashtable.hpp"
#include "memory/allocation.hpp"
+#include "memory/metaspaceClosureType.hpp"
// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public StackObj {
public:
- // Here's poor man's enum inheritance
-#define SHAREDSPACE_OBJ_TYPES_DO(f) \
- METASPACE_OBJ_TYPES_DO(f) \
+#define DUMPED_OBJ_TYPES_DO(f) \
+ METASPACE_CLOSURE_TYPES_DO(f) \
f(SymbolHashentry) \
f(SymbolBucket) \
f(StringHashentry) \
f(StringBucket) \
- f(ModulesNatives) \
f(CppVTables) \
f(Other)
+#define DUMPED_TYPE_DECLARE(name) name ## Type,
+#define DUMPED_TYPE_NAME_CASE(name) case name ## Type: return #name;
+
enum Type {
// Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
- SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
+ DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_DECLARE)
_number_of_types
};
static const char* type_name(Type type) {
switch(type) {
- SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
+ DUMPED_OBJ_TYPES_DO(DUMPED_TYPE_NAME_CASE)
default:
ShouldNotReachHere();
return nullptr;
@@ -101,16 +103,12 @@ public:
CompactHashtableStats* symbol_stats() { return &_symbol_stats; }
CompactHashtableStats* string_stats() { return &_string_stats; }
- void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
- assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
+ void record(MetaspaceClosureType type, int byte_size, bool read_only) {
+ int t = (int)type;
+ assert(t >= 0 && t < (int)MetaspaceClosureType::_number_of_types, "sanity");
int which = (read_only) ? RO : RW;
- _counts[which][type] ++;
- _bytes [which][type] += byte_size;
- }
-
- void record_modules(int byte_size, bool read_only) {
- int which = (read_only) ? RO : RW;
- _bytes [which][ModulesNativesType] += byte_size;
+ _counts[which][t] ++;
+ _bytes [which][t] += byte_size;
}
void record_other_type(int byte_size, bool read_only) {
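The reworked record() is plain two-dimensional bookkeeping: one row per destination region (read-only vs. read-write), one column per object type. Roughly, and with illustrative sizes only:

```cpp
// Hedged sketch of the tally behind DumpAllocStats::record(); the array
// bound and field names are hypothetical.
struct AllocTally {
  static const int RO = 0, RW = 1, kNumTypes = 32; // illustrative bound
  int counts[2][kNumTypes] = {};
  int bytes [2][kNumTypes] = {};

  void record(int type, int byte_size, bool read_only) {
    int which = read_only ? RO : RW;
    counts[which][type] += 1;
    bytes [which][type] += byte_size;
  }
};
```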
diff --git a/src/hotspot/share/cds/heapShared.cpp b/src/hotspot/share/cds/heapShared.cpp
index fdc335f3799..89694c6780e 100644
--- a/src/hotspot/share/cds/heapShared.cpp
+++ b/src/hotspot/share/cds/heapShared.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -948,10 +948,6 @@ void HeapShared::archive_subgraphs() {
true /* is_full_module_graph */);
}
}
-
- if (CDSConfig::is_dumping_full_module_graph()) {
- Modules::verify_archived_modules();
- }
}
//
diff --git a/src/hotspot/share/ci/ciField.cpp b/src/hotspot/share/ci/ciField.cpp
index 19e05784f4d..e0c818f02fc 100644
--- a/src/hotspot/share/ci/ciField.cpp
+++ b/src/hotspot/share/ci/ciField.cpp
@@ -216,6 +216,10 @@ ciField::ciField(fieldDescriptor *fd) :
static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
if (holder == nullptr)
return false;
+ if (holder->trust_final_fields()) {
+ // Explicit opt-in from system classes
+ return true;
+ }
// Even if general trusting is disabled, trust system-built closures in these packages.
if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke") ||
holder->is_in_package("java/lang/reflect") || holder->is_in_package("jdk/internal/reflect") ||
@@ -230,14 +234,6 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
// Trust final fields in records
if (holder->is_record())
return true;
- // Trust Atomic*FieldUpdaters: they are very important for performance, and make up one
- // more reason not to use Unsafe, if their final fields are trusted. See more in JDK-8140483.
- if (holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl() ||
- holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater() ||
- holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater() ||
- holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl()) {
- return true;
- }
return TrustFinalNonStaticFields;
}
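The net effect of this hunk is a reordering of the trust checks: the explicit @TrustFinalFields opt-in is consulted first, the hard-coded Atomic*FieldUpdater name list is gone (those classes can carry the annotation instead), and everything else falls through to the global flag. A hedged sketch of the resulting decision order, using hypothetical types rather than the ci API:

```cpp
// Hypothetical condensed form of trust_final_non_static_fields() after the change.
struct HolderInfo {
  bool trust_final_fields;   // from the @TrustFinalFields class annotation
  bool in_trusted_package;   // java/lang/invoke, jdk/internal/reflect, ...
  bool is_hidden_or_record;  // hidden classes and records are always trusted
};

bool trust_finals(const HolderInfo& h, bool global_flag) {
  if (h.trust_final_fields)  return true;  // explicit opt-in short-circuits first
  if (h.in_trusted_package)  return true;
  if (h.is_hidden_or_record) return true;
  return global_flag;                      // TrustFinalNonStaticFields fallback
}
```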
diff --git a/src/hotspot/share/ci/ciInstanceKlass.cpp b/src/hotspot/share/ci/ciInstanceKlass.cpp
index 64b9acf9146..33bcabc4566 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp
@@ -65,6 +65,7 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) :
_has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods();
_is_hidden = ik->is_hidden();
_is_record = ik->is_record();
+ _trust_final_fields = ik->trust_final_fields();
_nonstatic_fields = nullptr; // initialized lazily by compute_nonstatic_fields:
_has_injected_fields = -1;
_implementor = nullptr; // we will fill these lazily
diff --git a/src/hotspot/share/ci/ciInstanceKlass.hpp b/src/hotspot/share/ci/ciInstanceKlass.hpp
index a1b2d8dd12d..8ccf1fadfb7 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.hpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.hpp
@@ -59,6 +59,7 @@ private:
bool _has_nonstatic_concrete_methods;
bool _is_hidden;
bool _is_record;
+ bool _trust_final_fields;
bool _has_trusted_loader;
ciFlags _flags;
@@ -207,6 +208,10 @@ public:
return _is_record;
}
+ bool trust_final_fields() const {
+ return _trust_final_fields;
+ }
+
ciInstanceKlass* get_canonical_holder(int offset);
ciField* get_field_by_offset(int field_offset, bool is_static);
ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);
diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp
index c9d9d3632b5..817d0c64d11 100644
--- a/src/hotspot/share/classfile/classFileParser.cpp
+++ b/src/hotspot/share/classfile/classFileParser.cpp
@@ -943,6 +943,7 @@ public:
_java_lang_Deprecated_for_removal,
_jdk_internal_vm_annotation_AOTSafeClassInitializer,
_method_AOTRuntimeSetup,
+ _jdk_internal_vm_annotation_TrustFinalFields,
_annotation_LIMIT
};
const Location _location;
@@ -1878,6 +1879,11 @@ AnnotationCollector::annotation_index(const ClassLoaderData* loader_data,
if (!privileged) break; // only allow in privileged code
return _field_Stable;
}
+ case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_TrustFinalFields_signature): {
+ if (_location != _in_class) break; // only allow for classes
+ if (!privileged) break; // only allow in privileged code
+ return _jdk_internal_vm_annotation_TrustFinalFields;
+ }
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Contended_signature): {
if (_location != _in_field && _location != _in_class) {
break; // only allow for fields and classes
@@ -1992,6 +1998,9 @@ void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) {
if (has_annotation(_jdk_internal_vm_annotation_AOTSafeClassInitializer)) {
ik->set_has_aot_safe_initializer();
}
+ if (has_annotation(_jdk_internal_vm_annotation_TrustFinalFields)) {
+ ik->set_trust_final_fields(true);
+ }
}
#define MAX_ARGS_SIZE 255
diff --git a/src/hotspot/share/classfile/classLoaderDataShared.cpp b/src/hotspot/share/classfile/classLoaderDataShared.cpp
index 7a7743edd03..d415fe64bac 100644
--- a/src/hotspot/share/classfile/classLoaderDataShared.cpp
+++ b/src/hotspot/share/classfile/classLoaderDataShared.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionary.hpp"
#include "logging/log.hpp"
+#include "memory/metaspaceClosure.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
@@ -56,9 +57,9 @@ class ArchivedClassLoaderData {
public:
ArchivedClassLoaderData() : _packages(nullptr), _modules(nullptr), _unnamed_module(nullptr) {}
- void iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure);
- void allocate(ClassLoaderData* loader_data);
- void init_archived_entries(ClassLoaderData* loader_data);
+ void iterate_roots(MetaspaceClosure* closure);
+ void build_tables(ClassLoaderData* loader_data, TRAPS);
+ void remove_unshareable_info();
ModuleEntry* unnamed_module() {
return _unnamed_module;
}
@@ -80,17 +81,14 @@ static ModuleEntry* _archived_javabase_moduleEntry = nullptr;
static int _platform_loader_root_index = -1;
static int _system_loader_root_index = -1;
-void ArchivedClassLoaderData::iterate_symbols(ClassLoaderData* loader_data, MetaspaceClosure* closure) {
+void ArchivedClassLoaderData::iterate_roots(MetaspaceClosure* it) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
- assert_valid(loader_data);
- if (loader_data != nullptr) {
- loader_data->packages()->iterate_symbols(closure);
- loader_data->modules() ->iterate_symbols(closure);
- loader_data->unnamed_module()->iterate_symbols(closure);
- }
+ it->push(&_packages);
+ it->push(&_modules);
+ it->push(&_unnamed_module);
}
-void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
+void ArchivedClassLoaderData::build_tables(ClassLoaderData* loader_data, TRAPS) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
assert_valid(loader_data);
if (loader_data != nullptr) {
@@ -98,19 +96,28 @@ void ArchivedClassLoaderData::allocate(ClassLoaderData* loader_data) {
// address of the Symbols, which may be relocated at runtime due to ASLR.
// So we store the packages/modules in Arrays. At runtime, we create
// the hashtables using these arrays.
- _packages = loader_data->packages()->allocate_archived_entries();
- _modules = loader_data->modules() ->allocate_archived_entries();
- _unnamed_module = loader_data->unnamed_module()->allocate_archived_entry();
+ _packages = loader_data->packages()->build_aot_table(loader_data, CHECK);
+ _modules = loader_data->modules()->build_aot_table(loader_data, CHECK);
+ _unnamed_module = loader_data->unnamed_module();
}
}
-void ArchivedClassLoaderData::init_archived_entries(ClassLoaderData* loader_data) {
- assert(CDSConfig::is_dumping_full_module_graph(), "must be");
- assert_valid(loader_data);
- if (loader_data != nullptr) {
- loader_data->packages()->init_archived_entries(_packages);
- loader_data->modules() ->init_archived_entries(_modules);
- _unnamed_module->init_as_archived_entry();
+void ArchivedClassLoaderData::remove_unshareable_info() {
+ if (_packages != nullptr) {
+ _packages = ArchiveBuilder::current()->get_buffered_addr(_packages);
+ for (int i = 0; i < _packages->length(); i++) {
+ _packages->at(i)->remove_unshareable_info();
+ }
+ }
+ if (_modules != nullptr) {
+ _modules = ArchiveBuilder::current()->get_buffered_addr(_modules);
+ for (int i = 0; i < _modules->length(); i++) {
+ _modules->at(i)->remove_unshareable_info();
+ }
+ }
+ if (_unnamed_module != nullptr) {
+ _unnamed_module = ArchiveBuilder::current()->get_buffered_addr(_unnamed_module);
+ _unnamed_module->remove_unshareable_info();
}
}
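remove_unshareable_info() repeatedly swaps "source" pointers for their copies in the dump buffer via get_buffered_addr(). The underlying mechanism is essentially a source-address-to-buffered-address map; a hedged sketch (hypothetical map, not ArchiveBuilder's real bookkeeping):

```cpp
#include <unordered_map>

// Every object headed for the archive has a copy in the dump buffer; pointer
// fields must be rewritten from the source object to its buffered copy.
static std::unordered_map<const void*, void*> g_buffered; // source -> buffer

template <typename T>
T* get_buffered_addr(T* src) {
  auto it = g_buffered.find(src);
  return it == g_buffered.end() ? nullptr : static_cast<T*>(it->second);
}
```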
@@ -153,7 +160,6 @@ void ArchivedClassLoaderData::clear_archived_oops() {
// ------------------------------
void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
-#if INCLUDE_CDS_JAVA_HEAP
// The streaming object loader prefers loading the class loader related objects before
// the CLD constructor which has a NoSafepointVerifier.
if (!HeapShared::is_loading_streaming_mode()) {
@@ -178,7 +184,6 @@ void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
if (system_loader_module_entry != nullptr) {
system_loader_module_entry->preload_archived_oops();
}
-#endif
}
static ClassLoaderData* null_class_loader_data() {
@@ -210,28 +215,27 @@ void ClassLoaderDataShared::ensure_module_entry_table_exists(oop class_loader) {
assert(met != nullptr, "sanity");
}
-void ClassLoaderDataShared::iterate_symbols(MetaspaceClosure* closure) {
+void ClassLoaderDataShared::build_tables(TRAPS) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
- _archived_boot_loader_data.iterate_symbols (null_class_loader_data(), closure);
- _archived_platform_loader_data.iterate_symbols(java_platform_loader_data_or_null(), closure);
- _archived_system_loader_data.iterate_symbols (java_system_loader_data_or_null(), closure);
+ _archived_boot_loader_data.build_tables(null_class_loader_data(), CHECK);
+ _archived_platform_loader_data.build_tables(java_platform_loader_data_or_null(), CHECK);
+ _archived_system_loader_data.build_tables(java_system_loader_data_or_null(), CHECK);
}
-void ClassLoaderDataShared::allocate_archived_tables() {
+void ClassLoaderDataShared::iterate_roots(MetaspaceClosure* it) {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
- _archived_boot_loader_data.allocate (null_class_loader_data());
- _archived_platform_loader_data.allocate(java_platform_loader_data_or_null());
- _archived_system_loader_data.allocate (java_system_loader_data_or_null());
+ _archived_boot_loader_data.iterate_roots(it);
+ _archived_platform_loader_data.iterate_roots(it);
+ _archived_system_loader_data.iterate_roots(it);
}
-void ClassLoaderDataShared::init_archived_tables() {
+void ClassLoaderDataShared::remove_unshareable_info() {
assert(CDSConfig::is_dumping_full_module_graph(), "must be");
+ _archived_boot_loader_data.remove_unshareable_info();
+ _archived_platform_loader_data.remove_unshareable_info();
+ _archived_system_loader_data.remove_unshareable_info();
- _archived_boot_loader_data.init_archived_entries (null_class_loader_data());
- _archived_platform_loader_data.init_archived_entries(java_platform_loader_data_or_null());
- _archived_system_loader_data.init_archived_entries (java_system_loader_data_or_null());
-
- _archived_javabase_moduleEntry = ModuleEntry::get_archived_entry(ModuleEntryTable::javabase_moduleEntry());
+ _archived_javabase_moduleEntry = ArchiveBuilder::current()->get_buffered_addr(ModuleEntryTable::javabase_moduleEntry());
_platform_loader_root_index = HeapShared::append_root(SystemDictionary::java_platform_loader());
_system_loader_root_index = HeapShared::append_root(SystemDictionary::java_system_loader());
@@ -271,7 +275,6 @@ ModuleEntry* ClassLoaderDataShared::archived_unnamed_module(ClassLoaderData* loa
return archived_module;
}
-
void ClassLoaderDataShared::clear_archived_oops() {
assert(!CDSConfig::is_using_full_module_graph(), "must be");
_archived_boot_loader_data.clear_archived_oops();
diff --git a/src/hotspot/share/classfile/classLoaderDataShared.hpp b/src/hotspot/share/classfile/classLoaderDataShared.hpp
index 39d0a89418f..2cf37310e50 100644
--- a/src/hotspot/share/classfile/classLoaderDataShared.hpp
+++ b/src/hotspot/share/classfile/classLoaderDataShared.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,11 +40,11 @@ class ClassLoaderDataShared : AllStatic {
public:
static void load_archived_platform_and_system_class_loaders() NOT_CDS_JAVA_HEAP_RETURN;
static void restore_archived_modules_for_preloading_classes(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN;
+ static void build_tables(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
+ static void iterate_roots(MetaspaceClosure* closure) NOT_CDS_JAVA_HEAP_RETURN;
+ static void remove_unshareable_info() NOT_CDS_JAVA_HEAP_RETURN;
#if INCLUDE_CDS_JAVA_HEAP
static void ensure_module_entry_tables_exist();
- static void allocate_archived_tables();
- static void iterate_symbols(MetaspaceClosure* closure);
- static void init_archived_tables();
static void serialize(SerializeClosure* f);
static void clear_archived_oops();
static void restore_archived_entries_for_null_class_loader_data();
diff --git a/src/hotspot/share/classfile/moduleEntry.cpp b/src/hotspot/share/classfile/moduleEntry.cpp
index 5fb3d6f2d13..b5b8aa4ef55 100644
--- a/src/hotspot/share/classfile/moduleEntry.cpp
+++ b/src/hotspot/share/classfile/moduleEntry.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "cds/aotClassLocation.hpp"
+#include "cds/aotGrowableArray.inline.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
@@ -37,6 +38,7 @@
#include "jni.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
+#include "memory/metadataFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oopHandle.inline.hpp"
@@ -44,7 +46,6 @@
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/events.hpp"
-#include "utilities/growableArray.hpp"
#include "utilities/hashTable.hpp"
#include "utilities/ostream.hpp"
#include "utilities/quickSort.hpp"
@@ -167,7 +168,7 @@ void ModuleEntry::add_read(ModuleEntry* m) {
} else {
if (reads() == nullptr) {
// Lazily create a module's reads list
- GrowableArray<ModuleEntry*>* new_reads = new (mtModule) GrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
+ AOTGrowableArray<ModuleEntry*>* new_reads = new (mtModule) AOTGrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
set_reads(new_reads);
}
@@ -274,8 +275,7 @@ ModuleEntry::ModuleEntry(Handle module_handle,
_has_default_read_edges(false),
_must_walk_reads(false),
_is_open(is_open),
- _is_patched(false)
- DEBUG_ONLY(COMMA _reads_is_archived(false)) {
+ _is_patched(false) {
// Initialize fields specific to a ModuleEntry
if (_name == nullptr) {
@@ -394,7 +394,6 @@ ModuleEntryTable::~ModuleEntryTable() {
ModuleEntryTableDeleter deleter;
_table.unlink(&deleter);
assert(_table.number_of_entries() == 0, "should have removed all entries");
-
}
void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
@@ -402,147 +401,51 @@ void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
_loader_data = cld;
}
+void ModuleEntry::metaspace_pointers_do(MetaspaceClosure* it) {
+ it->push(&_name);
+ it->push(&_reads);
+ it->push(&_version);
+ it->push(&_location);
+}
+
#if INCLUDE_CDS_JAVA_HEAP
-typedef HashTable<
- const ModuleEntry*,
- ModuleEntry*,
- 557, // prime number
- AnyObj::C_HEAP> ArchivedModuleEntries;
-static ArchivedModuleEntries* _archive_modules_entries = nullptr;
-
-#ifndef PRODUCT
-static int _num_archived_module_entries = 0;
-static int _num_inited_module_entries = 0;
-#endif
-
bool ModuleEntry::should_be_archived() const {
return SystemDictionaryShared::is_builtin_loader(loader_data());
}
-ModuleEntry* ModuleEntry::allocate_archived_entry() const {
- precond(should_be_archived());
- precond(CDSConfig::is_dumping_full_module_graph());
- ModuleEntry* archived_entry = (ModuleEntry*)ArchiveBuilder::rw_region_alloc(sizeof(ModuleEntry));
- memcpy((void*)archived_entry, (void*)this, sizeof(ModuleEntry));
+void ModuleEntry::remove_unshareable_info() {
+ _archived_module_index = HeapShared::append_root(module_oop());
- archived_entry->_archived_module_index = HeapShared::append_root(module_oop());
- if (_archive_modules_entries == nullptr) {
- _archive_modules_entries = new (mtClass)ArchivedModuleEntries();
- }
- assert(_archive_modules_entries->get(this) == nullptr, "Each ModuleEntry must not be shared across ModuleEntryTables");
- _archive_modules_entries->put(this, archived_entry);
- DEBUG_ONLY(_num_archived_module_entries++);
-
- if (CDSConfig::is_dumping_final_static_archive()) {
- OopHandle null_handle;
- archived_entry->_shared_pd = null_handle;
- } else {
- assert(archived_entry->shared_protection_domain() == nullptr, "never set during -Xshare:dump");
+ if (_reads != nullptr) {
+ _reads->set_in_aot_cache();
}
// Clear handles and restore at run time. Handles cannot be archived.
+ if (CDSConfig::is_dumping_final_static_archive()) {
+ OopHandle null_handle;
+ _shared_pd = null_handle;
+ } else {
+ assert(shared_protection_domain() == nullptr, "never set during -Xshare:dump");
+ }
+
OopHandle null_handle;
- archived_entry->_module_handle = null_handle;
-
- // For verify_archived_module_entries()
- DEBUG_ONLY(_num_inited_module_entries++);
-
- if (log_is_enabled(Info, aot, module)) {
- ResourceMark rm;
- LogStream ls(Log(aot, module)::info());
- ls.print("Stored in archive: ");
- archived_entry->print(&ls);
- }
- return archived_entry;
-}
-
-bool ModuleEntry::has_been_archived() {
- assert(!ArchiveBuilder::current()->is_in_buffer_space(this), "must be called on original ModuleEntry");
- return _archive_modules_entries->contains(this);
-}
-
-ModuleEntry* ModuleEntry::get_archived_entry(ModuleEntry* orig_entry) {
- ModuleEntry** ptr = _archive_modules_entries->get(orig_entry);
- assert(ptr != nullptr && *ptr != nullptr, "must have been allocated");
- return *ptr;
-}
-
-// This function is used to archive ModuleEntry::_reads and PackageEntry::_qualified_exports.
-// GrowableArray cannot be directly archived, as it needs to be expandable at runtime.
-// Write it out as an Array, and convert it back to GrowableArray at runtime.
-Array<ModuleEntry*>* ModuleEntry::write_growable_array(GrowableArray<ModuleEntry*>* array) {
- Array<ModuleEntry*>* archived_array = nullptr;
- int length = (array == nullptr) ? 0 : array->length();
- if (length > 0) {
- archived_array = ArchiveBuilder::new_ro_array<ModuleEntry*>(length);
- for (int i = 0; i < length; i++) {
- ModuleEntry* archived_entry = get_archived_entry(array->at(i));
- archived_array->at_put(i, archived_entry);
- ArchivePtrMarker::mark_pointer((address*)archived_array->adr_at(i));
- }
- }
-
- return archived_array;
-}
-
-GrowableArray<ModuleEntry*>* ModuleEntry::restore_growable_array(Array<ModuleEntry*>* archived_array) {
- GrowableArray<ModuleEntry*>* array = nullptr;
- int length = (archived_array == nullptr) ? 0 : archived_array->length();
- if (length > 0) {
- array = new (mtModule) GrowableArray<ModuleEntry*>(length, mtModule);
- for (int i = 0; i < length; i++) {
- ModuleEntry* archived_entry = archived_array->at(i);
- array->append(archived_entry);
- }
- }
-
- return array;
-}
-
-void ModuleEntry::iterate_symbols(MetaspaceClosure* closure) {
- closure->push(&_name);
- closure->push(&_version);
- closure->push(&_location);
-}
-
-void ModuleEntry::init_as_archived_entry() {
- set_archived_reads(write_growable_array(reads()));
+ _module_handle = null_handle;
_loader_data = nullptr; // re-init at runtime
if (name() != nullptr) {
- _shared_path_index = AOTClassLocationConfig::dumptime()->get_module_shared_path_index(_location);
- _name = ArchiveBuilder::get_buffered_symbol(_name);
- ArchivePtrMarker::mark_pointer((address*)&_name);
+ Symbol* src_location = ArchiveBuilder::current()->get_source_addr(_location);
+ _shared_path_index = AOTClassLocationConfig::dumptime()->get_module_shared_path_index(src_location);
} else {
// _shared_path_index is used only by SystemDictionary::is_shared_class_visible_impl()
// for checking classes in named modules.
_shared_path_index = -1;
}
- if (_version != nullptr) {
- _version = ArchiveBuilder::get_buffered_symbol(_version);
- }
- if (_location != nullptr) {
- _location = ArchiveBuilder::get_buffered_symbol(_location);
- }
JFR_ONLY(set_trace_id(0);) // re-init at runtime
-
- ArchivePtrMarker::mark_pointer((address*)&_reads);
- ArchivePtrMarker::mark_pointer((address*)&_version);
- ArchivePtrMarker::mark_pointer((address*)&_location);
}
-#ifndef PRODUCT
-void ModuleEntry::verify_archived_module_entries() {
- assert(_num_archived_module_entries == _num_inited_module_entries,
- "%d ModuleEntries have been archived but %d of them have been properly initialized with archived java.lang.Module objects",
- _num_archived_module_entries, _num_inited_module_entries);
-}
-#endif // PRODUCT
-
void ModuleEntry::load_from_archive(ClassLoaderData* loader_data) {
assert(CDSConfig::is_using_archive(), "runtime only");
set_loader_data(loader_data);
- set_reads(restore_growable_array(archived_reads()));
JFR_ONLY(INIT_ID(this);)
}
@@ -581,38 +484,28 @@ static int compare_module_by_name(ModuleEntry* a, ModuleEntry* b) {
return a->name()->fast_compare(b->name());
}
-void ModuleEntryTable::iterate_symbols(MetaspaceClosure* closure) {
- auto syms = [&] (const SymbolHandle& key, ModuleEntry*& m) {
- m->iterate_symbols(closure);
- };
- _table.iterate_all(syms);
-}
-
-Array<ModuleEntry*>* ModuleEntryTable::allocate_archived_entries() {
- Array<ModuleEntry*>* archived_modules = ArchiveBuilder::new_rw_array<ModuleEntry*>(_table.number_of_entries());
+Array<ModuleEntry*>* ModuleEntryTable::build_aot_table(ClassLoaderData* loader_data, TRAPS) {
+ Array<ModuleEntry*>* aot_table =
+ MetadataFactory::new_array<ModuleEntry*>(loader_data, _table.number_of_entries(), nullptr, CHECK_NULL);
int n = 0;
auto grab = [&] (const SymbolHandle& key, ModuleEntry*& m) {
- archived_modules->at_put(n++, m);
+ m->pack_reads();
+ aot_table->at_put(n++, m);
+ if (log_is_enabled(Info, aot, module)) {
+ ResourceMark rm;
+ LogStream ls(Log(aot, module)::info());
+ ls.print("Stored in archive: ");
+ m->print(&ls);
+ }
};
_table.iterate_all(grab);
if (n > 1) {
// Always allocate in the same order to produce deterministic archive.
- QuickSort::sort(archived_modules->data(), n, compare_module_by_name);
+ QuickSort::sort(aot_table->data(), n, compare_module_by_name);
}
- for (int i = 0; i < n; i++) {
- archived_modules->at_put(i, archived_modules->at(i)->allocate_archived_entry());
- ArchivePtrMarker::mark_pointer((address*)archived_modules->adr_at(i));
- }
- return archived_modules;
-}
-void ModuleEntryTable::init_archived_entries(Array<ModuleEntry*>* archived_modules) {
- assert(CDSConfig::is_dumping_full_module_graph(), "sanity");
- for (int i = 0; i < archived_modules->length(); i++) {
- ModuleEntry* archived_entry = archived_modules->at(i);
- archived_entry->init_as_archived_entry();
- }
+ return aot_table;
}
void ModuleEntryTable::load_archived_entries(ClassLoaderData* loader_data,
diff --git a/src/hotspot/share/classfile/moduleEntry.hpp b/src/hotspot/share/classfile/moduleEntry.hpp
index 2e1852c5369..1a0251a2c2a 100644
--- a/src/hotspot/share/classfile/moduleEntry.hpp
+++ b/src/hotspot/share/classfile/moduleEntry.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,9 @@
#ifndef SHARE_CLASSFILE_MODULEENTRY_HPP
#define SHARE_CLASSFILE_MODULEENTRY_HPP
+#include "cds/aotGrowableArray.hpp"
#include "jni.h"
+#include "memory/metaspaceClosureType.hpp"
#include "oops/oopHandle.hpp"
#include "oops/symbol.hpp"
#include "oops/symbolHandle.hpp"
@@ -68,11 +70,8 @@ private:
// for shared classes from this module
Symbol* _name; // name of this module
ClassLoaderData* _loader_data;
+ AOTGrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
- union {
- GrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
- Array<ModuleEntry*>* _archived_reads; // List of readable modules stored in the CDS archive
- };
Symbol* _version; // module version number
Symbol* _location; // module location
CDS_ONLY(int _shared_path_index;) // >=0 if classes in this module are in CDS archive
@@ -81,7 +80,6 @@ private:
bool _must_walk_reads; // walk module's reads list at GC safepoints to purge out dead modules
bool _is_open; // whether the packages in the module are all unqualifiedly exported
bool _is_patched; // whether the module is patched via --patch-module
- DEBUG_ONLY(bool _reads_is_archived);
CDS_JAVA_HEAP_ONLY(int _archived_module_index;)
JFR_ONLY(DEFINE_TRACE_ID_FIELD;)
@@ -120,22 +118,18 @@ public:
bool can_read(ModuleEntry* m) const;
bool has_reads_list() const;
- GrowableArray<ModuleEntry*>* reads() const {
- assert(!_reads_is_archived, "sanity");
+ AOTGrowableArray<ModuleEntry*>* reads() const {
return _reads;
}
- void set_reads(GrowableArray<ModuleEntry*>* r) {
+ void set_reads(AOTGrowableArray<ModuleEntry*>* r) {
_reads = r;
- DEBUG_ONLY(_reads_is_archived = false);
}
- Array<ModuleEntry*>* archived_reads() const {
- assert(_reads_is_archived, "sanity");
- return _archived_reads;
- }
- void set_archived_reads(Array<ModuleEntry*>* r) {
- _archived_reads = r;
- DEBUG_ONLY(_reads_is_archived = true);
+ void pack_reads() {
+ if (_reads != nullptr) {
+ _reads->shrink_to_fit();
+ }
}
+
void add_read(ModuleEntry* m);
void set_read_walk_required(ClassLoaderData* m_loader_data);
@@ -189,6 +183,13 @@ public:
const char* name_as_C_string() const {
return is_named() ? name()->as_C_string() : UNNAMED_MODULE;
}
+
+ // methods required by MetaspaceClosure
+ void metaspace_pointers_do(MetaspaceClosure* it);
+ int size_in_heapwords() const { return (int)heap_word_size(sizeof(ModuleEntry)); }
+ MetaspaceClosureType type() const { return MetaspaceClosureType::ModuleEntryType; }
+ static bool is_read_only_by_default() { return false; }
+
void print(outputStream* st = tty) const;
void verify();
@@ -198,18 +199,11 @@ public:
#if INCLUDE_CDS_JAVA_HEAP
bool should_be_archived() const;
- void iterate_symbols(MetaspaceClosure* closure);
- ModuleEntry* allocate_archived_entry() const;
- void init_as_archived_entry();
- static ModuleEntry* get_archived_entry(ModuleEntry* orig_entry);
- bool has_been_archived();
- static Array<ModuleEntry*>* write_growable_array(GrowableArray<ModuleEntry*>* array);
- static GrowableArray<ModuleEntry*>* restore_growable_array(Array<ModuleEntry*>* archived_array);
+ void remove_unshareable_info();
void load_from_archive(ClassLoaderData* loader_data);
void preload_archived_oops();
void restore_archived_oops(ClassLoaderData* loader_data);
void clear_archived_oops();
- static void verify_archived_module_entries() PRODUCT_RETURN;
#endif
};
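The new metaspace_pointers_do() / size_in_heapwords() / type() trio is the contract that lets the archive builder discover, size, and classify an object while walking the graph. A hedged sketch of the visitor shape involved (hypothetical names; the real MetaspaceClosure interface is richer):

```cpp
// A closure visits every pointer field so the builder can follow and relocate it.
struct PointerClosure {
  virtual void push(void** field) = 0;
  virtual ~PointerClosure() = default;
};

struct EntryLike {
  void* name; void* reads; void* version; void* location;

  // Each archivable type enumerates its outgoing pointers in one place.
  void pointers_do(PointerClosure* it) {
    it->push(&name);
    it->push(&reads);
    it->push(&version);
    it->push(&location);
  }
};
```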
@@ -275,9 +269,7 @@ public:
void verify();
#if INCLUDE_CDS_JAVA_HEAP
- void iterate_symbols(MetaspaceClosure* closure);
- Array<ModuleEntry*>* allocate_archived_entries();
- void init_archived_entries(Array<ModuleEntry*>* archived_modules);
+ Array<ModuleEntry*>* build_aot_table(ClassLoaderData* loader_data, TRAPS);
void load_archived_entries(ClassLoaderData* loader_data,
Array<ModuleEntry*>* archived_modules);
void restore_archived_oops(ClassLoaderData* loader_data,
diff --git a/src/hotspot/share/classfile/modules.cpp b/src/hotspot/share/classfile/modules.cpp
index baf2acfb78c..51d09d9c47f 100644
--- a/src/hotspot/share/classfile/modules.cpp
+++ b/src/hotspot/share/classfile/modules.cpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -505,13 +505,10 @@ void Modules::check_archived_module_oop(oop orig_module_obj) {
ClassLoaderData* loader_data = orig_module_ent->loader_data();
assert(loader_data->is_builtin_class_loader_data(), "must be");
- if (orig_module_ent->name() != nullptr) {
- // For each named module, we archive both the java.lang.Module oop and the ModuleEntry.
- assert(orig_module_ent->has_been_archived(), "sanity");
- } else {
+ precond(ArchiveBuilder::current()->has_been_archived(orig_module_ent));
+ if (orig_module_ent->name() == nullptr) {
// We always archive unnamed module oop for boot, platform, and system loaders.
precond(orig_module_ent->should_be_archived());
- precond(orig_module_ent->has_been_archived());
if (loader_data->is_boot_class_loader_data()) {
assert(!_seen_boot_unnamed_module, "only once");
@@ -529,10 +526,6 @@ void Modules::check_archived_module_oop(oop orig_module_obj) {
}
}
-void Modules::verify_archived_modules() {
- ModuleEntry::verify_archived_module_entries();
-}
-
class Modules::ArchivedProperty {
const char* _prop;
const bool _numbered;
diff --git a/src/hotspot/share/classfile/modules.hpp b/src/hotspot/share/classfile/modules.hpp
index 27a22c1017a..75857c8960c 100644
--- a/src/hotspot/share/classfile/modules.hpp
+++ b/src/hotspot/share/classfile/modules.hpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,6 @@ public:
TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void init_archived_modules(JavaThread* current, Handle h_platform_loader, Handle h_system_loader)
NOT_CDS_JAVA_HEAP_RETURN;
- static void verify_archived_modules() NOT_CDS_JAVA_HEAP_RETURN;
static void dump_archived_module_info() NOT_CDS_JAVA_HEAP_RETURN;
static void serialize_archived_module_info(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
diff --git a/src/hotspot/share/classfile/packageEntry.cpp b/src/hotspot/share/classfile/packageEntry.cpp
index ea2e6cd1def..3e61f2e3a3e 100644
--- a/src/hotspot/share/classfile/packageEntry.cpp
+++ b/src/hotspot/share/classfile/packageEntry.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,8 @@
*
*/
+#include "cds/aotGrowableArray.inline.hpp"
+#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
@@ -31,13 +33,13 @@
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
+#include "memory/metadataFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/array.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/events.hpp"
-#include "utilities/growableArray.hpp"
#include "utilities/hashTable.hpp"
#include "utilities/ostream.hpp"
#include "utilities/quickSort.hpp"
@@ -51,7 +53,7 @@ PackageEntry::PackageEntry(Symbol* name, ModuleEntry* module) :
_qualified_exports(nullptr),
_defined_by_cds_in_class_path(0)
{
- // name can't be null
+ // name can't be null -- a class in the default package has no PackageEntry (its package is nullptr).
_name->increment_refcount();
JFR_ONLY(INIT_ID(this);)
@@ -81,7 +83,7 @@ void PackageEntry::add_qexport(ModuleEntry* m) {
if (!has_qual_exports_list()) {
// Lazily create a package's qualified exports list.
// Initial size is small, do not anticipate export lists to be large.
- _qualified_exports = new (mtModule) GrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, mtModule);
+ _qualified_exports = new (mtModule) AOTGrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, mtModule);
}
// Determine, based on this newly established export to module m,
@@ -183,12 +185,24 @@ void PackageEntry::purge_qualified_exports() {
}
void PackageEntry::delete_qualified_exports() {
- if (_qualified_exports != nullptr) {
+ if (_qualified_exports != nullptr && !AOTMetaspace::in_aot_cache(_qualified_exports)) {
delete _qualified_exports;
}
_qualified_exports = nullptr;
}
+void PackageEntry::pack_qualified_exports() {
+ if (_qualified_exports != nullptr) {
+ _qualified_exports->shrink_to_fit();
+ }
+}
+
+void PackageEntry::metaspace_pointers_do(MetaspaceClosure* it) {
+ it->push(&_name);
+ it->push(&_module);
+ it->push(&_qualified_exports);
+}
+
PackageEntryTable::PackageEntryTable() { }
PackageEntryTable::~PackageEntryTable() {
@@ -212,66 +226,19 @@ PackageEntryTable::~PackageEntryTable() {
}
#if INCLUDE_CDS_JAVA_HEAP
-typedef HashTable<
- const PackageEntry*,
- PackageEntry*,
- 557, // prime number
- AnyObj::C_HEAP> ArchivedPackageEntries;
-static ArchivedPackageEntries* _archived_packages_entries = nullptr;
-
bool PackageEntry::should_be_archived() const {
return module()->should_be_archived();
}
-PackageEntry* PackageEntry::allocate_archived_entry() const {
- precond(should_be_archived());
- PackageEntry* archived_entry = (PackageEntry*)ArchiveBuilder::rw_region_alloc(sizeof(PackageEntry));
- memcpy((void*)archived_entry, (void*)this, sizeof(PackageEntry));
-
- if (_archived_packages_entries == nullptr) {
- _archived_packages_entries = new (mtClass)ArchivedPackageEntries();
+void PackageEntry::remove_unshareable_info() {
+ if (_qualified_exports != nullptr) {
+ _qualified_exports->set_in_aot_cache();
}
- assert(_archived_packages_entries->get(this) == nullptr, "Each PackageEntry must not be shared across PackageEntryTables");
- _archived_packages_entries->put(this, archived_entry);
-
- return archived_entry;
-}
-
-PackageEntry* PackageEntry::get_archived_entry(PackageEntry* orig_entry) {
- PackageEntry** ptr = _archived_packages_entries->get(orig_entry);
- if (ptr != nullptr) {
- return *ptr;
- } else {
- return nullptr;
- }
-}
-
-void PackageEntry::iterate_symbols(MetaspaceClosure* closure) {
- closure->push(&_name);
-}
-
-void PackageEntry::init_as_archived_entry() {
- Array* archived_qualified_exports = ModuleEntry::write_growable_array(_qualified_exports);
-
- _name = ArchiveBuilder::get_buffered_symbol(_name);
- _module = ModuleEntry::get_archived_entry(_module);
- _qualified_exports = (GrowableArray<ModuleEntry*>*)archived_qualified_exports;
_defined_by_cds_in_class_path = 0;
JFR_ONLY(set_trace_id(0);) // re-init at runtime
-
- ArchivePtrMarker::mark_pointer((address*)&_name);
- ArchivePtrMarker::mark_pointer((address*)&_module);
- ArchivePtrMarker::mark_pointer((address*)&_qualified_exports);
-
- LogStreamHandle(Info, aot, package) st;
- if (st.is_enabled()) {
- st.print("archived ");
- print(&st);
- }
}
void PackageEntry::load_from_archive() {
- _qualified_exports = ModuleEntry::restore_growable_array((Array<ModuleEntry*>*)_qualified_exports);
JFR_ONLY(INIT_ID(this);)
}
@@ -280,14 +247,7 @@ static int compare_package_by_name(PackageEntry* a, PackageEntry* b) {
return a->name()->fast_compare(b->name());
}
-void PackageEntryTable::iterate_symbols(MetaspaceClosure* closure) {
- auto syms = [&] (const SymbolHandle& key, PackageEntry*& p) {
- p->iterate_symbols(closure);
- };
- _table.iterate_all(syms);
-}
-
-Array<PackageEntry*>* PackageEntryTable::allocate_archived_entries() {
+Array<PackageEntry*>* PackageEntryTable::build_aot_table(ClassLoaderData* loader_data, TRAPS) {
// First count the packages in named modules
int n = 0;
auto count = [&] (const SymbolHandle& key, PackageEntry*& p) {
@@ -297,12 +257,19 @@ Array* PackageEntryTable::allocate_archived_entries() {
};
_table.iterate_all(count);
- Array<PackageEntry*>* archived_packages = ArchiveBuilder::new_rw_array<PackageEntry*>(n);
+ Array<PackageEntry*>* archived_packages = MetadataFactory::new_array<PackageEntry*>(loader_data, n, nullptr, CHECK_NULL);
// reset n
n = 0;
auto grab = [&] (const SymbolHandle& key, PackageEntry*& p) {
if (p->should_be_archived()) {
+ p->pack_qualified_exports();
archived_packages->at_put(n++, p);
+
+ LogStreamHandle(Info, aot, package) st;
+ if (st.is_enabled()) {
+ st.print("archived ");
+ p->print(&st);
+ }
}
};
_table.iterate_all(grab);
@@ -311,18 +278,8 @@ Array* PackageEntryTable::allocate_archived_entries() {
// Always allocate in the same order to produce deterministic archive.
QuickSort::sort(archived_packages->data(), n, compare_package_by_name);
}
- for (int i = 0; i < n; i++) {
- archived_packages->at_put(i, archived_packages->at(i)->allocate_archived_entry());
- ArchivePtrMarker::mark_pointer((address*)archived_packages->adr_at(i));
- }
- return archived_packages;
-}
-void PackageEntryTable::init_archived_entries(Array<PackageEntry*>* archived_packages) {
- for (int i = 0; i < archived_packages->length(); i++) {
- PackageEntry* archived_entry = archived_packages->at(i);
- archived_entry->init_as_archived_entry();
- }
+ return archived_packages;
}
void PackageEntryTable::load_archived_entries(Array<PackageEntry*>* archived_packages) {
diff --git a/src/hotspot/share/classfile/packageEntry.hpp b/src/hotspot/share/classfile/packageEntry.hpp
index 6abf89dc60f..7b174a92287 100644
--- a/src/hotspot/share/classfile/packageEntry.hpp
+++ b/src/hotspot/share/classfile/packageEntry.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,9 @@
#ifndef SHARE_CLASSFILE_PACKAGEENTRY_HPP
#define SHARE_CLASSFILE_PACKAGEENTRY_HPP
+#include "cds/aotGrowableArray.hpp"
#include "classfile/moduleEntry.hpp"
+#include "memory/metaspaceClosureType.hpp"
#include "oops/symbol.hpp"
#include "oops/symbolHandle.hpp"
#include "runtime/atomicAccess.hpp"
@@ -114,7 +116,7 @@ private:
bool _must_walk_exports;
// Contains list of modules this package is qualifiedly exported to. Access
// to this list is protected by the Module_lock.
- GrowableArray<ModuleEntry*>* _qualified_exports;
+ AOTGrowableArray<ModuleEntry*>* _qualified_exports;
JFR_ONLY(DEFINE_TRACE_ID_FIELD;)
// Initial size of a package entry's list of qualified exports.
@@ -205,14 +207,24 @@ public:
void purge_qualified_exports();
void delete_qualified_exports();
+ void pack_qualified_exports(); // used by AOT
+
+ // methods required by MetaspaceClosure
+ void metaspace_pointers_do(MetaspaceClosure* it);
+ int size_in_heapwords() const { return (int)heap_word_size(sizeof(PackageEntry)); }
+ MetaspaceClosureType type() const { return MetaspaceClosureType::PackageEntryType; }
+ static bool is_read_only_by_default() { return false; }
+
void print(outputStream* st = tty);
+ char* name_as_C_string() const {
+ assert(_name != nullptr, "name can't be null");
+ return name()->as_C_string();
+ }
+
#if INCLUDE_CDS_JAVA_HEAP
bool should_be_archived() const;
- void iterate_symbols(MetaspaceClosure* closure);
- PackageEntry* allocate_archived_entry() const;
- void init_as_archived_entry();
- static PackageEntry* get_archived_entry(PackageEntry* orig_entry);
+ void remove_unshareable_info();
void load_from_archive();
#endif
@@ -271,9 +283,7 @@ public:
void print(outputStream* st = tty);
#if INCLUDE_CDS_JAVA_HEAP
- void iterate_symbols(MetaspaceClosure* closure);
- Array<PackageEntry*>* allocate_archived_entries();
- void init_archived_entries(Array<PackageEntry*>* archived_packages);
+ Array<PackageEntry*>* build_aot_table(ClassLoaderData* loader_data, TRAPS);
void load_archived_entries(Array<PackageEntry*>* archived_packages);
#endif
};
diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp
index c775014cfac..20dfad0d980 100644
--- a/src/hotspot/share/classfile/stringTable.cpp
+++ b/src/hotspot/share/classfile/stringTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -614,6 +614,10 @@ struct StringTableDeleteCheck : StackObj {
};
void StringTable::clean_dead_entries(JavaThread* jt) {
+ // BulkDeleteTask::prepare() may take ConcurrentHashTableResize_lock (nosafepoint-2).
+ // When NativeHeapTrimmer is enabled, SuspendMark may take NativeHeapTrimmer::_lock (nosafepoint).
+ // Take the SuspendMark first so both locks are always acquired in the same order, avoiding deadlock.
+ NativeHeapTrimmer::SuspendMark sm("stringtable");
StringTableHash::BulkDeleteTask bdt(_local_table);
if (!bdt.prepare(jt)) {
return;
@@ -621,7 +625,6 @@ void StringTable::clean_dead_entries(JavaThread* jt) {
StringTableDeleteCheck stdc;
StringTableDoDelete stdd;
- NativeHeapTrimmer::SuspendMark sm("stringtable");
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, stringtable, perf));
while(bdt.do_task(jt, stdc, stdd)) {
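Two locks acquired in opposite orders on two threads can deadlock; hoisting the SuspendMark above BulkDeleteTask::prepare() makes every path take the trimmer lock strictly before the resize lock. A minimal standalone illustration with std::mutex stand-ins, not the HotSpot lock types:

```cpp
#include <mutex>

std::mutex trimmer_lock;  // stand-in for NativeHeapTrimmer::_lock
std::mutex resize_lock;   // stand-in for ConcurrentHashTableResize_lock

void clean_dead_entries_fixed() {
  // Outer (trimmer) lock first on every path: one global order, no cycle.
  std::lock_guard<std::mutex> outer(trimmer_lock);
  std::lock_guard<std::mutex> inner(resize_lock);
  // ... bulk delete work happens here ...
}
// The old code effectively acquired resize_lock (inside prepare()) before
// trimmer_lock; any path taking them in the opposite order could then deadlock.
```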
diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp
index ec639a2b4d3..c49aa10fa0d 100644
--- a/src/hotspot/share/classfile/symbolTable.cpp
+++ b/src/hotspot/share/classfile/symbolTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -763,6 +763,10 @@ struct SymbolTableDeleteCheck : StackObj {
};
void SymbolTable::clean_dead_entries(JavaThread* jt) {
+ // BulkDeleteTask::prepare() may take ConcurrentHashTableResize_lock (nosafepoint-2).
+ // When NativeHeapTrimmer is enabled, SuspendMark may take NativeHeapTrimmer::_lock (nosafepoint).
+ // Take the SuspendMark first so both locks are always acquired in the same order, avoiding deadlock.
+ NativeHeapTrimmer::SuspendMark sm("symboltable");
SymbolTableHash::BulkDeleteTask bdt(_local_table);
if (!bdt.prepare(jt)) {
return;
@@ -770,7 +774,6 @@ void SymbolTable::clean_dead_entries(JavaThread* jt) {
SymbolTableDeleteCheck stdc;
SymbolTableDoDelete stdd;
- NativeHeapTrimmer::SuspendMark sm("symboltable");
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, symboltable, perf));
while (bdt.do_task(jt, stdc, stdd)) {
diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp
index 8388b98faae..79646f24d0e 100644
--- a/src/hotspot/share/classfile/vmSymbols.hpp
+++ b/src/hotspot/share/classfile/vmSymbols.hpp
@@ -245,10 +245,6 @@ class SerializeClosure;
\
/* Concurrency support */ \
template(java_util_concurrent_locks_AbstractOwnableSynchronizer, "java/util/concurrent/locks/AbstractOwnableSynchronizer") \
- template(java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicIntegerFieldUpdater$AtomicIntegerFieldUpdaterImpl") \
- template(java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$CASUpdater") \
- template(java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$LockedUpdater") \
- template(java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl") \
template(jdk_internal_vm_annotation_Contended_signature, "Ljdk/internal/vm/annotation/Contended;") \
template(jdk_internal_vm_annotation_ReservedStackAccess_signature, "Ljdk/internal/vm/annotation/ReservedStackAccess;") \
template(jdk_internal_ValueBased_signature, "Ljdk/internal/ValueBased;") \
@@ -302,6 +298,7 @@ class SerializeClosure;
template(jdk_internal_misc_Scoped_signature, "Ljdk/internal/misc/ScopedMemoryAccess$Scoped;") \
template(jdk_internal_vm_annotation_IntrinsicCandidate_signature, "Ljdk/internal/vm/annotation/IntrinsicCandidate;") \
template(jdk_internal_vm_annotation_Stable_signature, "Ljdk/internal/vm/annotation/Stable;") \
+ template(jdk_internal_vm_annotation_TrustFinalFields_signature, "Ljdk/internal/vm/annotation/TrustFinalFields;") \
\
template(jdk_internal_vm_annotation_ChangesCurrentThread_signature, "Ljdk/internal/vm/annotation/ChangesCurrentThread;") \
template(jdk_internal_vm_annotation_JvmtiHideEvents_signature, "Ljdk/internal/vm/annotation/JvmtiHideEvents;") \
diff --git a/src/hotspot/share/gc/g1/g1Arguments.cpp b/src/hotspot/share/gc/g1/g1Arguments.cpp
index 58e76cdd43a..ffb06a7d822 100644
--- a/src/hotspot/share/gc/g1/g1Arguments.cpp
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -209,6 +209,17 @@ void G1Arguments::initialize() {
FLAG_SET_DEFAULT(GCTimeRatio, 24);
}
+ // Do not interfere with GC-pressure-driven heap resizing unless the user
+ // explicitly requests otherwise. G1 heap sizing should be free to grow or
+ // shrink the heap based on GC pressure, rather than being forced to satisfy
+ // the MinHeapFreeRatio or MaxHeapFreeRatio defaults that the user did not set.
+ if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
+ FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
+ }
+ if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
+ FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
+ }
+
// Below, we might need to calculate the pause time interval based on
// the pause target. When we do so we are going to give G1 maximum
// flexibility and allow it to do pauses when it needs to. So, we'll
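FLAG_IS_DEFAULT / FLAG_SET_DEFAULT implement the usual ergonomics rule: adjust a default only if the user has not explicitly set the flag, so command-line choices always win. A hedged sketch of that rule with plain stand-ins for the flag macros:

```cpp
// Each managed flag remembers whether the user set it on the command line.
struct Flag { double value; bool set_by_user; };

void apply_g1_ergonomics(Flag& min_heap_free, Flag& max_heap_free) {
  if (!min_heap_free.set_by_user) min_heap_free.value = 0;   // may shrink freely
  if (!max_heap_free.set_by_user) max_heap_free.value = 100; // may grow freely
}
```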
diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
index ee2c1450d9b..794e5db0634 100644
--- a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
@@ -70,7 +70,11 @@ inline void G1BarrierSet::write_ref_field_pre(T* field) {
template <DecoratorSet decorators, typename T>
inline void G1BarrierSet::write_ref_field_post(T* field) {
- volatile CardValue* byte = _card_table->byte_for(field);
+ // Make sure that the card table reference is read only once. Otherwise the compiler
+ // might reload that value in the two accesses below, which could cause writes to
+ // the wrong card table.
+ CardTable* card_table = AtomicAccess::load(&_card_table);
+ CardValue* byte = card_table->byte_for(field);
if (*byte == G1CardTable::clean_card_val()) {
*byte = G1CardTable::dirty_card_val();
}
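To see why the single load matters: nothing told the compiler the _card_table field was stable, so it could legally re-read it between the two accesses and mix an old and a new table. A hedged standalone sketch of the pattern (simplified; the 512-byte card size matches G1's default):

```cpp
#include <atomic>
#include <cstdint>

struct CardTable { unsigned char* base; };
std::atomic<CardTable*> current_table;  // may be swapped concurrently

void mark_card(const void* field) {
  // Read the table pointer exactly once; both uses below see one snapshot.
  CardTable* t = current_table.load(std::memory_order_relaxed);
  unsigned char* byte = t->base + (reinterpret_cast<uintptr_t>(field) >> 9);
  if (*byte == 0) {  // clean?
    *byte = 1;       // dirty it, in the same table the check used
  }
}
```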
diff --git a/src/hotspot/share/gc/g1/g1BatchedTask.cpp b/src/hotspot/share/gc/g1/g1BatchedTask.cpp
index 57558301541..1f082153476 100644
--- a/src/hotspot/share/gc/g1/g1BatchedTask.cpp
+++ b/src/hotspot/share/gc/g1/g1BatchedTask.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#include "gc/g1/g1BatchedTask.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCParPhaseTimesTracker.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/growableArray.hpp"
void G1AbstractSubTask::record_work_item(uint worker_id, uint index, size_t count) {
@@ -40,7 +39,7 @@ const char* G1AbstractSubTask::name() const {
}
bool G1BatchedTask::try_claim_serial_task(int& task) {
- task = AtomicAccess::fetch_then_add(&_num_serial_tasks_done, 1);
+ task = _num_serial_tasks_done.fetch_then_add(1);
return task < _serial_tasks.length();
}
@@ -96,8 +95,8 @@ void G1BatchedTask::work(uint worker_id) {
}
G1BatchedTask::~G1BatchedTask() {
- assert(AtomicAccess::load(&_num_serial_tasks_done) >= _serial_tasks.length(),
- "Only %d tasks of %d claimed", AtomicAccess::load(&_num_serial_tasks_done), _serial_tasks.length());
+ assert(_num_serial_tasks_done.load_relaxed() >= _serial_tasks.length(),
+ "Only %d tasks of %d claimed", _num_serial_tasks_done.load_relaxed(), _serial_tasks.length());
for (G1AbstractSubTask* task : _parallel_tasks) {
delete task;
diff --git a/src/hotspot/share/gc/g1/g1BatchedTask.hpp b/src/hotspot/share/gc/g1/g1BatchedTask.hpp
index 020fda634e4..a6d2ef923c0 100644
--- a/src/hotspot/share/gc/g1/g1BatchedTask.hpp
+++ b/src/hotspot/share/gc/g1/g1BatchedTask.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
template <typename E, MemTag MT>
class GrowableArrayCHeap;
@@ -120,7 +121,7 @@ public:
// 5) ~T()
//
class G1BatchedTask : public WorkerTask {
- volatile int _num_serial_tasks_done;
+ Atomic<int> _num_serial_tasks_done;
G1GCPhaseTimes* _phase_times;
bool try_claim_serial_task(int& task);
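try_claim_serial_task() is a ticket-claim loop on an atomic counter: each fetch-and-add returns a unique index, and an index at or past the task count means nothing is left. A standalone sketch using std::atomic in place of HotSpot's Atomic<int> wrapper:

```cpp
#include <atomic>
#include <cstdio>

std::atomic<int> next_task{0};
const int num_serial_tasks = 3;

bool try_claim_serial_task(int& task) {
  task = next_task.fetch_add(1, std::memory_order_relaxed); // unique ticket
  return task < num_serial_tasks;  // past the end: nothing left to claim
}

int main() {
  int t;
  while (try_claim_serial_task(t)) {
    std::printf("claimed serial task %d\n", t);
  }
  return 0;
}
```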
diff --git a/src/hotspot/share/gc/g1/g1CardSet.cpp b/src/hotspot/share/gc/g1/g1CardSet.cpp
index 80cf5fea76a..60ad63e812c 100644
--- a/src/hotspot/share/gc/g1/g1CardSet.cpp
+++ b/src/hotspot/share/gc/g1/g1CardSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "memory/allocation.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "utilities/bitMap.inline.hpp"
@@ -192,32 +191,32 @@ const char* G1CardSetConfiguration::mem_object_type_name_str(uint index) {
void G1CardSetCoarsenStats::reset() {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
- _coarsen_from[i] = 0;
- _coarsen_collision[i] = 0;
+ _coarsen_from[i].store_relaxed(0);
+ _coarsen_collision[i].store_relaxed(0);
}
}
void G1CardSetCoarsenStats::set(G1CardSetCoarsenStats& other) {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
- _coarsen_from[i] = other._coarsen_from[i];
- _coarsen_collision[i] = other._coarsen_collision[i];
+ _coarsen_from[i].store_relaxed(other._coarsen_from[i].load_relaxed());
+ _coarsen_collision[i].store_relaxed(other._coarsen_collision[i].load_relaxed());
}
}
void G1CardSetCoarsenStats::subtract_from(G1CardSetCoarsenStats& other) {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
- _coarsen_from[i] = other._coarsen_from[i] - _coarsen_from[i];
- _coarsen_collision[i] = other._coarsen_collision[i] - _coarsen_collision[i];
+ _coarsen_from[i].store_relaxed(other._coarsen_from[i].load_relaxed() - _coarsen_from[i].load_relaxed());
+ _coarsen_collision[i].store_relaxed(other._coarsen_collision[i].load_relaxed() - _coarsen_collision[i].load_relaxed());
}
}
void G1CardSetCoarsenStats::record_coarsening(uint tag, bool collision) {
assert(tag < ARRAY_SIZE(_coarsen_from), "tag %u out of bounds", tag);
- AtomicAccess::inc(&_coarsen_from[tag], memory_order_relaxed);
+ _coarsen_from[tag].add_then_fetch(1u, memory_order_relaxed);
if (collision) {
- AtomicAccess::inc(&_coarsen_collision[tag], memory_order_relaxed);
+ _coarsen_collision[tag].add_then_fetch(1u, memory_order_relaxed);
}
}
@@ -228,13 +227,13 @@ void G1CardSetCoarsenStats::print_on(outputStream* out) {
"Inline->AoC %zu (%zu) "
"AoC->BitMap %zu (%zu) "
"BitMap->Full %zu (%zu) ",
- _coarsen_from[0], _coarsen_collision[0],
- _coarsen_from[1], _coarsen_collision[1],
+ _coarsen_from[0].load_relaxed(), _coarsen_collision[0].load_relaxed(),
+ _coarsen_from[1].load_relaxed(), _coarsen_collision[1].load_relaxed(),
// There is no BitMap at the first level so we can't coarsen from it.
- _coarsen_from[3], _coarsen_collision[3],
- _coarsen_from[4], _coarsen_collision[4],
- _coarsen_from[5], _coarsen_collision[5],
- _coarsen_from[6], _coarsen_collision[6]
+ _coarsen_from[3].load_relaxed(), _coarsen_collision[3].load_relaxed(),
+ _coarsen_from[4].load_relaxed(), _coarsen_collision[4].load_relaxed(),
+ _coarsen_from[5].load_relaxed(), _coarsen_collision[5].load_relaxed(),
+ _coarsen_from[6].load_relaxed(), _coarsen_collision[6].load_relaxed()
);
}
@@ -248,7 +247,7 @@ class G1CardSetHashTable : public CHeapObj<mtGCCardSet> {
// the per region cardsets.
const static uint GroupBucketClaimSize = 4;
// Did we insert at least one card in the table?
- bool volatile _inserted_card;
+ Atomic<bool> _inserted_card;
G1CardSetMemoryManager* _mm;
CardSetHash _table;
@@ -311,10 +310,10 @@ public:
G1CardSetHashTableValue value(region_idx, G1CardSetInlinePtr());
bool inserted = _table.insert_get(Thread::current(), lookup, value, found, should_grow);
- if (!_inserted_card && inserted) {
+ if (!_inserted_card.load_relaxed() && inserted) {
// It does not matter to us who is setting the flag so a regular atomic store
// is sufficient.
- AtomicAccess::store(&_inserted_card, true);
+ _inserted_card.store_relaxed(true);
}
return found.value();
@@ -343,9 +342,9 @@ public:
}
void reset() {
- if (AtomicAccess::load(&_inserted_card)) {
+ if (_inserted_card.load_relaxed()) {
_table.unsafe_reset(InitialLogTableSize);
- AtomicAccess::store(&_inserted_card, false);
+ _inserted_card.store_relaxed(false);
}
}
@@ -455,14 +454,14 @@ void G1CardSet::free_mem_object(ContainerPtr container) {
_mm->free(container_type_to_mem_object_type(type), value);
}
-G1CardSet::ContainerPtr G1CardSet::acquire_container(ContainerPtr volatile* container_addr) {
+G1CardSet::ContainerPtr G1CardSet::acquire_container(Atomic<ContainerPtr>* container_addr) {
// Update reference counts under RCU critical section to avoid a
// use-after-cleanup bug where we increment a reference count for
// an object whose memory has already been cleaned up and reused.
GlobalCounter::CriticalSection cs(Thread::current());
while (true) {
// Get ContainerPtr and increment refcount atomically wrt memory reuse.
- ContainerPtr container = AtomicAccess::load_acquire(container_addr);
+ ContainerPtr container = container_addr->load_acquire();
uint cs_type = container_type(container);
if (container == FullCardSet || cs_type == ContainerInlinePtr) {
return container;
@@ -503,15 +502,15 @@ class G1ReleaseCardsets : public StackObj {
G1CardSet* _card_set;
using ContainerPtr = G1CardSet::ContainerPtr;
- void coarsen_to_full(ContainerPtr* container_addr) {
+ void coarsen_to_full(Atomic<ContainerPtr>* container_addr) {
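+ // CAS loop: publish FullCardSet; the winning thread releases the container it displaced.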
while (true) {
- ContainerPtr cur_container = AtomicAccess::load_acquire(container_addr);
+ ContainerPtr cur_container = container_addr->load_acquire();
uint cs_type = G1CardSet::container_type(cur_container);
if (cur_container == G1CardSet::FullCardSet) {
return;
}
- ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, G1CardSet::FullCardSet);
+ ContainerPtr old_value = container_addr->compare_exchange(cur_container, G1CardSet::FullCardSet);
if (old_value == cur_container) {
_card_set->release_and_maybe_free_container(cur_container);
@@ -523,7 +522,7 @@ class G1ReleaseCardsets : public StackObj {
public:
explicit G1ReleaseCardsets(G1CardSet* card_set) : _card_set(card_set) { }
- void operator ()(ContainerPtr* container_addr) {
+ void operator ()(Atomic<ContainerPtr>* container_addr) {
coarsen_to_full(container_addr);
}
};
@@ -544,10 +543,10 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
ContainerPtr container;
uint bucket = _config->howl_bucket_index(card_in_region);
- ContainerPtr volatile* bucket_entry = howl->container_addr(bucket);
+ Atomic<ContainerPtr>* bucket_entry = howl->container_addr(bucket);
while (true) {
- if (AtomicAccess::load(&howl->_num_entries) >= _config->cards_in_howl_threshold()) {
+ if (howl->_num_entries.load_relaxed() >= _config->cards_in_howl_threshold()) {
return Overflow;
}
@@ -571,7 +570,7 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
}
if (increment_total && add_result == Added) {
- AtomicAccess::inc(&howl->_num_entries, memory_order_relaxed);
+ howl->_num_entries.add_then_fetch(1u, memory_order_relaxed);
}
if (to_transfer != nullptr) {
@@ -588,7 +587,7 @@ G1AddCardResult G1CardSet::add_to_bitmap(ContainerPtr container, uint card_in_re
return bitmap->add(card_offset, _config->cards_in_howl_bitmap_threshold(), _config->max_cards_in_howl_bitmap());
}
-G1AddCardResult G1CardSet::add_to_inline_ptr(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_in_region) {
+G1AddCardResult G1CardSet::add_to_inline_ptr(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_in_region) {
G1CardSetInlinePtr value(container_addr, container);
return value.add(card_in_region, _config->inline_ptr_bits_per_card(), _config->max_cards_in_inline_ptr());
}
@@ -610,7 +609,7 @@ G1CardSet::ContainerPtr G1CardSet::create_coarsened_array_of_cards(uint card_in_
return new_container;
}
-bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr,
+bool G1CardSet::coarsen_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr cur_container,
uint card_in_region,
bool within_howl) {
@@ -640,7 +639,7 @@ bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr,
ShouldNotReachHere();
}
- ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, new_container); // Memory order?
+ ContainerPtr old_value = container_addr->compare_exchange(cur_container, new_container); // Memory order?
if (old_value == cur_container) {
// Success. Indicate that the cards from the current card set must be transferred
// by this caller.
@@ -687,7 +686,7 @@ void G1CardSet::transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPt
assert(container_type(source_container) == ContainerHowl, "must be");
// Need to correct for that the Full remembered set occupies more cards than the
// AoCS before.
- AtomicAccess::add(&_num_occupied, _config->max_cards_in_region() - table_entry->_num_occupied, memory_order_relaxed);
+ _num_occupied.add_then_fetch(_config->max_cards_in_region() - table_entry->_num_occupied.load_relaxed(), memory_order_relaxed);
}
}
@@ -713,18 +712,18 @@ void G1CardSet::transfer_cards_in_howl(ContainerPtr parent_container,
diff -= 1;
G1CardSetHowl* howling_array = container_ptr<G1CardSetHowl>(parent_container);
- AtomicAccess::add(&howling_array->_num_entries, diff, memory_order_relaxed);
+ howling_array->_num_entries.add_then_fetch(diff, memory_order_relaxed);
G1CardSetHashTableValue* table_entry = get_container(card_region);
assert(table_entry != nullptr, "Table entry not found for transferred cards");
- AtomicAccess::add(&table_entry->_num_occupied, diff, memory_order_relaxed);
+ table_entry->_num_occupied.add_then_fetch(diff, memory_order_relaxed);
- AtomicAccess::add(&_num_occupied, diff, memory_order_relaxed);
+ _num_occupied.add_then_fetch(diff, memory_order_relaxed);
}
}
-G1AddCardResult G1CardSet::add_to_container(ContainerPtr volatile* container_addr,
+G1AddCardResult G1CardSet::add_to_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr container,
uint card_region,
uint card_in_region,
@@ -827,8 +826,8 @@ G1AddCardResult G1CardSet::add_card(uint card_region, uint card_in_region, bool
}
if (increment_total && add_result == Added) {
- AtomicAccess::inc(&table_entry->_num_occupied, memory_order_relaxed);
- AtomicAccess::inc(&_num_occupied, memory_order_relaxed);
+ table_entry->_num_occupied.add_then_fetch(1u, memory_order_relaxed);
+ _num_occupied.add_then_fetch(1u, memory_order_relaxed);
}
if (should_grow_table) {
_table->grow();
@@ -853,7 +852,7 @@ bool G1CardSet::contains_card(uint card_region, uint card_in_region) {
return false;
}
- ContainerPtr container = table_entry->_container;
+ ContainerPtr container = table_entry->_container.load_relaxed();
if (container == FullCardSet) {
// contains_card() is not a performance critical method so we do not hide that
// case in the switch below.
@@ -889,7 +888,7 @@ void G1CardSet::print_info(outputStream* st, uintptr_t card) {
return;
}
- ContainerPtr container = table_entry->_container;
+ ContainerPtr container = table_entry->_container.load_relaxed();
if (container == FullCardSet) {
st->print("FULL card set)");
return;
@@ -940,7 +939,7 @@ void G1CardSet::iterate_cards_during_transfer(ContainerPtr const container, Card
void G1CardSet::iterate_containers(ContainerPtrClosure* cl, bool at_safepoint) {
auto do_value =
[&] (G1CardSetHashTableValue* value) {
- cl->do_containerptr(value->_region_idx, value->_num_occupied, value->_container);
+ cl->do_containerptr(value->_region_idx, value->_num_occupied.load_relaxed(), value->_container.load_relaxed());
return true;
};
@@ -1001,11 +1000,11 @@ bool G1CardSet::occupancy_less_or_equal_to(size_t limit) const {
}
bool G1CardSet::is_empty() const {
- return _num_occupied == 0;
+ return _num_occupied.load_relaxed() == 0;
}
size_t G1CardSet::occupied() const {
- return _num_occupied;
+ return _num_occupied.load_relaxed();
}
size_t G1CardSet::num_containers() {
@@ -1024,10 +1023,6 @@ size_t G1CardSet::num_containers() {
return cl._count;
}
-G1CardSetCoarsenStats G1CardSet::coarsen_stats() {
- return _coarsen_stats;
-}
-
void G1CardSet::print_coarsen_stats(outputStream* out) {
_last_coarsen_stats.subtract_from(_coarsen_stats);
@@ -1055,7 +1050,7 @@ size_t G1CardSet::static_mem_size() {
void G1CardSet::clear() {
_table->reset();
- _num_occupied = 0;
+ _num_occupied.store_relaxed(0);
_mm->flush();
}
diff --git a/src/hotspot/share/gc/g1/g1CardSet.hpp b/src/hotspot/share/gc/g1/g1CardSet.hpp
index 9cefc4b1c22..64ddf0ca6a4 100644
--- a/src/hotspot/share/gc/g1/g1CardSet.hpp
+++ b/src/hotspot/share/gc/g1/g1CardSet.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/concurrentHashTable.hpp"
class G1CardSetAllocOptions;
@@ -154,8 +155,8 @@ public:
private:
// Indices are "from" indices.
- size_t _coarsen_from[NumCoarsenCategories];
- size_t _coarsen_collision[NumCoarsenCategories];
+ Atomic<size_t> _coarsen_from[NumCoarsenCategories];
+ Atomic<size_t> _coarsen_collision[NumCoarsenCategories];
public:
G1CardSetCoarsenStats() { reset(); }
@@ -271,11 +272,11 @@ private:
// Total number of cards in this card set. This is a best-effort value, i.e. there may
// be (slightly) more cards in the card set than this value in reality.
- size_t _num_occupied;
+ Atomic<size_t> _num_occupied;
ContainerPtr make_container_ptr(void* value, uintptr_t type);
- ContainerPtr acquire_container(ContainerPtr volatile* container_addr);
+ ContainerPtr acquire_container(Atomic<ContainerPtr>* container_addr);
// Returns true if the card set container should be released
bool release_container(ContainerPtr container);
// Release card set and free if needed.
@@ -288,7 +289,7 @@ private:
// coarsen_container does not transfer cards from cur_container
// to the new container. Transfer is achieved by transfer_cards.
// Returns true if this was the thread that coarsened the container (and added the card).
- bool coarsen_container(ContainerPtr volatile* container_addr,
+ bool coarsen_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr cur_container,
uint card_in_region, bool within_howl = false);
@@ -300,9 +301,9 @@ private:
void transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPtr source_container, uint card_region);
void transfer_cards_in_howl(ContainerPtr parent_container, ContainerPtr source_container, uint card_region);
- G1AddCardResult add_to_container(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_region, uint card, bool increment_total = true);
+ G1AddCardResult add_to_container(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_region, uint card, bool increment_total = true);
- G1AddCardResult add_to_inline_ptr(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_in_region);
+ G1AddCardResult add_to_inline_ptr(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_array(ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_bitmap(ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_howl(ContainerPtr parent_container, uint card_region, uint card_in_region, bool increment_total = true);
@@ -366,7 +367,6 @@ public:
size_t num_containers();
- static G1CardSetCoarsenStats coarsen_stats();
static void print_coarsen_stats(outputStream* out);
// Returns size of the actual remembered set containers in bytes.
@@ -412,8 +412,15 @@ public:
using ContainerPtr = G1CardSet::ContainerPtr;
const uint _region_idx;
- uint volatile _num_occupied;
- ContainerPtr volatile _container;
+ Atomic<uint> _num_occupied;
+ Atomic<ContainerPtr> _container;
+
+ // Copy constructor needed for use in ConcurrentHashTable.
+ G1CardSetHashTableValue(const G1CardSetHashTableValue& other) :
+ _region_idx(other._region_idx),
+ _num_occupied(other._num_occupied.load_relaxed()),
+ _container(other._container.load_relaxed())
+ { }
G1CardSetHashTableValue(uint region_idx, ContainerPtr container) : _region_idx(region_idx), _num_occupied(0), _container(container) { }
};
diff --git a/src/hotspot/share/gc/g1/g1CardSetContainers.hpp b/src/hotspot/share/gc/g1/g1CardSetContainers.hpp
index 72c7795be2e..78551479e06 100644
--- a/src/hotspot/share/gc/g1/g1CardSetContainers.hpp
+++ b/src/hotspot/share/gc/g1/g1CardSetContainers.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "gc/g1/g1CardSet.hpp"
#include "memory/allocation.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -67,7 +67,7 @@ class G1CardSetInlinePtr : public StackObj {
using ContainerPtr = G1CardSet::ContainerPtr;
- ContainerPtr volatile * _value_addr;
+ Atomic<ContainerPtr>* _value_addr;
ContainerPtr _value;
static const uint SizeFieldLen = 3;
@@ -103,7 +103,7 @@ public:
explicit G1CardSetInlinePtr(ContainerPtr value) :
G1CardSetInlinePtr(nullptr, value) {}
- G1CardSetInlinePtr(ContainerPtr volatile* value_addr, ContainerPtr value) : _value_addr(value_addr), _value(value) {
+ G1CardSetInlinePtr(Atomic<ContainerPtr>* value_addr, ContainerPtr value) : _value_addr(value_addr), _value(value) {
assert(G1CardSet::container_type(_value) == G1CardSet::ContainerInlinePtr, "Value " PTR_FORMAT " is not a valid G1CardSetInlinePtr.", p2i(_value));
}
@@ -145,13 +145,13 @@ public:
// All but inline pointers are of this kind. For those, card entries are stored
// directly in the ContainerPtr of the ConcurrentHashTable node.
class G1CardSetContainer {
- uintptr_t _ref_count;
+ Atomic<uintptr_t> _ref_count;
protected:
~G1CardSetContainer() = default;
public:
G1CardSetContainer() : _ref_count(3) { }
- uintptr_t refcount() const { return AtomicAccess::load_acquire(&_ref_count); }
+ uintptr_t refcount() const { return _ref_count.load_acquire(); }
bool try_increment_refcount();
@@ -172,7 +172,7 @@ public:
using ContainerPtr = G1CardSet::ContainerPtr;
private:
EntryCountType _size;
- EntryCountType volatile _num_entries;
+ Atomic<EntryCountType> _num_entries;
// VLA implementation.
EntryDataType _data[1];
@@ -180,10 +180,10 @@ private:
static const EntryCountType EntryMask = LockBitMask - 1;
class G1CardSetArrayLocker : public StackObj {
- EntryCountType volatile* _num_entries_addr;
+ Atomic<EntryCountType>* _num_entries_addr;
EntryCountType _local_num_entries;
public:
- G1CardSetArrayLocker(EntryCountType volatile* value);
+ G1CardSetArrayLocker(Atomic<EntryCountType>* value);
EntryCountType num_entries() const { return _local_num_entries; }
void inc_num_entries() {
@@ -192,7 +192,7 @@ private:
}
~G1CardSetArrayLocker() {
- AtomicAccess::release_store(_num_entries_addr, _local_num_entries);
+ _num_entries_addr->release_store(_local_num_entries);
}
};
@@ -213,7 +213,7 @@ public:
template <class CardVisitor>
void iterate(CardVisitor& found);
- size_t num_entries() const { return _num_entries & EntryMask; }
+ size_t num_entries() const { return _num_entries.load_relaxed() & EntryMask; }
static size_t header_size_in_bytes();
@@ -223,7 +223,7 @@ public:
};
class G1CardSetBitMap : public G1CardSetContainer {
- size_t _num_bits_set;
+ Atomic<size_t> _num_bits_set;
BitMap::bm_word_t _bits[1];
public:
@@ -236,7 +236,7 @@ public:
return bm.at(card_idx);
}
- uint num_bits_set() const { return (uint)_num_bits_set; }
+ uint num_bits_set() const { return (uint)_num_bits_set.load_relaxed(); }
template <class CardVisitor>
void iterate(CardVisitor& found, size_t const size_in_bits, uint offset);
@@ -255,10 +255,10 @@ class G1CardSetHowl : public G1CardSetContainer {
public:
typedef uint EntryCountType;
using ContainerPtr = G1CardSet::ContainerPtr;
- EntryCountType volatile _num_entries;
+ Atomic<EntryCountType> _num_entries;
private:
// VLA implementation.
- ContainerPtr _buckets[1];
+ Atomic<ContainerPtr> _buckets[1];
// Do not add class member variables beyond this point.
// Iterates over the given ContainerPtr with at index in this Howl card set,
@@ -268,14 +268,14 @@ private:
ContainerPtr at(EntryCountType index) const;
- ContainerPtr const* buckets() const;
+ Atomic<ContainerPtr> const* buckets() const;
public:
G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config);
- ContainerPtr const* container_addr(EntryCountType index) const;
+ Atomic<ContainerPtr> const* container_addr(EntryCountType index) const;
- ContainerPtr* container_addr(EntryCountType index);
+ Atomic<ContainerPtr>* container_addr(EntryCountType index);
bool contains(uint card_idx, G1CardSetConfiguration* config);
// Iterates over all ContainerPtrs in this Howl card set, applying a CardOrRangeVisitor
diff --git a/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp b/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp
index 1958309f517..3c6fb9d1a02 100644
--- a/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -67,7 +67,7 @@ inline G1AddCardResult G1CardSetInlinePtr::add(uint card_idx, uint bits_per_card
return Overflow;
}
ContainerPtr new_value = merge(_value, card_idx, num_cards, bits_per_card);
- ContainerPtr old_value = AtomicAccess::cmpxchg(_value_addr, _value, new_value, memory_order_relaxed);
+ ContainerPtr old_value = _value_addr->compare_exchange(_value, new_value, memory_order_relaxed);
if (_value == old_value) {
return Added;
}
@@ -126,7 +126,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
}
uintptr_t new_value = old_value + 2;
- uintptr_t ref_count = AtomicAccess::cmpxchg(&_ref_count, old_value, new_value);
+ uintptr_t ref_count = _ref_count.compare_exchange(old_value, new_value);
if (ref_count == old_value) {
return true;
}
@@ -137,7 +137,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
inline uintptr_t G1CardSetContainer::decrement_refcount() {
uintptr_t old_value = refcount();
assert((old_value & 0x1) != 0 && old_value >= 3, "precondition");
- return AtomicAccess::sub(&_ref_count, 2u);
+ return _ref_count.sub_then_fetch(2u);
}
inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_cards) :
@@ -149,14 +149,13 @@ inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_ca
*entry_addr(0) = checked_cast<EntryDataType>(card_in_region);
}
-inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(EntryCountType volatile* num_entries_addr) :
+inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(Atomic<EntryCountType>* num_entries_addr) :
_num_entries_addr(num_entries_addr) {
SpinYield s;
- EntryCountType num_entries = AtomicAccess::load(_num_entries_addr) & EntryMask;
+ EntryCountType num_entries = _num_entries_addr->load_relaxed() & EntryMask;
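+ // Spin until the CAS succeeds in setting the lock bit over an unlocked value.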
while (true) {
- EntryCountType old_value = AtomicAccess::cmpxchg(_num_entries_addr,
- num_entries,
- (EntryCountType)(num_entries | LockBitMask));
+ EntryCountType old_value = _num_entries_addr->compare_exchange(num_entries,
+ (EntryCountType)(num_entries | LockBitMask));
if (old_value == num_entries) {
// Succeeded locking the array.
_local_num_entries = num_entries;
@@ -174,7 +173,7 @@ inline G1CardSetArray::EntryDataType const* G1CardSetArray::base_addr() const {
}
inline G1CardSetArray::EntryDataType const* G1CardSetArray::entry_addr(EntryCountType index) const {
- assert(index < _num_entries, "precondition");
+ assert(index < _num_entries.load_relaxed(), "precondition");
return base_addr() + index;
}
@@ -189,7 +188,7 @@ inline G1CardSetArray::EntryDataType G1CardSetArray::at(EntryCountType index) co
inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
assert(card_idx < (1u << (sizeof(EntryDataType) * BitsPerByte)),
"Card index %u does not fit allowed card value range.", card_idx);
- EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
+ EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
EntryCountType idx = 0;
for (; idx < num_entries; idx++) {
if (at(idx) == card_idx) {
@@ -223,7 +222,7 @@ inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
}
inline bool G1CardSetArray::contains(uint card_idx) {
- EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
+ EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) {
if (at(idx) == card_idx) {
@@ -235,7 +234,7 @@ inline bool G1CardSetArray::contains(uint card_idx) {
template <class CardVisitor>
void G1CardSetArray::iterate(CardVisitor& found) {
- EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
+ EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) {
found(at(idx));
}
@@ -256,11 +255,11 @@ inline G1CardSetBitMap::G1CardSetBitMap(uint card_in_region, uint size_in_bits)
inline G1AddCardResult G1CardSetBitMap::add(uint card_idx, size_t threshold, size_t size_in_bits) {
BitMapView bm(_bits, size_in_bits);
- if (_num_bits_set >= threshold) {
+ if (_num_bits_set.load_relaxed() >= threshold) {
return bm.at(card_idx) ? Found : Overflow;
}
if (bm.par_set_bit(card_idx)) {
- AtomicAccess::inc(&_num_bits_set, memory_order_relaxed);
+ _num_bits_set.add_then_fetch(1u, memory_order_relaxed);
return Added;
}
return Found;
@@ -276,22 +275,22 @@ inline size_t G1CardSetBitMap::header_size_in_bytes() {
return offset_of(G1CardSetBitMap, _bits);
}
-inline G1CardSetHowl::ContainerPtr const* G1CardSetHowl::container_addr(EntryCountType index) const {
- assert(index < _num_entries, "precondition");
+inline Atomic<G1CardSetHowl::ContainerPtr> const* G1CardSetHowl::container_addr(EntryCountType index) const {
+ assert(index < _num_entries.load_relaxed(), "precondition");
return buckets() + index;
}
-inline G1CardSetHowl::ContainerPtr* G1CardSetHowl::container_addr(EntryCountType index) {
- return const_cast<ContainerPtr*>(const_cast<const G1CardSetHowl*>(this)->container_addr(index));
+inline Atomic<G1CardSetHowl::ContainerPtr>* G1CardSetHowl::container_addr(EntryCountType index) {
+ return const_cast<Atomic<ContainerPtr>*>(const_cast<const G1CardSetHowl*>(this)->container_addr(index));
}
inline G1CardSetHowl::ContainerPtr G1CardSetHowl::at(EntryCountType index) const {
- return *container_addr(index);
+ return (*container_addr(index)).load_relaxed();
}
-inline G1CardSetHowl::ContainerPtr const* G1CardSetHowl::buckets() const {
+inline Atomic<G1CardSetHowl::ContainerPtr> const* G1CardSetHowl::buckets() const {
const void* ptr = reinterpret_cast<const char*>(this) + header_size_in_bytes();
- return reinterpret_cast<ContainerPtr const*>(ptr);
+ return reinterpret_cast<Atomic<ContainerPtr> const*>(ptr);
}
inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config) :
@@ -300,7 +299,7 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf
EntryCountType num_buckets = config->num_buckets_in_howl();
EntryCountType bucket = config->howl_bucket_index(card_in_region);
for (uint i = 0; i < num_buckets; ++i) {
- *container_addr(i) = G1CardSetInlinePtr();
+ container_addr(i)->store_relaxed(G1CardSetInlinePtr());
if (i == bucket) {
G1CardSetInlinePtr value(container_addr(i), at(i));
value.add(card_in_region, config->inline_ptr_bits_per_card(), config->max_cards_in_inline_ptr());
@@ -310,8 +309,8 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf
inline bool G1CardSetHowl::contains(uint card_idx, G1CardSetConfiguration* config) {
EntryCountType bucket = config->howl_bucket_index(card_idx);
- ContainerPtr* array_entry = container_addr(bucket);
- ContainerPtr container = AtomicAccess::load_acquire(array_entry);
+ Atomic<ContainerPtr>* array_entry = container_addr(bucket);
+ ContainerPtr container = array_entry->load_acquire();
switch (G1CardSet::container_type(container)) {
case G1CardSet::ContainerArrayOfCards: {
diff --git a/src/hotspot/share/gc/g1/g1CardSetMemory.cpp b/src/hotspot/share/gc/g1/g1CardSetMemory.cpp
index d13a6fe2dca..60602ef942b 100644
--- a/src/hotspot/share/gc/g1/g1CardSetMemory.cpp
+++ b/src/hotspot/share/gc/g1/g1CardSetMemory.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#include "gc/g1/g1CardSetContainers.inline.hpp"
#include "gc/g1/g1CardSetMemory.inline.hpp"
#include "gc/g1/g1MonotonicArena.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/ostream.hpp"
G1CardSetAllocator::G1CardSetAllocator(const char* name,
diff --git a/src/hotspot/share/gc/g1/g1CardTableClaimTable.cpp b/src/hotspot/share/gc/g1/g1CardTableClaimTable.cpp
index e0cadbdd907..d8cabaa00a4 100644
--- a/src/hotspot/share/gc/g1/g1CardTableClaimTable.cpp
+++ b/src/hotspot/share/gc/g1/g1CardTableClaimTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,20 +44,20 @@ G1CardTableClaimTable::~G1CardTableClaimTable() {
void G1CardTableClaimTable::initialize(uint max_reserved_regions) {
assert(_card_claims == nullptr, "Must not be initialized twice");
- _card_claims = NEW_C_HEAP_ARRAY(uint, max_reserved_regions, mtGC);
+ _card_claims = NEW_C_HEAP_ARRAY(Atomic<uint>, max_reserved_regions, mtGC);
_max_reserved_regions = max_reserved_regions;
reset_all_to_unclaimed();
}
void G1CardTableClaimTable::reset_all_to_unclaimed() {
for (uint i = 0; i < _max_reserved_regions; i++) {
- _card_claims[i] = 0;
+ _card_claims[i].store_relaxed(0);
}
}
void G1CardTableClaimTable::reset_all_to_claimed() {
for (uint i = 0; i < _max_reserved_regions; i++) {
- _card_claims[i] = (uint)G1HeapRegion::CardsPerRegion;
+ _card_claims[i].store_relaxed((uint)G1HeapRegion::CardsPerRegion);
}
}
diff --git a/src/hotspot/share/gc/g1/g1CardTableClaimTable.hpp b/src/hotspot/share/gc/g1/g1CardTableClaimTable.hpp
index 4f524b83f97..822ef45c722 100644
--- a/src/hotspot/share/gc/g1/g1CardTableClaimTable.hpp
+++ b/src/hotspot/share/gc/g1/g1CardTableClaimTable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "gc/g1/g1CardTable.hpp"
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
class G1HeapRegionClosure;
@@ -45,7 +46,7 @@ class G1CardTableClaimTable : public CHeapObj<mtGC> {
// Card table iteration claim values for every heap region, from 0 (completely unclaimed)
// to (>=) G1HeapRegion::CardsPerRegion (completely claimed).
- uint volatile* _card_claims;
+ Atomic<uint>* _card_claims;
uint _cards_per_chunk; // For conversion between card index and chunk index.
diff --git a/src/hotspot/share/gc/g1/g1CardTableClaimTable.inline.hpp b/src/hotspot/share/gc/g1/g1CardTableClaimTable.inline.hpp
index d682f0d17ae..35b2484982c 100644
--- a/src/hotspot/share/gc/g1/g1CardTableClaimTable.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1CardTableClaimTable.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,26 +29,25 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
-#include "runtime/atomicAccess.hpp"
bool G1CardTableClaimTable::has_unclaimed_cards(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
- return AtomicAccess::load(&_card_claims[region]) < G1HeapRegion::CardsPerRegion;
+ return _card_claims[region].load_relaxed() < G1HeapRegion::CardsPerRegion;
}
void G1CardTableClaimTable::reset_to_unclaimed(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
- AtomicAccess::store(&_card_claims[region], 0u);
+ _card_claims[region].store_relaxed(0u);
}
uint G1CardTableClaimTable::claim_cards(uint region, uint increment) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
- return AtomicAccess::fetch_then_add(&_card_claims[region], increment, memory_order_relaxed);
+ return _card_claims[region].fetch_then_add(increment, memory_order_relaxed);
}
uint G1CardTableClaimTable::claim_chunk(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
- return AtomicAccess::fetch_then_add(&_card_claims[region], cards_per_chunk(), memory_order_relaxed);
+ return _card_claims[region].fetch_then_add(cards_per_chunk(), memory_order_relaxed);
}
uint G1CardTableClaimTable::claim_all_cards(uint region) {
diff --git a/src/hotspot/share/gc/g1/g1CodeRootSet.cpp b/src/hotspot/share/gc/g1/g1CodeRootSet.cpp
index 60ad3a2af32..ca4487876b9 100644
--- a/src/hotspot/share/gc/g1/g1CodeRootSet.cpp
+++ b/src/hotspot/share/gc/g1/g1CodeRootSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "gc/g1/g1HeapRegion.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
@@ -60,7 +60,7 @@ class G1CodeRootSetHashTable : public CHeapObj<mtGC> {
HashTable _table;
HashTableScanTask _table_scanner;
- size_t volatile _num_entries;
+ Atomic<size_t> _num_entries;
bool is_empty() const { return number_of_entries() == 0; }
@@ -120,7 +120,7 @@ public:
bool grow_hint = false;
bool inserted = _table.insert(Thread::current(), lookup, method, &grow_hint);
if (inserted) {
- AtomicAccess::inc(&_num_entries);
+ _num_entries.add_then_fetch(1u);
}
if (grow_hint) {
_table.grow(Thread::current());
@@ -131,7 +131,7 @@ public:
HashTableLookUp lookup(method);
bool removed = _table.remove(Thread::current(), lookup);
if (removed) {
- AtomicAccess::dec(&_num_entries);
+ _num_entries.sub_then_fetch(1u);
}
return removed;
}
@@ -182,7 +182,7 @@ public:
guarantee(succeeded, "unable to clean table");
if (num_deleted != 0) {
- size_t current_size = AtomicAccess::sub(&_num_entries, num_deleted);
+ size_t current_size = _num_entries.sub_then_fetch(num_deleted);
shrink_to_match(current_size);
}
}
@@ -226,7 +226,7 @@ public:
size_t mem_size() { return sizeof(*this) + _table.get_mem_size(Thread::current()); }
- size_t number_of_entries() const { return AtomicAccess::load(&_num_entries); }
+ size_t number_of_entries() const { return _num_entries.load_relaxed(); }
};
uintx G1CodeRootSetHashTable::HashTableLookUp::get_hash() const {
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 3a0c4a04441..b6c3c0b0907 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -103,7 +103,6 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index aff7166d391..8009df1fa6a 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,7 @@
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/bitMap.hpp"
@@ -124,7 +125,7 @@ class G1JavaThreadsListClaimer : public StackObj {
ThreadsListHandle _list;
uint _claim_step;
- volatile uint _cur_claim;
+ Atomic<uint> _cur_claim;
// Attempts to claim _claim_step JavaThreads, returning an array of claimed
// JavaThread* with count elements. Returns null (and a zero count) if there
@@ -1267,7 +1268,6 @@ public:
bool is_marked(oop obj) const;
- inline static bool is_obj_filler(const oop obj);
// Determine if an object is dead, given the object and also
// the region to which the object belongs.
inline bool is_obj_dead(const oop obj, const G1HeapRegion* hr) const;
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
index abd61e72d57..8782b65b6f9 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp"
+#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionManager.inline.hpp"
#include "gc/g1/g1HeapRegionRemSet.hpp"
@@ -38,10 +39,10 @@
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionPinCache.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
+#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/stackChunkOop.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "utilities/bitMap.inline.hpp"
@@ -53,10 +54,10 @@ inline bool G1STWIsAliveClosure::do_object_b(oop p) {
inline JavaThread* const* G1JavaThreadsListClaimer::claim(uint& count) {
count = 0;
- if (AtomicAccess::load(&_cur_claim) >= _list.length()) {
+ if (_cur_claim.load_relaxed() >= _list.length()) {
return nullptr;
}
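+ // Atomically claim the next block of _claim_step threads; the returned old value is our start index.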
- uint claim = AtomicAccess::fetch_then_add(&_cur_claim, _claim_step);
+ uint claim = _cur_claim.fetch_then_add(_claim_step);
if (claim >= _list.length()) {
return nullptr;
}
@@ -230,16 +231,11 @@ inline bool G1CollectedHeap::requires_barriers(stackChunkOop obj) const {
return !heap_region_containing(obj)->is_young(); // is_in_young does an unnecessary null check
}
-inline bool G1CollectedHeap::is_obj_filler(const oop obj) {
- Klass* k = obj->klass_without_asserts();
- return k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass();
-}
-
inline bool G1CollectedHeap::is_obj_dead(const oop obj, const G1HeapRegion* hr) const {
assert(!hr->is_free(), "looking up obj " PTR_FORMAT " in Free region %u", p2i(obj), hr->hrm_index());
if (hr->is_in_parsable_area(obj)) {
// This object is in the parsable part of the heap, live unless scrubbed.
- return is_obj_filler(obj);
+ return is_filler_object(obj);
} else {
// From Remark until a region has been concurrently scrubbed, parts of the
// region is not guaranteed to be parsable. Use the bitmap for liveness.
diff --git a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
index 954ca40a77f..e7bab32129e 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/shared/space.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/quickSort.hpp"
// Determine collection set candidates (from marking): For all regions determine
@@ -50,7 +50,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
G1HeapRegion** _data;
- uint volatile _cur_claim_idx;
+ Atomic<uint> _cur_claim_idx;
static int compare_region_gc_efficiency(G1HeapRegion** rr1, G1HeapRegion** rr2) {
G1HeapRegion* r1 = *rr1;
@@ -105,7 +105,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
// Claim a new chunk, returning its bounds [from, to[.
void claim_chunk(uint& from, uint& to) {
- uint result = AtomicAccess::add(&_cur_claim_idx, _chunk_size);
+ uint result = _cur_claim_idx.add_then_fetch(_chunk_size);
assert(_max_size > result - 1,
"Array too small, is %u should be %u with chunk size %u.",
_max_size, result, _chunk_size);
@@ -121,14 +121,15 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
}
void sort_by_gc_efficiency() {
- if (_cur_claim_idx == 0) {
+ uint length = _cur_claim_idx.load_relaxed();
+ if (length == 0) {
return;
}
- for (uint i = _cur_claim_idx; i < _max_size; i++) {
+ for (uint i = length; i < _max_size; i++) {
assert(_data[i] == nullptr, "must be");
}
- qsort(_data, _cur_claim_idx, sizeof(_data[0]), (_sort_Fn)compare_region_gc_efficiency);
- for (uint i = _cur_claim_idx; i < _max_size; i++) {
+ qsort(_data, length, sizeof(_data[0]), (_sort_Fn)compare_region_gc_efficiency);
+ for (uint i = length; i < _max_size; i++) {
assert(_data[i] == nullptr, "must be");
}
}
@@ -202,13 +203,13 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
G1CollectedHeap* _g1h;
G1HeapRegionClaimer _hrclaimer;
- uint volatile _num_regions_added;
+ Atomic<uint> _num_regions_added;
G1BuildCandidateArray _result;
void update_totals(uint num_regions) {
if (num_regions > 0) {
- AtomicAccess::add(&_num_regions_added, num_regions);
+ _num_regions_added.add_then_fetch(num_regions);
}
}
@@ -220,7 +221,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
void prune(G1HeapRegion** data) {
G1Policy* p = G1CollectedHeap::heap()->policy();
- uint num_candidates = AtomicAccess::load(&_num_regions_added);
+ uint num_candidates = _num_regions_added.load_relaxed();
uint min_old_cset_length = p->calc_min_old_cset_length(num_candidates);
uint num_pruned = 0;
@@ -253,7 +254,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
wasted_bytes,
allowed_waste);
- AtomicAccess::sub(&_num_regions_added, num_pruned, memory_order_relaxed);
+ _num_regions_added.sub_then_fetch(num_pruned, memory_order_relaxed);
}
public:
@@ -274,7 +275,7 @@ public:
_result.sort_by_gc_efficiency();
prune(_result.array());
candidates->set_candidates_from_marking(_result.array(),
- _num_regions_added);
+ _num_regions_added.load_relaxed());
}
};
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index 456d543fa10..2bbfb5032b3 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,9 @@
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
+#include "gc/shared/partialArraySplitter.inline.hpp"
+#include "gc/shared/partialArrayState.hpp"
+#include "gc/shared/partialArrayTaskStats.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
@@ -67,7 +70,6 @@
#include "nmt/memTracker.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
@@ -76,6 +78,7 @@
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/align.hpp"
+#include "utilities/checkedCast.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/powerOfTwo.hpp"
@@ -99,7 +102,7 @@ bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
// We move that task's local finger along.
_task->move_finger_to(addr);
- _task->scan_task_entry(G1TaskQueueEntry::from_oop(cast_to_oop(addr)));
+ _task->process_entry(G1TaskQueueEntry(cast_to_oop(addr)), false /* stolen */);
// we only partially drain the local queue and global stack
_task->drain_local_queue(true);
_task->drain_global_stack(true);
@@ -148,25 +151,25 @@ bool G1CMMarkStack::initialize() {
}
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::ChunkAllocator::allocate_new_chunk() {
- if (_size >= _max_capacity) {
+ if (_size.load_relaxed() >= _max_capacity) {
return nullptr;
}
- size_t cur_idx = AtomicAccess::fetch_then_add(&_size, 1u);
+ size_t cur_idx = _size.fetch_then_add(1u);
if (cur_idx >= _max_capacity) {
return nullptr;
}
size_t bucket = get_bucket(cur_idx);
- if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) {
+ if (_buckets[bucket].load_acquire() == nullptr) {
if (!_should_grow) {
// Prefer to restart the CM.
return nullptr;
}
MutexLocker x(G1MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
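+ // Re-check under the lock; another thread may have installed the bucket already.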
- if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) {
+ if (_buckets[bucket].load_acquire() == nullptr) {
size_t desired_capacity = bucket_size(bucket) * 2;
if (!try_expand_to(desired_capacity)) {
return nullptr;
@@ -175,7 +178,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::ChunkAllocator::allocate_new_
}
size_t bucket_idx = get_bucket_index(cur_idx);
- TaskQueueEntryChunk* result = ::new (&_buckets[bucket][bucket_idx]) TaskQueueEntryChunk;
+ TaskQueueEntryChunk* result = ::new (&_buckets[bucket].load_relaxed()[bucket_idx]) TaskQueueEntryChunk;
result->next = nullptr;
return result;
}
@@ -197,10 +200,10 @@ bool G1CMMarkStack::ChunkAllocator::initialize(size_t initial_capacity, size_t m
_max_capacity = max_capacity;
_num_buckets = get_bucket(_max_capacity) + 1;
- _buckets = NEW_C_HEAP_ARRAY(TaskQueueEntryChunk*, _num_buckets, mtGC);
+ _buckets = NEW_C_HEAP_ARRAY(Atomic<TaskQueueEntryChunk*>, _num_buckets, mtGC);
for (size_t i = 0; i < _num_buckets; i++) {
- _buckets[i] = nullptr;
+ _buckets[i].store_relaxed(nullptr);
}
size_t new_capacity = bucket_size(0);
@@ -240,9 +243,9 @@ G1CMMarkStack::ChunkAllocator::~ChunkAllocator() {
}
for (size_t i = 0; i < _num_buckets; i++) {
- if (_buckets[i] != nullptr) {
- MmapArrayAllocator<TaskQueueEntryChunk>::free(_buckets[i], bucket_size(i));
- _buckets[i] = nullptr;
+ if (_buckets[i].load_relaxed() != nullptr) {
+ MmapArrayAllocator<TaskQueueEntryChunk>::free(_buckets[i].load_relaxed(), bucket_size(i));
+ _buckets[i].store_relaxed(nullptr);
}
}
@@ -259,7 +262,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
// and the new capacity (new_capacity). This step ensures that there are no gaps in the
// array and that the capacity accurately reflects the reserved memory.
for (; i <= highest_bucket; i++) {
- if (AtomicAccess::load_acquire(&_buckets[i]) != nullptr) {
+ if (_buckets[i].load_acquire() != nullptr) {
continue; // Skip over already allocated buckets.
}
@@ -279,7 +282,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
return false;
}
_capacity += bucket_capacity;
- AtomicAccess::release_store(&_buckets[i], bucket_base);
+ _buckets[i].release_store(bucket_base);
}
return true;
}
@@ -288,9 +291,9 @@ void G1CMMarkStack::expand() {
_chunk_allocator.try_expand();
}
-void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
- elem->next = *list;
- *list = elem;
+void G1CMMarkStack::add_chunk_to_list(Atomic<TaskQueueEntryChunk*>* list, TaskQueueEntryChunk* elem) {
+ elem->next = list->load_relaxed();
+ list->store_relaxed(elem);
}
void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
@@ -304,10 +307,10 @@ void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
add_chunk_to_list(&_free_list, elem);
}
-G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
- TaskQueueEntryChunk* result = *list;
+G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(Atomic<TaskQueueEntryChunk*>* list) {
+ TaskQueueEntryChunk* result = list->load_relaxed();
if (result != nullptr) {
- *list = (*list)->next;
+ list->store_relaxed(list->load_relaxed()->next);
}
return result;
}
@@ -361,8 +364,8 @@ bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
void G1CMMarkStack::set_empty() {
_chunks_in_chunk_list = 0;
- _chunk_list = nullptr;
- _free_list = nullptr;
+ _chunk_list.store_relaxed(nullptr);
+ _free_list.store_relaxed(nullptr);
_chunk_allocator.reset();
}
@@ -490,6 +493,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
_task_queues(new G1CMTaskQueueSet(_max_num_tasks)),
_terminator(_max_num_tasks, _task_queues),
+ _partial_array_state_manager(new PartialArrayStateManager(_max_num_tasks)),
_first_overflow_barrier_sync(),
_second_overflow_barrier_sync(),
@@ -556,6 +560,10 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
reset_at_marking_complete();
}
+PartialArrayStateManager* G1ConcurrentMark::partial_array_state_manager() const {
+ return _partial_array_state_manager;
+}
+
void G1ConcurrentMark::reset() {
_has_aborted = false;
@@ -650,7 +658,26 @@ void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurr
}
}
+#if TASKQUEUE_STATS
+void G1ConcurrentMark::print_and_reset_taskqueue_stats() {
+ _task_queues->print_and_reset_taskqueue_stats("G1ConcurrentMark Oop Queue");
+
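+ // Gather each task's partial array statistics, log them, then reset them for the next cycle.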
+ auto get_pa_stats = [&](uint i) {
+ return _tasks[i]->partial_array_task_stats();
+ };
+
+ PartialArrayTaskStats::log_set(_max_num_tasks, get_pa_stats,
+ "G1ConcurrentMark Partial Array Task Stats");
+
+ for (uint i = 0; i < _max_num_tasks; ++i) {
+ get_pa_stats(i)->reset();
+ }
+}
+#endif
+
void G1ConcurrentMark::reset_at_marking_complete() {
+ TASKQUEUE_STATS_ONLY(print_and_reset_taskqueue_stats());
// We set the global marking state to some default values when we're
// not doing marking.
reset_marking_for_restart();
@@ -804,11 +831,25 @@ void G1ConcurrentMark::cleanup_for_next_mark() {
clear_bitmap(_concurrent_workers, true);
+ reset_partial_array_state_manager();
+
// Repeat the asserts from above.
guarantee(cm_thread()->in_progress(), "invariant");
guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}
+void G1ConcurrentMark::reset_partial_array_state_manager() {
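+ // Tear down all per-task splitters before resetting the shared manager, then recreate them in place.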
+ for (uint i = 0; i < _max_num_tasks; ++i) {
+ _tasks[i]->unregister_partial_array_splitter();
+ }
+
+ partial_array_state_manager()->reset();
+
+ for (uint i = 0; i < _max_num_tasks; ++i) {
+ _tasks[i]->register_partial_array_splitter();
+ }
+}
+
void G1ConcurrentMark::clear_bitmap(WorkerThreads* workers) {
assert_at_safepoint_on_vm_thread();
// To avoid fragmentation the full collection requesting to clear the bitmap
@@ -1789,17 +1830,18 @@ public:
{ }
void operator()(G1TaskQueueEntry task_entry) const {
- if (task_entry.is_array_slice()) {
- guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
+ if (task_entry.is_partial_array_state()) {
+ oop obj = task_entry.to_partial_array_state()->source();
+ guarantee(_g1h->is_in_reserved(obj), "Partial Array " PTR_FORMAT " must be in heap.", p2i(obj));
return;
}
- guarantee(oopDesc::is_oop(task_entry.obj()),
+ guarantee(oopDesc::is_oop(task_entry.to_oop()),
"Non-oop " PTR_FORMAT ", phase: %s, info: %d",
- p2i(task_entry.obj()), _phase, _info);
- G1HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
+ p2i(task_entry.to_oop()), _phase, _info);
+ G1HeapRegion* r = _g1h->heap_region_containing(task_entry.to_oop());
guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
"obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
- p2i(task_entry.obj()), _phase, _info, r->hrm_index());
+ p2i(task_entry.to_oop()), _phase, _info, r->hrm_index());
}
};
@@ -2055,6 +2097,17 @@ void G1CMTask::reset(G1CMBitMap* mark_bitmap) {
_mark_stats_cache.reset();
}
+void G1CMTask::register_partial_array_splitter() {
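+ // Placement new: re-initialize the embedded splitter against the (reset) state manager.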
+ ::new (&_partial_array_splitter) PartialArraySplitter(_cm->partial_array_state_manager(),
+ _cm->max_num_tasks(),
+ ObjArrayMarkingStride);
+}
+
+void G1CMTask::unregister_partial_array_splitter() {
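+ // Explicit destructor call; pairs with the placement new in register_partial_array_splitter().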
+ _partial_array_splitter.~PartialArraySplitter();
+}
+
bool G1CMTask::should_exit_termination() {
if (!regular_clock_call()) {
return true;
@@ -2185,7 +2238,7 @@ bool G1CMTask::get_entries_from_global_stack() {
if (task_entry.is_null()) {
break;
}
- assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
+ assert(task_entry.is_partial_array_state() || oopDesc::is_oop(task_entry.to_oop()), "Element " PTR_FORMAT " must be a partial array task or oop", p2i(task_entry.to_oop()));
bool success = _task_queue->push(task_entry);
// We only call this when the local queue is empty or under a
// given target limit. So, we do not expect this push to fail.
@@ -2216,7 +2269,7 @@ void G1CMTask::drain_local_queue(bool partially) {
G1TaskQueueEntry entry;
bool ret = _task_queue->pop_local(entry);
while (ret) {
- scan_task_entry(entry);
+ process_entry(entry, false /* stolen */);
if (_task_queue->size() <= target_size || has_aborted()) {
ret = false;
} else {
@@ -2226,6 +2279,37 @@ void G1CMTask::drain_local_queue(bool partially) {
}
}
+size_t G1CMTask::start_partial_array_processing(oop obj) {
+ assert(should_be_sliced(obj), "Must be an array object %d and large %zu", obj->is_objArray(), obj->size());
+
+ objArrayOop obj_array = objArrayOop(obj);
+ size_t array_length = obj_array->length();
+
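+ // start() claims an initial chunk for this thread and enqueues the remainder of the array as partial array tasks.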
+ size_t initial_chunk_size = _partial_array_splitter.start(_task_queue, obj_array, nullptr, array_length);
+
+ // Mark objArray klass metadata
+ if (_cm_oop_closure->do_metadata()) {
+ _cm_oop_closure->do_klass(obj_array->klass());
+ }
+
+ process_array_chunk(obj_array, 0, initial_chunk_size);
+
+ // Include object header size
+ return objArrayOopDesc::object_size(checked_cast<int>(initial_chunk_size));
+}
+
+size_t G1CMTask::process_partial_array(const G1TaskQueueEntry& task, bool stolen) {
+ PartialArrayState* state = task.to_partial_array_state();
+ // Access state before release by claim().
+ objArrayOop obj = objArrayOop(state->source());
+
+ PartialArraySplitter::Claim claim =
+ _partial_array_splitter.claim(state, _task_queue, stolen);
+
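+ // Process the claimed [start, end) sub-range and report its size in heap words.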
+ process_array_chunk(obj, claim._start, claim._end);
+ return heap_word_size((claim._end - claim._start) * heapOopSize);
+}
+
void G1CMTask::drain_global_stack(bool partially) {
if (has_aborted()) {
return;
@@ -2430,7 +2514,7 @@ void G1CMTask::attempt_stealing() {
while (!has_aborted()) {
G1TaskQueueEntry entry;
if (_cm->try_stealing(_worker_id, entry)) {
- scan_task_entry(entry);
+ process_entry(entry, true /* stolen */);
// And since we're towards the end, let's totally drain the
// local queue and global stack.
@@ -2759,12 +2843,12 @@ G1CMTask::G1CMTask(uint worker_id,
G1ConcurrentMark* cm,
G1CMTaskQueue* task_queue,
G1RegionMarkStats* mark_stats) :
- _objArray_processor(this),
_worker_id(worker_id),
_g1h(G1CollectedHeap::heap()),
_cm(cm),
_mark_bitmap(nullptr),
_task_queue(task_queue),
+ _partial_array_splitter(_cm->partial_array_state_manager(), _cm->max_num_tasks(), ObjArrayMarkingStride),
_mark_stats_cache(mark_stats, G1RegionMarkStatsCache::RegionMarkStatsCacheSize),
_calls(0),
_time_target_ms(0.0),
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
index 752082ce629..836d7793f81 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,17 +26,20 @@
#define SHARE_GC_G1_G1CONCURRENTMARK_HPP
#include "gc/g1/g1ConcurrentMarkBitMap.hpp"
-#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
#include "gc/g1/g1HeapRegionSet.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1RegionMarkStatsCache.hpp"
#include "gc/shared/gcCause.hpp"
+#include "gc/shared/partialArraySplitter.hpp"
+#include "gc/shared/partialArrayState.hpp"
+#include "gc/shared/partialArrayTaskStats.hpp"
#include "gc/shared/taskqueue.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/compilerWarnings.hpp"
#include "utilities/numberSeq.hpp"
@@ -53,41 +56,7 @@ class G1RegionToSpaceMapper;
class G1SurvivorRegions;
class ThreadClosure;
-// This is a container class for either an oop or a continuation address for
-// mark stack entries. Both are pushed onto the mark stack.
-class G1TaskQueueEntry {
-private:
- void* _holder;
-
- static const uintptr_t ArraySliceBit = 1;
-
- G1TaskQueueEntry(oop obj) : _holder(obj) {
- assert(_holder != nullptr, "Not allowed to set null task queue element");
- }
- G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }
-public:
-
- G1TaskQueueEntry() : _holder(nullptr) { }
- // Trivially copyable, for use in GenericTaskQueue.
-
- static G1TaskQueueEntry from_slice(HeapWord* what) { return G1TaskQueueEntry(what); }
- static G1TaskQueueEntry from_oop(oop obj) { return G1TaskQueueEntry(obj); }
-
- oop obj() const {
- assert(!is_array_slice(), "Trying to read array slice " PTR_FORMAT " as oop", p2i(_holder));
- return cast_to_oop(_holder);
- }
-
- HeapWord* slice() const {
- assert(is_array_slice(), "Trying to read oop " PTR_FORMAT " as array slice", p2i(_holder));
- return (HeapWord*)((uintptr_t)_holder & ~ArraySliceBit);
- }
-
- bool is_oop() const { return !is_array_slice(); }
- bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }
- bool is_null() const { return _holder == nullptr; }
-};
-
+typedef ScannerTask G1TaskQueueEntry;
typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
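
The deleted class stored either an oop or a continuation address in one word
by setting the low bit on continuations; the ScannerTask typedef reuses a
shared implementation of the same idea for oops and partial-array states. A
self-contained sketch of the low-bit tagging technique (generic pointer types,
not HotSpot's):
```
#include <cassert>
#include <cstdint>

class TaggedEntry {
  void* _holder = nullptr;
  static const uintptr_t TagBit = 1;
public:
  // Alignment guarantees the low bit of a real pointer is zero, freeing it
  // to distinguish the two payload kinds.
  static TaggedEntry from_object(void* obj) {
    assert(((uintptr_t)obj & TagBit) == 0 && "pointer must be aligned");
    TaggedEntry e; e._holder = obj; return e;
  }
  static TaggedEntry from_slice(void* addr) {
    TaggedEntry e; e._holder = (void*)((uintptr_t)addr | TagBit); return e;
  }
  bool is_slice() const { return ((uintptr_t)_holder & TagBit) != 0; }
  void* object() const { assert(!is_slice()); return _holder; }
  void* slice() const {
    assert(is_slice());
    return (void*)((uintptr_t)_holder & ~TagBit);
  }
};
```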
@@ -172,9 +141,9 @@ private:
size_t _capacity;
size_t _num_buckets;
bool _should_grow;
- TaskQueueEntryChunk* volatile* _buckets;
+ Atomic<TaskQueueEntryChunk*>* _buckets;
char _pad0[DEFAULT_PADDING_SIZE];
- volatile size_t _size;
+ Atomic<size_t> _size;
char _pad4[DEFAULT_PADDING_SIZE - sizeof(size_t)];
size_t bucket_size(size_t bucket) {
@@ -212,7 +181,7 @@ private:
bool initialize(size_t initial_capacity, size_t max_capacity);
void reset() {
- _size = 0;
+ _size.store_relaxed(0);
_should_grow = false;
}
@@ -241,17 +210,17 @@ private:
ChunkAllocator _chunk_allocator;
char _pad0[DEFAULT_PADDING_SIZE];
- TaskQueueEntryChunk* volatile _free_list; // Linked list of free chunks that can be allocated by users.
+ Atomic<TaskQueueEntryChunk*> _free_list; // Linked list of free chunks that can be allocated by users.
char _pad1[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*)];
- TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data.
+ Atomic<TaskQueueEntryChunk*> _chunk_list; // List of chunks currently containing data.
volatile size_t _chunks_in_chunk_list;
char _pad2[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];
// Atomically add the given chunk to the list.
- void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem);
+ void add_chunk_to_list(Atomic<TaskQueueEntryChunk*>* list, TaskQueueEntryChunk* elem);
// Atomically remove and return a chunk from the given list. Returns null if the
// list is empty.
- TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list);
+ TaskQueueEntryChunk* remove_chunk_from_list(Atomic<TaskQueueEntryChunk*>* list);
void add_chunk_to_chunk_list(TaskQueueEntryChunk* elem);
void add_chunk_to_free_list(TaskQueueEntryChunk* elem);
@@ -283,7 +252,7 @@ private:
// Return whether the chunk list is empty. Racy due to unsynchronized access to
// _chunk_list.
- bool is_empty() const { return _chunk_list == nullptr; }
+ bool is_empty() const { return _chunk_list.load_relaxed() == nullptr; }
size_t capacity() const { return _chunk_allocator.capacity(); }
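
Throughout this patch, volatile fields plus AtomicAccess free functions become
Atomic<T> members whose accessors name their memory ordering. A rough
std::atomic analogue of the wrapper's interface as used by reset() and
is_empty() above (assumed semantics; not the HotSpot class):
```
#include <atomic>
#include <cstddef>

template <typename T>
class Atomic {
  std::atomic<T> _value{};
public:
  T load_relaxed() const { return _value.load(std::memory_order_relaxed); }
  void store_relaxed(T v) { _value.store(v, std::memory_order_relaxed); }
  T fetch_then_add(T v) { return _value.fetch_add(v, std::memory_order_relaxed); }
  T add_then_fetch(T v) { return _value.fetch_add(v, std::memory_order_relaxed) + v; }
};

// Mirrors the mark-stack usage above: explicit ordering at every access site.
struct MarkStackSketch {
  Atomic<size_t> _size;
  void reset() { _size.store_relaxed(0); }
  bool is_empty() const { return _size.load_relaxed() == 0; }
};
```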
@@ -411,6 +380,8 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
G1CMTaskQueueSet* _task_queues; // Task queue set
TaskTerminator _terminator; // For termination
+ PartialArrayStateManager* _partial_array_state_manager;
+
// Two sync barriers that are used to synchronize tasks when an
// overflow occurs. The algorithm is the following. All tasks enter
// the first one to ensure that they have all stopped manipulating
@@ -488,6 +459,8 @@ class G1ConcurrentMark : public CHeapObj {
// Prints all gathered CM-related statistics
void print_stats();
+ void print_and_reset_taskqueue_stats();
+
HeapWord* finger() { return _finger; }
bool concurrent() { return _concurrent; }
uint active_tasks() { return _num_active_tasks; }
@@ -556,14 +529,14 @@ public:
// mark_in_bitmap call. Updates various statistics data.
void add_to_liveness(uint worker_id, oop const obj, size_t size);
// Did the last marking find a live object between bottom and TAMS?
- bool contains_live_object(uint region) const { return _region_mark_stats[region]._live_words != 0; }
+ bool contains_live_object(uint region) const { return _region_mark_stats[region].live_words() != 0; }
// Live bytes in the given region as determined by concurrent marking, i.e. the amount of
// live bytes between bottom and TAMS.
- size_t live_bytes(uint region) const { return _region_mark_stats[region]._live_words * HeapWordSize; }
+ size_t live_bytes(uint region) const { return _region_mark_stats[region].live_words() * HeapWordSize; }
// Set live bytes for concurrent marking.
- void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words = live_bytes / HeapWordSize; }
+ void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words.store_relaxed(live_bytes / HeapWordSize); }
// Approximate number of incoming references found during marking.
- size_t incoming_refs(uint region) const { return _region_mark_stats[region]._incoming_refs; }
+ size_t incoming_refs(uint region) const { return _region_mark_stats[region].incoming_refs(); }
// Update the TAMS for the given region to the current top.
inline void update_top_at_mark_start(G1HeapRegion* r);
@@ -582,6 +555,8 @@ public:
uint worker_id_offset() const { return _worker_id_offset; }
+ uint max_num_tasks() const { return _max_num_tasks; }
+
// Clear statistics gathered during the concurrent cycle for the given region after
// it has been reclaimed.
void clear_statistics(G1HeapRegion* r);
@@ -631,6 +606,8 @@ public:
// Calculates the number of concurrent GC threads to be used in the marking phase.
uint calc_active_marking_workers();
+ PartialArrayStateManager* partial_array_state_manager() const;
+
// Resets the global marking data structures, as well as the
// task local ones; should be called during concurrent start.
void reset();
@@ -642,6 +619,10 @@ public:
// to be called concurrently to the mutator. It will yield to safepoint requests.
void cleanup_for_next_mark();
+ // Recycle the memory that has been requested by allocators associated with
+ // the partial array state manager.
+ void reset_partial_array_state_manager();
+
// Clear the next marking bitmap during safepoint.
void clear_bitmap(WorkerThreads* workers);
@@ -732,14 +713,13 @@ private:
refs_reached_period = 1024,
};
- G1CMObjArrayProcessor _objArray_processor;
-
uint _worker_id;
G1CollectedHeap* _g1h;
G1ConcurrentMark* _cm;
G1CMBitMap* _mark_bitmap;
// the task queue of this task
G1CMTaskQueue* _task_queue;
+ PartialArraySplitter _partial_array_splitter;
G1RegionMarkStatsCache _mark_stats_cache;
// Number of calls to this task
@@ -850,13 +830,24 @@ private:
// mark bitmap scan, and so needs to be pushed onto the mark stack.
bool is_below_finger(oop obj, HeapWord* global_finger) const;
- template<bool scan> void process_grey_task_entry(G1TaskQueueEntry task_entry);
+ template<bool scan> void process_grey_task_entry(G1TaskQueueEntry task_entry, bool stolen);
+
+ static bool should_be_sliced(oop obj);
+ // Start processing the given objArrayOop by first pushing its continuations and
+ // then scanning the first chunk including the header.
+ size_t start_partial_array_processing(oop obj);
+ // Process the given continuation. Returns the number of words scanned.
+ size_t process_partial_array(const G1TaskQueueEntry& task, bool stolen);
+ // Apply the closure to the given range of elements in the objArray.
+ inline void process_array_chunk(objArrayOop obj, size_t start, size_t end);
public:
- // Apply the closure on the given area of the objArray. Return the number of words
- // scanned.
- inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
// Resets the task; should be called right at the beginning of a marking phase.
void reset(G1CMBitMap* mark_bitmap);
+ // Register/unregister Partial Array Splitter Allocator with the PartialArrayStateManager.
+ // This allows us to discard memory arenas used for partial object array states at the end
+ // of a concurrent mark cycle.
+ void register_partial_array_splitter();
+ void unregister_partial_array_splitter();
// Clears all the fields that correspond to a claimed region.
void clear_region_fields();
@@ -912,7 +903,7 @@ public:
inline bool deal_with_reference(T* p);
// Scans an object and visits its children.
- inline void scan_task_entry(G1TaskQueueEntry task_entry);
+ inline void process_entry(G1TaskQueueEntry task_entry, bool stolen);
// Pushes an object on the local queue.
inline void push(G1TaskQueueEntry task_entry);
@@ -957,6 +948,11 @@ public:
Pair<size_t, size_t> flush_mark_stats_cache();
// Prints statistics associated with this task
void print_stats();
+#if TASKQUEUE_STATS
+ PartialArrayTaskStats* partial_array_task_stats() {
+ return _partial_array_splitter.stats();
+ }
+#endif
};
// Class that's used to print out per-region liveness
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
index 6f71012ff7c..2f4824e4cae 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
-#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
@@ -39,6 +38,7 @@
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "utilities/bitMap.inline.hpp"
+#include "utilities/checkedCast.hpp"
inline bool G1CMIsAliveClosure::do_object_b(oop obj) {
// Check whether the passed in object is null. During discovery the referent
@@ -90,7 +90,7 @@ inline void G1CMMarkStack::iterate(Fn fn) const {
size_t num_chunks = 0;
- TaskQueueEntryChunk* cur = _chunk_list;
+ TaskQueueEntryChunk* cur = _chunk_list.load_relaxed();
while (cur != nullptr) {
guarantee(num_chunks <= _chunks_in_chunk_list, "Found %zu oop chunks which is more than there should be", num_chunks);
@@ -107,13 +107,15 @@ inline void G1CMMarkStack::iterate(Fn fn) const {
#endif
// It scans an object and visits its children.
-inline void G1CMTask::scan_task_entry(G1TaskQueueEntry task_entry) { process_grey_task_entry<true>(task_entry); }
+inline void G1CMTask::process_entry(G1TaskQueueEntry task_entry, bool stolen) {
+ process_grey_task_entry<true>(task_entry, stolen);
+}
inline void G1CMTask::push(G1TaskQueueEntry task_entry) {
- assert(task_entry.is_array_slice() || _g1h->is_in_reserved(task_entry.obj()), "invariant");
- assert(task_entry.is_array_slice() || !_g1h->is_on_master_free_list(
- _g1h->heap_region_containing(task_entry.obj())), "invariant");
- assert(task_entry.is_array_slice() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())), "invariant");
+ assert(task_entry.is_partial_array_state() || _g1h->is_in_reserved(task_entry.to_oop()), "invariant");
+ assert(task_entry.is_partial_array_state() || !_g1h->is_on_master_free_list(
+ _g1h->heap_region_containing(task_entry.to_oop())), "invariant");
+ assert(task_entry.is_partial_array_state() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.to_oop())), "invariant");
if (!_task_queue->push(task_entry)) {
// The local task queue looks full. We need to push some entries
@@ -159,29 +161,34 @@ inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
}
template<bool scan>
-inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry) {
- assert(scan || (task_entry.is_oop() && task_entry.obj()->is_typeArray()), "Skipping scan of grey non-typeArray");
- assert(task_entry.is_array_slice() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())),
+inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry, bool stolen) {
+ assert(scan || (!task_entry.is_partial_array_state() && task_entry.to_oop()->is_typeArray()), "Skipping scan of grey non-typeArray");
+ assert(task_entry.is_partial_array_state() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.to_oop())),
"Any stolen object should be a slice or marked");
if (scan) {
- if (task_entry.is_array_slice()) {
- _words_scanned += _objArray_processor.process_slice(task_entry.slice());
+ if (task_entry.is_partial_array_state()) {
+ _words_scanned += process_partial_array(task_entry, stolen);
} else {
- oop obj = task_entry.obj();
- if (G1CMObjArrayProcessor::should_be_sliced(obj)) {
- _words_scanned += _objArray_processor.process_obj(obj);
+ oop obj = task_entry.to_oop();
+ if (should_be_sliced(obj)) {
+ _words_scanned += start_partial_array_processing(obj);
} else {
- _words_scanned += obj->oop_iterate_size(_cm_oop_closure);;
+ _words_scanned += obj->oop_iterate_size(_cm_oop_closure);
}
}
}
check_limits();
}
-inline size_t G1CMTask::scan_objArray(objArrayOop obj, MemRegion mr) {
- obj->oop_iterate(_cm_oop_closure, mr);
- return mr.word_size();
+inline bool G1CMTask::should_be_sliced(oop obj) {
+ return obj->is_objArray() && ((objArrayOop)obj)->length() >= (int)ObjArrayMarkingStride;
+}
+
+inline void G1CMTask::process_array_chunk(objArrayOop obj, size_t start, size_t end) {
+ obj->oop_iterate_elements_range(_cm_oop_closure,
+ checked_cast<int>(start),
+ checked_cast<int>(end));
}
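
Here should_be_sliced() routes large arrays into the splitter, and
process_array_chunk() scans one [start, end) element range. How the ranges
tile an array, for an assumed stride (kStride is only a stand-in for
ObjArrayMarkingStride):
```
#include <cstddef>
#include <cstdio>

const size_t kStride = 128; // stand-in for ObjArrayMarkingStride

bool should_be_sliced(size_t num_elements) {
  return num_elements >= kStride;
}

void enumerate_chunks(size_t num_elements) {
  for (size_t start = 0; start < num_elements; start += kStride) {
    size_t end = start + kStride < num_elements ? start + kStride : num_elements;
    std::printf("chunk [%zu, %zu)\n", start, end); // -> process_array_chunk(obj, start, end)
  }
}
```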
inline void G1ConcurrentMark::update_top_at_mark_start(G1HeapRegion* r) {
@@ -265,7 +272,7 @@ inline bool G1CMTask::make_reference_grey(oop obj) {
// be pushed on the stack. So, some duplicate work, but no
// correctness problems.
if (is_below_finger(obj, global_finger)) {
- G1TaskQueueEntry entry = G1TaskQueueEntry::from_oop(obj);
+ G1TaskQueueEntry entry(obj);
if (obj->is_typeArray()) {
// Immediately process arrays of primitive types, rather
// than pushing on the mark stack. This keeps us from
@@ -277,7 +284,7 @@ inline bool G1CMTask::make_reference_grey(oop obj) {
// by only doing a bookkeeping update and avoiding the
// actual scan of the object - a typeArray contains no
// references, and the metadata is built-in.
- process_grey_task_entry<false>(entry);
+ process_grey_task_entry<false>(entry, false /* stolen */);
} else {
push(entry);
}
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp
deleted file mode 100644
index 7f62e5527d5..00000000000
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1ConcurrentMark.inline.hpp"
-#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
-#include "gc/g1/g1HeapRegion.inline.hpp"
-#include "gc/shared/gc_globals.hpp"
-#include "memory/memRegion.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-void G1CMObjArrayProcessor::push_array_slice(HeapWord* what) {
- _task->push(G1TaskQueueEntry::from_slice(what));
-}
-
-size_t G1CMObjArrayProcessor::process_array_slice(objArrayOop obj, HeapWord* start_from, size_t remaining) {
- size_t words_to_scan = MIN2(remaining, (size_t)ObjArrayMarkingStride);
-
- if (remaining > ObjArrayMarkingStride) {
- push_array_slice(start_from + ObjArrayMarkingStride);
- }
-
- // Then process current area.
- MemRegion mr(start_from, words_to_scan);
- return _task->scan_objArray(obj, mr);
-}
-
-size_t G1CMObjArrayProcessor::process_obj(oop obj) {
- assert(should_be_sliced(obj), "Must be an array object %d and large %zu", obj->is_objArray(), obj->size());
-
- return process_array_slice(objArrayOop(obj), cast_from_oop(obj), objArrayOop(obj)->size());
-}
-
-size_t G1CMObjArrayProcessor::process_slice(HeapWord* slice) {
-
- // Find the start address of the objArrayOop.
- // Shortcut the BOT access if the given address is from a humongous object. The BOT
- // slide is fast enough for "smaller" objects in non-humongous regions, but is slower
- // than directly using heap region table.
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
- G1HeapRegion* r = g1h->heap_region_containing(slice);
-
- HeapWord* const start_address = r->is_humongous() ?
- r->humongous_start_region()->bottom() :
- r->block_start(slice);
-
- assert(cast_to_oop(start_address)->is_objArray(), "Address " PTR_FORMAT " does not refer to an object array ", p2i(start_address));
- assert(start_address < slice,
- "Object start address " PTR_FORMAT " must be smaller than decoded address " PTR_FORMAT,
- p2i(start_address),
- p2i(slice));
-
- objArrayOop objArray = objArrayOop(cast_to_oop(start_address));
-
- size_t already_scanned = pointer_delta(slice, start_address);
- size_t remaining = objArray->size() - already_scanned;
-
- return process_array_slice(objArray, slice, remaining);
-}
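
For reference, the removed process_slice() had to recover the array header
from a raw interior pointer (via the region table or block-offset table) and
re-derive the remaining work by pointer arithmetic; the PartialArrayState
replacement carries that bookkeeping explicitly. The removed arithmetic,
sketched standalone with a simplified stand-in for HeapWord*:
```
#include <cassert>
#include <cstddef>
#include <cstdint>

typedef const uintptr_t* HeapWordPtr; // simplified stand-in for HeapWord*

// Given the array's start and an interior slice pointer, compute how many
// words remain to scan (mirrors pointer_delta plus the size subtraction).
size_t remaining_words(HeapWordPtr array_start, HeapWordPtr slice,
                       size_t array_size_words) {
  assert(array_start < slice && "slice must point into the array");
  size_t already_scanned = (size_t)(slice - array_start);
  return array_size_words - already_scanned;
}
```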
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp
deleted file mode 100644
index c2737dbbda6..00000000000
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_HPP
-#define SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_HPP
-
-#include "oops/oopsHierarchy.hpp"
-
-class G1CMTask;
-
-// Helper class to mark through large objArrays during marking in an efficient way.
-// Instead of pushing large object arrays, we push continuations onto the
-// mark stack. These continuations are identified by having their LSB set.
-// This allows incremental processing of large objects.
-class G1CMObjArrayProcessor {
-private:
- // Reference to the task for doing the actual work.
- G1CMTask* _task;
-
- // Push the continuation at the given address onto the mark stack.
- void push_array_slice(HeapWord* addr);
-
- // Process (apply the closure) on the given continuation of the given objArray.
- size_t process_array_slice(objArrayOop const obj, HeapWord* start_from, size_t remaining);
-public:
- static bool should_be_sliced(oop obj);
-
- G1CMObjArrayProcessor(G1CMTask* task) : _task(task) {
- }
-
- // Process the given continuation. Returns the number of words scanned.
- size_t process_slice(HeapWord* slice);
- // Start processing the given objArrayOop by scanning the header and pushing its
- // continuation.
- size_t process_obj(oop obj);
-};
-
-#endif // SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_HPP
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.cpp
index fdef4214622..4eb11f6d8f6 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,6 @@
#include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
#include "logging/log.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp"
struct G1UpdateRegionLivenessAndSelectForRebuildTask::G1OnRegionClosure : public G1HeapRegionClosure {
@@ -154,7 +153,7 @@ void G1UpdateRegionLivenessAndSelectForRebuildTask::work(uint worker_id) {
G1OnRegionClosure on_region_cl(_g1h, _cm, &local_cleanup_list);
_g1h->heap_region_par_iterate_from_worker_offset(&on_region_cl, &_hrclaimer, worker_id);
- AtomicAccess::add(&_total_selected_for_rebuild, on_region_cl._num_selected_for_rebuild);
+ _total_selected_for_rebuild.add_then_fetch(on_region_cl._num_selected_for_rebuild);
// Update the old/humongous region sets
_g1h->remove_from_old_gen_sets(on_region_cl._num_old_regions_removed,
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.hpp
index 161f0b4b9f5..a256693ff1d 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "gc/g1/g1HeapRegionManager.hpp"
#include "gc/g1/g1HeapRegionSet.hpp"
#include "gc/shared/workerThread.hpp"
+#include "runtime/atomic.hpp"
class G1CollectedHeap;
class G1ConcurrentMark;
@@ -41,7 +42,7 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask {
G1ConcurrentMark* _cm;
G1HeapRegionClaimer _hrclaimer;
- uint volatile _total_selected_for_rebuild;
+ Atomic<uint> _total_selected_for_rebuild;
// Reclaimed empty regions
G1FreeRegionList _cleanup_list;
@@ -57,7 +58,9 @@ public:
void work(uint worker_id) override;
- uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
+ uint total_selected_for_rebuild() const {
+ return _total_selected_for_rebuild.load_relaxed();
+ }
static uint desired_num_workers(uint num_regions);
};
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp
index ed6a9ad4292..8546e6e2d64 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp
@@ -28,6 +28,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
+#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1ConcurrentRefineSweepTask.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.cpp
index 83a09c55a3f..5160d5ed036 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,7 +22,7 @@
*
*/
-#include "gc/g1/g1ConcurrentRefineStats.hpp"
+#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/timer.hpp"
@@ -39,19 +39,27 @@ G1ConcurrentRefineStats::G1ConcurrentRefineStats() :
{}
void G1ConcurrentRefineStats::add_atomic(G1ConcurrentRefineStats* other) {
- AtomicAccess::add(&_sweep_duration, other->_sweep_duration, memory_order_relaxed);
- AtomicAccess::add(&_yield_during_sweep_duration, other->_yield_during_sweep_duration, memory_order_relaxed);
+ _sweep_duration.add_then_fetch(other->_sweep_duration.load_relaxed(), memory_order_relaxed);
+ _yield_during_sweep_duration.add_then_fetch(other->yield_during_sweep_duration(), memory_order_relaxed);
- AtomicAccess::add(&_cards_scanned, other->_cards_scanned, memory_order_relaxed);
- AtomicAccess::add(&_cards_clean, other->_cards_clean, memory_order_relaxed);
- AtomicAccess::add(&_cards_not_parsable, other->_cards_not_parsable, memory_order_relaxed);
- AtomicAccess::add(&_cards_already_refer_to_cset, other->_cards_already_refer_to_cset, memory_order_relaxed);
- AtomicAccess::add(&_cards_refer_to_cset, other->_cards_refer_to_cset, memory_order_relaxed);
- AtomicAccess::add(&_cards_no_cross_region, other->_cards_no_cross_region, memory_order_relaxed);
+ _cards_scanned.add_then_fetch(other->cards_scanned(), memory_order_relaxed);
+ _cards_clean.add_then_fetch(other->cards_clean(), memory_order_relaxed);
+ _cards_not_parsable.add_then_fetch(other->cards_not_parsable(), memory_order_relaxed);
+ _cards_already_refer_to_cset.add_then_fetch(other->cards_already_refer_to_cset(), memory_order_relaxed);
+ _cards_refer_to_cset.add_then_fetch(other->cards_refer_to_cset(), memory_order_relaxed);
+ _cards_no_cross_region.add_then_fetch(other->cards_no_cross_region(), memory_order_relaxed);
- AtomicAccess::add(&_refine_duration, other->_refine_duration, memory_order_relaxed);
+ _refine_duration.add_then_fetch(other->refine_duration(), memory_order_relaxed);
}
void G1ConcurrentRefineStats::reset() {
- *this = G1ConcurrentRefineStats();
+ _sweep_duration.store_relaxed(0);
+ _yield_during_sweep_duration.store_relaxed(0);
+ _cards_scanned.store_relaxed(0);
+ _cards_clean.store_relaxed(0);
+ _cards_not_parsable.store_relaxed(0);
+ _cards_already_refer_to_cset.store_relaxed(0);
+ _cards_refer_to_cset.store_relaxed(0);
+ _cards_no_cross_region.store_relaxed(0);
+ _refine_duration.store_relaxed(0);
}
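
So each refinement thread owns one stats object, and add_atomic() folds it
into a shared summary using relaxed read-modify-writes, since only the final
totals are consumed. A trimmed std::atomic sketch of the pattern (two counters
stand in for the full field set):
```
#include <atomic>
#include <cstddef>

struct RefineStatsSketch {
  std::atomic<size_t> cards_scanned{0};
  std::atomic<size_t> cards_clean{0};

  // Merge another thread's stats; relaxed ordering suffices for plain sums.
  void add_atomic(const RefineStatsSketch& other) {
    cards_scanned.fetch_add(other.cards_scanned.load(std::memory_order_relaxed),
                            std::memory_order_relaxed);
    cards_clean.fetch_add(other.cards_clean.load(std::memory_order_relaxed),
                          std::memory_order_relaxed);
  }

  void reset() {
    cards_scanned.store(0, std::memory_order_relaxed);
    cards_clean.store(0, std::memory_order_relaxed);
  }
};
```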
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.hpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.hpp
index ce22f4317df..5f57c56ba6c 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,61 +26,61 @@
#define SHARE_GC_G1_G1CONCURRENTREFINESTATS_HPP
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
-#include "utilities/ticks.hpp"
// Collection of statistics for concurrent refinement processing.
// Used for collecting per-thread statistics and for summaries over a
// collection of threads.
class G1ConcurrentRefineStats : public CHeapObj<mtGC> {
- jlong _sweep_duration; // Time spent sweeping the table finding non-clean cards
- // and refining them.
- jlong _yield_during_sweep_duration; // Time spent yielding during the sweep (not doing the sweep).
+ Atomic<jlong> _sweep_duration; // Time spent sweeping the table finding non-clean cards
+ // and refining them.
+ Atomic<jlong> _yield_during_sweep_duration; // Time spent yielding during the sweep (not doing the sweep).
- size_t _cards_scanned; // Total number of cards scanned.
- size_t _cards_clean; // Number of cards found clean.
- size_t _cards_not_parsable; // Number of cards we could not parse and left unrefined.
- size_t _cards_already_refer_to_cset;// Number of cards marked found to be already young.
- size_t _cards_refer_to_cset; // Number of dirty cards that were recently found to contain a to-cset reference.
- size_t _cards_no_cross_region; // Number of dirty cards that were dirtied, but then cleaned again by the mutator.
+ Atomic<size_t> _cards_scanned; // Total number of cards scanned.
+ Atomic<size_t> _cards_clean; // Number of cards found clean.
+ Atomic<size_t> _cards_not_parsable; // Number of cards we could not parse and left unrefined.
+ Atomic<size_t> _cards_already_refer_to_cset;// Number of cards found to already refer to the collection set.
+ Atomic<size_t> _cards_refer_to_cset; // Number of dirty cards that were recently found to contain a to-cset reference.
+ Atomic<size_t> _cards_no_cross_region; // Number of cards that were dirtied, but then cleaned again by the mutator.
- jlong _refine_duration; // Time spent during actual refinement.
+ Atomic<jlong> _refine_duration; // Time spent during actual refinement.
public:
G1ConcurrentRefineStats();
// Time spent performing sweeping the refinement table (includes actual refinement,
// but not yield time).
- jlong sweep_duration() const { return _sweep_duration - _yield_during_sweep_duration; }
- jlong yield_during_sweep_duration() const { return _yield_during_sweep_duration; }
- jlong refine_duration() const { return _refine_duration; }
+ inline jlong sweep_duration() const;
+ inline jlong yield_during_sweep_duration() const;
+ inline jlong refine_duration() const;
// Number of refined cards.
- size_t refined_cards() const { return cards_not_clean(); }
+ inline size_t refined_cards() const;
- size_t cards_scanned() const { return _cards_scanned; }
- size_t cards_clean() const { return _cards_clean; }
- size_t cards_not_clean() const { return _cards_scanned - _cards_clean; }
- size_t cards_not_parsable() const { return _cards_not_parsable; }
- size_t cards_already_refer_to_cset() const { return _cards_already_refer_to_cset; }
- size_t cards_refer_to_cset() const { return _cards_refer_to_cset; }
- size_t cards_no_cross_region() const { return _cards_no_cross_region; }
+ inline size_t cards_scanned() const;
+ inline size_t cards_clean() const;
+ inline size_t cards_not_clean() const;
+ inline size_t cards_not_parsable() const;
+ inline size_t cards_already_refer_to_cset() const;
+ inline size_t cards_refer_to_cset() const;
+ inline size_t cards_no_cross_region() const;
// Number of cards that were marked dirty and in need of refinement. This includes cards recently
// found to refer to the collection set as they originally were dirty.
- size_t cards_pending() const { return cards_not_clean() - _cards_already_refer_to_cset; }
+ inline size_t cards_pending() const;
- size_t cards_to_cset() const { return _cards_already_refer_to_cset + _cards_refer_to_cset; }
+ inline size_t cards_to_cset() const;
- void inc_sweep_time(jlong t) { _sweep_duration += t; }
- void inc_yield_during_sweep_duration(jlong t) { _yield_during_sweep_duration += t; }
- void inc_refine_duration(jlong t) { _refine_duration += t; }
+ inline void inc_sweep_time(jlong t);
+ inline void inc_yield_during_sweep_duration(jlong t);
+ inline void inc_refine_duration(jlong t);
- void inc_cards_scanned(size_t increment) { _cards_scanned += increment; }
- void inc_cards_clean(size_t increment) { _cards_clean += increment; }
- void inc_cards_not_parsable() { _cards_not_parsable++; }
- void inc_cards_already_refer_to_cset() { _cards_already_refer_to_cset++; }
- void inc_cards_refer_to_cset() { _cards_refer_to_cset++; }
- void inc_cards_no_cross_region() { _cards_no_cross_region++; }
+ inline void inc_cards_scanned(size_t increment);
+ inline void inc_cards_clean(size_t increment);
+ inline void inc_cards_not_parsable();
+ inline void inc_cards_already_refer_to_cset();
+ inline void inc_cards_refer_to_cset();
+ inline void inc_cards_no_cross_region();
void add_atomic(G1ConcurrentRefineStats* other);
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.inline.hpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.inline.hpp
new file mode 100644
index 00000000000..e1a296c6494
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.inline.hpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1CONCURRENTREFINESTATS_INLINE_HPP
+#define SHARE_GC_G1_G1CONCURRENTREFINESTATS_INLINE_HPP
+
+#include "gc/g1/g1ConcurrentRefineStats.hpp"
+
+inline jlong G1ConcurrentRefineStats::sweep_duration() const {
+ return _sweep_duration.load_relaxed() - yield_during_sweep_duration();
+}
+
+inline jlong G1ConcurrentRefineStats::yield_during_sweep_duration() const {
+ return _yield_during_sweep_duration.load_relaxed();
+}
+
+inline jlong G1ConcurrentRefineStats::refine_duration() const {
+ return _refine_duration.load_relaxed();
+}
+
+inline size_t G1ConcurrentRefineStats::refined_cards() const {
+ return cards_not_clean();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_scanned() const {
+ return _cards_scanned.load_relaxed();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_clean() const {
+ return _cards_clean.load_relaxed();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_not_clean() const {
+ return cards_scanned() - cards_clean();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_not_parsable() const {
+ return _cards_not_parsable.load_relaxed();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_already_refer_to_cset() const {
+ return _cards_already_refer_to_cset.load_relaxed();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_refer_to_cset() const {
+ return _cards_refer_to_cset.load_relaxed();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_no_cross_region() const {
+ return _cards_no_cross_region.load_relaxed();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_pending() const {
+ return cards_not_clean() - cards_already_refer_to_cset();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_to_cset() const {
+ return cards_already_refer_to_cset() + cards_refer_to_cset();
+}
+
+inline void G1ConcurrentRefineStats::inc_sweep_time(jlong t) {
+ _sweep_duration.store_relaxed(_sweep_duration.load_relaxed() + t);
+}
+
+inline void G1ConcurrentRefineStats::inc_yield_during_sweep_duration(jlong t) {
+ _yield_during_sweep_duration.store_relaxed(yield_during_sweep_duration() + t);
+}
+
+inline void G1ConcurrentRefineStats::inc_refine_duration(jlong t) {
+ _refine_duration.store_relaxed(refine_duration() + t);
+}
+
+inline void G1ConcurrentRefineStats::inc_cards_scanned(size_t increment) {
+ _cards_scanned.store_relaxed(cards_scanned() + increment);
+}
+
+inline void G1ConcurrentRefineStats::inc_cards_clean(size_t increment) {
+ _cards_clean.store_relaxed(cards_clean() + increment);
+}
+
+inline void G1ConcurrentRefineStats::inc_cards_not_parsable() {
+ _cards_not_parsable.store_relaxed(cards_not_parsable() + 1);
+}
+
+inline void G1ConcurrentRefineStats::inc_cards_already_refer_to_cset() {
+ _cards_already_refer_to_cset.store_relaxed(cards_already_refer_to_cset() + 1);
+}
+
+inline void G1ConcurrentRefineStats::inc_cards_refer_to_cset() {
+ _cards_refer_to_cset.store_relaxed(cards_refer_to_cset() + 1);
+}
+
+inline void G1ConcurrentRefineStats::inc_cards_no_cross_region() {
+ _cards_no_cross_region.store_relaxed(cards_no_cross_region() + 1);
+}
+
+#endif // SHARE_GC_G1_G1CONCURRENTREFINESTATS_INLINE_HPP
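
One detail worth noting: the inc_* helpers above are a relaxed load followed
by a relaxed store rather than an atomic read-modify-write. That stays correct
only while a single owning thread updates a given stats object (concurrent
readers are fine); cross-thread merging goes through add_then_fetch instead.
The distinction, in std::atomic terms:
```
#include <atomic>
#include <cstddef>

std::atomic<size_t> counter{0};

// Safe only when one thread (the owner) ever updates this counter; other
// threads may still read it concurrently.
void single_writer_inc(size_t n) {
  counter.store(counter.load(std::memory_order_relaxed) + n,
                std::memory_order_relaxed);
}

// Required when multiple threads update the same counter concurrently.
void multi_writer_inc(size_t n) {
  counter.fetch_add(n, std::memory_order_relaxed);
}
```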
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.cpp
index ca5bc9ebe5f..ce944f2254d 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.cpp
@@ -24,6 +24,7 @@
#include "gc/g1/g1CardTableClaimTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1ConcurrentRefineSweepTask.hpp"
class G1RefineRegionClosure : public G1HeapRegionClosure {
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.hpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.hpp
index bf24c5ae850..827b9a3c402 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.hpp
@@ -25,10 +25,10 @@
#ifndef SHARE_GC_G1_G1CONCURRENTREFINESWEEPTASK_HPP
#define SHARE_GC_G1_G1CONCURRENTREFINESWEEPTASK_HPP
-#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/shared/workerThread.hpp"
class G1CardTableClaimTable;
+class G1ConcurrentRefineStats;
class G1ConcurrentRefineSweepTask : public WorkerTask {
G1CardTableClaimTable* _scan_state;
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp
index eccfe466d48..6b51e5eef62 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp
@@ -26,7 +26,7 @@
#include "gc/g1/g1CardTableClaimTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
-#include "gc/g1/g1ConcurrentRefineStats.hpp"
+#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1ConcurrentRefineSweepTask.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp
index 7cdc001d348..2ecbdc668eb 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp
@@ -25,7 +25,6 @@
#ifndef SHARE_GC_G1_G1CONCURRENTREFINETHREAD_HPP
#define SHARE_GC_G1_G1CONCURRENTREFINETHREAD_HPP
-#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/shared/concurrentGCThread.hpp"
#include "runtime/mutex.hpp"
#include "utilities/globalDefinitions.hpp"
diff --git a/src/hotspot/share/gc/g1/g1EvacFailureRegions.cpp b/src/hotspot/share/gc/g1/g1EvacFailureRegions.cpp
index ffcb5a0022f..37553e2aa56 100644
--- a/src/hotspot/share/gc/g1/g1EvacFailureRegions.cpp
+++ b/src/hotspot/share/gc/g1/g1EvacFailureRegions.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +29,6 @@
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "memory/allocation.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/bitMap.inline.hpp"
G1EvacFailureRegions::G1EvacFailureRegions() :
@@ -43,7 +43,7 @@ G1EvacFailureRegions::~G1EvacFailureRegions() {
}
void G1EvacFailureRegions::pre_collection(uint max_regions) {
- AtomicAccess::store(&_num_regions_evac_failed, 0u);
+ _num_regions_evac_failed.store_relaxed(0u);
_regions_evac_failed.resize(max_regions);
_regions_pinned.resize(max_regions);
_regions_alloc_failed.resize(max_regions);
@@ -69,6 +69,6 @@ void G1EvacFailureRegions::par_iterate(G1HeapRegionClosure* closure,
G1CollectedHeap::heap()->par_iterate_regions_array(closure,
hrclaimer,
_evac_failed_regions,
- AtomicAccess::load(&_num_regions_evac_failed),
+ num_regions_evac_failed(),
worker_id);
}
diff --git a/src/hotspot/share/gc/g1/g1EvacFailureRegions.hpp b/src/hotspot/share/gc/g1/g1EvacFailureRegions.hpp
index 9d29957b782..f752a3f8ab7 100644
--- a/src/hotspot/share/gc/g1/g1EvacFailureRegions.hpp
+++ b/src/hotspot/share/gc/g1/g1EvacFailureRegions.hpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +26,7 @@
#ifndef SHARE_GC_G1_G1EVACFAILUREREGIONS_HPP
#define SHARE_GC_G1_G1EVACFAILUREREGIONS_HPP
+#include "runtime/atomic.hpp"
#include "utilities/bitMap.hpp"
class G1AbstractSubTask;
@@ -53,14 +55,14 @@ class G1EvacFailureRegions {
// Evacuation failed regions (indexes) in the current collection.
uint* _evac_failed_regions;
// Number of regions evacuation failed in the current collection.
- volatile uint _num_regions_evac_failed;
+ Atomic<uint> _num_regions_evac_failed;
public:
G1EvacFailureRegions();
~G1EvacFailureRegions();
uint get_region_idx(uint idx) const {
- assert(idx < _num_regions_evac_failed, "precondition");
+ assert(idx < _num_regions_evac_failed.load_relaxed(), "precondition");
return _evac_failed_regions[idx];
}
diff --git a/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp b/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp
index 6eec9b63e6b..fb456475b56 100644
--- a/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,10 +30,9 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
-#include "runtime/atomicAccess.hpp"
uint G1EvacFailureRegions::num_regions_evac_failed() const {
- return AtomicAccess::load(&_num_regions_evac_failed);
+ return _num_regions_evac_failed.load_relaxed();
}
bool G1EvacFailureRegions::has_regions_evac_failed() const {
@@ -57,7 +57,7 @@ bool G1EvacFailureRegions::record(uint worker_id, uint region_idx, bool cause_pi
bool success = _regions_evac_failed.par_set_bit(region_idx,
memory_order_relaxed);
if (success) {
- size_t offset = AtomicAccess::fetch_then_add(&_num_regions_evac_failed, 1u);
+ size_t offset = _num_regions_evac_failed.fetch_then_add(1u);
_evac_failed_regions[offset] = region_idx;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
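
record() combines a racy bitmap set with fetch_then_add so that each failed
region index is appended to the compact array exactly once, whichever worker
wins the race. A self-contained sketch of that claim pattern (illustrative
types throughout):
```
#include <atomic>
#include <cstdint>
#include <vector>

class FailedRegionsSketch {
  std::vector<std::atomic<uint64_t>> _bits;  // one bit per region
  std::vector<uint32_t> _indices;            // compact list of failed regions
  std::atomic<uint32_t> _count{0};
public:
  explicit FailedRegionsSketch(uint32_t max_regions)
    : _bits((max_regions + 63) / 64), _indices(max_regions) {}

  // Returns true for exactly one caller per region.
  bool record(uint32_t region) {
    std::atomic<uint64_t>& word = _bits[region / 64];
    uint64_t mask = uint64_t(1) << (region % 64);
    uint64_t old = word.fetch_or(mask, std::memory_order_relaxed);
    if ((old & mask) != 0) return false;     // another worker won the race
    uint32_t slot = _count.fetch_add(1, std::memory_order_relaxed);
    _indices[slot] = region;                 // exactly-once append
    return true;
  }
};
```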
diff --git a/src/hotspot/share/gc/g1/g1EvacStats.cpp b/src/hotspot/share/gc/g1/g1EvacStats.cpp
index 049175a4ecc..1d54b184e64 100644
--- a/src/hotspot/share/gc/g1/g1EvacStats.cpp
+++ b/src/hotspot/share/gc/g1/g1EvacStats.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,13 +22,24 @@
*
*/
-#include "gc/g1/g1EvacStats.hpp"
+#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/gcId.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/globals.hpp"
+void G1EvacStats::reset() {
+ PLABStats::reset();
+ _region_end_waste.store_relaxed(0);
+ _regions_filled.store_relaxed(0);
+ _num_plab_filled.store_relaxed(0);
+ _direct_allocated.store_relaxed(0);
+ _num_direct_allocated.store_relaxed(0);
+ _failure_used.store_relaxed(0);
+ _failure_waste.store_relaxed(0);
+}
+
void G1EvacStats::log_plab_allocation() {
log_debug(gc, plab)("%s PLAB allocation: "
"allocated: %zuB, "
@@ -51,13 +62,13 @@ void G1EvacStats::log_plab_allocation() {
"failure used: %zuB, "
"failure wasted: %zuB",
_description,
- _region_end_waste * HeapWordSize,
- _regions_filled,
- _num_plab_filled,
- _direct_allocated * HeapWordSize,
- _num_direct_allocated,
- _failure_used * HeapWordSize,
- _failure_waste * HeapWordSize);
+ region_end_waste() * HeapWordSize,
+ regions_filled(),
+ num_plab_filled(),
+ direct_allocated() * HeapWordSize,
+ num_direct_allocated(),
+ failure_used() * HeapWordSize,
+ failure_waste() * HeapWordSize);
}
void G1EvacStats::log_sizing(size_t calculated_words, size_t net_desired_words) {
@@ -109,7 +120,7 @@ size_t G1EvacStats::compute_desired_plab_size() const {
// threads do not allocate anything but a few rather large objects. In this
// degenerate case the PLAB size would simply quickly tend to minimum PLAB size,
// which is an okay reaction.
- size_t const used_for_waste_calculation = used() > _region_end_waste ? used() - _region_end_waste : 0;
+ size_t const used_for_waste_calculation = used() > region_end_waste() ? used() - region_end_waste() : 0;
size_t const total_waste_allowed = used_for_waste_calculation * TargetPLABWastePct;
return (size_t)((double)total_waste_allowed / (100 - G1LastPLABAverageOccupancy));
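
A worked example of this sizing calculation with made-up inputs (every number
and constant below is hypothetical, chosen only to show the arithmetic):
```
#include <cstddef>
#include <cstdio>

int main() {
  size_t used_words = 100000;       // words allocated into PLABs
  size_t region_end_waste = 4000;   // words lost at region ends
  double target_waste_pct = 10.0;   // stand-in for TargetPLABWastePct
  double avg_occupancy = 50.0;      // stand-in for G1LastPLABAverageOccupancy

  size_t used_for_waste = used_words > region_end_waste
                        ? used_words - region_end_waste : 0;         // 96000
  double total_waste_allowed = used_for_waste * target_waste_pct;    // 960000
  size_t desired = (size_t)(total_waste_allowed / (100 - avg_occupancy));
  std::printf("desired PLAB size: %zu words\n", desired);            // 19200
  return 0;
}
```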
diff --git a/src/hotspot/share/gc/g1/g1EvacStats.hpp b/src/hotspot/share/gc/g1/g1EvacStats.hpp
index e6eb80442d6..b250d4580b5 100644
--- a/src/hotspot/share/gc/g1/g1EvacStats.hpp
+++ b/src/hotspot/share/gc/g1/g1EvacStats.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "gc/shared/gcUtil.hpp"
#include "gc/shared/plab.hpp"
+#include "runtime/atomic.hpp"
// Records various memory allocation statistics gathered during evacuation. All sizes
// are in HeapWords.
@@ -36,30 +37,21 @@ class G1EvacStats : public PLABStats {
AdaptiveWeightedAverage
_net_plab_size_filter; // Integrator with decay
- size_t _region_end_waste; // Number of words wasted due to skipping to the next region.
- uint _regions_filled; // Number of regions filled completely.
- size_t _num_plab_filled; // Number of PLABs filled and retired.
- size_t _direct_allocated; // Number of words allocated directly into the regions.
- size_t _num_direct_allocated; // Number of direct allocation attempts.
+ Atomic<size_t> _region_end_waste; // Number of words wasted due to skipping to the next region.
+ Atomic<uint> _regions_filled; // Number of regions filled completely.
+ Atomic<size_t> _num_plab_filled; // Number of PLABs filled and retired.
+ Atomic<size_t> _direct_allocated; // Number of words allocated directly into the regions.
+ Atomic<size_t> _num_direct_allocated; // Number of direct allocation attempts.
// Number of words in live objects remaining in regions that ultimately suffered an
// evacuation failure. This is used in the regions when the regions are made old regions.
- size_t _failure_used;
+ Atomic<size_t> _failure_used;
// Number of words wasted in regions which failed evacuation. This is the sum of space
// for objects successfully copied out of the regions (now dead space) plus waste at the
// end of regions.
- size_t _failure_waste;
+ Atomic<size_t> _failure_waste;
- virtual void reset() {
- PLABStats::reset();
- _region_end_waste = 0;
- _regions_filled = 0;
- _num_plab_filled = 0;
- _direct_allocated = 0;
- _num_direct_allocated = 0;
- _failure_used = 0;
- _failure_waste = 0;
- }
+ virtual void reset();
void log_plab_allocation();
void log_sizing(size_t calculated_words, size_t net_desired_words);
@@ -77,16 +69,16 @@ public:
// Should be called at the end of a GC pause.
void adjust_desired_plab_size();
- uint regions_filled() const { return _regions_filled; }
- size_t num_plab_filled() const { return _num_plab_filled; }
- size_t region_end_waste() const { return _region_end_waste; }
- size_t direct_allocated() const { return _direct_allocated; }
- size_t num_direct_allocated() const { return _num_direct_allocated; }
+ uint regions_filled() const;
+ size_t num_plab_filled() const;
+ size_t region_end_waste() const;
+ size_t direct_allocated() const;
+ size_t num_direct_allocated() const;
// Amount of space in heapwords used in the failing regions when an evacuation failure happens.
- size_t failure_used() const { return _failure_used; }
+ size_t failure_used() const;
// Amount of space in heapwords wasted (unused) in the failing regions when an evacuation failure happens.
- size_t failure_waste() const { return _failure_waste; }
+ size_t failure_waste() const;
inline void add_num_plab_filled(size_t value);
inline void add_direct_allocated(size_t value);
diff --git a/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp b/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp
index c90598a30cb..2bd3b37719a 100644
--- a/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,28 +27,54 @@
#include "gc/g1/g1EvacStats.hpp"
-#include "runtime/atomicAccess.hpp"
+inline uint G1EvacStats::regions_filled() const {
+ return _regions_filled.load_relaxed();
+}
+
+inline size_t G1EvacStats::num_plab_filled() const {
+ return _num_plab_filled.load_relaxed();
+}
+
+inline size_t G1EvacStats::region_end_waste() const {
+ return _region_end_waste.load_relaxed();
+}
+
+inline size_t G1EvacStats::direct_allocated() const {
+ return _direct_allocated.load_relaxed();
+}
+
+inline size_t G1EvacStats::num_direct_allocated() const {
+ return _num_direct_allocated.load_relaxed();
+}
+
+inline size_t G1EvacStats::failure_used() const {
+ return _failure_used.load_relaxed();
+}
+
+inline size_t G1EvacStats::failure_waste() const {
+ return _failure_waste.load_relaxed();
+}
inline void G1EvacStats::add_direct_allocated(size_t value) {
- AtomicAccess::add(&_direct_allocated, value, memory_order_relaxed);
+ _direct_allocated.add_then_fetch(value, memory_order_relaxed);
}
inline void G1EvacStats::add_num_plab_filled(size_t value) {
- AtomicAccess::add(&_num_plab_filled, value, memory_order_relaxed);
+ _num_plab_filled.add_then_fetch(value, memory_order_relaxed);
}
inline void G1EvacStats::add_num_direct_allocated(size_t value) {
- AtomicAccess::add(&_num_direct_allocated, value, memory_order_relaxed);
+ _num_direct_allocated.add_then_fetch(value, memory_order_relaxed);
}
inline void G1EvacStats::add_region_end_waste(size_t value) {
- AtomicAccess::add(&_region_end_waste, value, memory_order_relaxed);
- AtomicAccess::inc(&_regions_filled, memory_order_relaxed);
+ _region_end_waste.add_then_fetch(value, memory_order_relaxed);
+ _regions_filled.add_then_fetch(1u, memory_order_relaxed);
}
inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) {
- AtomicAccess::add(&_failure_used, used, memory_order_relaxed);
- AtomicAccess::add(&_failure_waste, waste, memory_order_relaxed);
+ _failure_used.add_then_fetch(used, memory_order_relaxed);
+ _failure_waste.add_then_fetch(waste, memory_order_relaxed);
}
#endif // SHARE_GC_G1_G1EVACSTATS_INLINE_HPP
diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp
index 5ca5dcef001..6c8cc7028cc 100644
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -116,8 +116,8 @@ G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
_num_workers(calc_active_workers()),
_has_compaction_targets(false),
_has_humongous(false),
- _oop_queue_set(_num_workers),
- _array_queue_set(_num_workers),
+ _marking_task_queues(_num_workers),
+ _partial_array_state_manager(nullptr),
_preserved_marks_set(true),
_serial_compaction_point(this, nullptr),
_humongous_compaction_point(this, nullptr),
@@ -134,32 +134,40 @@ G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
_compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);
_live_stats = NEW_C_HEAP_ARRAY(G1RegionMarkStats, _heap->max_num_regions(), mtGC);
- _compaction_tops = NEW_C_HEAP_ARRAY(HeapWord*, _heap->max_num_regions(), mtGC);
+ _compaction_tops = NEW_C_HEAP_ARRAY(Atomic