diff --git a/doc/testing.html b/doc/testing.html
index 31f4fbd1778..195153c8612 100644
--- a/doc/testing.html
+++ b/doc/testing.html
@@ -72,6 +72,7 @@ id="toc-notes-for-specific-tests">Notes for Specific Tests
Non-US
locale
PKCS11 Tests
+SCTP Tests
Testing Ahead-of-time
Optimizations
@@ -621,6 +622,21 @@ element of the appropriate @Artifact class. (See
JTREG="JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs"
For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.
+SCTP Tests
+The SCTP tests require the SCTP runtime library, which is often not
+installed by default in popular Linux distributions. Without this
+library, the SCTP tests will be skipped. To enable them, install the
+SCTP library before running the tests.
+For distributions using the .deb packaging format and the apt tool
+(such as Debian and Ubuntu), try this:
+sudo apt install libsctp1
+sudo modprobe sctp
+lsmod | grep sctp
+For distributions using the .rpm packaging format and the dnf tool
+(such as Fedora and Red Hat), try this:
+sudo dnf install -y lksctp-tools
+sudo modprobe sctp
+lsmod | grep sctp
Testing Ahead-of-time
Optimizations
One way to improve test coverage of ahead-of-time (AOT) optimizations
diff --git a/doc/testing.md b/doc/testing.md
index b95f59de9fd..d0e54aab02b 100644
--- a/doc/testing.md
+++ b/doc/testing.md
@@ -640,6 +640,32 @@ $ make test TEST="jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java" \
For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.
+
+### SCTP Tests
+
+The SCTP tests require the SCTP runtime library, which is often not installed
+by default in popular Linux distributions. Without this library, the SCTP tests
+will be skipped. To enable them, install the SCTP library before running the
+tests.
+
+For distributions using the .deb packaging format and the apt tool
+(such as Debian and Ubuntu), try this:
+
+```
+sudo apt install libsctp1
+sudo modprobe sctp
+lsmod | grep sctp
+```
+
+For distributions using the .rpm packaging format and the dnf tool
+(such as Fedora and Red Hat), try this:
+
+```
+sudo dnf install -y lksctp-tools
+sudo modprobe sctp
+lsmod | grep sctp
+```
+
### Testing Ahead-of-time Optimizations
One way to improve test coverage of ahead-of-time (AOT) optimizations in
diff --git a/make/autoconf/flags-cflags.m4 b/make/autoconf/flags-cflags.m4
index 5a9fdc57c74..639c3852212 100644
--- a/make/autoconf/flags-cflags.m4
+++ b/make/autoconf/flags-cflags.m4
@@ -69,22 +69,18 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
# Debug prefix mapping if supported by compiler
DEBUG_PREFIX_CFLAGS=
- UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: string,
- DEFAULT: "",
- RESULT: DEBUG_SYMBOLS_LEVEL,
+ UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: literal,
+ DEFAULT: [auto], VALID_VALUES: [auto 1 2 3],
+ CHECK_AVAILABLE: [
+ if test x$TOOLCHAIN_TYPE = xmicrosoft; then
+ AVAILABLE=false
+ fi
+ ],
DESC: [set the native debug symbol level (GCC and Clang only)],
- DEFAULT_DESC: [toolchain default])
- AC_SUBST(DEBUG_SYMBOLS_LEVEL)
-
- if test "x${TOOLCHAIN_TYPE}" = xgcc || \
- test "x${TOOLCHAIN_TYPE}" = xclang; then
- DEBUG_SYMBOLS_LEVEL_FLAGS="-g"
- if test "x${DEBUG_SYMBOLS_LEVEL}" != "x"; then
- DEBUG_SYMBOLS_LEVEL_FLAGS="-g${DEBUG_SYMBOLS_LEVEL}"
- FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${DEBUG_SYMBOLS_LEVEL_FLAGS}],
- IF_FALSE: AC_MSG_ERROR("Debug info level ${DEBUG_SYMBOLS_LEVEL} is not supported"))
- fi
- fi
+ DEFAULT_DESC: [toolchain default],
+ IF_AUTO: [
+ RESULT=""
+ ])
# Debug symbols
if test "x$TOOLCHAIN_TYPE" = xgcc; then
@@ -111,8 +107,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
fi
# Debug info level should follow the debug format to be effective.
- CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
- ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
+ CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
+ ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$ALLOW_ABSOLUTE_PATHS_IN_OUTPUT" = "xfalse"; then
# Check if compiler supports -fdebug-prefix-map. If so, use that to make
@@ -132,8 +128,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
IF_FALSE: [GDWARF_FLAGS=""])
# Debug info level should follow the debug format to be effective.
- CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
- ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
+ CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
+ ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CFLAGS_DEBUG_SYMBOLS="-Z7"
fi
diff --git a/make/hotspot/lib/CompileGtest.gmk b/make/hotspot/lib/CompileGtest.gmk
index 60912992134..327014b1e9d 100644
--- a/make/hotspot/lib/CompileGtest.gmk
+++ b/make/hotspot/lib/CompileGtest.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,8 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBGTEST, \
INCLUDE_FILES := gtest-all.cc gmock-all.cc, \
DISABLED_WARNINGS_gcc := format-nonliteral maybe-uninitialized undef \
unused-result zero-as-null-pointer-constant, \
- DISABLED_WARNINGS_clang := format-nonliteral undef unused-result, \
+ DISABLED_WARNINGS_clang := format-nonliteral undef unused-result \
+ zero-as-null-pointer-constant, \
DISABLED_WARNINGS_microsoft := 4530, \
DEFAULT_CFLAGS := false, \
CFLAGS := $(JVM_CFLAGS) \
diff --git a/make/modules/jdk.security.auth/Lib.gmk b/make/modules/jdk.security.auth/Lib.gmk
index 9ead32dbe12..96d609f08d6 100644
--- a/make/modules/jdk.security.auth/Lib.gmk
+++ b/make/modules/jdk.security.auth/Lib.gmk
@@ -31,13 +31,14 @@ include LibCommon.gmk
## Build libjaas
################################################################################
-$(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
- NAME := jaas, \
- OPTIMIZATION := LOW, \
- EXTRA_HEADER_DIRS := java.base:libjava, \
- LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
-))
-
-TARGETS += $(BUILD_LIBJAAS)
+ifeq ($(call isTargetOs, windows), true)
+ $(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
+ NAME := jaas, \
+ OPTIMIZATION := LOW, \
+ EXTRA_HEADER_DIRS := java.base:libjava, \
+ LIBS_windows := advapi32.lib mpr.lib netapi32.lib user32.lib, \
+ ))
+ TARGETS += $(BUILD_LIBJAAS)
+endif
################################################################################
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index b8a9afc123f..27428a5c558 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -5782,6 +5782,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
// return false;
bind(A_IS_NOT_NULL);
ldrw(cnt1, Address(a1, length_offset));
+ ldrw(tmp5, Address(a2, length_offset));
+ cmp(cnt1, tmp5);
+ br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
lea(a1, Address(a1, start_offset));
@@ -5848,6 +5851,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
cbz(a1, DONE);
ldrw(cnt1, Address(a1, length_offset));
cbz(a2, DONE);
+ ldrw(tmp5, Address(a2, length_offset));
+ cmp(cnt1, tmp5);
+ br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
index 89ae6bc10e0..73b631029a0 100644
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -722,22 +722,20 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
- __ andsw(zr, rscratch1, JVM_ACC_STATIC);
- __ br(Assembler::EQ, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
+ __ andsw(zr, rscratch1, JVM_ACC_STATIC);
+ __ br(Assembler::EQ, L_skip_barrier); // non-static
- __ load_method_holder(rscratch2, rmethod);
- __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
- __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+ __ load_method_holder(rscratch2, rmethod);
+ __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
+ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@@ -1508,7 +1506,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// SVC, HVC, or SMC. Make it a NOP.
__ nop();
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
__ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index cde142b39ac..07b469650f0 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2290,7 +2290,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ subs(zr, temp, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
__ br(Assembler::NE, L_clinit_barrier_slow);
__ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
@@ -2340,8 +2341,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ subs(zr, temp, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ br(Assembler::NE, L_clinit_barrier_slow);
diff --git a/src/hotspot/cpu/arm/frame_arm.cpp b/src/hotspot/cpu/arm/frame_arm.cpp
index 7a23296a3d4..f791fae7bd7 100644
--- a/src/hotspot/cpu/arm/frame_arm.cpp
+++ b/src/hotspot/cpu/arm/frame_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -356,10 +356,10 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
assert(is_interpreted_frame(), "Not an interpreted frame");
// These are reasonable sanity checks
- if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
+ if (fp() == nullptr || (intptr_t(fp()) & (wordSize-1)) != 0) {
return false;
}
- if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
+ if (sp() == nullptr || (intptr_t(sp()) & (wordSize-1)) != 0) {
return false;
}
if (fp() + interpreter_frame_initial_sp_offset < sp()) {
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
index 232294b246a..df780ac31a6 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -172,7 +172,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {
address addr = oop_addr != nullptr ? (address)oop_addr : (address)metadata_addr;
- if(pc == 0) {
+ if (pc == nullptr) {
offset = addr - instruction_address() - 8;
} else {
offset = addr - pc - 8;
@@ -228,7 +228,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {
void NativeMovConstReg::set_pc_relative_offset(address addr, address pc) {
int offset;
- if (pc == 0) {
+ if (pc == nullptr) {
offset = addr - instruction_address() - 8;
} else {
offset = addr - pc - 8;
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
index 82385bf0244..2b52db89285 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -371,7 +371,7 @@ class NativeMovConstReg: public NativeInstruction {
public:
intptr_t data() const;
- void set_data(intptr_t x, address pc = 0);
+ void set_data(intptr_t x, address pc = nullptr);
bool is_pc_relative() {
return !is_movw();
}
diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
index fc865be015e..f7bf457f72c 100644
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
@@ -1109,11 +1109,11 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
lhz(R11_scratch1, in_bytes(DataLayout::bci_offset()), R28_mdx);
ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
- add(R11_scratch1, R12_scratch2, R12_scratch2);
+ add(R11_scratch1, R11_scratch1, R12_scratch2);
cmpd(CR0, R11_scratch1, R14_bcp);
beq(CR0, verify_continue);
- call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);
+ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), R19_method, R14_bcp, R28_mdx);
bind(verify_continue);
#endif
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index 4e427ace404..4eb2028f529 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1237,26 +1237,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
- __ andi_(R0, R0, JVM_ACC_STATIC);
- __ beq(CR0, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
+ __ andi_(R0, R0, JVM_ACC_STATIC);
+ __ beq(CR0, L_skip_barrier); // non-static
- Register klass = R11_scratch1;
- __ load_method_holder(klass, R19_method);
- __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
+ Register klass = R11_scratch1;
+ __ load_method_holder(klass, R19_method);
+ __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
- __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
- __ mtctr(klass);
- __ bctr();
+ __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
+ __ mtctr(klass);
+ __ bctr();
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);
@@ -2210,7 +2208,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// --------------------------------------------------------------------------
vep_start_pc = (intptr_t)__ pc();
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = r_temp_1;
// Notify OOP recorder (don't need the relocation)
diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
index 8d61ba1b2d7..8a3af748fa1 100644
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2199,7 +2199,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no, Register Rca
__ isync(); // Order load wrt. succeeding loads.
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = Rscratch;
const Register klass = Rscratch;
@@ -2244,8 +2245,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no, Register Rcac
__ isync(); // Order load wrt. succeeding loads.
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = R4_ARG2;
// InterpreterRuntime::resolve_get_put sets field_holder and finally release-stores put_code.
diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
index eeb6fad1b59..8c343f6ab2b 100644
--- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -213,7 +213,7 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
// Is vector's size (in bytes) bigger than a size saved by default?
// riscv does not ovlerlay the floating-point registers on vector registers like aarch64.
bool SharedRuntime::is_wide_vector(int size) {
- return UseRVV;
+ return UseRVV && size > 0;
}
// ---------------------------------------------------------------------------
@@ -637,22 +637,20 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
- __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
- __ beqz(t1, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
+ __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
+ __ beqz(t1, L_skip_barrier); // non-static
- __ load_method_holder(t1, xmethod);
- __ clinit_barrier(t1, t0, &L_skip_barrier);
- __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+ __ load_method_holder(t1, xmethod);
+ __ clinit_barrier(t1, t0, &L_skip_barrier);
+ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@@ -1443,7 +1441,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ nop(); // 4 bytes
}
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
__ mov_metadata(t1, method->method_holder()); // InstanceKlass*
__ clinit_barrier(t1, t0, &L_skip_barrier);
diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
index ca41583e4bc..5a3644f70bb 100644
--- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -2192,7 +2192,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ mv(t0, (int) code);
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
__ bne(temp, t0, L_clinit_barrier_slow); // have we resolved this bytecode?
__ ld(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(temp, temp);
@@ -2243,8 +2244,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ mv(t0, (int) code); // have we resolved this bytecode?
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ bne(temp, t0, L_clinit_barrier_slow);
diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
index 5b6f7dcd984..00a830a80cd 100644
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1567,7 +1567,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
//---------------------------------------------------------------------
wrapper_VEPStart = __ offset();
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = Z_R1_scratch;
// Notify OOP recorder (don't need the relocation)
@@ -2378,24 +2379,22 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
- { // Bypass the barrier for non-static methods
- __ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
- __ z_bfalse(L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ __ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
+ __ z_bfalse(L_skip_barrier); // non-static
- Register klass = Z_R11;
- __ load_method_holder(klass, Z_method);
- __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
+ Register klass = Z_R11;
+ __ load_method_holder(klass, Z_method);
+ __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
- __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
- __ z_br(klass);
+ __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
+ __ z_br(klass);
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
return;
diff --git a/src/hotspot/cpu/s390/templateTable_s390.cpp b/src/hotspot/cpu/s390/templateTable_s390.cpp
index 4e8fdf275e4..647915ef4fa 100644
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2377,7 +2377,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ z_cli(Address(Rcache, bc_offset), code);
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = Z_R1_scratch;
const Register klass = Z_R1_scratch;
__ z_brne(L_clinit_barrier_slow);
@@ -2427,8 +2428,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ z_cli(Address(cache, code_offset), code);
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = index;
__ z_brne(L_clinit_barrier_slow);
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
index 5a4a5b1809e..bbd43c1a0e8 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1043,26 +1043,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
// Class initialization barrier for static methods
entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
- if (VM_Version::supports_fast_class_init_checks()) {
- Label L_skip_barrier;
- Register method = rbx;
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ Label L_skip_barrier;
+ Register method = rbx;
- { // Bypass the barrier for non-static methods
- Register flags = rscratch1;
- __ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
- __ testl(flags, JVM_ACC_STATIC);
- __ jcc(Assembler::zero, L_skip_barrier); // non-static
- }
+ // Bypass the barrier for non-static methods
+ Register flags = rscratch1;
+ __ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
+ __ testl(flags, JVM_ACC_STATIC);
+ __ jcc(Assembler::zero, L_skip_barrier); // non-static
- Register klass = rscratch1;
- __ load_method_holder(klass, method);
- __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
+ Register klass = rscratch1;
+ __ load_method_holder(klass, method);
+ __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
- __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
+ __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
- __ bind(L_skip_barrier);
- entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
- }
+ __ bind(L_skip_barrier);
+ entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm);
@@ -1904,7 +1902,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
int vep_offset = ((intptr_t)__ pc()) - start;
- if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ if (method->needs_clinit_barrier()) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
Label L_skip_barrier;
Register klass = r10;
__ mov_metadata(klass, method->method_holder()); // InstanceKlass*
@@ -3602,4 +3601,3 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
}
#endif // INCLUDE_JFR
-
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
index 3e5593322d5..7d5dee6a5df 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,39 @@ static address kyberAvx512ConstsAddr(int offset) {
const Register scratch = r10;
+ATTRIBUTE_ALIGNED(64) static const uint8_t kyberAvx512_12To16Dup[] = {
+// 0 - 63
+ 0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15, 16,
+ 16, 17, 18, 19, 19, 20, 21, 22, 22, 23, 24, 25, 25, 26, 27, 28, 28, 29, 30,
+ 31, 31, 32, 33, 34, 34, 35, 36, 37, 37, 38, 39, 40, 40, 41, 42, 43, 43, 44,
+ 45, 46, 46, 47
+ };
+
+static address kyberAvx512_12To16DupAddr() {
+ return (address) kyberAvx512_12To16Dup;
+}
+
+ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512_12To16Shift[] = {
+// 0 - 31
+ 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0,
+ 4, 0, 4, 0, 4, 0, 4
+ };
+
+static address kyberAvx512_12To16ShiftAddr() {
+ return (address) kyberAvx512_12To16Shift;
+}
+
+ATTRIBUTE_ALIGNED(64) static const uint64_t kyberAvx512_12To16And[] = {
+// 0 - 7
+ 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
+ 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
+ 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF
+ };
+
+static address kyberAvx512_12To16AndAddr() {
+ return (address) kyberAvx512_12To16And;
+}
+
ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512NttPerms[] = {
// 0
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
@@ -822,10 +855,65 @@ address generate_kyber12To16_avx512(StubGenerator *stubgen,
const Register perms = r11;
- Label Loop;
+ Label Loop, VBMILoop;
__ addptr(condensed, condensedOffs);
+ if (VM_Version::supports_avx512_vbmi()) {
+ // mask load for the first 48 bytes of each vector
+ __ mov64(rax, 0x0000FFFFFFFFFFFF);
+ __ kmovql(k1, rax);
+
+ __ lea(perms, ExternalAddress(kyberAvx512_12To16DupAddr()));
+ __ evmovdqub(xmm20, Address(perms), Assembler::AVX_512bit);
+
+ __ lea(perms, ExternalAddress(kyberAvx512_12To16ShiftAddr()));
+ __ evmovdquw(xmm21, Address(perms), Assembler::AVX_512bit);
+
+ __ lea(perms, ExternalAddress(kyberAvx512_12To16AndAddr()));
+ __ evmovdquq(xmm22, Address(perms), Assembler::AVX_512bit);
+
+ __ align(OptoLoopAlignment);
+ __ BIND(VBMILoop);
+
+ __ evmovdqub(xmm0, k1, Address(condensed, 0), false,
+ Assembler::AVX_512bit);
+ __ evmovdqub(xmm1, k1, Address(condensed, 48), false,
+ Assembler::AVX_512bit);
+ __ evmovdqub(xmm2, k1, Address(condensed, 96), false,
+ Assembler::AVX_512bit);
+ __ evmovdqub(xmm3, k1, Address(condensed, 144), false,
+ Assembler::AVX_512bit);
+
+ __ evpermb(xmm4, k0, xmm20, xmm0, false, Assembler::AVX_512bit);
+ __ evpermb(xmm5, k0, xmm20, xmm1, false, Assembler::AVX_512bit);
+ __ evpermb(xmm6, k0, xmm20, xmm2, false, Assembler::AVX_512bit);
+ __ evpermb(xmm7, k0, xmm20, xmm3, false, Assembler::AVX_512bit);
+
+ __ evpsrlvw(xmm4, xmm4, xmm21, Assembler::AVX_512bit);
+ __ evpsrlvw(xmm5, xmm5, xmm21, Assembler::AVX_512bit);
+ __ evpsrlvw(xmm6, xmm6, xmm21, Assembler::AVX_512bit);
+ __ evpsrlvw(xmm7, xmm7, xmm21, Assembler::AVX_512bit);
+
+ __ evpandq(xmm0, xmm22, xmm4, Assembler::AVX_512bit);
+ __ evpandq(xmm1, xmm22, xmm5, Assembler::AVX_512bit);
+ __ evpandq(xmm2, xmm22, xmm6, Assembler::AVX_512bit);
+ __ evpandq(xmm3, xmm22, xmm7, Assembler::AVX_512bit);
+
+ store4regs(parsed, 0, xmm0_3, _masm);
+
+ __ addptr(condensed, 192);
+ __ addptr(parsed, 256);
+ __ subl(parsedLength, 128);
+ __ jcc(Assembler::greater, VBMILoop);
+
+ __ leave(); // required for proper stackwalking of RuntimeStub frame
+ __ mov64(rax, 0); // return 0
+ __ ret(0);
+
+ return start;
+ }
+
__ lea(perms, ExternalAddress(kyberAvx512_12To16PermsAddr()));
load4regs(xmm24_27, perms, 0, _masm);
diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp
index 42392b84833..db7749ec482 100644
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2216,7 +2216,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
__ cmpl(temp, code); // have we resolved this bytecode?
// Class initialization barrier for static methods
- if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ if (bytecode() == Bytecodes::_invokestatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register method = temp;
const Register klass = temp;
@@ -2264,8 +2265,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
__ cmpl(temp, code); // have we resolved this bytecode?
// Class initialization barrier for static fields
- if (VM_Version::supports_fast_class_init_checks() &&
- (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+ if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
const Register field_holder = temp;
__ jcc(Assembler::notEqual, L_clinit_barrier_slow);
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index f88729cdc66..d7c1911a914 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -258,10 +258,18 @@ bool os::free_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+ return Aix::available_memory(value);
+}
+
bool os::available_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+ return Aix::available_memory(value);
+}
+
bool os::Aix::available_memory(physical_memory_size_type& value) {
os::Aix::meminfo_t mi;
if (os::Aix::get_meminfo(&mi)) {
@@ -273,6 +281,10 @@ bool os::Aix::available_memory(physical_memory_size_type& value) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
perfstat_memory_total_t memory_info;
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
return false;
@@ -282,6 +294,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
+ return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
perfstat_memory_total_t memory_info;
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
return false;
@@ -294,6 +310,10 @@ physical_memory_size_type os::physical_memory() {
return Aix::physical_memory();
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return Aix::physical_memory();
+}
+
size_t os::rss() { return (size_t)0; }
// Cpu architecture string
@@ -2264,6 +2284,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
return online_cpus;
diff --git a/src/hotspot/os/bsd/memMapPrinter_macosx.cpp b/src/hotspot/os/bsd/memMapPrinter_macosx.cpp
index 6fd08d63e85..30e258c9d2c 100644
--- a/src/hotspot/os/bsd/memMapPrinter_macosx.cpp
+++ b/src/hotspot/os/bsd/memMapPrinter_macosx.cpp
@@ -132,7 +132,7 @@ public:
static const char* tagToStr(uint32_t user_tag) {
switch (user_tag) {
case 0:
- return 0;
+ return nullptr;
X1(MALLOC, malloc);
X1(MALLOC_SMALL, malloc_small);
X1(MALLOC_LARGE, malloc_large);
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index 61de48bb7fa..0e21c2d1785 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -137,10 +137,18 @@ bool os::available_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+ return Bsd::available_memory(value);
+}
+
bool os::free_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+ return Bsd::available_memory(value);
+}
+
// Available here means free. Note that this number is of no much use. As an estimate
// for future memory pressure it is far too conservative, since MacOS will use a lot
// of unused memory for caches, and return it willingly in case of needs.
@@ -181,6 +189,10 @@ void os::Bsd::print_uptime_info(outputStream* st) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
#if defined(__APPLE__)
struct xsw_usage vmusage;
size_t size = sizeof(vmusage);
@@ -195,6 +207,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
+ return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
#if defined(__APPLE__)
struct xsw_usage vmusage;
size_t size = sizeof(vmusage);
@@ -212,6 +228,10 @@ physical_memory_size_type os::physical_memory() {
return Bsd::physical_memory();
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return Bsd::physical_memory();
+}
+
size_t os::rss() {
size_t rss = 0;
#ifdef __APPLE__
@@ -608,7 +628,7 @@ static void *thread_native_entry(Thread *thread) {
log_info(os, thread)("Thread finished (tid: %zu, pthread id: %zu).",
os::current_thread_id(), (uintx) pthread_self());
- return 0;
+ return nullptr;
}
bool os::create_thread(Thread* thread, ThreadType thr_type,
@@ -1400,7 +1420,7 @@ int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *pa
#elif defined(__APPLE__)
for (uint32_t i = 1; i < _dyld_image_count(); i++) {
// Value for top_address is returned as 0 since we don't have any information about module size
- if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), (address)0, param)) {
+ if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), nullptr, param)) {
return 1;
}
}
@@ -2189,6 +2209,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
return _processor_count;
}
diff --git a/src/hotspot/os/linux/cgroupSubsystem_linux.cpp b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
index f9c6f794ebd..e49d070890e 100644
--- a/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -631,22 +631,20 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
* return:
* true if there were no errors. false otherwise.
*/
-bool CgroupSubsystem::active_processor_count(int& value) {
- int cpu_count;
- int result = -1;
-
+bool CgroupSubsystem::active_processor_count(double& value) {
// We use a cache with a timeout to avoid performing expensive
// computations in the event this function is called frequently.
// [See 8227006].
-  CachingCgroupController<CgroupCpuController>* contrl = cpu_controller();
-  CachedMetric* cpu_limit = contrl->metrics_cache();
+  CachingCgroupController<CgroupCpuController, double>* contrl = cpu_controller();
+  CachedMetric<double>* cpu_limit = contrl->metrics_cache();
if (!cpu_limit->should_check_metric()) {
- value = (int)cpu_limit->value();
- log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", value);
+ value = cpu_limit->value();
+ log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %.2f", value);
return true;
}
- cpu_count = os::Linux::active_processor_count();
+ int cpu_count = os::Linux::active_processor_count();
+ double result = -1;
if (!CgroupUtil::processor_count(contrl->controller(), cpu_count, result)) {
return false;
}
@@ -671,8 +669,8 @@ bool CgroupSubsystem::active_processor_count(int& value) {
*/
bool CgroupSubsystem::memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& value) {
-  CachingCgroupController<CgroupMemoryController>* contrl = memory_controller();
-  CachedMetric* memory_limit = contrl->metrics_cache();
+  CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* contrl = memory_controller();
+  CachedMetric<physical_memory_size_type>* memory_limit = contrl->metrics_cache();
if (!memory_limit->should_check_metric()) {
value = memory_limit->value();
return true;
diff --git a/src/hotspot/os/linux/cgroupSubsystem_linux.hpp b/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
index 522b64f8816..d083a9985c2 100644
--- a/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -181,20 +181,21 @@ class CgroupController: public CHeapObj {
static bool limit_from_str(char* limit_str, physical_memory_size_type& value);
};
+template <typename MetricType>
class CachedMetric : public CHeapObj<mtInternal>{
private:
- volatile physical_memory_size_type _metric;
+ volatile MetricType _metric;
volatile jlong _next_check_counter;
public:
CachedMetric() {
- _metric = value_unlimited;
+    _metric = static_cast<MetricType>(value_unlimited);
_next_check_counter = min_jlong;
}
bool should_check_metric() {
return os::elapsed_counter() > _next_check_counter;
}
- physical_memory_size_type value() { return _metric; }
- void set_value(physical_memory_size_type value, jlong timeout) {
+ MetricType value() { return _metric; }
+ void set_value(MetricType value, jlong timeout) {
_metric = value;
// Metric is unlikely to change, but we want to remain
// responsive to configuration changes. A very short grace time
@@ -205,19 +206,19 @@ class CachedMetric : public CHeapObj{
}
};
-template <typename T>
+template <typename T, typename MetricType>
class CachingCgroupController : public CHeapObj<mtInternal> {
private:
T* _controller;
- CachedMetric* _metrics_cache;
+  CachedMetric<MetricType>* _metrics_cache;
public:
CachingCgroupController(T* cont) {
_controller = cont;
- _metrics_cache = new CachedMetric();
+    _metrics_cache = new CachedMetric<MetricType>();
}
- CachedMetric* metrics_cache() { return _metrics_cache; }
+  CachedMetric<MetricType>* metrics_cache() { return _metrics_cache; }
T* controller() { return _controller; }
};
@@ -277,7 +278,7 @@ class CgroupMemoryController: public CHeapObj {
class CgroupSubsystem: public CHeapObj<mtInternal> {
public:
bool memory_limit_in_bytes(physical_memory_size_type upper_bound, physical_memory_size_type& value);
- bool active_processor_count(int& value);
+ bool active_processor_count(double& value);
virtual bool pids_max(uint64_t& value) = 0;
virtual bool pids_current(uint64_t& value) = 0;
@@ -286,8 +287,8 @@ class CgroupSubsystem: public CHeapObj {
virtual char * cpu_cpuset_cpus() = 0;
virtual char * cpu_cpuset_memory_nodes() = 0;
virtual const char * container_type() = 0;
-  virtual CachingCgroupController<CgroupMemoryController>* memory_controller() = 0;
-  virtual CachingCgroupController<CgroupCpuController>* cpu_controller() = 0;
+  virtual CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() = 0;
+  virtual CachingCgroupController<CgroupCpuController, double>* cpu_controller() = 0;
virtual CgroupCpuacctController* cpuacct_controller() = 0;
bool cpu_quota(int& value);
diff --git a/src/hotspot/os/linux/cgroupUtil_linux.cpp b/src/hotspot/os/linux/cgroupUtil_linux.cpp
index 7aa07d53148..570b335940b 100644
--- a/src/hotspot/os/linux/cgroupUtil_linux.cpp
+++ b/src/hotspot/os/linux/cgroupUtil_linux.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2024, 2025, Red Hat, Inc.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,9 +26,8 @@
#include "cgroupUtil_linux.hpp"
#include "os_linux.hpp"
-bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, int& value) {
+bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, double& value) {
assert(upper_bound > 0, "upper bound of cpus must be positive");
- int limit_count = upper_bound;
int quota = -1;
int period = -1;
if (!cpu_ctrl->cpu_quota(quota)) {
@@ -37,20 +37,15 @@ bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound,
return false;
}
int quota_count = 0;
- int result = upper_bound;
+ double result = upper_bound;
- if (quota > -1 && period > 0) {
- quota_count = ceilf((float)quota / (float)period);
- log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count);
+ if (quota > 0 && period > 0) { // Use quotas
+ double cpu_quota = static_cast(quota) / period;
+ log_trace(os, container)("CPU Quota based on quota/period: %.2f", cpu_quota);
+ result = MIN2(result, cpu_quota);
}
- // Use quotas
- if (quota_count != 0) {
- limit_count = quota_count;
- }
-
- result = MIN2(upper_bound, limit_count);
- log_trace(os, container)("OSContainer::active_processor_count: %d", result);
+ log_trace(os, container)("OSContainer::active_processor_count: %.2f", result);
value = result;
return true;
}
@@ -73,11 +68,11 @@ physical_memory_size_type CgroupUtil::get_updated_mem_limit(CgroupMemoryControll
// Get an updated cpu limit. The return value is strictly less than or equal to the
// passed in 'lowest' value.
-int CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
+double CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
int lowest,
int upper_bound) {
assert(lowest > 0 && lowest <= upper_bound, "invariant");
- int cpu_limit_val = -1;
+ double cpu_limit_val = -1;
if (CgroupUtil::processor_count(cpu, upper_bound, cpu_limit_val) && cpu_limit_val != upper_bound) {
assert(cpu_limit_val <= upper_bound, "invariant");
if (lowest > cpu_limit_val) {
@@ -172,7 +167,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
assert(cg_path[0] == '/', "cgroup path must start with '/'");
int host_cpus = os::Linux::active_processor_count();
int lowest_limit = host_cpus;
- int cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
+ double cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
int orig_limit = lowest_limit != host_cpus ? lowest_limit : host_cpus;
char* limit_cg_path = nullptr;
while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
diff --git a/src/hotspot/os/linux/cgroupUtil_linux.hpp b/src/hotspot/os/linux/cgroupUtil_linux.hpp
index d72bbd1cf1e..1fd2a7d872b 100644
--- a/src/hotspot/os/linux/cgroupUtil_linux.hpp
+++ b/src/hotspot/os/linux/cgroupUtil_linux.hpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2024, Red Hat, Inc.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +32,7 @@
class CgroupUtil: AllStatic {
public:
- static bool processor_count(CgroupCpuController* cpu, int upper_bound, int& value);
+ static bool processor_count(CgroupCpuController* cpu, int upper_bound, double& value);
// Given a memory controller, adjust its path to a point in the hierarchy
// that represents the closest memory limit.
static void adjust_controller(CgroupMemoryController* m);
@@ -42,9 +43,7 @@ class CgroupUtil: AllStatic {
static physical_memory_size_type get_updated_mem_limit(CgroupMemoryController* m,
physical_memory_size_type lowest,
physical_memory_size_type upper_bound);
- static int get_updated_cpu_limit(CgroupCpuController* c,
- int lowest,
- int upper_bound);
+ static double get_updated_cpu_limit(CgroupCpuController* c, int lowest, int upper_bound);
};
#endif // CGROUP_UTIL_LINUX_HPP
diff --git a/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
index 2df604083d2..c8f5a290c99 100644
--- a/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -328,8 +328,8 @@ CgroupV1Subsystem::CgroupV1Subsystem(CgroupV1Controller* cpuset,
_pids(pids) {
CgroupUtil::adjust_controller(memory);
CgroupUtil::adjust_controller(cpu);
-  _memory = new CachingCgroupController<CgroupMemoryController>(memory);
-  _cpu = new CachingCgroupController<CgroupCpuController>(cpu);
+  _memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
+  _cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
}
bool CgroupV1Subsystem::is_containerized() {
diff --git a/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
index f556bc57f26..af8d0efd378 100644
--- a/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -214,15 +214,15 @@ class CgroupV1Subsystem: public CgroupSubsystem {
const char * container_type() override {
return "cgroupv1";
}
-  CachingCgroupController<CgroupMemoryController>* memory_controller() override { return _memory; }
-  CachingCgroupController<CgroupCpuController>* cpu_controller() override { return _cpu; }
+  CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
+  CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; }
private:
/* controllers */
-  CachingCgroupController<CgroupMemoryController>* _memory = nullptr;
+  CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
CgroupV1Controller* _cpuset = nullptr;
-  CachingCgroupController<CgroupCpuController>* _cpu = nullptr;
+  CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
CgroupV1CpuacctController* _cpuacct = nullptr;
CgroupV1Controller* _pids = nullptr;
diff --git a/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp b/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
index c61d30e9236..30e1affc646 100644
--- a/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2025, Red Hat Inc.
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -156,8 +156,8 @@ CgroupV2Subsystem::CgroupV2Subsystem(CgroupV2MemoryController * memory,
_unified(unified) {
CgroupUtil::adjust_controller(memory);
CgroupUtil::adjust_controller(cpu);
-  _memory = new CachingCgroupController<CgroupMemoryController>(memory);
-  _cpu = new CachingCgroupController<CgroupCpuController>(cpu);
+  _memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
+  _cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
_cpuacct = cpuacct;
}
diff --git a/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp b/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
index 39a4fabe9f6..998145c0ff9 100644
--- a/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2024, Red Hat Inc.
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -152,8 +152,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
/* One unified controller */
CgroupV2Controller _unified;
/* Caching wrappers for cpu/memory metrics */
- CachingCgroupController* _memory = nullptr;
- CachingCgroupController* _cpu = nullptr;
+ CachingCgroupController* _memory = nullptr;
+ CachingCgroupController* _cpu = nullptr;
CgroupCpuacctController* _cpuacct = nullptr;
@@ -175,8 +175,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
const char * container_type() override {
return "cgroupv2";
}
- CachingCgroupController* memory_controller() override { return _memory; }
- CachingCgroupController* cpu_controller() override { return _cpu; }
+ CachingCgroupController* memory_controller() override { return _memory; }
+ CachingCgroupController* cpu_controller() override { return _cpu; }
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; };
};
diff --git a/src/hotspot/os/linux/osContainer_linux.cpp b/src/hotspot/os/linux/osContainer_linux.cpp
index 15a6403d07f..b46263efd99 100644
--- a/src/hotspot/os/linux/osContainer_linux.cpp
+++ b/src/hotspot/os/linux/osContainer_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -86,8 +86,8 @@ void OSContainer::init() {
// 2.) On a physical Linux system with a limit enforced by other means (like systemd slice)
physical_memory_size_type mem_limit_val = value_unlimited;
(void)memory_limit_in_bytes(mem_limit_val); // discard error and use default
- int host_cpus = os::Linux::active_processor_count();
- int cpus = host_cpus;
+ double host_cpus = os::Linux::active_processor_count();
+ double cpus = host_cpus;
(void)active_processor_count(cpus); // discard error and use default
any_mem_cpu_limit_present = mem_limit_val != value_unlimited || host_cpus != cpus;
if (any_mem_cpu_limit_present) {
@@ -127,8 +127,7 @@ bool OSContainer::available_memory_in_bytes(physical_memory_size_type& value) {
return false;
}
-bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_swap,
- physical_memory_size_type& value) {
+bool OSContainer::available_swap_in_bytes(physical_memory_size_type& value) {
physical_memory_size_type mem_limit = 0;
physical_memory_size_type mem_swap_limit = 0;
if (memory_limit_in_bytes(mem_limit) &&
@@ -179,8 +178,7 @@ bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_sw
assert(num < 25, "buffer too small");
mem_limit_buf[num] = '\0';
log_trace(os,container)("OSContainer::available_swap_in_bytes: container_swap_limit=%s"
- " container_mem_limit=%s, host_free_swap: " PHYS_MEM_TYPE_FORMAT,
- mem_swap_buf, mem_limit_buf, host_free_swap);
+ " container_mem_limit=%s", mem_swap_buf, mem_limit_buf);
}
return false;
}
@@ -252,7 +250,7 @@ char * OSContainer::cpu_cpuset_memory_nodes() {
return cgroup_subsystem->cpu_cpuset_memory_nodes();
}
-bool OSContainer::active_processor_count(int& value) {
+bool OSContainer::active_processor_count(double& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->active_processor_count(value);
}
@@ -291,11 +289,13 @@ template struct metric_fmt;
template<> struct metric_fmt<unsigned long long int> { static constexpr const char* fmt = "%llu"; };
template<> struct metric_fmt<unsigned long int> { static constexpr const char* fmt = "%lu"; };
template<> struct metric_fmt<int> { static constexpr const char* fmt = "%d"; };
+template<> struct metric_fmt<double> { static constexpr const char* fmt = "%.2f"; };
template<> struct metric_fmt<const char*> { static constexpr const char* fmt = "%s"; };
template void OSContainer::print_container_metric(outputStream*, const char*, unsigned long long int, const char*);
template void OSContainer::print_container_metric(outputStream*, const char*, unsigned long int, const char*);
template void OSContainer::print_container_metric(outputStream*, const char*, int, const char*);
+template void OSContainer::print_container_metric(outputStream*, const char*, double, const char*);
template void OSContainer::print_container_metric(outputStream*, const char*, const char*, const char*);
template <typename T>
@@ -304,12 +304,13 @@ void OSContainer::print_container_metric(outputStream* st, const char* metrics,
constexpr int longest_value = max_length - 11; // Max length - shortest "metric: " string ("cpu_quota: ")
char value_str[longest_value + 1] = {};
os::snprintf_checked(value_str, longest_value, metric_fmt<T>::fmt, value);
- st->print("%s: %*s", metrics, max_length - static_cast<int>(strlen(metrics)) - 2, value_str); // -2 for the ": "
- if (unit[0] != '\0') {
- st->print_cr(" %s", unit);
- } else {
- st->print_cr("");
- }
+
+ const int pad_width = max_length - static_cast<int>(strlen(metrics)) - 2; // -2 for the ": "
+ const char* unit_prefix = unit[0] != '\0' ? " " : "";
+
+ char line[128] = {};
+ os::snprintf_checked(line, sizeof(line), "%s: %*s%s%s", metrics, pad_width, value_str, unit_prefix, unit);
+ st->print_cr("%s", line);
}
void OSContainer::print_container_helper(outputStream* st, MetricResult& res, const char* metrics) {
diff --git a/src/hotspot/os/linux/osContainer_linux.hpp b/src/hotspot/os/linux/osContainer_linux.hpp
index 11c3e086feb..96b59b98db8 100644
--- a/src/hotspot/os/linux/osContainer_linux.hpp
+++ b/src/hotspot/os/linux/osContainer_linux.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -72,8 +72,7 @@ class OSContainer: AllStatic {
static const char * container_type();
static bool available_memory_in_bytes(physical_memory_size_type& value);
- static bool available_swap_in_bytes(physical_memory_size_type host_free_swap,
- physical_memory_size_type& value);
+ static bool available_swap_in_bytes(physical_memory_size_type& value);
static bool memory_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_usage_in_bytes(physical_memory_size_type& value);
@@ -84,7 +83,7 @@ class OSContainer: AllStatic {
static bool rss_usage_in_bytes(physical_memory_size_type& value);
static bool cache_usage_in_bytes(physical_memory_size_type& value);
- static bool active_processor_count(int& value);
+ static bool active_processor_count(double& value);
static char * cpu_cpuset_cpus();
static char * cpu_cpuset_memory_nodes();
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index 6a2a3974a16..7190845a8ba 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -211,15 +211,58 @@ static bool suppress_primordial_thread_resolution = false;
// utility functions
+bool os::is_containerized() {
+ return OSContainer::is_containerized();
+}
+
+bool os::Container::memory_limit(physical_memory_size_type& value) {
+ physical_memory_size_type result = 0;
+ if (OSContainer::memory_limit_in_bytes(result) && result != value_unlimited) {
+ value = result;
+ return true;
+ }
+ return false;
+}
+
+bool os::Container::memory_soft_limit(physical_memory_size_type& value) {
+ physical_memory_size_type result = 0;
+ if (OSContainer::memory_soft_limit_in_bytes(result) && result != 0 && result != value_unlimited) {
+ value = result;
+ return true;
+ }
+ return false;
+}
+
+bool os::Container::memory_throttle_limit(physical_memory_size_type& value) {
+ physical_memory_size_type result = 0;
+ if (OSContainer::memory_throttle_limit_in_bytes(result) && result != value_unlimited) {
+ value = result;
+ return true;
+ }
+ return false;
+}
+
+bool os::Container::used_memory(physical_memory_size_type& value) {
+ return OSContainer::memory_usage_in_bytes(value);
+}
+
bool os::available_memory(physical_memory_size_type& value) {
- if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
+ if (is_containerized() && Container::available_memory(value)) {
log_trace(os)("available container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
+ return Machine::available_memory(value);
+}
+
+bool os::Machine::available_memory(physical_memory_size_type& value) {
return Linux::available_memory(value);
}
+bool os::Container::available_memory(physical_memory_size_type& value) {
+ return OSContainer::available_memory_in_bytes(value);
+}
+
bool os::Linux::available_memory(physical_memory_size_type& value) {
physical_memory_size_type avail_mem = 0;
@@ -251,11 +294,15 @@ bool os::Linux::available_memory(physical_memory_size_type& value) {
}
bool os::free_memory(physical_memory_size_type& value) {
- if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
+ if (is_containerized() && Container::available_memory(value)) {
log_trace(os)("free container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
+ return Machine::free_memory(value);
+}
+
+bool os::Machine::free_memory(physical_memory_size_type& value) {
return Linux::free_memory(value);
}
@@ -274,21 +321,30 @@ bool os::Linux::free_memory(physical_memory_size_type& value) {
}
bool os::total_swap_space(physical_memory_size_type& value) {
- if (OSContainer::is_containerized()) {
- physical_memory_size_type mem_swap_limit = value_unlimited;
- physical_memory_size_type memory_limit = value_unlimited;
- if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
- OSContainer::memory_limit_in_bytes(memory_limit)) {
- if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
- mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
- value = mem_swap_limit - memory_limit;
- return true;
- }
- }
- } // fallback to the host swap space if the container returned unlimited
+ if (is_containerized() && Container::total_swap_space(value)) {
+ return true;
+ } // fall back to the host swap space if the container query fails
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
return Linux::host_swap(value);
}
+bool os::Container::total_swap_space(physical_memory_size_type& value) {
+ physical_memory_size_type mem_swap_limit = value_unlimited;
+ physical_memory_size_type memory_limit = value_unlimited;
+ if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
+ OSContainer::memory_limit_in_bytes(memory_limit)) {
+ if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
+ mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
+ value = mem_swap_limit - memory_limit;
+ return true;
+ }
+ }
+ return false;
+}
+
static bool host_free_swap_f(physical_memory_size_type& value) {
struct sysinfo si;
int ret = sysinfo(&si);
@@ -309,32 +365,45 @@ bool os::free_swap_space(physical_memory_size_type& value) {
return false;
}
physical_memory_size_type host_free_swap_val = MIN2(total_swap_space, host_free_swap);
- if (OSContainer::is_containerized()) {
- if (OSContainer::available_swap_in_bytes(host_free_swap_val, value)) {
+ if (is_containerized()) {
+ if (Container::free_swap_space(value)) {
return true;
}
// Fall through to use host value
log_trace(os,container)("os::free_swap_space: containerized value unavailable"
" returning host value: " PHYS_MEM_TYPE_FORMAT, host_free_swap_val);
}
+
value = host_free_swap_val;
return true;
}
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
+ return host_free_swap_f(value);
+}
+
+bool os::Container::free_swap_space(physical_memory_size_type& value) {
+ return OSContainer::available_swap_in_bytes(value);
+}
+
physical_memory_size_type os::physical_memory() {
- if (OSContainer::is_containerized()) {
+ if (is_containerized()) {
physical_memory_size_type mem_limit = value_unlimited;
- if (OSContainer::memory_limit_in_bytes(mem_limit) && mem_limit != value_unlimited) {
+ if (Container::memory_limit(mem_limit) && mem_limit != value_unlimited) {
log_trace(os)("total container memory: " PHYS_MEM_TYPE_FORMAT, mem_limit);
return mem_limit;
}
}
- physical_memory_size_type phys_mem = Linux::physical_memory();
+ physical_memory_size_type phys_mem = Machine::physical_memory();
log_trace(os)("total system memory: " PHYS_MEM_TYPE_FORMAT, phys_mem);
return phys_mem;
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return Linux::physical_memory();
+}
+
// Returns the resident set size (RSS) of the process.
// Falls back to using VmRSS from /proc/self/status if /proc/self/smaps_rollup is unavailable.
// Note: On kernels with memory cgroups or shared memory, VmRSS may underreport RSS.
@@ -2439,20 +2508,21 @@ bool os::Linux::print_container_info(outputStream* st) {
OSContainer::print_container_metric(st, "cpu_memory_nodes", p != nullptr ? p : "not supported");
free(p);
- int i = -1;
- bool supported = OSContainer::active_processor_count(i);
+ double cpus = -1;
+ bool supported = OSContainer::active_processor_count(cpus);
if (supported) {
- assert(i > 0, "must be");
+ assert(cpus > 0, "must be");
if (ActiveProcessorCount > 0) {
OSContainer::print_container_metric(st, "active_processor_count", ActiveProcessorCount, "(from -XX:ActiveProcessorCount)");
} else {
- OSContainer::print_container_metric(st, "active_processor_count", i);
+ OSContainer::print_container_metric(st, "active_processor_count", cpus);
}
} else {
OSContainer::print_container_metric(st, "active_processor_count", "not supported");
}
+ int i = -1;
supported = OSContainer::cpu_quota(i);
if (supported && i > 0) {
OSContainer::print_container_metric(st, "cpu_quota", i);
@@ -4737,15 +4807,26 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
- int active_cpus = -1;
- if (OSContainer::is_containerized() && OSContainer::active_processor_count(active_cpus)) {
- log_trace(os)("active_processor_count: determined by OSContainer: %d",
- active_cpus);
- } else {
- active_cpus = os::Linux::active_processor_count();
+ if (is_containerized()) {
+ double cpu_quota;
+ if (Container::processor_count(cpu_quota)) {
+ int active_cpus = ceilf(cpu_quota); // Round fractional CPU quota up.
+ assert(active_cpus <= Machine::active_processor_count(), "must be");
+ log_trace(os)("active_processor_count: determined by OSContainer: %d",
+ active_cpus);
+ return active_cpus;
+ }
}
- return active_cpus;
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
+ return os::Linux::active_processor_count();
+}
+
+bool os::Container::processor_count(double& value) {
+ return OSContainer::active_processor_count(value);
}
static bool should_warn_invalid_processor_id() {
@@ -4882,9 +4963,14 @@ int os::open(const char *path, int oflag, int mode) {
oflag |= O_CLOEXEC;
int fd = ::open(path, oflag, mode);
- if (fd == -1) return -1;
+ // No further checking is needed if open() returned an error or the
+ // access mode is not read-only.
+ if (fd == -1 || (oflag & O_ACCMODE) != O_RDONLY) {
+ return fd;
+ }
- //If the open succeeded, the file might still be a directory
+ // If the open succeeded with read-only access, the file might still be a
+ // directory, which the JVM does not allow to be read.
{
struct stat buf;
int ret = ::fstat(fd, &buf);
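The os_linux.cpp hunks above report the container CPU quota as a double and round it up with ceilf() before using it as an active processor count. A minimal standalone sketch of that rounding (not HotSpot code; the helper name and the clamp against the host CPU count are illustrative assumptions, the patch itself only asserts that bound):
```
#include <algorithm>
#include <cmath>
#include <cstdio>

// Map a fractional cgroup CPU quota, e.g. cpu.max "150000 100000" (1.5 CPUs),
// to an integer processor count.
static int processors_from_quota(double cpu_quota, int host_cpus) {
  // Round the fractional quota up, but never report more CPUs than the host has.
  int rounded = static_cast<int>(std::ceil(cpu_quota));
  return std::min(rounded, host_cpus);
}

int main() {
  std::printf("%d\n", processors_from_quota(1.5, 8)); // prints 2
  std::printf("%d\n", processors_from_quota(0.2, 8)); // prints 1
  return 0;
}
```
Rounding up keeps a 0.2-CPU container at one usable processor instead of zero.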
diff --git a/src/hotspot/os/posix/perfMemory_posix.cpp b/src/hotspot/os/posix/perfMemory_posix.cpp
index 08a19270943..ce9c2a4f031 100644
--- a/src/hotspot/os/posix/perfMemory_posix.cpp
+++ b/src/hotspot/os/posix/perfMemory_posix.cpp
@@ -112,6 +112,10 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
+ } else {
+ if (!successful_write) {
+ remove(destfile);
+ }
}
}
FREE_C_HEAP_ARRAY(char, destfile);
@@ -949,6 +953,7 @@ static int create_sharedmem_file(const char* dirname, const char* filename, size
warning("Insufficient space for shared memory file: %s/%s\n", dirname, filename);
}
result = OS_ERR;
+ remove(filename);
break;
}
}
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index efbd1fe7c68..b0b7ae18106 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -839,10 +839,18 @@ bool os::available_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+ return win32::available_memory(value);
+}
+
bool os::free_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+ return win32::available_memory(value);
+}
+
bool os::win32::available_memory(physical_memory_size_type& value) {
// Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
// value if total memory is larger than 4GB
@@ -858,7 +866,11 @@ bool os::win32::available_memory(physical_memory_size_type& value) {
}
}
-bool os::total_swap_space(physical_memory_size_type& value) {
+bool os::total_swap_space(physical_memory_size_type& value) {
+ return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@@ -872,6 +884,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}
bool os::free_swap_space(physical_memory_size_type& value) {
+ return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@@ -888,6 +904,10 @@ physical_memory_size_type os::physical_memory() {
return win32::physical_memory();
}
+physical_memory_size_type os::Machine::physical_memory() {
+ return win32::physical_memory();
+}
+
size_t os::rss() {
size_t rss = 0;
PROCESS_MEMORY_COUNTERS_EX pmex;
@@ -911,6 +931,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
+ return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
bool schedules_all_processor_groups = win32::is_windows_11_or_greater() || win32::is_windows_server_2022_or_greater();
if (UseAllWindowsProcessorGroups && !schedules_all_processor_groups && !win32::processor_group_warning_displayed()) {
win32::set_processor_group_warning_displayed(true);
diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp
index 6bbefea5cd9..328bed1ccfb 100644
--- a/src/hotspot/share/cds/archiveBuilder.cpp
+++ b/src/hotspot/share/cds/archiveBuilder.cpp
@@ -571,7 +571,12 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
}
if (is_excluded(klass)) {
ResourceMark rm;
- log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
+ aot_log_trace(aot)("pointer set to null: class (excluded): %s", klass->external_name());
+ return set_to_null;
+ }
+ if (klass->is_array_klass() && CDSConfig::is_dumping_dynamic_archive()) {
+ ResourceMark rm;
+ aot_log_trace(aot)("pointer set to null: array class not supported in dynamic region: %s", klass->external_name());
return set_to_null;
}
}
diff --git a/src/hotspot/share/ci/ciField.cpp b/src/hotspot/share/ci/ciField.cpp
index 19e05784f4d..e0c818f02fc 100644
--- a/src/hotspot/share/ci/ciField.cpp
+++ b/src/hotspot/share/ci/ciField.cpp
@@ -216,6 +216,10 @@ ciField::ciField(fieldDescriptor *fd) :
static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
if (holder == nullptr)
return false;
+ if (holder->trust_final_fields()) {
+ // Explicit opt-in from system classes
+ return true;
+ }
// Even if general trusting is disabled, trust system-built closures in these packages.
if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke") ||
holder->is_in_package("java/lang/reflect") || holder->is_in_package("jdk/internal/reflect") ||
@@ -230,14 +234,6 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
// Trust final fields in records
if (holder->is_record())
return true;
- // Trust Atomic*FieldUpdaters: they are very important for performance, and make up one
- // more reason not to use Unsafe, if their final fields are trusted. See more in JDK-8140483.
- if (holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl() ||
- holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater() ||
- holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater() ||
- holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl()) {
- return true;
- }
return TrustFinalNonStaticFields;
}
diff --git a/src/hotspot/share/ci/ciInstanceKlass.cpp b/src/hotspot/share/ci/ciInstanceKlass.cpp
index 64b9acf9146..33bcabc4566 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp
@@ -65,6 +65,7 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) :
_has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods();
_is_hidden = ik->is_hidden();
_is_record = ik->is_record();
+ _trust_final_fields = ik->trust_final_fields();
_nonstatic_fields = nullptr; // initialized lazily by compute_nonstatic_fields:
_has_injected_fields = -1;
_implementor = nullptr; // we will fill these lazily
diff --git a/src/hotspot/share/ci/ciInstanceKlass.hpp b/src/hotspot/share/ci/ciInstanceKlass.hpp
index a1b2d8dd12d..8ccf1fadfb7 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.hpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.hpp
@@ -59,6 +59,7 @@ private:
bool _has_nonstatic_concrete_methods;
bool _is_hidden;
bool _is_record;
+ bool _trust_final_fields;
bool _has_trusted_loader;
ciFlags _flags;
@@ -207,6 +208,10 @@ public:
return _is_record;
}
+ bool trust_final_fields() const {
+ return _trust_final_fields;
+ }
+
ciInstanceKlass* get_canonical_holder(int offset);
ciField* get_field_by_offset(int field_offset, bool is_static);
ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);
diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp
index c9d9d3632b5..817d0c64d11 100644
--- a/src/hotspot/share/classfile/classFileParser.cpp
+++ b/src/hotspot/share/classfile/classFileParser.cpp
@@ -943,6 +943,7 @@ public:
_java_lang_Deprecated_for_removal,
_jdk_internal_vm_annotation_AOTSafeClassInitializer,
_method_AOTRuntimeSetup,
+ _jdk_internal_vm_annotation_TrustFinalFields,
_annotation_LIMIT
};
const Location _location;
@@ -1878,6 +1879,11 @@ AnnotationCollector::annotation_index(const ClassLoaderData* loader_data,
if (!privileged) break; // only allow in privileged code
return _field_Stable;
}
+ case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_TrustFinalFields_signature): {
+ if (_location != _in_class) break; // only allow for classes
+ if (!privileged) break; // only allow in privileged code
+ return _jdk_internal_vm_annotation_TrustFinalFields;
+ }
case VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Contended_signature): {
if (_location != _in_field && _location != _in_class) {
break; // only allow for fields and classes
@@ -1992,6 +1998,9 @@ void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) {
if (has_annotation(_jdk_internal_vm_annotation_AOTSafeClassInitializer)) {
ik->set_has_aot_safe_initializer();
}
+ if (has_annotation(_jdk_internal_vm_annotation_TrustFinalFields)) {
+ ik->set_trust_final_fields(true);
+ }
}
#define MAX_ARGS_SIZE 255
diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp
index c775014cfac..20dfad0d980 100644
--- a/src/hotspot/share/classfile/stringTable.cpp
+++ b/src/hotspot/share/classfile/stringTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -614,6 +614,10 @@ struct StringTableDeleteCheck : StackObj {
};
void StringTable::clean_dead_entries(JavaThread* jt) {
+ // BulkDeleteTask::prepare() may take ConcurrentHashTableResize_lock (nosafepoint-2).
+ // When NativeHeapTrimmer is enabled, SuspendMark may take NativeHeapTrimmer::_lock (nosafepoint).
+ // Take SuspendMark first to keep lock order and avoid deadlock.
+ NativeHeapTrimmer::SuspendMark sm("stringtable");
StringTableHash::BulkDeleteTask bdt(_local_table);
if (!bdt.prepare(jt)) {
return;
@@ -621,7 +625,6 @@ void StringTable::clean_dead_entries(JavaThread* jt) {
StringTableDeleteCheck stdc;
StringTableDoDelete stdd;
- NativeHeapTrimmer::SuspendMark sm("stringtable");
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, stringtable, perf));
while(bdt.do_task(jt, stdc, stdd)) {
diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp
index ec639a2b4d3..c49aa10fa0d 100644
--- a/src/hotspot/share/classfile/symbolTable.cpp
+++ b/src/hotspot/share/classfile/symbolTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -763,6 +763,10 @@ struct SymbolTableDeleteCheck : StackObj {
};
void SymbolTable::clean_dead_entries(JavaThread* jt) {
+ // BulkDeleteTask::prepare() may take ConcurrentHashTableResize_lock (nosafepoint-2).
+ // When NativeHeapTrimmer is enabled, SuspendMark may take NativeHeapTrimmer::_lock (nosafepoint).
+ // Take SuspendMark first to keep lock order and avoid deadlock.
+ NativeHeapTrimmer::SuspendMark sm("symboltable");
SymbolTableHash::BulkDeleteTask bdt(_local_table);
if (!bdt.prepare(jt)) {
return;
@@ -770,7 +774,6 @@ void SymbolTable::clean_dead_entries(JavaThread* jt) {
SymbolTableDeleteCheck stdc;
SymbolTableDoDelete stdd;
- NativeHeapTrimmer::SuspendMark sm("symboltable");
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, symboltable, perf));
while (bdt.do_task(jt, stdc, stdd)) {
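The stringTable.cpp and symbolTable.cpp hunks above take the NativeHeapTrimmer::SuspendMark before BulkDeleteTask::prepare() so that the two locks are always acquired in the same order. A minimal standalone sketch of that discipline, with std::mutex standing in for HotSpot's ranked Mutex (the names here are illustrative assumptions):
```
#include <mutex>

std::mutex trimmer_lock;  // stand-in for NativeHeapTrimmer::_lock (taken first)
std::mutex resize_lock;   // stand-in for ConcurrentHashTableResize_lock (taken second)

void clean_dead_entries() {
  // Acquire the outer lock first on every path; an ABBA deadlock needs two
  // threads that take the same pair of locks in opposite orders.
  std::lock_guard<std::mutex> suspend_trimmer(trimmer_lock);
  std::lock_guard<std::mutex> prepare_bulk_delete(resize_lock);
  // ... bulk-delete work would happen here ...
}
```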
diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp
index 8388b98faae..79646f24d0e 100644
--- a/src/hotspot/share/classfile/vmSymbols.hpp
+++ b/src/hotspot/share/classfile/vmSymbols.hpp
@@ -245,10 +245,6 @@ class SerializeClosure;
\
/* Concurrency support */ \
template(java_util_concurrent_locks_AbstractOwnableSynchronizer, "java/util/concurrent/locks/AbstractOwnableSynchronizer") \
- template(java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicIntegerFieldUpdater$AtomicIntegerFieldUpdaterImpl") \
- template(java_util_concurrent_atomic_AtomicLongFieldUpdater_CASUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$CASUpdater") \
- template(java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$LockedUpdater") \
- template(java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl") \
template(jdk_internal_vm_annotation_Contended_signature, "Ljdk/internal/vm/annotation/Contended;") \
template(jdk_internal_vm_annotation_ReservedStackAccess_signature, "Ljdk/internal/vm/annotation/ReservedStackAccess;") \
template(jdk_internal_ValueBased_signature, "Ljdk/internal/ValueBased;") \
@@ -302,6 +298,7 @@ class SerializeClosure;
template(jdk_internal_misc_Scoped_signature, "Ljdk/internal/misc/ScopedMemoryAccess$Scoped;") \
template(jdk_internal_vm_annotation_IntrinsicCandidate_signature, "Ljdk/internal/vm/annotation/IntrinsicCandidate;") \
template(jdk_internal_vm_annotation_Stable_signature, "Ljdk/internal/vm/annotation/Stable;") \
+ template(jdk_internal_vm_annotation_TrustFinalFields_signature, "Ljdk/internal/vm/annotation/TrustFinalFields;") \
\
template(jdk_internal_vm_annotation_ChangesCurrentThread_signature, "Ljdk/internal/vm/annotation/ChangesCurrentThread;") \
template(jdk_internal_vm_annotation_JvmtiHideEvents_signature, "Ljdk/internal/vm/annotation/JvmtiHideEvents;") \
diff --git a/src/hotspot/share/gc/g1/g1Arguments.cpp b/src/hotspot/share/gc/g1/g1Arguments.cpp
index 58e76cdd43a..ffb06a7d822 100644
--- a/src/hotspot/share/gc/g1/g1Arguments.cpp
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -209,6 +209,17 @@ void G1Arguments::initialize() {
FLAG_SET_DEFAULT(GCTimeRatio, 24);
}
+ // Do not interfere with GC-pressure-driven heap resizing unless the user
+ // explicitly sets these flags. G1 heap sizing should be free to grow or shrink
+ // the heap based on GC pressure, rather than being forced to satisfy
+ // MinHeapFreeRatio or MaxHeapFreeRatio defaults that the user did not set.
+ if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
+ FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
+ }
+ if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
+ FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
+ }
+
// Below, we might need to calculate the pause time interval based on
// the pause target. When we do so we are going to give G1 maximum
// flexibility and allow it to do pauses when it needs to. So, we'll
diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
index ee2c1450d9b..794e5db0634 100644
--- a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
@@ -70,7 +70,11 @@ inline void G1BarrierSet::write_ref_field_pre(T* field) {
template <DecoratorSet decorators, typename T>
inline void G1BarrierSet::write_ref_field_post(T* field) {
- volatile CardValue* byte = _card_table->byte_for(field);
+ // Make sure that the card table reference is read only once. Otherwise the compiler
+ // might reload that value in the two accesses below, that could cause writes to
+ // the wrong card table.
+ CardTable* card_table = AtomicAccess::load(&_card_table);
+ CardValue* byte = card_table->byte_for(field);
if (*byte == G1CardTable::clean_card_val()) {
*byte = G1CardTable::dirty_card_val();
}
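The barrier change above loads the card table pointer into a local before the two card accesses. A standalone sketch of the same read-once pattern, using std::atomic in place of HotSpot's AtomicAccess (the types and names are illustrative assumptions):
```
#include <atomic>
#include <cstddef>
#include <cstdint>

struct CardTable {
  static constexpr uint8_t clean_card = 0;
  static constexpr uint8_t dirty_card = 1;
  uint8_t cards[1024];
};

std::atomic<CardTable*> g_card_table{nullptr};

void write_ref_field_post(size_t card_index) {
  // Read the shared pointer exactly once; both the check and the write below
  // then go to the same table even if another thread swaps g_card_table.
  CardTable* table = g_card_table.load(std::memory_order_relaxed);
  if (table->cards[card_index] == CardTable::clean_card) {
    table->cards[card_index] = CardTable::dirty_card;
  }
}
```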
diff --git a/src/hotspot/share/gc/g1/g1BatchedTask.cpp b/src/hotspot/share/gc/g1/g1BatchedTask.cpp
index 57558301541..1f082153476 100644
--- a/src/hotspot/share/gc/g1/g1BatchedTask.cpp
+++ b/src/hotspot/share/gc/g1/g1BatchedTask.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#include "gc/g1/g1BatchedTask.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCParPhaseTimesTracker.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/growableArray.hpp"
void G1AbstractSubTask::record_work_item(uint worker_id, uint index, size_t count) {
@@ -40,7 +39,7 @@ const char* G1AbstractSubTask::name() const {
}
bool G1BatchedTask::try_claim_serial_task(int& task) {
- task = AtomicAccess::fetch_then_add(&_num_serial_tasks_done, 1);
+ task = _num_serial_tasks_done.fetch_then_add(1);
return task < _serial_tasks.length();
}
@@ -96,8 +95,8 @@ void G1BatchedTask::work(uint worker_id) {
}
G1BatchedTask::~G1BatchedTask() {
- assert(AtomicAccess::load(&_num_serial_tasks_done) >= _serial_tasks.length(),
- "Only %d tasks of %d claimed", AtomicAccess::load(&_num_serial_tasks_done), _serial_tasks.length());
+ assert(_num_serial_tasks_done.load_relaxed() >= _serial_tasks.length(),
+ "Only %d tasks of %d claimed", _num_serial_tasks_done.load_relaxed(), _serial_tasks.length());
for (G1AbstractSubTask* task : _parallel_tasks) {
delete task;
diff --git a/src/hotspot/share/gc/g1/g1BatchedTask.hpp b/src/hotspot/share/gc/g1/g1BatchedTask.hpp
index 020fda634e4..a6d2ef923c0 100644
--- a/src/hotspot/share/gc/g1/g1BatchedTask.hpp
+++ b/src/hotspot/share/gc/g1/g1BatchedTask.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
template <typename E, MemTag MT>
class GrowableArrayCHeap;
@@ -120,7 +121,7 @@ public:
// 5) ~T()
//
class G1BatchedTask : public WorkerTask {
- volatile int _num_serial_tasks_done;
+ Atomic<int> _num_serial_tasks_done;
G1GCPhaseTimes* _phase_times;
bool try_claim_serial_task(int& task);
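The G1BatchedTask change above replaces AtomicAccess::fetch_then_add on a volatile int with an Atomic<int> member. A standalone sketch of the same claim-by-counter idea with std::atomic (class and member names are illustrative assumptions):
```
#include <atomic>

class SerialTaskClaimer {
  std::atomic<int> _next{0};
  const int _num_tasks;
public:
  explicit SerialTaskClaimer(int num_tasks) : _num_tasks(num_tasks) {}

  // Each caller gets a unique index; returns false once all tasks are claimed.
  bool try_claim(int& task) {
    task = _next.fetch_add(1, std::memory_order_relaxed);
    return task < _num_tasks;
  }
};
```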
diff --git a/src/hotspot/share/gc/g1/g1CardSet.cpp b/src/hotspot/share/gc/g1/g1CardSet.cpp
index 3441e6bc608..60ad63e812c 100644
--- a/src/hotspot/share/gc/g1/g1CardSet.cpp
+++ b/src/hotspot/share/gc/g1/g1CardSet.cpp
@@ -29,7 +29,6 @@
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "memory/allocation.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "utilities/bitMap.inline.hpp"
@@ -192,32 +191,32 @@ const char* G1CardSetConfiguration::mem_object_type_name_str(uint index) {
void G1CardSetCoarsenStats::reset() {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
- _coarsen_from[i] = 0;
- _coarsen_collision[i] = 0;
+ _coarsen_from[i].store_relaxed(0);
+ _coarsen_collision[i].store_relaxed(0);
}
}
void G1CardSetCoarsenStats::set(G1CardSetCoarsenStats& other) {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
- _coarsen_from[i] = other._coarsen_from[i];
- _coarsen_collision[i] = other._coarsen_collision[i];
+ _coarsen_from[i].store_relaxed(other._coarsen_from[i].load_relaxed());
+ _coarsen_collision[i].store_relaxed(other._coarsen_collision[i].load_relaxed());
}
}
void G1CardSetCoarsenStats::subtract_from(G1CardSetCoarsenStats& other) {
STATIC_ASSERT(ARRAY_SIZE(_coarsen_from) == ARRAY_SIZE(_coarsen_collision));
for (uint i = 0; i < ARRAY_SIZE(_coarsen_from); i++) {
- _coarsen_from[i] = other._coarsen_from[i] - _coarsen_from[i];
- _coarsen_collision[i] = other._coarsen_collision[i] - _coarsen_collision[i];
+ _coarsen_from[i].store_relaxed(other._coarsen_from[i].load_relaxed() - _coarsen_from[i].load_relaxed());
+ _coarsen_collision[i].store_relaxed(other._coarsen_collision[i].load_relaxed() - _coarsen_collision[i].load_relaxed());
}
}
void G1CardSetCoarsenStats::record_coarsening(uint tag, bool collision) {
assert(tag < ARRAY_SIZE(_coarsen_from), "tag %u out of bounds", tag);
- AtomicAccess::inc(&_coarsen_from[tag], memory_order_relaxed);
+ _coarsen_from[tag].add_then_fetch(1u, memory_order_relaxed);
if (collision) {
- AtomicAccess::inc(&_coarsen_collision[tag], memory_order_relaxed);
+ _coarsen_collision[tag].add_then_fetch(1u, memory_order_relaxed);
}
}
@@ -228,13 +227,13 @@ void G1CardSetCoarsenStats::print_on(outputStream* out) {
"Inline->AoC %zu (%zu) "
"AoC->BitMap %zu (%zu) "
"BitMap->Full %zu (%zu) ",
- _coarsen_from[0], _coarsen_collision[0],
- _coarsen_from[1], _coarsen_collision[1],
+ _coarsen_from[0].load_relaxed(), _coarsen_collision[0].load_relaxed(),
+ _coarsen_from[1].load_relaxed(), _coarsen_collision[1].load_relaxed(),
// There is no BitMap at the first level so we can't .
- _coarsen_from[3], _coarsen_collision[3],
- _coarsen_from[4], _coarsen_collision[4],
- _coarsen_from[5], _coarsen_collision[5],
- _coarsen_from[6], _coarsen_collision[6]
+ _coarsen_from[3].load_relaxed(), _coarsen_collision[3].load_relaxed(),
+ _coarsen_from[4].load_relaxed(), _coarsen_collision[4].load_relaxed(),
+ _coarsen_from[5].load_relaxed(), _coarsen_collision[5].load_relaxed(),
+ _coarsen_from[6].load_relaxed(), _coarsen_collision[6].load_relaxed()
);
}
@@ -248,7 +247,7 @@ class G1CardSetHashTable : public CHeapObj {
// the per region cardsets.
const static uint GroupBucketClaimSize = 4;
// Did we insert at least one card in the table?
- bool volatile _inserted_card;
+ Atomic<bool> _inserted_card;
G1CardSetMemoryManager* _mm;
CardSetHash _table;
@@ -311,10 +310,10 @@ public:
G1CardSetHashTableValue value(region_idx, G1CardSetInlinePtr());
bool inserted = _table.insert_get(Thread::current(), lookup, value, found, should_grow);
- if (!_inserted_card && inserted) {
+ if (!_inserted_card.load_relaxed() && inserted) {
// It does not matter to us who is setting the flag so a regular atomic store
// is sufficient.
- AtomicAccess::store(&_inserted_card, true);
+ _inserted_card.store_relaxed(true);
}
return found.value();
@@ -343,9 +342,9 @@ public:
}
void reset() {
- if (AtomicAccess::load(&_inserted_card)) {
+ if (_inserted_card.load_relaxed()) {
_table.unsafe_reset(InitialLogTableSize);
- AtomicAccess::store(&_inserted_card, false);
+ _inserted_card.store_relaxed(false);
}
}
@@ -455,14 +454,14 @@ void G1CardSet::free_mem_object(ContainerPtr container) {
_mm->free(container_type_to_mem_object_type(type), value);
}
-G1CardSet::ContainerPtr G1CardSet::acquire_container(ContainerPtr volatile* container_addr) {
+G1CardSet::ContainerPtr G1CardSet::acquire_container(Atomic<ContainerPtr>* container_addr) {
// Update reference counts under RCU critical section to avoid a
// use-after-cleanup bug where we increment a reference count for
// an object whose memory has already been cleaned up and reused.
GlobalCounter::CriticalSection cs(Thread::current());
while (true) {
// Get ContainerPtr and increment refcount atomically wrt to memory reuse.
- ContainerPtr container = AtomicAccess::load_acquire(container_addr);
+ ContainerPtr container = container_addr->load_acquire();
uint cs_type = container_type(container);
if (container == FullCardSet || cs_type == ContainerInlinePtr) {
return container;
@@ -503,15 +502,15 @@ class G1ReleaseCardsets : public StackObj {
G1CardSet* _card_set;
using ContainerPtr = G1CardSet::ContainerPtr;
- void coarsen_to_full(ContainerPtr* container_addr) {
+ void coarsen_to_full(Atomic<ContainerPtr>* container_addr) {
while (true) {
- ContainerPtr cur_container = AtomicAccess::load_acquire(container_addr);
+ ContainerPtr cur_container = container_addr->load_acquire();
uint cs_type = G1CardSet::container_type(cur_container);
if (cur_container == G1CardSet::FullCardSet) {
return;
}
- ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, G1CardSet::FullCardSet);
+ ContainerPtr old_value = container_addr->compare_exchange(cur_container, G1CardSet::FullCardSet);
if (old_value == cur_container) {
_card_set->release_and_maybe_free_container(cur_container);
@@ -523,7 +522,7 @@ class G1ReleaseCardsets : public StackObj {
public:
explicit G1ReleaseCardsets(G1CardSet* card_set) : _card_set(card_set) { }
- void operator ()(ContainerPtr* container_addr) {
+ void operator ()(Atomic<ContainerPtr>* container_addr) {
coarsen_to_full(container_addr);
}
};
@@ -544,10 +543,10 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
ContainerPtr container;
uint bucket = _config->howl_bucket_index(card_in_region);
- ContainerPtr volatile* bucket_entry = howl->container_addr(bucket);
+ Atomic<ContainerPtr>* bucket_entry = howl->container_addr(bucket);
while (true) {
- if (AtomicAccess::load(&howl->_num_entries) >= _config->cards_in_howl_threshold()) {
+ if (howl->_num_entries.load_relaxed() >= _config->cards_in_howl_threshold()) {
return Overflow;
}
@@ -571,7 +570,7 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
}
if (increment_total && add_result == Added) {
- AtomicAccess::inc(&howl->_num_entries, memory_order_relaxed);
+ howl->_num_entries.add_then_fetch(1u, memory_order_relaxed);
}
if (to_transfer != nullptr) {
@@ -588,7 +587,7 @@ G1AddCardResult G1CardSet::add_to_bitmap(ContainerPtr container, uint card_in_re
return bitmap->add(card_offset, _config->cards_in_howl_bitmap_threshold(), _config->max_cards_in_howl_bitmap());
}
-G1AddCardResult G1CardSet::add_to_inline_ptr(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_in_region) {
+G1AddCardResult G1CardSet::add_to_inline_ptr(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_in_region) {
G1CardSetInlinePtr value(container_addr, container);
return value.add(card_in_region, _config->inline_ptr_bits_per_card(), _config->max_cards_in_inline_ptr());
}
@@ -610,7 +609,7 @@ G1CardSet::ContainerPtr G1CardSet::create_coarsened_array_of_cards(uint card_in_
return new_container;
}
-bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr,
+bool G1CardSet::coarsen_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr cur_container,
uint card_in_region,
bool within_howl) {
@@ -640,7 +639,7 @@ bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr,
ShouldNotReachHere();
}
- ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, new_container); // Memory order?
+ ContainerPtr old_value = container_addr->compare_exchange(cur_container, new_container); // Memory order?
if (old_value == cur_container) {
// Success. Indicate that the cards from the current card set must be transferred
// by this caller.
@@ -687,7 +686,7 @@ void G1CardSet::transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPt
assert(container_type(source_container) == ContainerHowl, "must be");
// Need to correct for that the Full remembered set occupies more cards than the
// AoCS before.
- AtomicAccess::add(&_num_occupied, _config->max_cards_in_region() - table_entry->_num_occupied, memory_order_relaxed);
+ _num_occupied.add_then_fetch(_config->max_cards_in_region() - table_entry->_num_occupied.load_relaxed(), memory_order_relaxed);
}
}
@@ -713,18 +712,18 @@ void G1CardSet::transfer_cards_in_howl(ContainerPtr parent_container,
diff -= 1;
G1CardSetHowl* howling_array = container_ptr(parent_container);
- AtomicAccess::add(&howling_array->_num_entries, diff, memory_order_relaxed);
+ howling_array->_num_entries.add_then_fetch(diff, memory_order_relaxed);
G1CardSetHashTableValue* table_entry = get_container(card_region);
assert(table_entry != nullptr, "Table entry not found for transferred cards");
- AtomicAccess::add(&table_entry->_num_occupied, diff, memory_order_relaxed);
+ table_entry->_num_occupied.add_then_fetch(diff, memory_order_relaxed);
- AtomicAccess::add(&_num_occupied, diff, memory_order_relaxed);
+ _num_occupied.add_then_fetch(diff, memory_order_relaxed);
}
}
-G1AddCardResult G1CardSet::add_to_container(ContainerPtr volatile* container_addr,
+G1AddCardResult G1CardSet::add_to_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr container,
uint card_region,
uint card_in_region,
@@ -827,8 +826,8 @@ G1AddCardResult G1CardSet::add_card(uint card_region, uint card_in_region, bool
}
if (increment_total && add_result == Added) {
- AtomicAccess::inc(&table_entry->_num_occupied, memory_order_relaxed);
- AtomicAccess::inc(&_num_occupied, memory_order_relaxed);
+ table_entry->_num_occupied.add_then_fetch(1u, memory_order_relaxed);
+ _num_occupied.add_then_fetch(1u, memory_order_relaxed);
}
if (should_grow_table) {
_table->grow();
@@ -853,7 +852,7 @@ bool G1CardSet::contains_card(uint card_region, uint card_in_region) {
return false;
}
- ContainerPtr container = table_entry->_container;
+ ContainerPtr container = table_entry->_container.load_relaxed();
if (container == FullCardSet) {
// contains_card() is not a performance critical method so we do not hide that
// case in the switch below.
@@ -889,7 +888,7 @@ void G1CardSet::print_info(outputStream* st, uintptr_t card) {
return;
}
- ContainerPtr container = table_entry->_container;
+ ContainerPtr container = table_entry->_container.load_relaxed();
if (container == FullCardSet) {
st->print("FULL card set)");
return;
@@ -940,7 +939,7 @@ void G1CardSet::iterate_cards_during_transfer(ContainerPtr const container, Card
void G1CardSet::iterate_containers(ContainerPtrClosure* cl, bool at_safepoint) {
auto do_value =
[&] (G1CardSetHashTableValue* value) {
- cl->do_containerptr(value->_region_idx, value->_num_occupied, value->_container);
+ cl->do_containerptr(value->_region_idx, value->_num_occupied.load_relaxed(), value->_container.load_relaxed());
return true;
};
@@ -1001,11 +1000,11 @@ bool G1CardSet::occupancy_less_or_equal_to(size_t limit) const {
}
bool G1CardSet::is_empty() const {
- return _num_occupied == 0;
+ return _num_occupied.load_relaxed() == 0;
}
size_t G1CardSet::occupied() const {
- return _num_occupied;
+ return _num_occupied.load_relaxed();
}
size_t G1CardSet::num_containers() {
@@ -1051,7 +1050,7 @@ size_t G1CardSet::static_mem_size() {
void G1CardSet::clear() {
_table->reset();
- _num_occupied = 0;
+ _num_occupied.store_relaxed(0);
_mm->flush();
}
diff --git a/src/hotspot/share/gc/g1/g1CardSet.hpp b/src/hotspot/share/gc/g1/g1CardSet.hpp
index 9cefc4b1c22..64ddf0ca6a4 100644
--- a/src/hotspot/share/gc/g1/g1CardSet.hpp
+++ b/src/hotspot/share/gc/g1/g1CardSet.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/concurrentHashTable.hpp"
class G1CardSetAllocOptions;
@@ -154,8 +155,8 @@ public:
private:
// Indices are "from" indices.
- size_t _coarsen_from[NumCoarsenCategories];
- size_t _coarsen_collision[NumCoarsenCategories];
+ Atomic<size_t> _coarsen_from[NumCoarsenCategories];
+ Atomic<size_t> _coarsen_collision[NumCoarsenCategories];
public:
G1CardSetCoarsenStats() { reset(); }
@@ -271,11 +272,11 @@ private:
// Total number of cards in this card set. This is a best-effort value, i.e. there may
// be (slightly) more cards in the card set than this value in reality.
- size_t _num_occupied;
+ Atomic<size_t> _num_occupied;
ContainerPtr make_container_ptr(void* value, uintptr_t type);
- ContainerPtr acquire_container(ContainerPtr volatile* container_addr);
+ ContainerPtr acquire_container(Atomic<ContainerPtr>* container_addr);
// Returns true if the card set container should be released
bool release_container(ContainerPtr container);
// Release card set and free if needed.
@@ -288,7 +289,7 @@ private:
// coarsen_container does not transfer cards from cur_container
// to the new container. Transfer is achieved by transfer_cards.
// Returns true if this was the thread that coarsened the container (and added the card).
- bool coarsen_container(ContainerPtr volatile* container_addr,
+ bool coarsen_container(Atomic<ContainerPtr>* container_addr,
ContainerPtr cur_container,
uint card_in_region, bool within_howl = false);
@@ -300,9 +301,9 @@ private:
void transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPtr source_container, uint card_region);
void transfer_cards_in_howl(ContainerPtr parent_container, ContainerPtr source_container, uint card_region);
- G1AddCardResult add_to_container(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_region, uint card, bool increment_total = true);
+ G1AddCardResult add_to_container(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_region, uint card, bool increment_total = true);
- G1AddCardResult add_to_inline_ptr(ContainerPtr volatile* container_addr, ContainerPtr container, uint card_in_region);
+ G1AddCardResult add_to_inline_ptr(Atomic<ContainerPtr>* container_addr, ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_array(ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_bitmap(ContainerPtr container, uint card_in_region);
G1AddCardResult add_to_howl(ContainerPtr parent_container, uint card_region, uint card_in_region, bool increment_total = true);
@@ -366,7 +367,6 @@ public:
size_t num_containers();
- static G1CardSetCoarsenStats coarsen_stats();
static void print_coarsen_stats(outputStream* out);
// Returns size of the actual remembered set containers in bytes.
@@ -412,8 +412,15 @@ public:
using ContainerPtr = G1CardSet::ContainerPtr;
const uint _region_idx;
- uint volatile _num_occupied;
- ContainerPtr volatile _container;
+ Atomic<uint> _num_occupied;
+ Atomic<ContainerPtr> _container;
+
+ // Copy constructor needed for use in ConcurrentHashTable.
+ G1CardSetHashTableValue(const G1CardSetHashTableValue& other) :
+ _region_idx(other._region_idx),
+ _num_occupied(other._num_occupied.load_relaxed()),
+ _container(other._container.load_relaxed())
+ { }
G1CardSetHashTableValue(uint region_idx, ContainerPtr container) : _region_idx(region_idx), _num_occupied(0), _container(container) { }
};
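The copy constructor added above suggests that wrapping the fields in Atomic<> removes the implicit copy that ConcurrentHashTable relies on, so the value type copies the loaded plain values instead. A standalone sketch of the same pattern with std::atomic, which is likewise non-copyable (struct and field names are illustrative assumptions):
```
#include <atomic>

struct Entry {
  const unsigned _region_idx;
  std::atomic<unsigned> _num_occupied;

  Entry(unsigned region_idx) : _region_idx(region_idx), _num_occupied(0) {}

  // The defaulted copy constructor would be deleted because std::atomic is
  // non-copyable; copy the loaded value into the new atomic instead.
  Entry(const Entry& other)
    : _region_idx(other._region_idx),
      _num_occupied(other._num_occupied.load(std::memory_order_relaxed)) {}
};
```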
diff --git a/src/hotspot/share/gc/g1/g1CardSetContainers.hpp b/src/hotspot/share/gc/g1/g1CardSetContainers.hpp
index 72c7795be2e..78551479e06 100644
--- a/src/hotspot/share/gc/g1/g1CardSetContainers.hpp
+++ b/src/hotspot/share/gc/g1/g1CardSetContainers.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "gc/g1/g1CardSet.hpp"
#include "memory/allocation.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -67,7 +67,7 @@ class G1CardSetInlinePtr : public StackObj {
using ContainerPtr = G1CardSet::ContainerPtr;
- ContainerPtr volatile * _value_addr;
+ Atomic<ContainerPtr>* _value_addr;
ContainerPtr _value;
static const uint SizeFieldLen = 3;
@@ -103,7 +103,7 @@ public:
explicit G1CardSetInlinePtr(ContainerPtr value) :
G1CardSetInlinePtr(nullptr, value) {}
- G1CardSetInlinePtr(ContainerPtr volatile* value_addr, ContainerPtr value) : _value_addr(value_addr), _value(value) {
+ G1CardSetInlinePtr(Atomic<ContainerPtr>* value_addr, ContainerPtr value) : _value_addr(value_addr), _value(value) {
assert(G1CardSet::container_type(_value) == G1CardSet::ContainerInlinePtr, "Value " PTR_FORMAT " is not a valid G1CardSetInlinePtr.", p2i(_value));
}
@@ -145,13 +145,13 @@ public:
// All but inline pointers are of this kind. For those, card entries are stored
// directly in the ContainerPtr of the ConcurrentHashTable node.
class G1CardSetContainer {
- uintptr_t _ref_count;
+ Atomic<uintptr_t> _ref_count;
protected:
~G1CardSetContainer() = default;
public:
G1CardSetContainer() : _ref_count(3) { }
- uintptr_t refcount() const { return AtomicAccess::load_acquire(&_ref_count); }
+ uintptr_t refcount() const { return _ref_count.load_acquire(); }
bool try_increment_refcount();
@@ -172,7 +172,7 @@ public:
using ContainerPtr = G1CardSet::ContainerPtr;
private:
EntryCountType _size;
- EntryCountType volatile _num_entries;
+ Atomic<EntryCountType> _num_entries;
// VLA implementation.
EntryDataType _data[1];
@@ -180,10 +180,10 @@ private:
static const EntryCountType EntryMask = LockBitMask - 1;
class G1CardSetArrayLocker : public StackObj {
- EntryCountType volatile* _num_entries_addr;
+ Atomic<EntryCountType>* _num_entries_addr;
EntryCountType _local_num_entries;
public:
- G1CardSetArrayLocker(EntryCountType volatile* value);
+ G1CardSetArrayLocker(Atomic<EntryCountType>* value);
EntryCountType num_entries() const { return _local_num_entries; }
void inc_num_entries() {
@@ -192,7 +192,7 @@ private:
}
~G1CardSetArrayLocker() {
- AtomicAccess::release_store(_num_entries_addr, _local_num_entries);
+ _num_entries_addr->release_store(_local_num_entries);
}
};
@@ -213,7 +213,7 @@ public:
template <class CardVisitor>
void iterate(CardVisitor& found);
- size_t num_entries() const { return _num_entries & EntryMask; }
+ size_t num_entries() const { return _num_entries.load_relaxed() & EntryMask; }
static size_t header_size_in_bytes();
@@ -223,7 +223,7 @@ public:
};
class G1CardSetBitMap : public G1CardSetContainer {
- size_t _num_bits_set;
+ Atomic<size_t> _num_bits_set;
BitMap::bm_word_t _bits[1];
public:
@@ -236,7 +236,7 @@ public:
return bm.at(card_idx);
}
- uint num_bits_set() const { return (uint)_num_bits_set; }
+ uint num_bits_set() const { return (uint)_num_bits_set.load_relaxed(); }
template <class CardVisitor>
void iterate(CardVisitor& found, size_t const size_in_bits, uint offset);
@@ -255,10 +255,10 @@ class G1CardSetHowl : public G1CardSetContainer {
public:
typedef uint EntryCountType;
using ContainerPtr = G1CardSet::ContainerPtr;
- EntryCountType volatile _num_entries;
+ Atomic<EntryCountType> _num_entries;
private:
// VLA implementation.
- ContainerPtr _buckets[1];
+ Atomic<ContainerPtr> _buckets[1];
// Do not add class member variables beyond this point.
// Iterates over the given ContainerPtr with at index in this Howl card set,
@@ -268,14 +268,14 @@ private:
ContainerPtr at(EntryCountType index) const;
- ContainerPtr const* buckets() const;
+ Atomic<ContainerPtr> const* buckets() const;
public:
G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config);
- ContainerPtr const* container_addr(EntryCountType index) const;
+ Atomic<ContainerPtr> const* container_addr(EntryCountType index) const;
- ContainerPtr* container_addr(EntryCountType index);
+ Atomic<ContainerPtr>* container_addr(EntryCountType index);
bool contains(uint card_idx, G1CardSetConfiguration* config);
// Iterates over all ContainerPtrs in this Howl card set, applying a CardOrRangeVisitor
diff --git a/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp b/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp
index 1958309f517..3c6fb9d1a02 100644
--- a/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -67,7 +67,7 @@ inline G1AddCardResult G1CardSetInlinePtr::add(uint card_idx, uint bits_per_card
return Overflow;
}
ContainerPtr new_value = merge(_value, card_idx, num_cards, bits_per_card);
- ContainerPtr old_value = AtomicAccess::cmpxchg(_value_addr, _value, new_value, memory_order_relaxed);
+ ContainerPtr old_value = _value_addr->compare_exchange(_value, new_value, memory_order_relaxed);
if (_value == old_value) {
return Added;
}
@@ -126,7 +126,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
}
uintptr_t new_value = old_value + 2;
- uintptr_t ref_count = AtomicAccess::cmpxchg(&_ref_count, old_value, new_value);
+ uintptr_t ref_count = _ref_count.compare_exchange(old_value, new_value);
if (ref_count == old_value) {
return true;
}
@@ -137,7 +137,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
inline uintptr_t G1CardSetContainer::decrement_refcount() {
uintptr_t old_value = refcount();
assert((old_value & 0x1) != 0 && old_value >= 3, "precondition");
- return AtomicAccess::sub(&_ref_count, 2u);
+ return _ref_count.sub_then_fetch(2u);
}
inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_cards) :
@@ -149,14 +149,13 @@ inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_ca
*entry_addr(0) = checked_cast<EntryDataType>(card_in_region);
}
-inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(EntryCountType volatile* num_entries_addr) :
+inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(Atomic<EntryCountType>* num_entries_addr) :
_num_entries_addr(num_entries_addr) {
SpinYield s;
- EntryCountType num_entries = AtomicAccess::load(_num_entries_addr) & EntryMask;
+ EntryCountType num_entries = _num_entries_addr->load_relaxed() & EntryMask;
while (true) {
- EntryCountType old_value = AtomicAccess::cmpxchg(_num_entries_addr,
- num_entries,
- (EntryCountType)(num_entries | LockBitMask));
+ EntryCountType old_value = _num_entries_addr->compare_exchange(num_entries,
+ (EntryCountType)(num_entries | LockBitMask));
if (old_value == num_entries) {
// Succeeded locking the array.
_local_num_entries = num_entries;
@@ -174,7 +173,7 @@ inline G1CardSetArray::EntryDataType const* G1CardSetArray::base_addr() const {
}
inline G1CardSetArray::EntryDataType const* G1CardSetArray::entry_addr(EntryCountType index) const {
- assert(index < _num_entries, "precondition");
+ assert(index < _num_entries.load_relaxed(), "precondition");
return base_addr() + index;
}
@@ -189,7 +188,7 @@ inline G1CardSetArray::EntryDataType G1CardSetArray::at(EntryCountType index) co
inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
assert(card_idx < (1u << (sizeof(EntryDataType) * BitsPerByte)),
"Card index %u does not fit allowed card value range.", card_idx);
- EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
+ EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
EntryCountType idx = 0;
for (; idx < num_entries; idx++) {
if (at(idx) == card_idx) {
@@ -223,7 +222,7 @@ inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
}
inline bool G1CardSetArray::contains(uint card_idx) {
- EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
+ EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) {
if (at(idx) == card_idx) {
@@ -235,7 +234,7 @@ inline bool G1CardSetArray::contains(uint card_idx) {
template <class CardVisitor>
void G1CardSetArray::iterate(CardVisitor& found) {
- EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
+ EntryCountType num_entries = _num_entries.load_acquire() & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) {
found(at(idx));
}
@@ -256,11 +255,11 @@ inline G1CardSetBitMap::G1CardSetBitMap(uint card_in_region, uint size_in_bits)
inline G1AddCardResult G1CardSetBitMap::add(uint card_idx, size_t threshold, size_t size_in_bits) {
BitMapView bm(_bits, size_in_bits);
- if (_num_bits_set >= threshold) {
+ if (_num_bits_set.load_relaxed() >= threshold) {
return bm.at(card_idx) ? Found : Overflow;
}
if (bm.par_set_bit(card_idx)) {
- AtomicAccess::inc(&_num_bits_set, memory_order_relaxed);
+ _num_bits_set.add_then_fetch(1u, memory_order_relaxed);
return Added;
}
return Found;
@@ -276,22 +275,22 @@ inline size_t G1CardSetBitMap::header_size_in_bytes() {
return offset_of(G1CardSetBitMap, _bits);
}
-inline G1CardSetHowl::ContainerPtr const* G1CardSetHowl::container_addr(EntryCountType index) const {
- assert(index < _num_entries, "precondition");
+inline Atomic<G1CardSetHowl::ContainerPtr> const* G1CardSetHowl::container_addr(EntryCountType index) const {
+ assert(index < _num_entries.load_relaxed(), "precondition");
return buckets() + index;
}
-inline G1CardSetHowl::ContainerPtr* G1CardSetHowl::container_addr(EntryCountType index) {
- return const_cast<ContainerPtr*>(const_cast<const G1CardSetHowl*>(this)->container_addr(index));
+inline Atomic<G1CardSetHowl::ContainerPtr>* G1CardSetHowl::container_addr(EntryCountType index) {
+ return const_cast<Atomic<ContainerPtr>*>(const_cast<const G1CardSetHowl*>(this)->container_addr(index));
}
inline G1CardSetHowl::ContainerPtr G1CardSetHowl::at(EntryCountType index) const {
- return *container_addr(index);
+ return (*container_addr(index)).load_relaxed();
}
-inline G1CardSetHowl::ContainerPtr const* G1CardSetHowl::buckets() const {
+inline Atomic<G1CardSetHowl::ContainerPtr> const* G1CardSetHowl::buckets() const {
const void* ptr = reinterpret_cast<const char*>(this) + header_size_in_bytes();
- return reinterpret_cast<ContainerPtr const*>(ptr);
+ return reinterpret_cast<Atomic<ContainerPtr> const*>(ptr);
}
inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConfiguration* config) :
@@ -300,7 +299,7 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf
EntryCountType num_buckets = config->num_buckets_in_howl();
EntryCountType bucket = config->howl_bucket_index(card_in_region);
for (uint i = 0; i < num_buckets; ++i) {
- *container_addr(i) = G1CardSetInlinePtr();
+ container_addr(i)->store_relaxed(G1CardSetInlinePtr());
if (i == bucket) {
G1CardSetInlinePtr value(container_addr(i), at(i));
value.add(card_in_region, config->inline_ptr_bits_per_card(), config->max_cards_in_inline_ptr());
@@ -310,8 +309,8 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf
inline bool G1CardSetHowl::contains(uint card_idx, G1CardSetConfiguration* config) {
EntryCountType bucket = config->howl_bucket_index(card_idx);
- ContainerPtr* array_entry = container_addr(bucket);
- ContainerPtr container = AtomicAccess::load_acquire(array_entry);
+ Atomic<ContainerPtr>* array_entry = container_addr(bucket);
+ ContainerPtr container = array_entry->load_acquire();
switch (G1CardSet::container_type(container)) {
case G1CardSet::ContainerArrayOfCards: {
diff --git a/src/hotspot/share/gc/g1/g1CardSetMemory.cpp b/src/hotspot/share/gc/g1/g1CardSetMemory.cpp
index d13a6fe2dca..60602ef942b 100644
--- a/src/hotspot/share/gc/g1/g1CardSetMemory.cpp
+++ b/src/hotspot/share/gc/g1/g1CardSetMemory.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#include "gc/g1/g1CardSetContainers.inline.hpp"
#include "gc/g1/g1CardSetMemory.inline.hpp"
#include "gc/g1/g1MonotonicArena.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/ostream.hpp"
G1CardSetAllocator::G1CardSetAllocator(const char* name,
diff --git a/src/hotspot/share/gc/g1/g1CardTableClaimTable.cpp b/src/hotspot/share/gc/g1/g1CardTableClaimTable.cpp
index e0cadbdd907..d8cabaa00a4 100644
--- a/src/hotspot/share/gc/g1/g1CardTableClaimTable.cpp
+++ b/src/hotspot/share/gc/g1/g1CardTableClaimTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,20 +44,20 @@ G1CardTableClaimTable::~G1CardTableClaimTable() {
void G1CardTableClaimTable::initialize(uint max_reserved_regions) {
assert(_card_claims == nullptr, "Must not be initialized twice");
- _card_claims = NEW_C_HEAP_ARRAY(uint, max_reserved_regions, mtGC);
+ _card_claims = NEW_C_HEAP_ARRAY(Atomic<uint>, max_reserved_regions, mtGC);
_max_reserved_regions = max_reserved_regions;
reset_all_to_unclaimed();
}
void G1CardTableClaimTable::reset_all_to_unclaimed() {
for (uint i = 0; i < _max_reserved_regions; i++) {
- _card_claims[i] = 0;
+ _card_claims[i].store_relaxed(0);
}
}
void G1CardTableClaimTable::reset_all_to_claimed() {
for (uint i = 0; i < _max_reserved_regions; i++) {
- _card_claims[i] = (uint)G1HeapRegion::CardsPerRegion;
+ _card_claims[i].store_relaxed((uint)G1HeapRegion::CardsPerRegion);
}
}
diff --git a/src/hotspot/share/gc/g1/g1CardTableClaimTable.hpp b/src/hotspot/share/gc/g1/g1CardTableClaimTable.hpp
index 4f524b83f97..822ef45c722 100644
--- a/src/hotspot/share/gc/g1/g1CardTableClaimTable.hpp
+++ b/src/hotspot/share/gc/g1/g1CardTableClaimTable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "gc/g1/g1CardTable.hpp"
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
class G1HeapRegionClosure;
@@ -45,7 +46,7 @@ class G1CardTableClaimTable : public CHeapObj<mtGC> {
// Card table iteration claim values for every heap region, from 0 (completely unclaimed)
// to (>=) G1HeapRegion::CardsPerRegion (completely claimed).
- uint volatile* _card_claims;
+ Atomic<uint>* _card_claims;
uint _cards_per_chunk; // For conversion between card index and chunk index.
diff --git a/src/hotspot/share/gc/g1/g1CardTableClaimTable.inline.hpp b/src/hotspot/share/gc/g1/g1CardTableClaimTable.inline.hpp
index d682f0d17ae..35b2484982c 100644
--- a/src/hotspot/share/gc/g1/g1CardTableClaimTable.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1CardTableClaimTable.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,26 +29,25 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
-#include "runtime/atomicAccess.hpp"
bool G1CardTableClaimTable::has_unclaimed_cards(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
- return AtomicAccess::load(&_card_claims[region]) < G1HeapRegion::CardsPerRegion;
+ return _card_claims[region].load_relaxed() < G1HeapRegion::CardsPerRegion;
}
void G1CardTableClaimTable::reset_to_unclaimed(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
- AtomicAccess::store(&_card_claims[region], 0u);
+ _card_claims[region].store_relaxed(0u);
}
uint G1CardTableClaimTable::claim_cards(uint region, uint increment) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
- return AtomicAccess::fetch_then_add(&_card_claims[region], increment, memory_order_relaxed);
+ return _card_claims[region].fetch_then_add(increment, memory_order_relaxed);
}
uint G1CardTableClaimTable::claim_chunk(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
- return AtomicAccess::fetch_then_add(&_card_claims[region], cards_per_chunk(), memory_order_relaxed);
+ return _card_claims[region].fetch_then_add(cards_per_chunk(), memory_order_relaxed);
}
uint G1CardTableClaimTable::claim_all_cards(uint region) {
diff --git a/src/hotspot/share/gc/g1/g1CodeRootSet.cpp b/src/hotspot/share/gc/g1/g1CodeRootSet.cpp
index 60ad3a2af32..ca4487876b9 100644
--- a/src/hotspot/share/gc/g1/g1CodeRootSet.cpp
+++ b/src/hotspot/share/gc/g1/g1CodeRootSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "gc/g1/g1HeapRegion.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
@@ -60,7 +60,7 @@ class G1CodeRootSetHashTable : public CHeapObj<mtGC> {
HashTable _table;
HashTableScanTask _table_scanner;
- size_t volatile _num_entries;
+ Atomic<size_t> _num_entries;
bool is_empty() const { return number_of_entries() == 0; }
@@ -120,7 +120,7 @@ public:
bool grow_hint = false;
bool inserted = _table.insert(Thread::current(), lookup, method, &grow_hint);
if (inserted) {
- AtomicAccess::inc(&_num_entries);
+ _num_entries.add_then_fetch(1u);
}
if (grow_hint) {
_table.grow(Thread::current());
@@ -131,7 +131,7 @@ public:
HashTableLookUp lookup(method);
bool removed = _table.remove(Thread::current(), lookup);
if (removed) {
- AtomicAccess::dec(&_num_entries);
+ _num_entries.sub_then_fetch(1u);
}
return removed;
}
@@ -182,7 +182,7 @@ public:
guarantee(succeeded, "unable to clean table");
if (num_deleted != 0) {
- size_t current_size = AtomicAccess::sub(&_num_entries, num_deleted);
+ size_t current_size = _num_entries.sub_then_fetch(num_deleted);
shrink_to_match(current_size);
}
}
@@ -226,7 +226,7 @@ public:
size_t mem_size() { return sizeof(*this) + _table.get_mem_size(Thread::current()); }
- size_t number_of_entries() const { return AtomicAccess::load(&_num_entries); }
+ size_t number_of_entries() const { return _num_entries.load_relaxed(); }
};
uintx G1CodeRootSetHashTable::HashTableLookUp::get_hash() const {
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 3a0c4a04441..b6c3c0b0907 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -103,7 +103,6 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index aff7166d391..8009df1fa6a 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,7 @@
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/bitMap.hpp"
@@ -124,7 +125,7 @@ class G1JavaThreadsListClaimer : public StackObj {
ThreadsListHandle _list;
uint _claim_step;
- volatile uint _cur_claim;
+ Atomic<uint> _cur_claim;
// Attempts to claim _claim_step JavaThreads, returning an array of claimed
// JavaThread* with count elements. Returns null (and a zero count) if there
@@ -1267,7 +1268,6 @@ public:
bool is_marked(oop obj) const;
- inline static bool is_obj_filler(const oop obj);
// Determine if an object is dead, given the object and also
// the region to which the object belongs.
inline bool is_obj_dead(const oop obj, const G1HeapRegion* hr) const;
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
index abd61e72d57..8782b65b6f9 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp"
+#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionManager.inline.hpp"
#include "gc/g1/g1HeapRegionRemSet.hpp"
@@ -38,10 +39,10 @@
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionPinCache.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
+#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/stackChunkOop.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "utilities/bitMap.inline.hpp"
@@ -53,10 +54,10 @@ inline bool G1STWIsAliveClosure::do_object_b(oop p) {
inline JavaThread* const* G1JavaThreadsListClaimer::claim(uint& count) {
count = 0;
- if (AtomicAccess::load(&_cur_claim) >= _list.length()) {
+ if (_cur_claim.load_relaxed() >= _list.length()) {
return nullptr;
}
- uint claim = AtomicAccess::fetch_then_add(&_cur_claim, _claim_step);
+ uint claim = _cur_claim.fetch_then_add(_claim_step);
if (claim >= _list.length()) {
return nullptr;
}
@@ -230,16 +231,11 @@ inline bool G1CollectedHeap::requires_barriers(stackChunkOop obj) const {
return !heap_region_containing(obj)->is_young(); // is_in_young does an unnecessary null check
}
-inline bool G1CollectedHeap::is_obj_filler(const oop obj) {
- Klass* k = obj->klass_without_asserts();
- return k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass();
-}
-
inline bool G1CollectedHeap::is_obj_dead(const oop obj, const G1HeapRegion* hr) const {
assert(!hr->is_free(), "looking up obj " PTR_FORMAT " in Free region %u", p2i(obj), hr->hrm_index());
if (hr->is_in_parsable_area(obj)) {
// This object is in the parsable part of the heap, live unless scrubbed.
- return is_obj_filler(obj);
+ return is_filler_object(obj);
} else {
// From Remark until a region has been concurrently scrubbed, parts of the
// region is not guaranteed to be parsable. Use the bitmap for liveness.
diff --git a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
index 954ca40a77f..e7bab32129e 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/shared/space.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/quickSort.hpp"
// Determine collection set candidates (from marking): For all regions determine
@@ -50,7 +50,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
G1HeapRegion** _data;
- uint volatile _cur_claim_idx;
+ Atomic<uint> _cur_claim_idx;
static int compare_region_gc_efficiency(G1HeapRegion** rr1, G1HeapRegion** rr2) {
G1HeapRegion* r1 = *rr1;
@@ -105,7 +105,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
// Claim a new chunk, returning its bounds [from, to[.
void claim_chunk(uint& from, uint& to) {
- uint result = AtomicAccess::add(&_cur_claim_idx, _chunk_size);
+ uint result = _cur_claim_idx.add_then_fetch(_chunk_size);
assert(_max_size > result - 1,
"Array too small, is %u should be %u with chunk size %u.",
_max_size, result, _chunk_size);
@@ -121,14 +121,15 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
}
void sort_by_gc_efficiency() {
- if (_cur_claim_idx == 0) {
+ uint length = _cur_claim_idx.load_relaxed();
+ if (length == 0) {
return;
}
- for (uint i = _cur_claim_idx; i < _max_size; i++) {
+ for (uint i = length; i < _max_size; i++) {
assert(_data[i] == nullptr, "must be");
}
- qsort(_data, _cur_claim_idx, sizeof(_data[0]), (_sort_Fn)compare_region_gc_efficiency);
- for (uint i = _cur_claim_idx; i < _max_size; i++) {
+ qsort(_data, length, sizeof(_data[0]), (_sort_Fn)compare_region_gc_efficiency);
+ for (uint i = length; i < _max_size; i++) {
assert(_data[i] == nullptr, "must be");
}
}
@@ -202,13 +203,13 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
G1CollectedHeap* _g1h;
G1HeapRegionClaimer _hrclaimer;
- uint volatile _num_regions_added;
+ Atomic<uint> _num_regions_added;
G1BuildCandidateArray _result;
void update_totals(uint num_regions) {
if (num_regions > 0) {
- AtomicAccess::add(&_num_regions_added, num_regions);
+ _num_regions_added.add_then_fetch(num_regions);
}
}
@@ -220,7 +221,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
void prune(G1HeapRegion** data) {
G1Policy* p = G1CollectedHeap::heap()->policy();
- uint num_candidates = AtomicAccess::load(&_num_regions_added);
+ uint num_candidates = _num_regions_added.load_relaxed();
uint min_old_cset_length = p->calc_min_old_cset_length(num_candidates);
uint num_pruned = 0;
@@ -253,7 +254,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
wasted_bytes,
allowed_waste);
- AtomicAccess::sub(&_num_regions_added, num_pruned, memory_order_relaxed);
+ _num_regions_added.sub_then_fetch(num_pruned, memory_order_relaxed);
}
public:
@@ -274,7 +275,7 @@ public:
_result.sort_by_gc_efficiency();
prune(_result.array());
candidates->set_candidates_from_marking(_result.array(),
- _num_regions_added);
+ _num_regions_added.load_relaxed());
}
};
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index 456d543fa10..2bbfb5032b3 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,9 @@
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
+#include "gc/shared/partialArraySplitter.inline.hpp"
+#include "gc/shared/partialArrayState.hpp"
+#include "gc/shared/partialArrayTaskStats.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
@@ -67,7 +70,6 @@
#include "nmt/memTracker.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
@@ -76,6 +78,7 @@
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/align.hpp"
+#include "utilities/checkedCast.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/powerOfTwo.hpp"
@@ -99,7 +102,7 @@ bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
// We move that task's local finger along.
_task->move_finger_to(addr);
- _task->scan_task_entry(G1TaskQueueEntry::from_oop(cast_to_oop(addr)));
+ _task->process_entry(G1TaskQueueEntry(cast_to_oop(addr)), false /* stolen */);
// we only partially drain the local queue and global stack
_task->drain_local_queue(true);
_task->drain_global_stack(true);
@@ -148,25 +151,25 @@ bool G1CMMarkStack::initialize() {
}
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::ChunkAllocator::allocate_new_chunk() {
- if (_size >= _max_capacity) {
+ if (_size.load_relaxed() >= _max_capacity) {
return nullptr;
}
- size_t cur_idx = AtomicAccess::fetch_then_add(&_size, 1u);
+ size_t cur_idx = _size.fetch_then_add(1u);
if (cur_idx >= _max_capacity) {
return nullptr;
}
size_t bucket = get_bucket(cur_idx);
- if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) {
+ if (_buckets[bucket].load_acquire() == nullptr) {
if (!_should_grow) {
// Prefer to restart the CM.
return nullptr;
}
MutexLocker x(G1MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
- if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) {
+ if (_buckets[bucket].load_acquire() == nullptr) {
size_t desired_capacity = bucket_size(bucket) * 2;
if (!try_expand_to(desired_capacity)) {
return nullptr;
@@ -175,7 +178,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::ChunkAllocator::allocate_new_
}
size_t bucket_idx = get_bucket_index(cur_idx);
- TaskQueueEntryChunk* result = ::new (&_buckets[bucket][bucket_idx]) TaskQueueEntryChunk;
+ TaskQueueEntryChunk* result = ::new (&_buckets[bucket].load_relaxed()[bucket_idx]) TaskQueueEntryChunk;
result->next = nullptr;
return result;
}
@@ -197,10 +200,10 @@ bool G1CMMarkStack::ChunkAllocator::initialize(size_t initial_capacity, size_t m
_max_capacity = max_capacity;
_num_buckets = get_bucket(_max_capacity) + 1;
- _buckets = NEW_C_HEAP_ARRAY(TaskQueueEntryChunk*, _num_buckets, mtGC);
+ _buckets = NEW_C_HEAP_ARRAY(Atomic<TaskQueueEntryChunk*>, _num_buckets, mtGC);
for (size_t i = 0; i < _num_buckets; i++) {
- _buckets[i] = nullptr;
+ _buckets[i].store_relaxed(nullptr);
}
size_t new_capacity = bucket_size(0);
@@ -240,9 +243,9 @@ G1CMMarkStack::ChunkAllocator::~ChunkAllocator() {
}
for (size_t i = 0; i < _num_buckets; i++) {
- if (_buckets[i] != nullptr) {
- MmapArrayAllocator<TaskQueueEntryChunk>::free(_buckets[i], bucket_size(i));
- _buckets[i] = nullptr;
+ if (_buckets[i].load_relaxed() != nullptr) {
+ MmapArrayAllocator<TaskQueueEntryChunk>::free(_buckets[i].load_relaxed(), bucket_size(i));
+ _buckets[i].store_relaxed(nullptr);
}
}
@@ -259,7 +262,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
// and the new capacity (new_capacity). This step ensures that there are no gaps in the
// array and that the capacity accurately reflects the reserved memory.
for (; i <= highest_bucket; i++) {
- if (AtomicAccess::load_acquire(&_buckets[i]) != nullptr) {
+ if (_buckets[i].load_acquire() != nullptr) {
continue; // Skip over already allocated buckets.
}
@@ -279,7 +282,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
return false;
}
_capacity += bucket_capacity;
- AtomicAccess::release_store(&_buckets[i], bucket_base);
+ _buckets[i].release_store(bucket_base);
}
return true;
}
@@ -288,9 +291,9 @@ void G1CMMarkStack::expand() {
_chunk_allocator.try_expand();
}
-void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
- elem->next = *list;
- *list = elem;
+void G1CMMarkStack::add_chunk_to_list(Atomic<TaskQueueEntryChunk*>* list, TaskQueueEntryChunk* elem) {
+ elem->next = list->load_relaxed();
+ list->store_relaxed(elem);
}
void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
@@ -304,10 +307,10 @@ void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
add_chunk_to_list(&_free_list, elem);
}
-G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
- TaskQueueEntryChunk* result = *list;
+G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(Atomic<TaskQueueEntryChunk*>* list) {
+ TaskQueueEntryChunk* result = list->load_relaxed();
if (result != nullptr) {
- *list = (*list)->next;
+ list->store_relaxed(list->load_relaxed()->next);
}
return result;
}
@@ -361,8 +364,8 @@ bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
void G1CMMarkStack::set_empty() {
_chunks_in_chunk_list = 0;
- _chunk_list = nullptr;
- _free_list = nullptr;
+ _chunk_list.store_relaxed(nullptr);
+ _free_list.store_relaxed(nullptr);
_chunk_allocator.reset();
}
@@ -490,6 +493,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
_task_queues(new G1CMTaskQueueSet(_max_num_tasks)),
_terminator(_max_num_tasks, _task_queues),
+ _partial_array_state_manager(new PartialArrayStateManager(_max_num_tasks)),
_first_overflow_barrier_sync(),
_second_overflow_barrier_sync(),
@@ -556,6 +560,10 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
reset_at_marking_complete();
}
+PartialArrayStateManager* G1ConcurrentMark::partial_array_state_manager() const {
+ return _partial_array_state_manager;
+}
+
void G1ConcurrentMark::reset() {
_has_aborted = false;
@@ -650,7 +658,26 @@ void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurr
}
}
+#if TASKQUEUE_STATS
+void G1ConcurrentMark::print_and_reset_taskqueue_stats() {
+
+ _task_queues->print_and_reset_taskqueue_stats("G1ConcurrentMark Oop Queue");
+
+ auto get_pa_stats = [&](uint i) {
+ return _tasks[i]->partial_array_task_stats();
+ };
+
+ PartialArrayTaskStats::log_set(_max_num_tasks, get_pa_stats,
+ "G1ConcurrentMark Partial Array Task Stats");
+
+ for (uint i = 0; i < _max_num_tasks; ++i) {
+ get_pa_stats(i)->reset();
+ }
+}
+#endif
+
void G1ConcurrentMark::reset_at_marking_complete() {
+ TASKQUEUE_STATS_ONLY(print_and_reset_taskqueue_stats());
// We set the global marking state to some default values when we're
// not doing marking.
reset_marking_for_restart();
@@ -804,11 +831,25 @@ void G1ConcurrentMark::cleanup_for_next_mark() {
clear_bitmap(_concurrent_workers, true);
+ reset_partial_array_state_manager();
+
// Repeat the asserts from above.
guarantee(cm_thread()->in_progress(), "invariant");
guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}
+void G1ConcurrentMark::reset_partial_array_state_manager() {
+ for (uint i = 0; i < _max_num_tasks; ++i) {
+ _tasks[i]->unregister_partial_array_splitter();
+ }
+
+ partial_array_state_manager()->reset();
+
+ for (uint i = 0; i < _max_num_tasks; ++i) {
+ _tasks[i]->register_partial_array_splitter();
+ }
+}
+
void G1ConcurrentMark::clear_bitmap(WorkerThreads* workers) {
assert_at_safepoint_on_vm_thread();
// To avoid fragmentation the full collection requesting to clear the bitmap
@@ -1789,17 +1830,18 @@ public:
{ }
void operator()(G1TaskQueueEntry task_entry) const {
- if (task_entry.is_array_slice()) {
- guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
+ if (task_entry.is_partial_array_state()) {
+ oop obj = task_entry.to_partial_array_state()->source();
+ guarantee(_g1h->is_in_reserved(obj), "Partial Array " PTR_FORMAT " must be in heap.", p2i(obj));
return;
}
- guarantee(oopDesc::is_oop(task_entry.obj()),
+ guarantee(oopDesc::is_oop(task_entry.to_oop()),
"Non-oop " PTR_FORMAT ", phase: %s, info: %d",
- p2i(task_entry.obj()), _phase, _info);
- G1HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
+ p2i(task_entry.to_oop()), _phase, _info);
+ G1HeapRegion* r = _g1h->heap_region_containing(task_entry.to_oop());
guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
"obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
- p2i(task_entry.obj()), _phase, _info, r->hrm_index());
+ p2i(task_entry.to_oop()), _phase, _info, r->hrm_index());
}
};
@@ -2055,6 +2097,17 @@ void G1CMTask::reset(G1CMBitMap* mark_bitmap) {
_mark_stats_cache.reset();
}
+void G1CMTask::register_partial_array_splitter() {
+
+ ::new (&_partial_array_splitter) PartialArraySplitter(_cm->partial_array_state_manager(),
+ _cm->max_num_tasks(),
+ ObjArrayMarkingStride);
+}
+
+void G1CMTask::unregister_partial_array_splitter() {
+ _partial_array_splitter.~PartialArraySplitter();
+}
+
bool G1CMTask::should_exit_termination() {
if (!regular_clock_call()) {
return true;
@@ -2185,7 +2238,7 @@ bool G1CMTask::get_entries_from_global_stack() {
if (task_entry.is_null()) {
break;
}
- assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
+ assert(task_entry.is_partial_array_state() || oopDesc::is_oop(task_entry.to_oop()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.to_oop()));
bool success = _task_queue->push(task_entry);
// We only call this when the local queue is empty or under a
// given target limit. So, we do not expect this push to fail.
@@ -2216,7 +2269,7 @@ void G1CMTask::drain_local_queue(bool partially) {
G1TaskQueueEntry entry;
bool ret = _task_queue->pop_local(entry);
while (ret) {
- scan_task_entry(entry);
+ process_entry(entry, false /* stolen */);
if (_task_queue->size() <= target_size || has_aborted()) {
ret = false;
} else {
@@ -2226,6 +2279,37 @@ void G1CMTask::drain_local_queue(bool partially) {
}
}
+size_t G1CMTask::start_partial_array_processing(oop obj) {
+ assert(should_be_sliced(obj), "Must be an array object %d and large %zu", obj->is_objArray(), obj->size());
+
+ objArrayOop obj_array = objArrayOop(obj);
+ size_t array_length = obj_array->length();
+
+ size_t initial_chunk_size = _partial_array_splitter.start(_task_queue, obj_array, nullptr, array_length);
+
+ // Mark objArray klass metadata
+ if (_cm_oop_closure->do_metadata()) {
+ _cm_oop_closure->do_klass(obj_array->klass());
+ }
+
+ process_array_chunk(obj_array, 0, initial_chunk_size);
+
+ // Include object header size
+ return objArrayOopDesc::object_size(checked_cast<int>(initial_chunk_size));
+}
+
+size_t G1CMTask::process_partial_array(const G1TaskQueueEntry& task, bool stolen) {
+ PartialArrayState* state = task.to_partial_array_state();
+ // Access state before release by claim().
+ objArrayOop obj = objArrayOop(state->source());
+
+ PartialArraySplitter::Claim claim =
+ _partial_array_splitter.claim(state, _task_queue, stolen);
+
+ process_array_chunk(obj, claim._start, claim._end);
+ return heap_word_size((claim._end - claim._start) * heapOopSize);
+}
+
void G1CMTask::drain_global_stack(bool partially) {
if (has_aborted()) {
return;
@@ -2430,7 +2514,7 @@ void G1CMTask::attempt_stealing() {
while (!has_aborted()) {
G1TaskQueueEntry entry;
if (_cm->try_stealing(_worker_id, entry)) {
- scan_task_entry(entry);
+ process_entry(entry, true /* stolen */);
// And since we're towards the end, let's totally drain the
// local queue and global stack.
@@ -2759,12 +2843,12 @@ G1CMTask::G1CMTask(uint worker_id,
G1ConcurrentMark* cm,
G1CMTaskQueue* task_queue,
G1RegionMarkStats* mark_stats) :
- _objArray_processor(this),
_worker_id(worker_id),
_g1h(G1CollectedHeap::heap()),
_cm(cm),
_mark_bitmap(nullptr),
_task_queue(task_queue),
+ _partial_array_splitter(_cm->partial_array_state_manager(), _cm->max_num_tasks(), ObjArrayMarkingStride),
_mark_stats_cache(mark_stats, G1RegionMarkStatsCache::RegionMarkStatsCacheSize),
_calls(0),
_time_target_ms(0.0),
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
index 752082ce629..836d7793f81 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,17 +26,20 @@
#define SHARE_GC_G1_G1CONCURRENTMARK_HPP
#include "gc/g1/g1ConcurrentMarkBitMap.hpp"
-#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
#include "gc/g1/g1HeapRegionSet.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1RegionMarkStatsCache.hpp"
#include "gc/shared/gcCause.hpp"
+#include "gc/shared/partialArraySplitter.hpp"
+#include "gc/shared/partialArrayState.hpp"
+#include "gc/shared/partialArrayTaskStats.hpp"
#include "gc/shared/taskqueue.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/compilerWarnings.hpp"
#include "utilities/numberSeq.hpp"
@@ -53,41 +56,7 @@ class G1RegionToSpaceMapper;
class G1SurvivorRegions;
class ThreadClosure;
-// This is a container class for either an oop or a continuation address for
-// mark stack entries. Both are pushed onto the mark stack.
-class G1TaskQueueEntry {
-private:
- void* _holder;
-
- static const uintptr_t ArraySliceBit = 1;
-
- G1TaskQueueEntry(oop obj) : _holder(obj) {
- assert(_holder != nullptr, "Not allowed to set null task queue element");
- }
- G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }
-public:
-
- G1TaskQueueEntry() : _holder(nullptr) { }
- // Trivially copyable, for use in GenericTaskQueue.
-
- static G1TaskQueueEntry from_slice(HeapWord* what) { return G1TaskQueueEntry(what); }
- static G1TaskQueueEntry from_oop(oop obj) { return G1TaskQueueEntry(obj); }
-
- oop obj() const {
- assert(!is_array_slice(), "Trying to read array slice " PTR_FORMAT " as oop", p2i(_holder));
- return cast_to_oop(_holder);
- }
-
- HeapWord* slice() const {
- assert(is_array_slice(), "Trying to read oop " PTR_FORMAT " as array slice", p2i(_holder));
- return (HeapWord*)((uintptr_t)_holder & ~ArraySliceBit);
- }
-
- bool is_oop() const { return !is_array_slice(); }
- bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }
- bool is_null() const { return _holder == nullptr; }
-};
-
+typedef ScannerTask G1TaskQueueEntry;
typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
@@ -172,9 +141,9 @@ private:
size_t _capacity;
size_t _num_buckets;
bool _should_grow;
- TaskQueueEntryChunk* volatile* _buckets;
+ Atomic<TaskQueueEntryChunk*>* _buckets;
char _pad0[DEFAULT_PADDING_SIZE];
- volatile size_t _size;
+ Atomic<size_t> _size;
char _pad4[DEFAULT_PADDING_SIZE - sizeof(size_t)];
size_t bucket_size(size_t bucket) {
@@ -212,7 +181,7 @@ private:
bool initialize(size_t initial_capacity, size_t max_capacity);
void reset() {
- _size = 0;
+ _size.store_relaxed(0);
_should_grow = false;
}
@@ -241,17 +210,17 @@ private:
ChunkAllocator _chunk_allocator;
char _pad0[DEFAULT_PADDING_SIZE];
- TaskQueueEntryChunk* volatile _free_list; // Linked list of free chunks that can be allocated by users.
+ Atomic<TaskQueueEntryChunk*> _free_list; // Linked list of free chunks that can be allocated by users.
char _pad1[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*)];
- TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data.
+ Atomic<TaskQueueEntryChunk*> _chunk_list; // List of chunks currently containing data.
volatile size_t _chunks_in_chunk_list;
char _pad2[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];
// Atomically add the given chunk to the list.
- void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem);
+ void add_chunk_to_list(Atomic<TaskQueueEntryChunk*>* list, TaskQueueEntryChunk* elem);
// Atomically remove and return a chunk from the given list. Returns null if the
// list is empty.
- TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list);
+ TaskQueueEntryChunk* remove_chunk_from_list(Atomic<TaskQueueEntryChunk*>* list);
void add_chunk_to_chunk_list(TaskQueueEntryChunk* elem);
void add_chunk_to_free_list(TaskQueueEntryChunk* elem);
@@ -283,7 +252,7 @@ private:
// Return whether the chunk list is empty. Racy due to unsynchronized access to
// _chunk_list.
- bool is_empty() const { return _chunk_list == nullptr; }
+ bool is_empty() const { return _chunk_list.load_relaxed() == nullptr; }
size_t capacity() const { return _chunk_allocator.capacity(); }
@@ -411,6 +380,8 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
G1CMTaskQueueSet* _task_queues; // Task queue set
TaskTerminator _terminator; // For termination
+ PartialArrayStateManager* _partial_array_state_manager;
+
// Two sync barriers that are used to synchronize tasks when an
// overflow occurs. The algorithm is the following. All tasks enter
// the first one to ensure that they have all stopped manipulating
@@ -488,6 +459,8 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
// Prints all gathered CM-related statistics
void print_stats();
+ void print_and_reset_taskqueue_stats();
+
HeapWord* finger() { return _finger; }
bool concurrent() { return _concurrent; }
uint active_tasks() { return _num_active_tasks; }
@@ -556,14 +529,14 @@ public:
// mark_in_bitmap call. Updates various statistics data.
void add_to_liveness(uint worker_id, oop const obj, size_t size);
// Did the last marking find a live object between bottom and TAMS?
- bool contains_live_object(uint region) const { return _region_mark_stats[region]._live_words != 0; }
+ bool contains_live_object(uint region) const { return _region_mark_stats[region].live_words() != 0; }
// Live bytes in the given region as determined by concurrent marking, i.e. the amount of
// live bytes between bottom and TAMS.
- size_t live_bytes(uint region) const { return _region_mark_stats[region]._live_words * HeapWordSize; }
+ size_t live_bytes(uint region) const { return _region_mark_stats[region].live_words() * HeapWordSize; }
// Set live bytes for concurrent marking.
- void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words = live_bytes / HeapWordSize; }
+ void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words.store_relaxed(live_bytes / HeapWordSize); }
// Approximate number of incoming references found during marking.
- size_t incoming_refs(uint region) const { return _region_mark_stats[region]._incoming_refs; }
+ size_t incoming_refs(uint region) const { return _region_mark_stats[region].incoming_refs(); }
// Update the TAMS for the given region to the current top.
inline void update_top_at_mark_start(G1HeapRegion* r);
@@ -582,6 +555,8 @@ public:
uint worker_id_offset() const { return _worker_id_offset; }
+ uint max_num_tasks() const {return _max_num_tasks; }
+
// Clear statistics gathered during the concurrent cycle for the given region after
// it has been reclaimed.
void clear_statistics(G1HeapRegion* r);
@@ -631,6 +606,8 @@ public:
// Calculates the number of concurrent GC threads to be used in the marking phase.
uint calc_active_marking_workers();
+ PartialArrayStateManager* partial_array_state_manager() const;
+
// Resets the global marking data structures, as well as the
// task local ones; should be called during concurrent start.
void reset();
@@ -642,6 +619,10 @@ public:
// to be called concurrently to the mutator. It will yield to safepoint requests.
void cleanup_for_next_mark();
+ // Recycle the memory that has been requested by allocators associated with
+ // this manager.
+ void reset_partial_array_state_manager();
+
// Clear the next marking bitmap during safepoint.
void clear_bitmap(WorkerThreads* workers);
@@ -732,14 +713,13 @@ private:
refs_reached_period = 1024,
};
- G1CMObjArrayProcessor _objArray_processor;
-
uint _worker_id;
G1CollectedHeap* _g1h;
G1ConcurrentMark* _cm;
G1CMBitMap* _mark_bitmap;
// the task queue of this task
G1CMTaskQueue* _task_queue;
+ PartialArraySplitter _partial_array_splitter;
G1RegionMarkStatsCache _mark_stats_cache;
// Number of calls to this task
@@ -850,13 +830,24 @@ private:
// mark bitmap scan, and so needs to be pushed onto the mark stack.
bool is_below_finger(oop obj, HeapWord* global_finger) const;
- template <bool scan> void process_grey_task_entry(G1TaskQueueEntry task_entry);
+ template <bool scan> void process_grey_task_entry(G1TaskQueueEntry task_entry, bool stolen);
+
+ static bool should_be_sliced(oop obj);
+ // Start processing the given objArrayOop by first pushing its continuations and
+ // then scanning the first chunk including the header.
+ size_t start_partial_array_processing(oop obj);
+ // Process the given continuation. Returns the number of words scanned.
+ size_t process_partial_array(const G1TaskQueueEntry& task, bool stolen);
+ // Apply the closure to the given range of elements in the objArray.
+ inline void process_array_chunk(objArrayOop obj, size_t start, size_t end);
public:
- // Apply the closure on the given area of the objArray. Return the number of words
- // scanned.
- inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
// Resets the task; should be called right at the beginning of a marking phase.
void reset(G1CMBitMap* mark_bitmap);
+ // Register/unregister Partial Array Splitter Allocator with the PartialArrayStateManager.
+ // This allows us to discard memory arenas used for partial object array states at the end
+ // of a concurrent mark cycle.
+ void register_partial_array_splitter();
+ void unregister_partial_array_splitter();
// Clears all the fields that correspond to a claimed region.
void clear_region_fields();
@@ -912,7 +903,7 @@ public:
inline bool deal_with_reference(T* p);
// Scans an object and visits its children.
- inline void scan_task_entry(G1TaskQueueEntry task_entry);
+ inline void process_entry(G1TaskQueueEntry task_entry, bool stolen);
// Pushes an object on the local queue.
inline void push(G1TaskQueueEntry task_entry);
@@ -957,6 +948,11 @@ public:
Pair flush_mark_stats_cache();
// Prints statistics associated with this task
void print_stats();
+#if TASKQUEUE_STATS
+ PartialArrayTaskStats* partial_array_task_stats() {
+ return _partial_array_splitter.stats();
+ }
+#endif
};
// Class that's used to to print out per-region liveness
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
index 6f71012ff7c..2f4824e4cae 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
-#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
@@ -39,6 +38,7 @@
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "utilities/bitMap.inline.hpp"
+#include "utilities/checkedCast.hpp"
inline bool G1CMIsAliveClosure::do_object_b(oop obj) {
// Check whether the passed in object is null. During discovery the referent
@@ -90,7 +90,7 @@ inline void G1CMMarkStack::iterate(Fn fn) const {
size_t num_chunks = 0;
- TaskQueueEntryChunk* cur = _chunk_list;
+ TaskQueueEntryChunk* cur = _chunk_list.load_relaxed();
while (cur != nullptr) {
guarantee(num_chunks <= _chunks_in_chunk_list, "Found %zu oop chunks which is more than there should be", num_chunks);
@@ -107,13 +107,15 @@ inline void G1CMMarkStack::iterate(Fn fn) const {
#endif
// It scans an object and visits its children.
-inline void G1CMTask::scan_task_entry(G1TaskQueueEntry task_entry) { process_grey_task_entry<true>(task_entry); }
+inline void G1CMTask::process_entry(G1TaskQueueEntry task_entry, bool stolen) {
+ process_grey_task_entry<true>(task_entry, stolen);
+}
inline void G1CMTask::push(G1TaskQueueEntry task_entry) {
- assert(task_entry.is_array_slice() || _g1h->is_in_reserved(task_entry.obj()), "invariant");
- assert(task_entry.is_array_slice() || !_g1h->is_on_master_free_list(
- _g1h->heap_region_containing(task_entry.obj())), "invariant");
- assert(task_entry.is_array_slice() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())), "invariant");
+ assert(task_entry.is_partial_array_state() || _g1h->is_in_reserved(task_entry.to_oop()), "invariant");
+ assert(task_entry.is_partial_array_state() || !_g1h->is_on_master_free_list(
+ _g1h->heap_region_containing(task_entry.to_oop())), "invariant");
+ assert(task_entry.is_partial_array_state() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.to_oop())), "invariant");
if (!_task_queue->push(task_entry)) {
// The local task queue looks full. We need to push some entries
@@ -159,29 +161,34 @@ inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
}
template <bool scan>
-inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry) {
- assert(scan || (task_entry.is_oop() && task_entry.obj()->is_typeArray()), "Skipping scan of grey non-typeArray");
- assert(task_entry.is_array_slice() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())),
+inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry, bool stolen) {
+ assert(scan || (!task_entry.is_partial_array_state() && task_entry.to_oop()->is_typeArray()), "Skipping scan of grey non-typeArray");
+ assert(task_entry.is_partial_array_state() || _mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.to_oop())),
"Any stolen object should be a slice or marked");
if (scan) {
- if (task_entry.is_array_slice()) {
- _words_scanned += _objArray_processor.process_slice(task_entry.slice());
+ if (task_entry.is_partial_array_state()) {
+ _words_scanned += process_partial_array(task_entry, stolen);
} else {
- oop obj = task_entry.obj();
- if (G1CMObjArrayProcessor::should_be_sliced(obj)) {
- _words_scanned += _objArray_processor.process_obj(obj);
+ oop obj = task_entry.to_oop();
+ if (should_be_sliced(obj)) {
+ _words_scanned += start_partial_array_processing(obj);
} else {
- _words_scanned += obj->oop_iterate_size(_cm_oop_closure);;
+ _words_scanned += obj->oop_iterate_size(_cm_oop_closure);
}
}
}
check_limits();
}
-inline size_t G1CMTask::scan_objArray(objArrayOop obj, MemRegion mr) {
- obj->oop_iterate(_cm_oop_closure, mr);
- return mr.word_size();
+inline bool G1CMTask::should_be_sliced(oop obj) {
+ return obj->is_objArray() && ((objArrayOop)obj)->length() >= (int)ObjArrayMarkingStride;
+}
+
+inline void G1CMTask::process_array_chunk(objArrayOop obj, size_t start, size_t end) {
+ obj->oop_iterate_elements_range(_cm_oop_closure,
+ checked_cast<int>(start),
+ checked_cast<int>(end));
}
inline void G1ConcurrentMark::update_top_at_mark_start(G1HeapRegion* r) {
@@ -265,7 +272,7 @@ inline bool G1CMTask::make_reference_grey(oop obj) {
// be pushed on the stack. So, some duplicate work, but no
// correctness problems.
if (is_below_finger(obj, global_finger)) {
- G1TaskQueueEntry entry = G1TaskQueueEntry::from_oop(obj);
+ G1TaskQueueEntry entry(obj);
if (obj->is_typeArray()) {
// Immediately process arrays of primitive types, rather
// than pushing on the mark stack. This keeps us from
@@ -277,7 +284,7 @@ inline bool G1CMTask::make_reference_grey(oop obj) {
// by only doing a bookkeeping update and avoiding the
// actual scan of the object - a typeArray contains no
// references, and the metadata is built-in.
- process_grey_task_entry<false>(entry);
+ process_grey_task_entry<false>(entry, false /* stolen */);
} else {
push(entry);
}
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp
deleted file mode 100644
index 7f62e5527d5..00000000000
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1ConcurrentMark.inline.hpp"
-#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
-#include "gc/g1/g1HeapRegion.inline.hpp"
-#include "gc/shared/gc_globals.hpp"
-#include "memory/memRegion.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-void G1CMObjArrayProcessor::push_array_slice(HeapWord* what) {
- _task->push(G1TaskQueueEntry::from_slice(what));
-}
-
-size_t G1CMObjArrayProcessor::process_array_slice(objArrayOop obj, HeapWord* start_from, size_t remaining) {
- size_t words_to_scan = MIN2(remaining, (size_t)ObjArrayMarkingStride);
-
- if (remaining > ObjArrayMarkingStride) {
- push_array_slice(start_from + ObjArrayMarkingStride);
- }
-
- // Then process current area.
- MemRegion mr(start_from, words_to_scan);
- return _task->scan_objArray(obj, mr);
-}
-
-size_t G1CMObjArrayProcessor::process_obj(oop obj) {
- assert(should_be_sliced(obj), "Must be an array object %d and large %zu", obj->is_objArray(), obj->size());
-
- return process_array_slice(objArrayOop(obj), cast_from_oop<HeapWord*>(obj), objArrayOop(obj)->size());
-}
-
-size_t G1CMObjArrayProcessor::process_slice(HeapWord* slice) {
-
- // Find the start address of the objArrayOop.
- // Shortcut the BOT access if the given address is from a humongous object. The BOT
- // slide is fast enough for "smaller" objects in non-humongous regions, but is slower
- // than directly using heap region table.
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
- G1HeapRegion* r = g1h->heap_region_containing(slice);
-
- HeapWord* const start_address = r->is_humongous() ?
- r->humongous_start_region()->bottom() :
- r->block_start(slice);
-
- assert(cast_to_oop(start_address)->is_objArray(), "Address " PTR_FORMAT " does not refer to an object array ", p2i(start_address));
- assert(start_address < slice,
- "Object start address " PTR_FORMAT " must be smaller than decoded address " PTR_FORMAT,
- p2i(start_address),
- p2i(slice));
-
- objArrayOop objArray = objArrayOop(cast_to_oop(start_address));
-
- size_t already_scanned = pointer_delta(slice, start_address);
- size_t remaining = objArray->size() - already_scanned;
-
- return process_array_slice(objArray, slice, remaining);
-}
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp
deleted file mode 100644
index c2737dbbda6..00000000000
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_HPP
-#define SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_HPP
-
-#include "oops/oopsHierarchy.hpp"
-
-class G1CMTask;
-
-// Helper class to mark through large objArrays during marking in an efficient way.
-// Instead of pushing large object arrays, we push continuations onto the
-// mark stack. These continuations are identified by having their LSB set.
-// This allows incremental processing of large objects.
-class G1CMObjArrayProcessor {
-private:
- // Reference to the task for doing the actual work.
- G1CMTask* _task;
-
- // Push the continuation at the given address onto the mark stack.
- void push_array_slice(HeapWord* addr);
-
- // Process (apply the closure) on the given continuation of the given objArray.
- size_t process_array_slice(objArrayOop const obj, HeapWord* start_from, size_t remaining);
-public:
- static bool should_be_sliced(oop obj);
-
- G1CMObjArrayProcessor(G1CMTask* task) : _task(task) {
- }
-
- // Process the given continuation. Returns the number of words scanned.
- size_t process_slice(HeapWord* slice);
- // Start processing the given objArrayOop by scanning the header and pushing its
- // continuation.
- size_t process_obj(oop obj);
-};
-
-#endif // SHARE_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_HPP
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.cpp
index fdef4214622..4eb11f6d8f6 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,6 @@
#include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
#include "logging/log.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp"
struct G1UpdateRegionLivenessAndSelectForRebuildTask::G1OnRegionClosure : public G1HeapRegionClosure {
@@ -154,7 +153,7 @@ void G1UpdateRegionLivenessAndSelectForRebuildTask::work(uint worker_id) {
G1OnRegionClosure on_region_cl(_g1h, _cm, &local_cleanup_list);
_g1h->heap_region_par_iterate_from_worker_offset(&on_region_cl, &_hrclaimer, worker_id);
- AtomicAccess::add(&_total_selected_for_rebuild, on_region_cl._num_selected_for_rebuild);
+ _total_selected_for_rebuild.add_then_fetch(on_region_cl._num_selected_for_rebuild);
// Update the old/humongous region sets
_g1h->remove_from_old_gen_sets(on_region_cl._num_old_regions_removed,
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.hpp
index 161f0b4b9f5..a256693ff1d 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkRemarkTasks.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "gc/g1/g1HeapRegionManager.hpp"
#include "gc/g1/g1HeapRegionSet.hpp"
#include "gc/shared/workerThread.hpp"
+#include "runtime/atomic.hpp"
class G1CollectedHeap;
class G1ConcurrentMark;
@@ -41,7 +42,7 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask {
G1ConcurrentMark* _cm;
G1HeapRegionClaimer _hrclaimer;
- uint volatile _total_selected_for_rebuild;
+ Atomic<uint> _total_selected_for_rebuild;
// Reclaimed empty regions
G1FreeRegionList _cleanup_list;
@@ -57,7 +58,9 @@ public:
void work(uint worker_id) override;
- uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
+ uint total_selected_for_rebuild() const {
+ return _total_selected_for_rebuild.load_relaxed();
+ }
static uint desired_num_workers(uint num_regions);
};
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp
index ed6a9ad4292..8546e6e2d64 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp
@@ -28,6 +28,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
+#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1ConcurrentRefineSweepTask.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.cpp
index 83a09c55a3f..5160d5ed036 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,7 +22,7 @@
*
*/
-#include "gc/g1/g1ConcurrentRefineStats.hpp"
+#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/timer.hpp"
@@ -39,19 +39,27 @@ G1ConcurrentRefineStats::G1ConcurrentRefineStats() :
{}
void G1ConcurrentRefineStats::add_atomic(G1ConcurrentRefineStats* other) {
- AtomicAccess::add(&_sweep_duration, other->_sweep_duration, memory_order_relaxed);
- AtomicAccess::add(&_yield_during_sweep_duration, other->_yield_during_sweep_duration, memory_order_relaxed);
+ _sweep_duration.add_then_fetch(other->_sweep_duration.load_relaxed(), memory_order_relaxed);
+ _yield_during_sweep_duration.add_then_fetch(other->yield_during_sweep_duration(), memory_order_relaxed);
- AtomicAccess::add(&_cards_scanned, other->_cards_scanned, memory_order_relaxed);
- AtomicAccess::add(&_cards_clean, other->_cards_clean, memory_order_relaxed);
- AtomicAccess::add(&_cards_not_parsable, other->_cards_not_parsable, memory_order_relaxed);
- AtomicAccess::add(&_cards_already_refer_to_cset, other->_cards_already_refer_to_cset, memory_order_relaxed);
- AtomicAccess::add(&_cards_refer_to_cset, other->_cards_refer_to_cset, memory_order_relaxed);
- AtomicAccess::add(&_cards_no_cross_region, other->_cards_no_cross_region, memory_order_relaxed);
+ _cards_scanned.add_then_fetch(other->cards_scanned(), memory_order_relaxed);
+ _cards_clean.add_then_fetch(other->cards_clean(), memory_order_relaxed);
+ _cards_not_parsable.add_then_fetch(other->cards_not_parsable(), memory_order_relaxed);
+ _cards_already_refer_to_cset.add_then_fetch(other->cards_already_refer_to_cset(), memory_order_relaxed);
+ _cards_refer_to_cset.add_then_fetch(other->cards_refer_to_cset(), memory_order_relaxed);
+ _cards_no_cross_region.add_then_fetch(other->cards_no_cross_region(), memory_order_relaxed);
- AtomicAccess::add(&_refine_duration, other->_refine_duration, memory_order_relaxed);
+ _refine_duration.add_then_fetch(other->refine_duration(), memory_order_relaxed);
}
void G1ConcurrentRefineStats::reset() {
- *this = G1ConcurrentRefineStats();
+ _sweep_duration.store_relaxed(0);
+ _yield_during_sweep_duration.store_relaxed(0);
+ _cards_scanned.store_relaxed(0);
+ _cards_clean.store_relaxed(0);
+ _cards_not_parsable.store_relaxed(0);
+ _cards_already_refer_to_cset.store_relaxed(0);
+ _cards_refer_to_cset.store_relaxed(0);
+ _cards_no_cross_region.store_relaxed(0);
+ _refine_duration.store_relaxed(0);
}
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.hpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.hpp
index ce22f4317df..5f57c56ba6c 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,61 +26,61 @@
#define SHARE_GC_G1_G1CONCURRENTREFINESTATS_HPP
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
-#include "utilities/ticks.hpp"
// Collection of statistics for concurrent refinement processing.
// Used for collecting per-thread statistics and for summaries over a
// collection of threads.
class G1ConcurrentRefineStats : public CHeapObj<mtGC> {
- jlong _sweep_duration; // Time spent sweeping the table finding non-clean cards
- // and refining them.
- jlong _yield_during_sweep_duration; // Time spent yielding during the sweep (not doing the sweep).
+ Atomic<jlong> _sweep_duration; // Time spent sweeping the table finding non-clean cards
+ // and refining them.
+ Atomic<jlong> _yield_during_sweep_duration; // Time spent yielding during the sweep (not doing the sweep).
- size_t _cards_scanned; // Total number of cards scanned.
- size_t _cards_clean; // Number of cards found clean.
- size_t _cards_not_parsable; // Number of cards we could not parse and left unrefined.
- size_t _cards_already_refer_to_cset;// Number of cards marked found to be already young.
- size_t _cards_refer_to_cset; // Number of dirty cards that were recently found to contain a to-cset reference.
- size_t _cards_no_cross_region; // Number of dirty cards that were dirtied, but then cleaned again by the mutator.
+ Atomic<size_t> _cards_scanned; // Total number of cards scanned.
+ Atomic<size_t> _cards_clean; // Number of cards found clean.
+ Atomic<size_t> _cards_not_parsable; // Number of cards we could not parse and left unrefined.
+ Atomic<size_t> _cards_already_refer_to_cset;// Number of cards marked found to be already young.
+ Atomic<size_t> _cards_refer_to_cset; // Number of dirty cards that were recently found to contain a to-cset reference.
+ Atomic<size_t> _cards_no_cross_region; // Number of dirty cards that were dirtied, but then cleaned again by the mutator.
- jlong _refine_duration; // Time spent during actual refinement.
+ Atomic<jlong> _refine_duration; // Time spent during actual refinement.
public:
G1ConcurrentRefineStats();
// Time spent performing sweeping the refinement table (includes actual refinement,
// but not yield time).
- jlong sweep_duration() const { return _sweep_duration - _yield_during_sweep_duration; }
- jlong yield_during_sweep_duration() const { return _yield_during_sweep_duration; }
- jlong refine_duration() const { return _refine_duration; }
+ inline jlong sweep_duration() const;
+ inline jlong yield_during_sweep_duration() const;
+ inline jlong refine_duration() const;
// Number of refined cards.
- size_t refined_cards() const { return cards_not_clean(); }
+ inline size_t refined_cards() const;
- size_t cards_scanned() const { return _cards_scanned; }
- size_t cards_clean() const { return _cards_clean; }
- size_t cards_not_clean() const { return _cards_scanned - _cards_clean; }
- size_t cards_not_parsable() const { return _cards_not_parsable; }
- size_t cards_already_refer_to_cset() const { return _cards_already_refer_to_cset; }
- size_t cards_refer_to_cset() const { return _cards_refer_to_cset; }
- size_t cards_no_cross_region() const { return _cards_no_cross_region; }
+ inline size_t cards_scanned() const;
+ inline size_t cards_clean() const;
+ inline size_t cards_not_clean() const;
+ inline size_t cards_not_parsable() const;
+ inline size_t cards_already_refer_to_cset() const;
+ inline size_t cards_refer_to_cset() const;
+ inline size_t cards_no_cross_region() const;
// Number of cards that were marked dirty and in need of refinement. This includes cards recently
// found to refer to the collection set as they originally were dirty.
- size_t cards_pending() const { return cards_not_clean() - _cards_already_refer_to_cset; }
+ inline size_t cards_pending() const;
- size_t cards_to_cset() const { return _cards_already_refer_to_cset + _cards_refer_to_cset; }
+ inline size_t cards_to_cset() const;
- void inc_sweep_time(jlong t) { _sweep_duration += t; }
- void inc_yield_during_sweep_duration(jlong t) { _yield_during_sweep_duration += t; }
- void inc_refine_duration(jlong t) { _refine_duration += t; }
+ inline void inc_sweep_time(jlong t);
+ inline void inc_yield_during_sweep_duration(jlong t);
+ inline void inc_refine_duration(jlong t);
- void inc_cards_scanned(size_t increment) { _cards_scanned += increment; }
- void inc_cards_clean(size_t increment) { _cards_clean += increment; }
- void inc_cards_not_parsable() { _cards_not_parsable++; }
- void inc_cards_already_refer_to_cset() { _cards_already_refer_to_cset++; }
- void inc_cards_refer_to_cset() { _cards_refer_to_cset++; }
- void inc_cards_no_cross_region() { _cards_no_cross_region++; }
+ inline void inc_cards_scanned(size_t increment);
+ inline void inc_cards_clean(size_t increment);
+ inline void inc_cards_not_parsable();
+ inline void inc_cards_already_refer_to_cset();
+ inline void inc_cards_refer_to_cset();
+ inline void inc_cards_no_cross_region();
void add_atomic(G1ConcurrentRefineStats* other);
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.inline.hpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.inline.hpp
new file mode 100644
index 00000000000..e1a296c6494
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineStats.inline.hpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1CONCURRENTREFINESTATS_INLINE_HPP
+#define SHARE_GC_G1_G1CONCURRENTREFINESTATS_INLINE_HPP
+
+#include "gc/g1/g1ConcurrentRefineStats.hpp"
+
+inline jlong G1ConcurrentRefineStats::sweep_duration() const {
+ return _sweep_duration.load_relaxed() - yield_during_sweep_duration();
+}
+
+inline jlong G1ConcurrentRefineStats::yield_during_sweep_duration() const {
+ return _yield_during_sweep_duration.load_relaxed();
+}
+
+inline jlong G1ConcurrentRefineStats::refine_duration() const {
+ return _refine_duration.load_relaxed();
+}
+
+inline size_t G1ConcurrentRefineStats::refined_cards() const {
+ return cards_not_clean();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_scanned() const {
+ return _cards_scanned.load_relaxed();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_clean() const {
+ return _cards_clean.load_relaxed();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_not_clean() const {
+ return cards_scanned() - cards_clean();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_not_parsable() const {
+ return _cards_not_parsable.load_relaxed();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_already_refer_to_cset() const {
+ return _cards_already_refer_to_cset.load_relaxed();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_refer_to_cset() const {
+ return _cards_refer_to_cset.load_relaxed();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_no_cross_region() const {
+ return _cards_no_cross_region.load_relaxed();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_pending() const {
+ return cards_not_clean() - cards_already_refer_to_cset();
+}
+
+inline size_t G1ConcurrentRefineStats::cards_to_cset() const {
+ return cards_already_refer_to_cset() + cards_refer_to_cset();
+}
+
+inline void G1ConcurrentRefineStats::inc_sweep_time(jlong t) {
+ _sweep_duration.store_relaxed(_sweep_duration.load_relaxed() + t);
+}
+
+inline void G1ConcurrentRefineStats::inc_yield_during_sweep_duration(jlong t) {
+ _yield_during_sweep_duration.store_relaxed(yield_during_sweep_duration() + t);
+}
+
+inline void G1ConcurrentRefineStats::inc_refine_duration(jlong t) {
+ _refine_duration.store_relaxed(refine_duration() + t);
+}
+
+inline void G1ConcurrentRefineStats::inc_cards_scanned(size_t increment) {
+ _cards_scanned.store_relaxed(cards_scanned() + increment);
+}
+
+inline void G1ConcurrentRefineStats::inc_cards_clean(size_t increment) {
+ _cards_clean.store_relaxed(cards_clean() + increment);
+}
+
+inline void G1ConcurrentRefineStats::inc_cards_not_parsable() {
+ _cards_not_parsable.store_relaxed(cards_not_parsable() + 1);
+}
+
+inline void G1ConcurrentRefineStats::inc_cards_already_refer_to_cset() {
+ _cards_already_refer_to_cset.store_relaxed(cards_already_refer_to_cset() + 1);
+}
+
+inline void G1ConcurrentRefineStats::inc_cards_refer_to_cset() {
+ _cards_refer_to_cset.store_relaxed(cards_refer_to_cset() + 1);
+}
+
+inline void G1ConcurrentRefineStats::inc_cards_no_cross_region() {
+ _cards_no_cross_region.store_relaxed(cards_no_cross_region() + 1);
+}
+
+#endif // SHARE_GC_G1_G1CONCURRENTREFINESTATS_INLINE_HPP
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.cpp
index ca5bc9ebe5f..ce944f2254d 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.cpp
@@ -24,6 +24,7 @@
#include "gc/g1/g1CardTableClaimTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1ConcurrentRefineSweepTask.hpp"
class G1RefineRegionClosure : public G1HeapRegionClosure {
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.hpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.hpp
index bf24c5ae850..827b9a3c402 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineSweepTask.hpp
@@ -25,10 +25,10 @@
#ifndef SHARE_GC_G1_G1CONCURRENTREFINESWEEPTASK_HPP
#define SHARE_GC_G1_G1CONCURRENTREFINESWEEPTASK_HPP
-#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/shared/workerThread.hpp"
class G1CardTableClaimTable;
+class G1ConcurrentRefineStats;
class G1ConcurrentRefineSweepTask : public WorkerTask {
G1CardTableClaimTable* _scan_state;
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp
index eccfe466d48..6b51e5eef62 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp
@@ -26,7 +26,7 @@
#include "gc/g1/g1CardTableClaimTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
-#include "gc/g1/g1ConcurrentRefineStats.hpp"
+#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1ConcurrentRefineSweepTask.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp
index 7cdc001d348..2ecbdc668eb 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp
@@ -25,7 +25,6 @@
#ifndef SHARE_GC_G1_G1CONCURRENTREFINETHREAD_HPP
#define SHARE_GC_G1_G1CONCURRENTREFINETHREAD_HPP
-#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/shared/concurrentGCThread.hpp"
#include "runtime/mutex.hpp"
#include "utilities/globalDefinitions.hpp"
diff --git a/src/hotspot/share/gc/g1/g1EvacFailureRegions.cpp b/src/hotspot/share/gc/g1/g1EvacFailureRegions.cpp
index ffcb5a0022f..37553e2aa56 100644
--- a/src/hotspot/share/gc/g1/g1EvacFailureRegions.cpp
+++ b/src/hotspot/share/gc/g1/g1EvacFailureRegions.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +29,6 @@
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "memory/allocation.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/bitMap.inline.hpp"
G1EvacFailureRegions::G1EvacFailureRegions() :
@@ -43,7 +43,7 @@ G1EvacFailureRegions::~G1EvacFailureRegions() {
}
void G1EvacFailureRegions::pre_collection(uint max_regions) {
- AtomicAccess::store(&_num_regions_evac_failed, 0u);
+ _num_regions_evac_failed.store_relaxed(0u);
_regions_evac_failed.resize(max_regions);
_regions_pinned.resize(max_regions);
_regions_alloc_failed.resize(max_regions);
@@ -69,6 +69,6 @@ void G1EvacFailureRegions::par_iterate(G1HeapRegionClosure* closure,
G1CollectedHeap::heap()->par_iterate_regions_array(closure,
hrclaimer,
_evac_failed_regions,
- AtomicAccess::load(&_num_regions_evac_failed),
+ num_regions_evac_failed(),
worker_id);
}
diff --git a/src/hotspot/share/gc/g1/g1EvacFailureRegions.hpp b/src/hotspot/share/gc/g1/g1EvacFailureRegions.hpp
index 9d29957b782..f752a3f8ab7 100644
--- a/src/hotspot/share/gc/g1/g1EvacFailureRegions.hpp
+++ b/src/hotspot/share/gc/g1/g1EvacFailureRegions.hpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +26,7 @@
#ifndef SHARE_GC_G1_G1EVACFAILUREREGIONS_HPP
#define SHARE_GC_G1_G1EVACFAILUREREGIONS_HPP
+#include "runtime/atomic.hpp"
#include "utilities/bitMap.hpp"
class G1AbstractSubTask;
@@ -53,14 +55,14 @@ class G1EvacFailureRegions {
// Evacuation failed regions (indexes) in the current collection.
uint* _evac_failed_regions;
// Number of regions evacuation failed in the current collection.
- volatile uint _num_regions_evac_failed;
+ Atomic<uint> _num_regions_evac_failed;
public:
G1EvacFailureRegions();
~G1EvacFailureRegions();
uint get_region_idx(uint idx) const {
- assert(idx < _num_regions_evac_failed, "precondition");
+ assert(idx < _num_regions_evac_failed.load_relaxed(), "precondition");
return _evac_failed_regions[idx];
}
diff --git a/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp b/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp
index 6eec9b63e6b..fb456475b56 100644
--- a/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,10 +30,9 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
-#include "runtime/atomicAccess.hpp"
uint G1EvacFailureRegions::num_regions_evac_failed() const {
- return AtomicAccess::load(&_num_regions_evac_failed);
+ return _num_regions_evac_failed.load_relaxed();
}
bool G1EvacFailureRegions::has_regions_evac_failed() const {
@@ -57,7 +57,7 @@ bool G1EvacFailureRegions::record(uint worker_id, uint region_idx, bool cause_pi
bool success = _regions_evac_failed.par_set_bit(region_idx,
memory_order_relaxed);
if (success) {
- size_t offset = AtomicAccess::fetch_then_add(&_num_regions_evac_failed, 1u);
+ size_t offset = _num_regions_evac_failed.fetch_then_add(1u);
_evac_failed_regions[offset] = region_idx;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
diff --git a/src/hotspot/share/gc/g1/g1EvacStats.cpp b/src/hotspot/share/gc/g1/g1EvacStats.cpp
index 049175a4ecc..1d54b184e64 100644
--- a/src/hotspot/share/gc/g1/g1EvacStats.cpp
+++ b/src/hotspot/share/gc/g1/g1EvacStats.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,13 +22,24 @@
*
*/
-#include "gc/g1/g1EvacStats.hpp"
+#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/gcId.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/globals.hpp"
+void G1EvacStats::reset() {
+ PLABStats::reset();
+ _region_end_waste.store_relaxed(0);
+ _regions_filled.store_relaxed(0);
+ _num_plab_filled.store_relaxed(0);
+ _direct_allocated.store_relaxed(0);
+ _num_direct_allocated.store_relaxed(0);
+ _failure_used.store_relaxed(0);
+ _failure_waste.store_relaxed(0);
+}
+
void G1EvacStats::log_plab_allocation() {
log_debug(gc, plab)("%s PLAB allocation: "
"allocated: %zuB, "
@@ -51,13 +62,13 @@ void G1EvacStats::log_plab_allocation() {
"failure used: %zuB, "
"failure wasted: %zuB",
_description,
- _region_end_waste * HeapWordSize,
- _regions_filled,
- _num_plab_filled,
- _direct_allocated * HeapWordSize,
- _num_direct_allocated,
- _failure_used * HeapWordSize,
- _failure_waste * HeapWordSize);
+ region_end_waste() * HeapWordSize,
+ regions_filled(),
+ num_plab_filled(),
+ direct_allocated() * HeapWordSize,
+ num_direct_allocated(),
+ failure_used() * HeapWordSize,
+ failure_waste() * HeapWordSize);
}
void G1EvacStats::log_sizing(size_t calculated_words, size_t net_desired_words) {
@@ -109,7 +120,7 @@ size_t G1EvacStats::compute_desired_plab_size() const {
// threads do not allocate anything but a few rather large objects. In this
// degenerate case the PLAB size would simply quickly tend to minimum PLAB size,
// which is an okay reaction.
- size_t const used_for_waste_calculation = used() > _region_end_waste ? used() - _region_end_waste : 0;
+ size_t const used_for_waste_calculation = used() > region_end_waste() ? used() - region_end_waste() : 0;
size_t const total_waste_allowed = used_for_waste_calculation * TargetPLABWastePct;
return (size_t)((double)total_waste_allowed / (100 - G1LastPLABAverageOccupancy));
diff --git a/src/hotspot/share/gc/g1/g1EvacStats.hpp b/src/hotspot/share/gc/g1/g1EvacStats.hpp
index e6eb80442d6..b250d4580b5 100644
--- a/src/hotspot/share/gc/g1/g1EvacStats.hpp
+++ b/src/hotspot/share/gc/g1/g1EvacStats.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "gc/shared/gcUtil.hpp"
#include "gc/shared/plab.hpp"
+#include "runtime/atomic.hpp"
// Records various memory allocation statistics gathered during evacuation. All sizes
// are in HeapWords.
@@ -36,30 +37,21 @@ class G1EvacStats : public PLABStats {
AdaptiveWeightedAverage
_net_plab_size_filter; // Integrator with decay
- size_t _region_end_waste; // Number of words wasted due to skipping to the next region.
- uint _regions_filled; // Number of regions filled completely.
- size_t _num_plab_filled; // Number of PLABs filled and retired.
- size_t _direct_allocated; // Number of words allocated directly into the regions.
- size_t _num_direct_allocated; // Number of direct allocation attempts.
+ Atomic<size_t> _region_end_waste; // Number of words wasted due to skipping to the next region.
+ Atomic<uint> _regions_filled; // Number of regions filled completely.
+ Atomic<size_t> _num_plab_filled; // Number of PLABs filled and retired.
+ Atomic<size_t> _direct_allocated; // Number of words allocated directly into the regions.
+ Atomic<size_t> _num_direct_allocated; // Number of direct allocation attempts.
// Number of words in live objects remaining in regions that ultimately suffered an
// evacuation failure. This is used in the regions when the regions are made old regions.
- size_t _failure_used;
+ Atomic<size_t> _failure_used;
// Number of words wasted in regions which failed evacuation. This is the sum of space
// for objects successfully copied out of the regions (now dead space) plus waste at the
// end of regions.
- size_t _failure_waste;
+ Atomic<size_t> _failure_waste;
- virtual void reset() {
- PLABStats::reset();
- _region_end_waste = 0;
- _regions_filled = 0;
- _num_plab_filled = 0;
- _direct_allocated = 0;
- _num_direct_allocated = 0;
- _failure_used = 0;
- _failure_waste = 0;
- }
+ virtual void reset();
void log_plab_allocation();
void log_sizing(size_t calculated_words, size_t net_desired_words);
@@ -77,16 +69,16 @@ public:
// Should be called at the end of a GC pause.
void adjust_desired_plab_size();
- uint regions_filled() const { return _regions_filled; }
- size_t num_plab_filled() const { return _num_plab_filled; }
- size_t region_end_waste() const { return _region_end_waste; }
- size_t direct_allocated() const { return _direct_allocated; }
- size_t num_direct_allocated() const { return _num_direct_allocated; }
+ uint regions_filled() const;
+ size_t num_plab_filled() const;
+ size_t region_end_waste() const;
+ size_t direct_allocated() const;
+ size_t num_direct_allocated() const;
// Amount of space in heapwords used in the failing regions when an evacuation failure happens.
- size_t failure_used() const { return _failure_used; }
+ size_t failure_used() const;
// Amount of space in heapwords wasted (unused) in the failing regions when an evacuation failure happens.
- size_t failure_waste() const { return _failure_waste; }
+ size_t failure_waste() const;
inline void add_num_plab_filled(size_t value);
inline void add_direct_allocated(size_t value);
diff --git a/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp b/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp
index c90598a30cb..2bd3b37719a 100644
--- a/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,28 +27,54 @@
#include "gc/g1/g1EvacStats.hpp"
-#include "runtime/atomicAccess.hpp"
+inline uint G1EvacStats::regions_filled() const {
+ return _regions_filled.load_relaxed();
+}
+
+inline size_t G1EvacStats::num_plab_filled() const {
+ return _num_plab_filled.load_relaxed();
+}
+
+inline size_t G1EvacStats::region_end_waste() const {
+ return _region_end_waste.load_relaxed();
+}
+
+inline size_t G1EvacStats::direct_allocated() const {
+ return _direct_allocated.load_relaxed();
+}
+
+inline size_t G1EvacStats::num_direct_allocated() const {
+ return _num_direct_allocated.load_relaxed();
+}
+
+inline size_t G1EvacStats::failure_used() const {
+ return _failure_used.load_relaxed();
+}
+
+inline size_t G1EvacStats::failure_waste() const {
+ return _failure_waste.load_relaxed();
+}
inline void G1EvacStats::add_direct_allocated(size_t value) {
- AtomicAccess::add(&_direct_allocated, value, memory_order_relaxed);
+ _direct_allocated.add_then_fetch(value, memory_order_relaxed);
}
inline void G1EvacStats::add_num_plab_filled(size_t value) {
- AtomicAccess::add(&_num_plab_filled, value, memory_order_relaxed);
+ _num_plab_filled.add_then_fetch(value, memory_order_relaxed);
}
inline void G1EvacStats::add_num_direct_allocated(size_t value) {
- AtomicAccess::add(&_num_direct_allocated, value, memory_order_relaxed);
+ _num_direct_allocated.add_then_fetch(value, memory_order_relaxed);
}
inline void G1EvacStats::add_region_end_waste(size_t value) {
- AtomicAccess::add(&_region_end_waste, value, memory_order_relaxed);
- AtomicAccess::inc(&_regions_filled, memory_order_relaxed);
+ _region_end_waste.add_then_fetch(value, memory_order_relaxed);
+ _regions_filled.add_then_fetch(1u, memory_order_relaxed);
}
inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) {
- AtomicAccess::add(&_failure_used, used, memory_order_relaxed);
- AtomicAccess::add(&_failure_waste, waste, memory_order_relaxed);
+ _failure_used.add_then_fetch(used, memory_order_relaxed);
+ _failure_waste.add_then_fetch(waste, memory_order_relaxed);
}
#endif // SHARE_GC_G1_G1EVACSTATS_INLINE_HPP
diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp
index 5ca5dcef001..6c8cc7028cc 100644
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -116,8 +116,8 @@ G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
_num_workers(calc_active_workers()),
_has_compaction_targets(false),
_has_humongous(false),
- _oop_queue_set(_num_workers),
- _array_queue_set(_num_workers),
+ _marking_task_queues(_num_workers),
+ _partial_array_state_manager(nullptr),
_preserved_marks_set(true),
_serial_compaction_point(this, nullptr),
_humongous_compaction_point(this, nullptr),
@@ -134,32 +134,40 @@ G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
_compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);
_live_stats = NEW_C_HEAP_ARRAY(G1RegionMarkStats, _heap->max_num_regions(), mtGC);
- _compaction_tops = NEW_C_HEAP_ARRAY(HeapWord*, _heap->max_num_regions(), mtGC);
+ _compaction_tops = NEW_C_HEAP_ARRAY(Atomic<HeapWord*>, _heap->max_num_regions(), mtGC);
for (uint j = 0; j < heap->max_num_regions(); j++) {
_live_stats[j].clear();
- _compaction_tops[j] = nullptr;
+ ::new (&_compaction_tops[j]) Atomic<HeapWord*>{};
}
+ _partial_array_state_manager = new PartialArrayStateManager(_num_workers);
+
for (uint i = 0; i < _num_workers; i++) {
_markers[i] = new G1FullGCMarker(this, i, _live_stats);
_compaction_points[i] = new G1FullGCCompactionPoint(this, _preserved_marks_set.get(i));
- _oop_queue_set.register_queue(i, marker(i)->oop_stack());
- _array_queue_set.register_queue(i, marker(i)->objarray_stack());
+ _marking_task_queues.register_queue(i, marker(i)->task_queue());
}
+
_serial_compaction_point.set_preserved_stack(_preserved_marks_set.get(0));
_humongous_compaction_point.set_preserved_stack(_preserved_marks_set.get(0));
_region_attr_table.initialize(heap->reserved(), G1HeapRegion::GrainBytes);
}
+PartialArrayStateManager* G1FullCollector::partial_array_state_manager() const {
+ return _partial_array_state_manager;
+}
+
G1FullCollector::~G1FullCollector() {
for (uint i = 0; i < _num_workers; i++) {
delete _markers[i];
delete _compaction_points[i];
}
+ delete _partial_array_state_manager;
+
FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
- FREE_C_HEAP_ARRAY(HeapWord*, _compaction_tops);
+ FREE_C_HEAP_ARRAY(Atomic<HeapWord*>, _compaction_tops);
FREE_C_HEAP_ARRAY(G1RegionMarkStats, _live_stats);
}
@@ -279,8 +287,8 @@ public:
uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
G1FullKeepAliveClosure keep_alive(_collector.marker(index));
BarrierEnqueueDiscoveredFieldClosure enqueue;
- G1FollowStackClosure* complete_gc = _collector.marker(index)->stack_closure();
- _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_gc);
+ G1MarkStackClosure* complete_marking = _collector.marker(index)->stack_closure();
+ _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_marking);
}
};
@@ -302,7 +310,7 @@ void G1FullCollector::phase1_mark_live_objects() {
const ReferenceProcessorStats& stats = reference_processor()->process_discovered_references(task, _heap->workers(), pt);
scope()->tracer()->report_gc_reference_stats(stats);
pt.print_all_references();
- assert(marker(0)->oop_stack()->is_empty(), "Should be no oops on the stack");
+ assert(marker(0)->task_queue()->is_empty(), "Should be no oops on the stack");
}
{
@@ -328,8 +336,7 @@ void G1FullCollector::phase1_mark_live_objects() {
scope()->tracer()->report_object_count_after_gc(&_is_alive, _heap->workers());
}
#if TASKQUEUE_STATS
- oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue");
- array_queue_set()->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
+ marking_task_queues()->print_and_reset_taskqueue_stats("Marking Task Queue");
#endif
}
diff --git a/src/hotspot/share/gc/g1/g1FullCollector.hpp b/src/hotspot/share/gc/g1/g1FullCollector.hpp
index ed8225fc004..7e455b07013 100644
--- a/src/hotspot/share/gc/g1/g1FullCollector.hpp
+++ b/src/hotspot/share/gc/g1/g1FullCollector.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -79,8 +79,8 @@ class G1FullCollector : StackObj {
bool _has_humongous;
G1FullGCMarker** _markers;
G1FullGCCompactionPoint** _compaction_points;
- OopQueueSet _oop_queue_set;
- ObjArrayTaskQueueSet _array_queue_set;
+ G1MarkTasksQueueSet _marking_task_queues;
+ PartialArrayStateManager* _partial_array_state_manager;
PreservedMarksSet _preserved_marks_set;
G1FullGCCompactionPoint _serial_compaction_point;
G1FullGCCompactionPoint _humongous_compaction_point;
@@ -96,7 +96,7 @@ class G1FullCollector : StackObj {
G1FullGCHeapRegionAttr _region_attr_table;
- HeapWord* volatile* _compaction_tops;
+ Atomic<HeapWord*>* _compaction_tops;
public:
G1FullCollector(G1CollectedHeap* heap,
@@ -113,8 +113,7 @@ public:
uint workers() { return _num_workers; }
G1FullGCMarker* marker(uint id) { return _markers[id]; }
G1FullGCCompactionPoint* compaction_point(uint id) { return _compaction_points[id]; }
- OopQueueSet* oop_queue_set() { return &_oop_queue_set; }
- ObjArrayTaskQueueSet* array_queue_set() { return &_array_queue_set; }
+ G1MarkTasksQueueSet* marking_task_queues() { return &_marking_task_queues; }
PreservedMarksSet* preserved_mark_set() { return &_preserved_marks_set; }
G1FullGCCompactionPoint* serial_compaction_point() { return &_serial_compaction_point; }
G1FullGCCompactionPoint* humongous_compaction_point() { return &_humongous_compaction_point; }
@@ -122,9 +121,11 @@ public:
ReferenceProcessor* reference_processor();
size_t live_words(uint region_index) const {
assert(region_index < _heap->max_num_regions(), "sanity");
- return _live_stats[region_index]._live_words;
+ return _live_stats[region_index].live_words();
}
+ PartialArrayStateManager* partial_array_state_manager() const;
+
void before_marking_update_attribute_table(G1HeapRegion* hr);
inline bool is_compacting(oop obj) const;
diff --git a/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp b/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp
index b52f3d79604..0c201f0e43f 100644
--- a/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -63,11 +63,11 @@ void G1FullCollector::update_from_skip_compacting_to_compacting(uint region_idx)
}
void G1FullCollector::set_compaction_top(G1HeapRegion* r, HeapWord* value) {
- AtomicAccess::store(&_compaction_tops[r->hrm_index()], value);
+ _compaction_tops[r->hrm_index()].store_relaxed(value);
}
HeapWord* G1FullCollector::compaction_top(G1HeapRegion* r) const {
- return AtomicAccess::load(&_compaction_tops[r->hrm_index()]);
+ return _compaction_tops[r->hrm_index()].load_relaxed();
}
void G1FullCollector::set_has_compaction_targets() {
diff --git a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp
index 83c846e84d4..c02b028112b 100644
--- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,6 @@
#include "gc/shared/weakProcessor.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
-#include "runtime/atomicAccess.hpp"
class G1AdjustLiveClosure : public StackObj {
G1AdjustClosure* _adjust_closure;
diff --git a/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp b/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp
index 52b0d04a500..f14e1108db8 100644
--- a/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
G1FullGCMarkTask::G1FullGCMarkTask(G1FullCollector* collector) :
G1FullGCTask("G1 Parallel Marking Task", collector),
_root_processor(G1CollectedHeap::heap(), collector->workers()),
- _terminator(collector->workers(), collector->array_queue_set()) {
+ _terminator(collector->workers(), collector->marking_task_queues()) {
}
void G1FullGCMarkTask::work(uint worker_id) {
@@ -54,10 +54,9 @@ void G1FullGCMarkTask::work(uint worker_id) {
}
// Mark stack is populated, now process and drain it.
- marker->complete_marking(collector()->oop_queue_set(), collector()->array_queue_set(), &_terminator);
+ marker->complete_marking(collector()->marking_task_queues(), &_terminator);
// This is the point where the entire marking should have completed.
- assert(marker->oop_stack()->is_empty(), "Marking should have completed");
- assert(marker->objarray_stack()->is_empty(), "Array marking should have completed");
+ assert(marker->task_queue()->is_empty(), "Marking should have completed");
log_task("Marking task", worker_id, start);
}
diff --git a/src/hotspot/share/gc/g1/g1FullGCMarker.cpp b/src/hotspot/share/gc/g1/g1FullGCMarker.cpp
index aa8f12a2d1b..2b0b78ac1ce 100644
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
+#include "gc/shared/partialArraySplitter.inline.hpp"
+#include "gc/shared/partialArrayState.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/verifyOption.hpp"
@@ -36,8 +38,8 @@ G1FullGCMarker::G1FullGCMarker(G1FullCollector* collector,
_collector(collector),
_worker_id(worker_id),
_bitmap(collector->mark_bitmap()),
- _oop_stack(),
- _objarray_stack(),
+ _task_queue(),
+ _partial_array_splitter(collector->partial_array_state_manager(), collector->workers(), ObjArrayMarkingStride),
_mark_closure(worker_id, this, ClassLoaderData::_claim_stw_fullgc_mark, G1CollectedHeap::heap()->ref_processor_stw()),
_stack_closure(this),
_cld_closure(mark_closure(), ClassLoaderData::_claim_stw_fullgc_mark),
@@ -47,24 +49,36 @@ G1FullGCMarker::G1FullGCMarker(G1FullCollector* collector,
}
G1FullGCMarker::~G1FullGCMarker() {
- assert(is_empty(), "Must be empty at this point");
+ assert(is_task_queue_empty(), "Must be empty at this point");
}
-void G1FullGCMarker::complete_marking(OopQueueSet* oop_stacks,
- ObjArrayTaskQueueSet* array_stacks,
+void G1FullGCMarker::process_partial_array(PartialArrayState* state, bool stolen) {
+ // Access state before release by claim().
+ objArrayOop obj_array = objArrayOop(state->source());
+ PartialArraySplitter::Claim claim =
+ _partial_array_splitter.claim(state, task_queue(), stolen);
+ process_array_chunk(obj_array, claim._start, claim._end);
+}
+
+void G1FullGCMarker::start_partial_array_processing(objArrayOop obj) {
+ mark_closure()->do_klass(obj->klass());
+ // Don't push empty arrays to avoid unnecessary work.
+ size_t array_length = obj->length();
+ if (array_length > 0) {
+ size_t initial_chunk_size = _partial_array_splitter.start(task_queue(), obj, nullptr, array_length);
+ process_array_chunk(obj, 0, initial_chunk_size);
+ }
+}
+
+void G1FullGCMarker::complete_marking(G1MarkTasksQueueSet* task_queues,
TaskTerminator* terminator) {
do {
- follow_marking_stacks();
- ObjArrayTask steal_array;
- if (array_stacks->steal(_worker_id, steal_array)) {
- follow_array_chunk(objArrayOop(steal_array.obj()), steal_array.index());
- } else {
- oop steal_oop;
- if (oop_stacks->steal(_worker_id, steal_oop)) {
- follow_object(steal_oop);
- }
+ process_marking_stacks();
+ ScannerTask stolen_task;
+ if (task_queues->steal(_worker_id, stolen_task)) {
+ dispatch_task(stolen_task, true);
}
- } while (!is_empty() || !terminator->offer_termination());
+ } while (!is_task_queue_empty() || !terminator->offer_termination());
}
void G1FullGCMarker::flush_mark_stats_cache() {
diff --git a/src/hotspot/share/gc/g1/g1FullGCMarker.hpp b/src/hotspot/share/gc/g1/g1FullGCMarker.hpp
index b1b750eae90..5973cc841c5 100644
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,8 @@
#include "gc/g1/g1FullGCOopClosures.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1RegionMarkStatsCache.hpp"
+#include "gc/shared/partialArraySplitter.hpp"
+#include "gc/shared/partialArrayState.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/iterator.hpp"
@@ -38,16 +40,15 @@
#include "utilities/growableArray.hpp"
#include "utilities/stack.hpp"
-typedef OverflowTaskQueue<oop, mtGC> OopQueue;
-typedef OverflowTaskQueue<ObjArrayTask, mtGC> ObjArrayTaskQueue;
-typedef GenericTaskQueueSet<OopQueue, mtGC> OopQueueSet;
-typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC> ObjArrayTaskQueueSet;
class G1CMBitMap;
class G1FullCollector;
class TaskTerminator;
+typedef OverflowTaskQueue<ScannerTask, mtGC> G1MarkTasksQueue;
+typedef GenericTaskQueueSet<G1MarkTasksQueue, mtGC> G1MarkTasksQueueSet;
+
class G1FullGCMarker : public CHeapObj<mtGC> {
G1FullCollector* _collector;
@@ -56,56 +57,50 @@ class G1FullGCMarker : public CHeapObj {
G1CMBitMap* _bitmap;
// Mark stack
- OopQueue _oop_stack;
- ObjArrayTaskQueue _objarray_stack;
+ G1MarkTasksQueue _task_queue;
+ PartialArraySplitter _partial_array_splitter;
// Marking closures
G1MarkAndPushClosure _mark_closure;
- G1FollowStackClosure _stack_closure;
+ G1MarkStackClosure _stack_closure;
CLDToOopClosure _cld_closure;
StringDedup::Requests _string_dedup_requests;
G1RegionMarkStatsCache _mark_stats_cache;
- inline bool is_empty();
- inline void push_objarray(oop obj, size_t index);
+ inline bool is_task_queue_empty();
inline bool mark_object(oop obj);
// Marking helpers
- inline void follow_object(oop obj);
- inline void follow_array(objArrayOop array);
- inline void follow_array_chunk(objArrayOop array, int index);
+ inline void process_array_chunk(objArrayOop obj, size_t start, size_t end);
+ inline void dispatch_task(const ScannerTask& task, bool stolen);
+ // Start processing the given objArrayOop by first pushing its continuations and
+ // then scanning the first chunk.
+ void start_partial_array_processing(objArrayOop obj);
+ // Process the given continuation.
+ void process_partial_array(PartialArrayState* state, bool stolen);
inline void publish_and_drain_oop_tasks();
- // Try to publish all contents from the objArray task queue overflow stack to
- // the shared objArray stack.
- // Returns true and a valid task if there has not been enough space in the shared
- // objArray stack, otherwise returns false and the task is invalid.
- inline bool publish_or_pop_objarray_tasks(ObjArrayTask& task);
-
public:
G1FullGCMarker(G1FullCollector* collector,
uint worker_id,
G1RegionMarkStats* mark_stats);
~G1FullGCMarker();
- // Stack getters
- OopQueue* oop_stack() { return &_oop_stack; }
- ObjArrayTaskQueue* objarray_stack() { return &_objarray_stack; }
+ G1MarkTasksQueue* task_queue() { return &_task_queue; }
// Marking entry points
template <typename T> inline void mark_and_push(T* p);
- inline void follow_marking_stacks();
- void complete_marking(OopQueueSet* oop_stacks,
- ObjArrayTaskQueueSet* array_stacks,
+ inline void process_marking_stacks();
+ void complete_marking(G1MarkTasksQueueSet* task_queues,
TaskTerminator* terminator);
// Closure getters
CLDToOopClosure* cld_closure() { return &_cld_closure; }
G1MarkAndPushClosure* mark_closure() { return &_mark_closure; }
- G1FollowStackClosure* stack_closure() { return &_stack_closure; }
+ G1MarkStackClosure* stack_closure() { return &_stack_closure; }
// Flush live bytes to regions
void flush_mark_stats_cache();
diff --git a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
index 398ef046bf5..a6f45abe005 100644
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,6 +42,7 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
+#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
inline bool G1FullGCMarker::mark_object(oop obj) {
@@ -71,94 +72,55 @@ template <typename T> inline void G1FullGCMarker::mark_and_push(T* p) {
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
if (mark_object(obj)) {
- _oop_stack.push(obj);
+ _task_queue.push(ScannerTask(obj));
}
assert(_bitmap->is_marked(obj), "Must be marked");
}
}
-inline bool G1FullGCMarker::is_empty() {
- return _oop_stack.is_empty() && _objarray_stack.is_empty();
+inline bool G1FullGCMarker::is_task_queue_empty() {
+ return _task_queue.is_empty();
}
-inline void G1FullGCMarker::push_objarray(oop obj, size_t index) {
- ObjArrayTask task(obj, index);
- assert(task.is_valid(), "bad ObjArrayTask");
- _objarray_stack.push(task);
+inline void G1FullGCMarker::process_array_chunk(objArrayOop obj, size_t start, size_t end) {
+ obj->oop_iterate_elements_range(mark_closure(),
+ checked_cast<int>(start),
+ checked_cast<int>(end));
}
-inline void G1FullGCMarker::follow_array(objArrayOop array) {
- mark_closure()->do_klass(array->klass());
- // Don't push empty arrays to avoid unnecessary work.
- if (array->length() > 0) {
- push_objarray(array, 0);
- }
-}
-
-void G1FullGCMarker::follow_array_chunk(objArrayOop array, int index) {
- const int len = array->length();
- const int beg_index = index;
- assert(beg_index < len || len == 0, "index too large");
-
- const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
- const int end_index = beg_index + stride;
-
- // Push the continuation first to allow more efficient work stealing.
- if (end_index < len) {
- push_objarray(array, end_index);
- }
-
- array->oop_iterate_elements_range(mark_closure(), beg_index, end_index);
-}
-
-inline void G1FullGCMarker::follow_object(oop obj) {
- assert(_bitmap->is_marked(obj), "should be marked");
- if (obj->is_objArray()) {
- // Handle object arrays explicitly to allow them to
- // be split into chunks if needed.
- follow_array((objArrayOop)obj);
+inline void G1FullGCMarker::dispatch_task(const ScannerTask& task, bool stolen) {
+ if (task.is_partial_array_state()) {
+ assert(_bitmap->is_marked(task.to_partial_array_state()->source()), "should be marked");
+ process_partial_array(task.to_partial_array_state(), stolen);
} else {
- obj->oop_iterate(mark_closure());
+ oop obj = task.to_oop();
+ assert(_bitmap->is_marked(obj), "should be marked");
+ if (obj->is_objArray()) {
+ // Handle object arrays explicitly to allow them to
+ // be split into chunks if needed.
+ start_partial_array_processing((objArrayOop)obj);
+ } else {
+ obj->oop_iterate(mark_closure());
+ }
}
}
inline void G1FullGCMarker::publish_and_drain_oop_tasks() {
- oop obj;
- while (_oop_stack.pop_overflow(obj)) {
- if (!_oop_stack.try_push_to_taskqueue(obj)) {
- assert(_bitmap->is_marked(obj), "must be marked");
- follow_object(obj);
+ ScannerTask task;
+ while (_task_queue.pop_overflow(task)) {
+ if (!_task_queue.try_push_to_taskqueue(task)) {
+ dispatch_task(task, false);
}
}
- while (_oop_stack.pop_local(obj)) {
- assert(_bitmap->is_marked(obj), "must be marked");
- follow_object(obj);
+ while (_task_queue.pop_local(task)) {
+ dispatch_task(task, false);
}
}
-inline bool G1FullGCMarker::publish_or_pop_objarray_tasks(ObjArrayTask& task) {
- // It is desirable to move as much as possible work from the overflow queue to
- // the shared queue as quickly as possible.
- while (_objarray_stack.pop_overflow(task)) {
- if (!_objarray_stack.try_push_to_taskqueue(task)) {
- return true;
- }
- }
- return false;
-}
-
-void G1FullGCMarker::follow_marking_stacks() {
+void G1FullGCMarker::process_marking_stacks() {
do {
- // First, drain regular oop stack.
publish_and_drain_oop_tasks();
-
- // Then process ObjArrays one at a time to avoid marking stack bloat.
- ObjArrayTask task;
- if (publish_or_pop_objarray_tasks(task) ||
- _objarray_stack.pop_local(task)) {
- follow_array_chunk(objArrayOop(task.obj()), task.index());
- }
- } while (!is_empty());
+ } while (!is_task_queue_empty());
}
#endif // SHARE_GC_G1_G1FULLGCMARKER_INLINE_HPP
diff --git a/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp b/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp
index d9cf64a3655..273508ea9e3 100644
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp
@@ -35,7 +35,7 @@
G1IsAliveClosure::G1IsAliveClosure(G1FullCollector* collector) :
G1IsAliveClosure(collector, collector->mark_bitmap()) { }
-void G1FollowStackClosure::do_void() { _marker->follow_marking_stacks(); }
+void G1MarkStackClosure::do_void() { _marker->process_marking_stacks(); }
void G1FullKeepAliveClosure::do_oop(oop* p) { do_oop_work(p); }
void G1FullKeepAliveClosure::do_oop(narrowOop* p) { do_oop_work(p); }
diff --git a/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp b/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp
index 388f8032de4..08ed5f982e1 100644
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp
@@ -86,11 +86,11 @@ public:
virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
};
-class G1FollowStackClosure: public VoidClosure {
+class G1MarkStackClosure: public VoidClosure {
G1FullGCMarker* _marker;
public:
- G1FollowStackClosure(G1FullGCMarker* marker) : _marker(marker) {}
+ G1MarkStackClosure(G1FullGCMarker* marker) : _marker(marker) {}
virtual void do_void();
};
diff --git a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.cpp b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.cpp
index fae73a2c6bf..13c7a6a8d3e 100644
--- a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.cpp
+++ b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,6 @@
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
diff --git a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.hpp b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.hpp
index 878d35397aa..950098c706e 100644
--- a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.hpp
+++ b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.hpp
@@ -30,7 +30,6 @@
#include "gc/g1/g1CodeRootSet.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1FromCardCache.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/bitMap.hpp"
diff --git a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.inline.hpp b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.inline.hpp
index fbd529cb1d3..f621b1318c1 100644
--- a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,6 @@
#include "gc/g1/g1CardSet.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/bitMap.inline.hpp"
void G1HeapRegionRemSet::set_state_untracked() {
diff --git a/src/hotspot/share/gc/g1/g1MonotonicArena.cpp b/src/hotspot/share/gc/g1/g1MonotonicArena.cpp
index a9c6462680f..3f97870a67f 100644
--- a/src/hotspot/share/gc/g1/g1MonotonicArena.cpp
+++ b/src/hotspot/share/gc/g1/g1MonotonicArena.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,6 @@
#include "gc/g1/g1MonotonicArena.inline.hpp"
#include "memory/allocation.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/globalCounter.inline.hpp"
@@ -61,13 +60,13 @@ void G1MonotonicArena::SegmentFreeList::bulk_add(Segment& first,
size_t num,
size_t mem_size) {
_list.prepend(first, last);
- AtomicAccess::add(&_num_segments, num, memory_order_relaxed);
- AtomicAccess::add(&_mem_size, mem_size, memory_order_relaxed);
+ _num_segments.add_then_fetch(num, memory_order_relaxed);
+ _mem_size.add_then_fetch(mem_size, memory_order_relaxed);
}
void G1MonotonicArena::SegmentFreeList::print_on(outputStream* out, const char* prefix) {
out->print_cr("%s: segments %zu size %zu",
- prefix, AtomicAccess::load(&_num_segments), AtomicAccess::load(&_mem_size));
+ prefix, _num_segments.load_relaxed(), _mem_size.load_relaxed());
}
G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get_all(size_t& num_segments,
@@ -75,12 +74,12 @@ G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get_all(size_t& nu
GlobalCounter::CriticalSection cs(Thread::current());
Segment* result = _list.pop_all();
- num_segments = AtomicAccess::load(&_num_segments);
- mem_size = AtomicAccess::load(&_mem_size);
+ num_segments = _num_segments.load_relaxed();
+ mem_size = _mem_size.load_relaxed();
if (result != nullptr) {
- AtomicAccess::sub(&_num_segments, num_segments, memory_order_relaxed);
- AtomicAccess::sub(&_mem_size, mem_size, memory_order_relaxed);
+ _num_segments.sub_then_fetch(num_segments, memory_order_relaxed);
+ _mem_size.sub_then_fetch(mem_size, memory_order_relaxed);
}
return result;
}
@@ -96,8 +95,8 @@ void G1MonotonicArena::SegmentFreeList::free_all() {
Segment::delete_segment(cur);
}
- AtomicAccess::sub(&_num_segments, num_freed, memory_order_relaxed);
- AtomicAccess::sub(&_mem_size, mem_size_freed, memory_order_relaxed);
+ _num_segments.sub_then_fetch(num_freed, memory_order_relaxed);
+ _mem_size.sub_then_fetch(mem_size_freed, memory_order_relaxed);
}
G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
@@ -115,7 +114,7 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
}
// Install it as current allocation segment.
- Segment* old = AtomicAccess::cmpxchg(&_first, prev, next);
+ Segment* old = _first.compare_exchange(prev, next);
if (old != prev) {
// Somebody else installed the segment, use that one.
Segment::delete_segment(next);
@@ -126,9 +125,9 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
_last = next;
}
// Successfully installed the segment into the list.
- AtomicAccess::inc(&_num_segments, memory_order_relaxed);
- AtomicAccess::add(&_mem_size, next->mem_size(), memory_order_relaxed);
- AtomicAccess::add(&_num_total_slots, next->num_slots(), memory_order_relaxed);
+ _num_segments.add_then_fetch(1u, memory_order_relaxed);
+ _mem_size.add_then_fetch(next->mem_size(), memory_order_relaxed);
+ _num_total_slots.add_then_fetch(next->num_slots(), memory_order_relaxed);
return next;
}
}
@@ -155,7 +154,7 @@ uint G1MonotonicArena::slot_size() const {
}
void G1MonotonicArena::drop_all() {
- Segment* cur = AtomicAccess::load_acquire(&_first);
+ Segment* cur = _first.load_acquire();
if (cur != nullptr) {
assert(_last != nullptr, "If there is at least one segment, there must be a last one.");
@@ -175,25 +174,25 @@ void G1MonotonicArena::drop_all() {
cur = next;
}
#endif
- assert(num_segments == _num_segments, "Segment count inconsistent %u %u", num_segments, _num_segments);
- assert(mem_size == _mem_size, "Memory size inconsistent");
+ assert(num_segments == _num_segments.load_relaxed(), "Segment count inconsistent %u %u", num_segments, _num_segments.load_relaxed());
+ assert(mem_size == _mem_size.load_relaxed(), "Memory size inconsistent");
assert(last == _last, "Inconsistent last segment");
- _segment_free_list->bulk_add(*first, *_last, _num_segments, _mem_size);
+ _segment_free_list->bulk_add(*first, *_last, _num_segments.load_relaxed(), _mem_size.load_relaxed());
}
- _first = nullptr;
+ _first.store_relaxed(nullptr);
_last = nullptr;
- _num_segments = 0;
- _mem_size = 0;
- _num_total_slots = 0;
- _num_allocated_slots = 0;
+ _num_segments.store_relaxed(0);
+ _mem_size.store_relaxed(0);
+ _num_total_slots.store_relaxed(0);
+ _num_allocated_slots.store_relaxed(0);
}
void* G1MonotonicArena::allocate() {
assert(slot_size() > 0, "instance size not set.");
- Segment* cur = AtomicAccess::load_acquire(&_first);
+ Segment* cur = _first.load_acquire();
if (cur == nullptr) {
cur = new_segment(cur);
}
@@ -201,7 +200,7 @@ void* G1MonotonicArena::allocate() {
while (true) {
void* slot = cur->allocate_slot();
if (slot != nullptr) {
- AtomicAccess::inc(&_num_allocated_slots, memory_order_relaxed);
+ _num_allocated_slots.add_then_fetch(1u, memory_order_relaxed);
guarantee(is_aligned(slot, _alloc_options->slot_alignment()),
"result " PTR_FORMAT " not aligned at %u", p2i(slot), _alloc_options->slot_alignment());
return slot;
@@ -213,7 +212,7 @@ void* G1MonotonicArena::allocate() {
}
uint G1MonotonicArena::num_segments() const {
- return AtomicAccess::load(&_num_segments);
+ return _num_segments.load_relaxed();
}
#ifdef ASSERT
@@ -238,7 +237,7 @@ uint G1MonotonicArena::calculate_length() const {
template <typename SegmentClosure>
void G1MonotonicArena::iterate_segments(SegmentClosure& closure) const {
- Segment* cur = AtomicAccess::load_acquire(&_first);
+ Segment* cur = _first.load_acquire();
assert((cur != nullptr) == (_last != nullptr),
"If there is at least one segment, there must be a last one");
diff --git a/src/hotspot/share/gc/g1/g1MonotonicArena.hpp b/src/hotspot/share/gc/g1/g1MonotonicArena.hpp
index 211820c5254..d8e658b5a64 100644
--- a/src/hotspot/share/gc/g1/g1MonotonicArena.hpp
+++ b/src/hotspot/share/gc/g1/g1MonotonicArena.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -28,6 +28,7 @@
#include "gc/shared/freeListAllocator.hpp"
#include "nmt/memTag.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/lockFreeStack.hpp"
@@ -65,27 +66,27 @@ private:
// AllocOptions provides parameters for Segment sizing and expansion.
const AllocOptions* _alloc_options;
- Segment* volatile _first; // The (start of the) list of all segments.
- Segment* _last; // The last segment of the list of all segments.
- volatile uint _num_segments; // Number of assigned segments to this allocator.
- volatile size_t _mem_size; // Memory used by all segments.
+ Atomic<Segment*> _first; // The (start of the) list of all segments.
+ Segment* _last; // The last segment of the list of all segments.
+ Atomic<uint> _num_segments; // Number of assigned segments to this allocator.
+ Atomic<size_t> _mem_size; // Memory used by all segments.
SegmentFreeList* _segment_free_list; // The global free segment list to preferentially
// get new segments from.
- volatile uint _num_total_slots; // Number of slots available in all segments (allocated + not yet used).
- volatile uint _num_allocated_slots; // Number of total slots allocated ever (including free and pending).
+ Atomic<uint> _num_total_slots; // Number of slots available in all segments (allocated + not yet used).
+ Atomic<uint> _num_allocated_slots; // Number of total slots allocated ever (including free and pending).
inline Segment* new_segment(Segment* const prev);
DEBUG_ONLY(uint calculate_length() const;)
public:
- const Segment* first_segment() const { return AtomicAccess::load(&_first); }
+ const Segment* first_segment() const { return _first.load_relaxed(); }
- uint num_total_slots() const { return AtomicAccess::load(&_num_total_slots); }
+ uint num_total_slots() const { return _num_total_slots.load_relaxed(); }
uint num_allocated_slots() const {
- uint allocated = AtomicAccess::load(&_num_allocated_slots);
+ uint allocated = _num_allocated_slots.load_relaxed();
assert(calculate_length() == allocated, "Must be");
return allocated;
}
@@ -116,11 +117,11 @@ static constexpr uint SegmentPayloadMaxAlignment = 8;
class alignas(SegmentPayloadMaxAlignment) G1MonotonicArena::Segment {
const uint _slot_size;
const uint _num_slots;
- Segment* volatile _next;
+ Atomic<Segment*> _next;
// Index into the next free slot to allocate into. Full if equal (or larger)
// to _num_slots (can be larger because we atomically increment this value and
// check only afterwards if the allocation has been successful).
- uint volatile _next_allocate;
+ Atomic<uint> _next_allocate;
const MemTag _mem_tag;
static size_t header_size() { return align_up(sizeof(Segment), SegmentPayloadMaxAlignment); }
@@ -139,21 +140,21 @@ class alignas(SegmentPayloadMaxAlignment) G1MonotonicArena::Segment {
Segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag);
~Segment() = default;
public:
- Segment* volatile* next_addr() { return &_next; }
+ Atomic<Segment*>* next_addr() { return &_next; }
void* allocate_slot();
uint num_slots() const { return _num_slots; }
- Segment* next() const { return _next; }
+ Segment* next() const { return _next.load_relaxed(); }
void set_next(Segment* next) {
assert(next != this, " loop condition");
- _next = next;
+ _next.store_relaxed(next);
}
void reset(Segment* next) {
- _next_allocate = 0;
+ _next_allocate.store_relaxed(0);
assert(next != this, " loop condition");
set_next(next);
memset(payload(0), 0, payload_size());
@@ -166,7 +167,7 @@ public:
uint length() const {
// _next_allocate might grow larger than _num_slots in multi-thread environments
// due to races.
- return MIN2(_next_allocate, _num_slots);
+ return MIN2(_next_allocate.load_relaxed(), _num_slots);
}
static size_t size_in_bytes(uint slot_size, uint num_slots) {
@@ -176,7 +177,7 @@ public:
static Segment* create_segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag);
static void delete_segment(Segment* segment);
- bool is_full() const { return _next_allocate >= _num_slots; }
+ bool is_full() const { return _next_allocate.load_relaxed() >= _num_slots; }
};
static_assert(alignof(G1MonotonicArena::Segment) >= SegmentPayloadMaxAlignment, "assert alignment of Segment (and indirectly its payload)");
@@ -186,15 +187,15 @@ static_assert(alignof(G1MonotonicArena::Segment) >= SegmentPayloadMaxAlignment,
// performed by multiple threads concurrently.
// Counts and memory usage are current on a best-effort basis if accessed concurrently.
class G1MonotonicArena::SegmentFreeList {
- static Segment* volatile* next_ptr(Segment& segment) {
+ static Atomic<Segment*>* next_ptr(Segment& segment) {
return segment.next_addr();
}
using SegmentStack = LockFreeStack<Segment, &next_ptr>;
SegmentStack _list;
- volatile size_t _num_segments;
- volatile size_t _mem_size;
+ Atomic<size_t> _num_segments;
+ Atomic<size_t> _mem_size;
public:
SegmentFreeList() : _list(), _num_segments(0), _mem_size(0) { }
@@ -210,8 +211,8 @@ public:
void print_on(outputStream* out, const char* prefix = "");
- size_t num_segments() const { return AtomicAccess::load(&_num_segments); }
- size_t mem_size() const { return AtomicAccess::load(&_mem_size); }
+ size_t num_segments() const { return _num_segments.load_relaxed(); }
+ size_t mem_size() const { return _mem_size.load_relaxed(); }
};
// Configuration for G1MonotonicArena, e.g slot size, slot number of next Segment.
diff --git a/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp b/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp
index dd9ccae1849..cf1b35ccead 100644
--- a/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -28,14 +28,13 @@
#include "gc/g1/g1MonotonicArena.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/globalCounter.inline.hpp"
inline void* G1MonotonicArena::Segment::allocate_slot() {
- if (_next_allocate >= _num_slots) {
+ if (_next_allocate.load_relaxed() >= _num_slots) {
return nullptr;
}
- uint result = AtomicAccess::fetch_then_add(&_next_allocate, 1u, memory_order_relaxed);
+ uint result = _next_allocate.fetch_then_add(1u, memory_order_relaxed);
if (result >= _num_slots) {
return nullptr;
}
@@ -48,8 +47,8 @@ inline G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get() {
Segment* result = _list.pop();
if (result != nullptr) {
- AtomicAccess::dec(&_num_segments, memory_order_relaxed);
- AtomicAccess::sub(&_mem_size, result->mem_size(), memory_order_relaxed);
+ _num_segments.sub_then_fetch(1u, memory_order_relaxed);
+ _mem_size.sub_then_fetch(result->mem_size(), memory_order_relaxed);
}
return result;
}
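Note on the pattern used throughout this patch: the g1MonotonicArena changes above are representative of the whole change, replacing volatile fields accessed through AtomicAccess free functions with Atomic<T> members that carry named accessors. The stand-alone sketch below is not the HotSpot Atomic<T> class; it is a minimal stand-in built on std::atomic whose accessor names (load_relaxed, store_relaxed, fetch_then_add, add_then_fetch, compare_set) are copied from the calls in this patch so the usage can be tried in isolation. The FreeListStats type and its on_add() helper are invented for the example, and the optional memory_order argument of the real accessors is fixed to relaxed here.

// Minimal stand-in for the Atomic<T> wrapper used by this patch, built on std::atomic.
#include <atomic>
#include <cstddef>
#include <cstdio>

template <typename T>
class Atomic {
  std::atomic<T> _value;
public:
  explicit Atomic(T v = T()) : _value(v) {}
  T load_relaxed() const  { return _value.load(std::memory_order_relaxed); }
  void store_relaxed(T v) { _value.store(v, std::memory_order_relaxed); }
  T fetch_then_add(T v)   { return _value.fetch_add(v, std::memory_order_relaxed); }
  T add_then_fetch(T v)   { return _value.fetch_add(v, std::memory_order_relaxed) + v; }
  bool compare_set(T expected, T desired) { return _value.compare_exchange_strong(expected, desired); }
};

// Usage in the style of the SegmentFreeList accounting above: relaxed counters that
// tolerate being read while other threads add segments.
struct FreeListStats {
  Atomic<size_t> _num_segments{0};
  Atomic<size_t> _mem_size{0};

  void on_add(size_t mem_size) {
    _num_segments.add_then_fetch(1);
    _mem_size.add_then_fetch(mem_size);
  }
  size_t num_segments() const { return _num_segments.load_relaxed(); }
  size_t mem_size() const     { return _mem_size.load_relaxed(); }
};

int main() {
  FreeListStats stats;
  stats.on_add(4096);
  std::printf("segments %zu, bytes %zu\n", stats.num_segments(), stats.mem_size());
  return 0;
}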
diff --git a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
index d7e0c6e394f..529ef62b44d 100644
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,6 @@
#include "nmt/memTracker.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
index e7b02ed68e7..75a8ef1a336 100644
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,6 @@
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/globalDefinitions.hpp"
diff --git a/src/hotspot/share/gc/g1/g1ParallelCleaning.cpp b/src/hotspot/share/gc/g1/g1ParallelCleaning.cpp
index 8d5e2a3239c..e3eabff5a50 100644
--- a/src/hotspot/share/gc/g1/g1ParallelCleaning.cpp
+++ b/src/hotspot/share/gc/g1/g1ParallelCleaning.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,6 @@
#include "gc/g1/g1ParallelCleaning.hpp"
-#include "runtime/atomicAccess.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif
@@ -35,11 +34,11 @@ JVMCICleaningTask::JVMCICleaningTask() :
}
bool JVMCICleaningTask::claim_cleaning_task() {
- if (AtomicAccess::load(&_cleaning_claimed)) {
+ if (_cleaning_claimed.load_relaxed()) {
return false;
}
- return !AtomicAccess::cmpxchg(&_cleaning_claimed, false, true);
+ return _cleaning_claimed.compare_set(false, true);
}
void JVMCICleaningTask::work(bool unloading_occurred) {
diff --git a/src/hotspot/share/gc/g1/g1ParallelCleaning.hpp b/src/hotspot/share/gc/g1/g1ParallelCleaning.hpp
index d8725cb110d..815b0883e16 100644
--- a/src/hotspot/share/gc/g1/g1ParallelCleaning.hpp
+++ b/src/hotspot/share/gc/g1/g1ParallelCleaning.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,13 @@
#define SHARE_GC_G1_G1PARALLELCLEANING_HPP
#include "gc/shared/parallelCleaning.hpp"
+#if INCLUDE_JVMCI
+#include "runtime/atomic.hpp"
+#endif
#if INCLUDE_JVMCI
class JVMCICleaningTask : public StackObj {
- volatile bool _cleaning_claimed;
+ Atomic<bool> _cleaning_claimed;
public:
JVMCICleaningTask();
diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp
index 6eef6cbfa87..1d0b29c303e 100644
--- a/src/hotspot/share/gc/g1/g1Policy.cpp
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
-#include "gc/g1/g1ConcurrentRefineStats.hpp"
+#include "gc/g1/g1ConcurrentRefineStats.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
@@ -203,8 +203,8 @@ void G1Policy::update_young_length_bounds(size_t pending_cards, size_t card_rs_l
// allocation.
// That is "fine" - at most this will schedule a GC (hopefully only a little) too
// early or too late.
- AtomicAccess::store(&_young_list_desired_length, new_young_list_desired_length);
- AtomicAccess::store(&_young_list_target_length, new_young_list_target_length);
+ _young_list_desired_length.store_relaxed(new_young_list_desired_length);
+ _young_list_target_length.store_relaxed(new_young_list_target_length);
}
// Calculates desired young gen length. It is calculated from:
@@ -943,7 +943,7 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergePSS, G1GCPhaseTimes::MergePSSToYoungGenCards));
}
- record_pause(this_pause, start_time_sec, end_time_sec, allocation_failure);
+ record_pause(this_pause, start_time_sec, end_time_sec);
if (G1GCPauseTypeHelper::is_last_young_pause(this_pause)) {
assert(!G1GCPauseTypeHelper::is_concurrent_start_pause(this_pause),
@@ -1389,16 +1389,13 @@ void G1Policy::update_gc_pause_time_ratios(G1GCPauseType gc_type, double start_t
void G1Policy::record_pause(G1GCPauseType gc_type,
double start,
- double end,
- bool allocation_failure) {
+ double end) {
// Manage the MMU tracker. For some reason it ignores Full GCs.
if (gc_type != G1GCPauseType::FullGC) {
_mmu_tracker->add_pause(start, end);
}
- if (!allocation_failure) {
- update_gc_pause_time_ratios(gc_type, start, end);
- }
+ update_gc_pause_time_ratios(gc_type, start, end);
update_time_to_mixed_tracking(gc_type, start, end);
diff --git a/src/hotspot/share/gc/g1/g1Policy.hpp b/src/hotspot/share/gc/g1/g1Policy.hpp
index 72fdc6deb5b..9513c79869e 100644
--- a/src/hotspot/share/gc/g1/g1Policy.hpp
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/shared/gcCause.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/pair.hpp"
#include "utilities/ticks.hpp"
@@ -81,12 +81,9 @@ class G1Policy: public CHeapObj<mtGC> {
// Desired young gen length without taking actually available free regions into
// account.
- volatile uint _young_list_desired_length;
+ Atomic<uint> _young_list_desired_length;
// Actual target length given available free memory.
- volatile uint _young_list_target_length;
- // The max number of regions we can extend the eden by while the GC
- // locker is active. This should be >= _young_list_target_length;
- volatile uint _young_list_max_length;
+ Atomic<uint> _young_list_target_length;
// The survivor rate groups below must be initialized after the predictor because they
// indirectly use it through the "this" object passed to their constructor.
@@ -275,8 +272,7 @@ private:
// Record the given STW pause with the given start and end times (in s).
void record_pause(G1GCPauseType gc_type,
double start,
- double end,
- bool allocation_failure = false);
+ double end);
void update_gc_pause_time_ratios(G1GCPauseType gc_type, double start_sec, double end_sec);
@@ -363,8 +359,8 @@ public:
// This must be called at the very beginning of an evacuation pause.
void decide_on_concurrent_start_pause();
- uint young_list_desired_length() const { return AtomicAccess::load(&_young_list_desired_length); }
- uint young_list_target_length() const { return AtomicAccess::load(&_young_list_target_length); }
+ uint young_list_desired_length() const { return _young_list_desired_length.load_relaxed(); }
+ uint young_list_target_length() const { return _young_list_target_length.load_relaxed(); }
bool should_allocate_mutator_region() const;
bool should_expand_on_mutator_allocation() const;
diff --git a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp
index 3c1a6ed4667..4dcdd33846e 100644
--- a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp
+++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/pair.hpp"
@@ -40,20 +41,23 @@
// * the number of incoming references found during marking. This is an approximate
// value because we do not mark through all objects.
struct G1RegionMarkStats {
- size_t _live_words;
- size_t _incoming_refs;
+ Atomic<size_t> _live_words;
+ Atomic<size_t> _incoming_refs;
// Clear all members.
void clear() {
- _live_words = 0;
- _incoming_refs = 0;
+ _live_words.store_relaxed(0);
+ _incoming_refs.store_relaxed(0);
}
// Clear all members after a marking overflow. Only needs to clear the number of
// incoming references as all objects will be rescanned, while the live words are
// gathered whenever a thread can mark an object, which is synchronized.
void clear_during_overflow() {
- _incoming_refs = 0;
+ _incoming_refs.store_relaxed(0);
}
+
+ size_t live_words() const { return _live_words.load_relaxed(); }
+ size_t incoming_refs() const { return _incoming_refs.load_relaxed(); }
};
// Per-marking thread cache for the region mark statistics.
@@ -112,12 +116,16 @@ public:
void add_live_words(oop obj);
void add_live_words(uint region_idx, size_t live_words) {
G1RegionMarkStatsCacheEntry* const cur = find_for_add(region_idx);
- cur->_stats._live_words += live_words;
+ // This method is only ever called by a single thread, so we do not need an atomic
+ // update here.
+ cur->_stats._live_words.store_relaxed(cur->_stats.live_words() + live_words);
}
void inc_incoming_refs(uint region_idx) {
G1RegionMarkStatsCacheEntry* const cur = find_for_add(region_idx);
- cur->_stats._incoming_refs++;
+ // This method is only ever called by a single thread, so we do not need an atomic
+ // update here.
+ cur->_stats._incoming_refs.store_relaxed(cur->_stats.incoming_refs() + 1u);
}
void reset(uint region_idx) {
diff --git a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp
index 6b0ebb34e0d..71cd33e71ae 100644
--- a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,8 +27,6 @@
#include "gc/g1/g1RegionMarkStatsCache.hpp"
-#include "runtime/atomicAccess.hpp"
-
inline G1RegionMarkStatsCache::G1RegionMarkStatsCacheEntry* G1RegionMarkStatsCache::find_for_add(uint region_idx) {
uint const cache_idx = hash(region_idx);
@@ -46,12 +44,12 @@ inline G1RegionMarkStatsCache::G1RegionMarkStatsCacheEntry* G1RegionMarkStatsCac
inline void G1RegionMarkStatsCache::evict(uint idx) {
G1RegionMarkStatsCacheEntry* cur = &_cache[idx];
- if (cur->_stats._live_words != 0) {
- AtomicAccess::add(&_target[cur->_region_idx]._live_words, cur->_stats._live_words);
+ if (cur->_stats.live_words() != 0) {
+ _target[cur->_region_idx]._live_words.add_then_fetch(cur->_stats.live_words());
}
- if (cur->_stats._incoming_refs != 0) {
- AtomicAccess::add(&_target[cur->_region_idx]._incoming_refs, cur->_stats._incoming_refs);
+ if (cur->_stats.incoming_refs() != 0) {
+ _target[cur->_region_idx]._incoming_refs.add_then_fetch(cur->_stats.incoming_refs());
}
cur->clear();
diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp
index c7724de280f..0c9a0fad8f2 100644
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,7 @@
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -107,46 +107,48 @@ class G1RemSetScanState : public CHeapObj<mtGC> {
// Set of (unique) regions that can be added to concurrently.
class G1DirtyRegions : public CHeapObj<mtGC> {
uint* _buffer;
- uint _cur_idx;
+ Atomic<uint> _cur_idx;
size_t _max_reserved_regions;
- bool* _contains;
+ Atomic<bool>* _contains;
public:
G1DirtyRegions(size_t max_reserved_regions) :
_buffer(NEW_C_HEAP_ARRAY(uint, max_reserved_regions, mtGC)),
_cur_idx(0),
_max_reserved_regions(max_reserved_regions),
- _contains(NEW_C_HEAP_ARRAY(bool, max_reserved_regions, mtGC)) {
+ _contains(NEW_C_HEAP_ARRAY(Atomic<bool>, max_reserved_regions, mtGC)) {
reset();
}
~G1DirtyRegions() {
FREE_C_HEAP_ARRAY(uint, _buffer);
- FREE_C_HEAP_ARRAY(bool, _contains);
+ FREE_C_HEAP_ARRAY(Atomic<bool>, _contains);
}
void reset() {
- _cur_idx = 0;
- ::memset(_contains, false, _max_reserved_regions * sizeof(bool));
+ _cur_idx.store_relaxed(0);
+ for (uint i = 0; i < _max_reserved_regions; i++) {
+ _contains[i].store_relaxed(false);
+ }
}
- uint size() const { return _cur_idx; }
+ uint size() const { return _cur_idx.load_relaxed(); }
uint at(uint idx) const {
- assert(idx < _cur_idx, "Index %u beyond valid regions", idx);
+ assert(idx < size(), "Index %u beyond valid regions", idx);
return _buffer[idx];
}
void add_dirty_region(uint region) {
- if (_contains[region]) {
+ if (_contains[region].load_relaxed()) {
return;
}
- bool marked_as_dirty = AtomicAccess::cmpxchg(&_contains[region], false, true) == false;
+ bool marked_as_dirty = _contains[region].compare_set(false, true);
if (marked_as_dirty) {
- uint allocated = AtomicAccess::fetch_then_add(&_cur_idx, 1u);
+ uint allocated = _cur_idx.fetch_then_add(1u);
_buffer[allocated] = region;
}
}
@@ -155,9 +157,11 @@ class G1RemSetScanState : public CHeapObj {
void merge(const G1DirtyRegions* other) {
for (uint i = 0; i < other->size(); i++) {
uint region = other->at(i);
- if (!_contains[region]) {
- _buffer[_cur_idx++] = region;
- _contains[region] = true;
+ if (!_contains[region].load_relaxed()) {
+ uint cur = _cur_idx.load_relaxed();
+ _buffer[cur] = region;
+ _cur_idx.store_relaxed(cur + 1);
+ _contains[region].store_relaxed(true);
}
}
}
@@ -173,7 +177,7 @@ class G1RemSetScanState : public CHeapObj {
class G1ClearCardTableTask : public G1AbstractSubTask {
G1CollectedHeap* _g1h;
G1DirtyRegions* _regions;
- uint volatile _cur_dirty_regions;
+ Atomic<uint> _cur_dirty_regions;
G1RemSetScanState* _scan_state;
@@ -210,8 +214,9 @@ class G1ClearCardTableTask : public G1AbstractSubTask {
void do_work(uint worker_id) override {
const uint num_regions_per_worker = num_cards_per_worker / (uint)G1HeapRegion::CardsPerRegion;
- while (_cur_dirty_regions < _regions->size()) {
- uint next = AtomicAccess::fetch_then_add(&_cur_dirty_regions, num_regions_per_worker);
+ uint cur = _cur_dirty_regions.load_relaxed();
+ while (cur < _regions->size()) {
+ uint next = _cur_dirty_regions.fetch_then_add(num_regions_per_worker);
uint max = MIN2(next + num_regions_per_worker, _regions->size());
for (uint i = next; i < max; i++) {
@@ -226,6 +231,7 @@ class G1ClearCardTableTask : public G1AbstractSubTask {
// old regions use it for old->collection set candidates, so they should not be cleared
// either.
}
+ cur = max;
}
}
};
@@ -1115,7 +1121,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
bool _initial_evacuation;
- volatile bool _fast_reclaim_handled;
+ Atomic<bool> _fast_reclaim_handled;
public:
G1MergeHeapRootsTask(G1RemSetScanState* scan_state, uint num_workers, bool initial_evacuation) :
@@ -1143,8 +1149,8 @@ public:
// 1. eager-reclaim candidates
if (_initial_evacuation &&
g1h->has_humongous_reclaim_candidates() &&
- !_fast_reclaim_handled &&
- !AtomicAccess::cmpxchg(&_fast_reclaim_handled, false, true)) {
+ !_fast_reclaim_handled.load_relaxed() &&
+ _fast_reclaim_handled.compare_set(false, true)) {
G1GCParPhaseTimesTracker subphase_x(p, G1GCPhaseTimes::MergeER, worker_id);
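G1DirtyRegions::add_dirty_region() above is the clearest instance of the claim pattern this patch rewrites from AtomicAccess::cmpxchg to compare_set: a relaxed read filters repeat callers cheaply, compare_set picks exactly one winner per region, and only the winner reserves a buffer slot. Below is a self-contained approximation using plain std::atomic; the class name, sizing, and compare_exchange_strong call are illustrative stand-ins, not the HotSpot code.

// Rough approximation of the duplicate-free recording in add_dirty_region().
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <vector>

class DirtyRegions {
  std::vector<unsigned> _buffer;              // indices of regions recorded as dirty
  std::atomic<unsigned> _cur_idx{0};          // next free slot in _buffer
  std::vector<std::atomic<bool>> _contains;   // one claim flag per region
public:
  explicit DirtyRegions(std::size_t max_regions)
    : _buffer(max_regions), _contains(max_regions) {}

  void add_dirty_region(unsigned region) {
    if (_contains[region].load(std::memory_order_relaxed)) {
      return;                                 // already recorded; cheap early exit
    }
    bool expected = false;
    if (_contains[region].compare_exchange_strong(expected, true)) {
      // Exactly one thread wins the flag and appends the region.
      unsigned slot = _cur_idx.fetch_add(1u);
      _buffer[slot] = region;
    }
  }

  unsigned size() const { return _cur_idx.load(std::memory_order_relaxed); }
};

int main() {
  DirtyRegions regions(16);
  regions.add_dirty_region(3);
  regions.add_dirty_region(3);                // duplicate add is ignored
  std::printf("dirty regions: %u\n", regions.size());   // prints 1
  return 0;
}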
diff --git a/src/hotspot/share/gc/g1/g1YoungCollector.cpp b/src/hotspot/share/gc/g1/g1YoungCollector.cpp
index fa5c617f1db..36cc44a8b7c 100644
--- a/src/hotspot/share/gc/g1/g1YoungCollector.cpp
+++ b/src/hotspot/share/gc/g1/g1YoungCollector.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -58,6 +58,7 @@
#include "gc/shared/workerThread.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/threads.hpp"
#include "utilities/ticks.hpp"
@@ -459,8 +460,8 @@ class G1PrepareEvacuationTask : public WorkerTask {
G1CollectedHeap* _g1h;
G1HeapRegionClaimer _claimer;
- volatile uint _humongous_total;
- volatile uint _humongous_candidates;
+ Atomic<uint> _humongous_total;
+ Atomic<uint> _humongous_candidates;
G1MonotonicArenaMemoryStats _all_card_set_stats;
@@ -481,19 +482,19 @@ public:
}
void add_humongous_candidates(uint candidates) {
- AtomicAccess::add(&_humongous_candidates, candidates);
+ _humongous_candidates.add_then_fetch(candidates);
}
void add_humongous_total(uint total) {
- AtomicAccess::add(&_humongous_total, total);
+ _humongous_total.add_then_fetch(total);
}
uint humongous_candidates() {
- return _humongous_candidates;
+ return _humongous_candidates.load_relaxed();
}
uint humongous_total() {
- return _humongous_total;
+ return _humongous_total.load_relaxed();
}
const G1MonotonicArenaMemoryStats all_card_set_stats() const {
@@ -698,7 +699,7 @@ protected:
virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;
private:
- volatile bool _pinned_regions_recorded;
+ Atomic<bool> _pinned_regions_recorded;
public:
G1EvacuateRegionsBaseTask(const char* name,
@@ -722,7 +723,7 @@ public:
G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
pss->set_ref_discoverer(_g1h->ref_processor_stw());
- if (!AtomicAccess::cmpxchg(&_pinned_regions_recorded, false, true)) {
+ if (_pinned_regions_recorded.compare_set(false, true)) {
record_pinned_regions(pss, worker_id);
}
scan_roots(pss, worker_id);
diff --git a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp
index ec5d2393d8c..46d12df575c 100644
--- a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp
+++ b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp
@@ -46,6 +46,7 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
@@ -759,7 +760,7 @@ class G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask : public G1
const size_t* _surviving_young_words;
uint _active_workers;
G1EvacFailureRegions* _evac_failure_regions;
- volatile uint _num_retained_regions;
+ Atomic<uint> _num_retained_regions;
FreeCSetStats* worker_stats(uint worker) {
return &_worker_stats[worker];
@@ -794,7 +795,7 @@ public:
virtual ~FreeCollectionSetTask() {
Ticks serial_time = Ticks::now();
- bool has_new_retained_regions = AtomicAccess::load(&_num_retained_regions) != 0;
+ bool has_new_retained_regions = _num_retained_regions.load_relaxed() != 0;
if (has_new_retained_regions) {
G1CollectionSetCandidates* candidates = _g1h->collection_set()->candidates();
candidates->sort_by_efficiency();
@@ -829,7 +830,7 @@ public:
// Report per-region type timings.
cl.report_timing();
- AtomicAccess::add(&_num_retained_regions, cl.num_retained_regions(), memory_order_relaxed);
+ _num_retained_regions.add_then_fetch(cl.num_retained_regions(), memory_order_relaxed);
}
};
diff --git a/src/hotspot/share/gc/g1/g1YoungGCPreEvacuateTasks.cpp b/src/hotspot/share/gc/g1/g1YoungGCPreEvacuateTasks.cpp
index b11213ddeb3..c0870b7a726 100644
--- a/src/hotspot/share/gc/g1/g1YoungGCPreEvacuateTasks.cpp
+++ b/src/hotspot/share/gc/g1/g1YoungGCPreEvacuateTasks.cpp
@@ -23,7 +23,6 @@
*/
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/g1/g1RegionPinCache.inline.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1YoungGCPreEvacuateTasks.hpp"
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
index b1b07b4bc5c..bab72296d4c 100644
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
#include "gc/parallel/psStringDedup.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/classUnloadingContext.hpp"
+#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
@@ -932,6 +933,17 @@ void PSParallelCompact::summary_phase(bool should_do_max_compaction)
}
}
+void PSParallelCompact::report_object_count_after_gc() {
+ GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
+ // The heap is compacted, all objects are iterable. However there may be
+ // filler objects in the heap which we should ignore.
+ class SkipFillerObjectClosure : public BoolObjectClosure {
+ public:
+ bool do_object_b(oop obj) override { return !CollectedHeap::is_filler_object(obj); }
+ } cl;
+ _gc_tracer.report_object_count_after_gc(&cl, &ParallelScavengeHeap::heap()->workers());
+}
+
bool PSParallelCompact::invoke(bool clear_all_soft_refs, bool should_do_max_compaction) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(),
@@ -1027,6 +1039,8 @@ bool PSParallelCompact::invoke(bool clear_all_soft_refs, bool should_do_max_comp
heap->print_heap_change(pre_gc_values);
+ report_object_count_after_gc();
+
// Track memory usage and detect low memory
MemoryService::track_memory_usage();
heap->update_counters();
@@ -1274,10 +1288,6 @@ void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
}
}
- {
- GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
- _gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers());
- }
#if TASKQUEUE_STATS
ParCompactionManager::print_and_reset_taskqueue_stats();
#endif
@@ -1835,8 +1845,7 @@ void PSParallelCompact::verify_filler_in_dense_prefix() {
oop obj = cast_to_oop(cur_addr);
oopDesc::verify(obj);
if (!mark_bitmap()->is_marked(cur_addr)) {
- Klass* k = cast_to_oop(cur_addr)->klass();
- assert(k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass(), "inv");
+ assert(CollectedHeap::is_filler_object(cast_to_oop(cur_addr)), "inv");
}
cur_addr += obj->size();
}
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.hpp b/src/hotspot/share/gc/parallel/psParallelCompact.hpp
index 2297d720b35..4ac9395d727 100644
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp
@@ -749,6 +749,7 @@ private:
// Move objects to new locations.
static void compact();
+ static void report_object_count_after_gc();
// Add available regions to the stack and draining tasks to the task queue.
static void prepare_region_draining_tasks(uint parallel_gc_threads);
diff --git a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
index c4eefee5f65..53577bad1d8 100644
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
@@ -771,7 +771,7 @@ Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobi
// this will require extensive changes to the loop optimization in order to
// prevent a degradation of the optimization.
// See comment in memnode.hpp, around line 227 in class LoadPNode.
- Node* tlab_end = macro->make_load(toobig_false, mem, tlab_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
+ Node* tlab_end = macro->make_load_raw(toobig_false, mem, tlab_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
// Load the TLAB top.
Node* old_tlab_top = new LoadPNode(toobig_false, mem, tlab_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered);
diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp
index 6f335b1cdf4..363ccf321b2 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -309,6 +309,8 @@ protected:
fill_with_object(start, pointer_delta(end, start), zap);
}
+ inline static bool is_filler_object(oop obj);
+
virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
static size_t min_dummy_object_size() {
return oopDesc::header_size();
diff --git a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp
index c9d84f54449..194c1fe0bf2 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,9 @@
#include "gc/shared/collectedHeap.hpp"
+#include "classfile/vmClasses.hpp"
#include "gc/shared/memAllocator.hpp"
+#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/align.hpp"
@@ -50,4 +52,9 @@ inline void CollectedHeap::add_vmthread_cpu_time(jlong time) {
_vmthread_cpu_time += time;
}
+inline bool CollectedHeap::is_filler_object(oop obj) {
+ Klass* k = obj->klass_without_asserts();
+ return k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass();
+}
+
#endif // SHARE_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
diff --git a/src/hotspot/share/gc/shared/taskqueue.hpp b/src/hotspot/share/gc/shared/taskqueue.hpp
index 4334773a4e9..5c2fe4e5178 100644
--- a/src/hotspot/share/gc/shared/taskqueue.hpp
+++ b/src/hotspot/share/gc/shared/taskqueue.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -641,6 +641,10 @@ public:
return (raw_value() & PartialArrayTag) != 0;
}
+ bool is_null() const {
+ return _p == nullptr;
+ }
+
oop* to_oop_ptr() const {
return static_cast<oop*>(decode(OopTag));
}
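The is_null() accessor added to taskqueue.hpp sits on a low-bit-tagged pointer, as the surrounding raw_value() and decode(OopTag) context shows. For readers unfamiliar with the technique, here is a rough stand-alone sketch of such a tagged task entry; the tag values, the int* payload, and the method bodies are invented for illustration, and only the general shape (alignment bits of a pointer reused as a type tag) matches the HotSpot class.

// Stand-alone sketch of a low-bit-tagged task entry in the spirit of the queue entries above.
#include <cassert>
#include <cstdint>
#include <cstdio>

class TaggedTask {
  void* _p;
  static constexpr std::uintptr_t OopTag          = 0;  // illustrative values, not HotSpot's
  static constexpr std::uintptr_t PartialArrayTag = 1;
  static constexpr std::uintptr_t TagMask         = 1;

  std::uintptr_t raw_value() const { return reinterpret_cast<std::uintptr_t>(_p); }
  void* decode(std::uintptr_t tag) const {
    assert((raw_value() & TagMask) == tag);              // caller must name the expected tag
    return reinterpret_cast<void*>(raw_value() & ~TagMask);
  }
public:
  TaggedTask() : _p(nullptr) {}
  explicit TaggedTask(int* obj) : _p(obj) {}             // int is aligned, so the low bit is free

  bool is_null() const { return _p == nullptr; }
  bool is_partial_array() const { return (raw_value() & PartialArrayTag) != 0; }
  int* to_obj_ptr() const { return static_cast<int*>(decode(OopTag)); }
};

int main() {
  TaggedTask empty;
  int payload = 42;
  TaggedTask task(&payload);
  std::printf("empty is_null: %d, payload: %d\n", empty.is_null(), *task.to_obj_ptr());
  return 0;
}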
diff --git a/src/hotspot/share/gc/shared/workerThread.cpp b/src/hotspot/share/gc/shared/workerThread.cpp
index 7a9404a195a..e4831d25d26 100644
--- a/src/hotspot/share/gc/shared/workerThread.cpp
+++ b/src/hotspot/share/gc/shared/workerThread.cpp
@@ -96,8 +96,22 @@ void WorkerThreads::initialize_workers() {
}
}
+bool WorkerThreads::allow_inject_creation_failure() const {
+ if (!is_init_completed()) {
+ // Never allow creation failures during VM init
+ return false;
+ }
+
+ if (_created_workers == 0) {
+ // Never allow creation failures of the first worker, it will cause the VM to exit
+ return false;
+ }
+
+ return true;
+}
+
WorkerThread* WorkerThreads::create_worker(uint name_suffix) {
- if (is_init_completed() && InjectGCWorkerCreationFailure) {
+ if (InjectGCWorkerCreationFailure && allow_inject_creation_failure()) {
return nullptr;
}
diff --git a/src/hotspot/share/gc/shared/workerThread.hpp b/src/hotspot/share/gc/shared/workerThread.hpp
index a1f7282abe4..003ce8a2959 100644
--- a/src/hotspot/share/gc/shared/workerThread.hpp
+++ b/src/hotspot/share/gc/shared/workerThread.hpp
@@ -104,6 +104,7 @@ public:
WorkerThreads(const char* name, uint max_workers);
void initialize_workers();
+ bool allow_inject_creation_failure() const;
uint max_workers() const { return _max_workers; }
uint created_workers() const { return _created_workers; }
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
index 7a8bd55c795..46d9f19d35f 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
@@ -68,9 +68,9 @@ ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics(ShenandoahSpaceInfo*
ShenandoahAdaptiveHeuristics::~ShenandoahAdaptiveHeuristics() {}
-void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
- RegionData* data, size_t size,
- size_t actual_free) {
+size_t ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+ RegionData* data, size_t size,
+ size_t actual_free) {
size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
// The logic for cset selection in adaptive is as follows:
@@ -124,6 +124,7 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand
cur_garbage = new_garbage;
}
}
+ return 0;
}
void ShenandoahAdaptiveHeuristics::record_cycle_start() {
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
index 1ba18f37c2b..c4fdf819391 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
@@ -108,9 +108,9 @@ public:
virtual ~ShenandoahAdaptiveHeuristics();
- virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
- RegionData* data, size_t size,
- size_t actual_free) override;
+ virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+ RegionData* data, size_t size,
+ size_t actual_free) override;
virtual void record_cycle_start() override;
virtual void record_success_concurrent() override;
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp
index a833e39631c..990b59ec853 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp
@@ -39,15 +39,16 @@ ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics(ShenandoahSpaceIn
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahEvacReserveOverflow);
}
-void ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
- RegionData* data, size_t size,
- size_t free) {
+size_t ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+ RegionData* data, size_t size,
+ size_t free) {
for (size_t idx = 0; idx < size; idx++) {
ShenandoahHeapRegion* r = data[idx].get_region();
if (r->garbage() > 0) {
cset->add_region(r);
}
}
+ return 0;
}
bool ShenandoahAggressiveHeuristics::should_start_gc() {
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp
index 5075258f1ce..25c8635489f 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp
@@ -35,9 +35,9 @@ class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics {
public:
ShenandoahAggressiveHeuristics(ShenandoahSpaceInfo* space_info);
- virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
- RegionData* data, size_t size,
- size_t free);
+ virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+ RegionData* data, size_t size,
+ size_t free);
virtual bool should_start_gc();
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp
index 28673b28612..09a8394a4b1 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp
@@ -76,9 +76,9 @@ bool ShenandoahCompactHeuristics::should_start_gc() {
return ShenandoahHeuristics::should_start_gc();
}
-void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
- RegionData* data, size_t size,
- size_t actual_free) {
+size_t ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+ RegionData* data, size_t size,
+ size_t actual_free) {
// Do not select too large CSet that would overflow the available free space
size_t max_cset = actual_free * 3 / 4;
@@ -97,4 +97,5 @@ void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(Shenando
cset->add_region(r);
}
}
+ return 0;
}
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp
index 21ec99eabc0..4988d5d495d 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp
@@ -37,9 +37,9 @@ public:
virtual bool should_start_gc();
- virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
- RegionData* data, size_t size,
- size_t actual_free);
+ virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+ RegionData* data, size_t size,
+ size_t actual_free);
virtual const char* name() { return "Compact"; }
virtual bool is_diagnostic() { return false; }
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
index b14d72f249b..ee315ce5c7e 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
@@ -37,7 +37,7 @@ ShenandoahGenerationalHeuristics::ShenandoahGenerationalHeuristics(ShenandoahGen
: ShenandoahAdaptiveHeuristics(generation), _generation(generation) {
}
-void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
+size_t ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
assert(collection_set->is_empty(), "Must be empty");
auto heap = ShenandoahGenerationalHeap::heap();
@@ -168,16 +168,12 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio
byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage));
size_t immediate_percent = (total_garbage == 0) ? 0 : (immediate_garbage * 100 / total_garbage);
-
bool doing_promote_in_place = (humongous_regions_promoted + regular_regions_promoted_in_place > 0);
- if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) {
- // Only young collections need to prime the collection set.
- if (_generation->is_young()) {
- heap->old_generation()->heuristics()->prime_collection_set(collection_set);
- }
+ size_t add_regions_to_old = 0;
+ if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) {
// Call the subclasses to add young-gen regions into the collection set.
- choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
+ add_regions_to_old = choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
}
if (collection_set->has_old_regions()) {
@@ -194,6 +190,7 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio
regular_regions_promoted_free,
immediate_regions,
immediate_garbage);
+ return add_regions_to_old;
}
@@ -210,13 +207,6 @@ size_t ShenandoahGenerationalHeuristics::add_preselected_regions_to_collection_s
assert(ShenandoahGenerationalHeap::heap()->is_tenurable(r), "Preselected regions must have tenure age");
// Entire region will be promoted, This region does not impact young-gen or old-gen evacuation reserve.
// This region has been pre-selected and its impact on promotion reserve is already accounted for.
-
- // r->used() is r->garbage() + r->get_live_data_bytes()
- // Since all live data in this region is being evacuated from young-gen, it is as if this memory
- // is garbage insofar as young-gen is concerned. Counting this as garbage reduces the need to
- // reclaim highly utilized young-gen regions just for the sake of finding min_garbage to reclaim
- // within young-gen memory.
-
cur_young_garbage += r->garbage();
cset->add_region(r);
}
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp
index 31c016bb4b7..9b4c93af9b4 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp
@@ -44,7 +44,7 @@ class ShenandoahGenerationalHeuristics : public ShenandoahAdaptiveHeuristics {
public:
explicit ShenandoahGenerationalHeuristics(ShenandoahGeneration* generation);
- void choose_collection_set(ShenandoahCollectionSet* collection_set) override;
+ size_t choose_collection_set(ShenandoahCollectionSet* collection_set) override;
protected:
ShenandoahGeneration* _generation;
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp
index 51805b205dd..f47371c14d5 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp
@@ -24,6 +24,7 @@
*/
#include "gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp"
+#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
@@ -35,13 +36,14 @@ ShenandoahGlobalHeuristics::ShenandoahGlobalHeuristics(ShenandoahGlobalGeneratio
}
-void ShenandoahGlobalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
- RegionData* data, size_t size,
- size_t actual_free) {
+size_t ShenandoahGlobalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+ RegionData* data, size_t size,
+ size_t actual_free) {
// Better select garbage-first regions
QuickSort::sort(data, (int) size, compare_by_garbage);
choose_global_collection_set(cset, data, size, actual_free, 0 /* cur_young_garbage */);
+ return 0;
}
@@ -49,94 +51,212 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti
const ShenandoahHeuristics::RegionData* data,
size_t size, size_t actual_free,
size_t cur_young_garbage) const {
+ shenandoah_assert_heaplocked_or_safepoint();
auto heap = ShenandoahGenerationalHeap::heap();
+ auto free_set = heap->free_set();
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
size_t capacity = heap->soft_max_capacity();
+
size_t garbage_threshold = region_size_bytes * ShenandoahGarbageThreshold / 100;
size_t ignore_threshold = region_size_bytes * ShenandoahIgnoreGarbageThreshold / 100;
size_t young_evac_reserve = heap->young_generation()->get_evacuation_reserve();
+ size_t original_young_evac_reserve = young_evac_reserve;
size_t old_evac_reserve = heap->old_generation()->get_evacuation_reserve();
- size_t max_young_cset = (size_t) (young_evac_reserve / ShenandoahEvacWaste);
- size_t young_cur_cset = 0;
- size_t max_old_cset = (size_t) (old_evac_reserve / ShenandoahOldEvacWaste);
- size_t old_cur_cset = 0;
+ size_t old_promo_reserve = heap->old_generation()->get_promoted_reserve();
- // Figure out how many unaffiliated young regions are dedicated to mutator and to evacuator. Allow the young
- // collector's unaffiliated regions to be transferred to old-gen if old-gen has more easily reclaimed garbage
- // than young-gen. At the end of this cycle, any excess regions remaining in old-gen will be transferred back
- // to young. Do not transfer the mutator's unaffiliated regions to old-gen. Those must remain available
- // to the mutator as it needs to be able to consume this memory during concurrent GC.
-
- size_t unaffiliated_young_regions = heap->young_generation()->free_unaffiliated_regions();
+ size_t unaffiliated_young_regions = free_set->collector_unaffiliated_regions();
size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
+ size_t unaffiliated_old_regions = free_set->old_collector_unaffiliated_regions();
+ size_t unaffiliated_old_memory = unaffiliated_old_regions * region_size_bytes;
- if (unaffiliated_young_memory > max_young_cset) {
- size_t unaffiliated_mutator_memory = unaffiliated_young_memory - max_young_cset;
- unaffiliated_young_memory -= unaffiliated_mutator_memory;
- unaffiliated_young_regions = unaffiliated_young_memory / region_size_bytes; // round down
- unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
+ // Figure out how many unaffiliated regions are dedicated to Collector and OldCollector reserves. Let these
+ // be shuffled between young and old generations in order to expedite evacuation of whichever regions have the
+ // most garbage, regardless of whether these garbage-first regions reside in young or old generation.
+ // Excess reserves will be transferred back to the mutator after collection set has been chosen. At the end
+ // of evacuation, any reserves not consumed by evacuation will also be transferred to the mutator free set.
+
+ // Truncate reserves to only target unaffiliated memory
+ size_t shared_reserve_regions = 0;
+ if (young_evac_reserve > unaffiliated_young_memory) {
+ shared_reserve_regions += unaffiliated_young_regions;
+ } else {
+ size_t delta_regions = young_evac_reserve / region_size_bytes;
+ shared_reserve_regions += delta_regions;
}
+ young_evac_reserve = 0;
+ size_t total_old_reserve = old_evac_reserve + old_promo_reserve;
+ if (total_old_reserve > unaffiliated_old_memory) {
+ // Give all the unaffiliated memory to the shared reserves. Leave the rest for promo reserve.
+ shared_reserve_regions += unaffiliated_old_regions;
+ old_promo_reserve = total_old_reserve - unaffiliated_old_memory;
+ } else {
+ size_t delta_regions = old_evac_reserve / region_size_bytes;
+ shared_reserve_regions += delta_regions;
+ }
+ old_evac_reserve = 0;
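+
+ // Illustration (hypothetical numbers): with 4 MiB regions, a 24 MiB young evacuation reserve backed by 10
+ // unaffiliated young regions, and a 20 MiB combined old reserve backed by 3 unaffiliated old regions (12 MiB),
+ // the shared pool becomes 6 + 3 = 9 regions, and the 8 MiB of old reserve not covered by unaffiliated old
+ // memory remains in old_promo_reserve.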
+ assert(shared_reserve_regions <=
+ (heap->young_generation()->free_unaffiliated_regions() + heap->old_generation()->free_unaffiliated_regions()),
+ "simple math");
- // We'll affiliate these unaffiliated regions with either old or young, depending on need.
- max_young_cset -= unaffiliated_young_memory;
+ size_t shared_reserves = shared_reserve_regions * region_size_bytes;
+ size_t committed_from_shared_reserves = 0;
- // Keep track of how many regions we plan to transfer from young to old.
- size_t regions_transferred_to_old = 0;
+ size_t promo_bytes = 0;
+ size_t old_evac_bytes = 0;
+ size_t young_evac_bytes = 0;
- size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_young_cset;
+ size_t consumed_by_promo = 0; // promo_bytes * ShenandoahPromoEvacWaste
+ size_t consumed_by_old_evac = 0; // old_evac_bytes * ShenandoahOldEvacWaste
+ size_t consumed_by_young_evac = 0; // young_evac_bytes * ShenandoahEvacWaste
+
+ // Of the memory reclaimed by GC, some will need to be reserved for the next GC cycle. Use the current
+ // young reserve as an approximation of the future Collector reserve requirement. Try to end with at least
+ // (capacity * ShenandoahMinFreeThreshold) / 100 bytes available to the mutator.
+ size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + original_young_evac_reserve;
size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0;
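+ // For illustration (hypothetical numbers): with a 1 GiB soft max capacity and ShenandoahMinFreeThreshold of 10,
+ // free_target is roughly 102 MiB plus the original young reserve. If actual_free already covers free_target,
+ // min_garbage is 0 and regions are chosen purely by garbage_threshold; otherwise regions whose garbage exceeds
+ // ignore_threshold are accepted regardless, until the accumulated garbage reaches min_garbage.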
- log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Max Young Evacuation: %zu"
- "%s, Max Old Evacuation: %zu%s, Max Either Evacuation: %zu%s, Actual Free: %zu%s.",
- byte_size_in_proper_unit(max_young_cset), proper_unit_for_byte_size(max_young_cset),
- byte_size_in_proper_unit(max_old_cset), proper_unit_for_byte_size(max_old_cset),
- byte_size_in_proper_unit(unaffiliated_young_memory), proper_unit_for_byte_size(unaffiliated_young_memory),
- byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free));
+ size_t aged_regions_promoted = 0;
+ size_t young_regions_evacuated = 0;
+ size_t old_regions_evacuated = 0;
+ log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Discretionary evacuation budget (for either old or young): %zu%s"
+ ", Actual Free: %zu%s.",
+ byte_size_in_proper_unit(shared_reserves), proper_unit_for_byte_size(shared_reserves),
+ byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free));
+
+ size_t cur_garbage = cur_young_garbage;
for (size_t idx = 0; idx < size; idx++) {
ShenandoahHeapRegion* r = data[idx].get_region();
assert(!cset->is_preselected(r->index()), "There should be no preselected regions during GLOBAL GC");
bool add_region = false;
- if (r->is_old() || heap->is_tenurable(r)) {
- size_t new_cset = old_cur_cset + r->get_live_data_bytes();
- if ((r->garbage() > garbage_threshold)) {
- while ((new_cset > max_old_cset) && (unaffiliated_young_regions > 0)) {
- unaffiliated_young_regions--;
- regions_transferred_to_old++;
- max_old_cset += region_size_bytes / ShenandoahOldEvacWaste;
+ size_t region_garbage = r->garbage();
+ size_t new_garbage = cur_garbage + region_garbage;
+ bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage);
+ size_t live_bytes = r->get_live_data_bytes();
+ if (add_regardless || (region_garbage >= garbage_threshold)) {
+ if (r->is_old()) {
+ size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahOldEvacWaste);
+ size_t new_old_consumption = consumed_by_old_evac + anticipated_consumption;
+ size_t new_old_evac_reserve = old_evac_reserve;
+ size_t proposed_old_region_expansion = 0;
+ while ((new_old_consumption > new_old_evac_reserve) && (committed_from_shared_reserves < shared_reserves)) {
+ committed_from_shared_reserves += region_size_bytes;
+ proposed_old_region_expansion++;
+ new_old_evac_reserve += region_size_bytes;
}
- }
- if ((new_cset <= max_old_cset) && (r->garbage() > garbage_threshold)) {
- add_region = true;
- old_cur_cset = new_cset;
- }
- } else {
- assert(r->is_young() && !heap->is_tenurable(r), "DeMorgan's law (assuming r->is_affiliated)");
- size_t new_cset = young_cur_cset + r->get_live_data_bytes();
- size_t region_garbage = r->garbage();
- size_t new_garbage = cur_young_garbage + region_garbage;
- bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage);
-
- if (add_regardless || (r->garbage() > garbage_threshold)) {
- while ((new_cset > max_young_cset) && (unaffiliated_young_regions > 0)) {
- unaffiliated_young_regions--;
- max_young_cset += region_size_bytes / ShenandoahEvacWaste;
+ // If this region has free memory and we choose to place it in the collection set, its free memory is no longer
+ // available to hold promotion results. So we behave as if its free memory is consumed within the promotion reserve.
+ size_t anticipated_loss_from_promo_reserve = r->free();
+ size_t new_promo_consumption = consumed_by_promo + anticipated_loss_from_promo_reserve;
+ size_t new_promo_reserve = old_promo_reserve;
+ while ((new_promo_consumption > new_promo_reserve) && (committed_from_shared_reserves < shared_reserves)) {
+ committed_from_shared_reserves += region_size_bytes;
+ proposed_old_region_expansion++;
+ new_promo_reserve += region_size_bytes;
+ }
+ if ((new_old_consumption <= new_old_evac_reserve) && (new_promo_consumption <= new_promo_reserve)) {
+ add_region = true;
+ old_evac_reserve = new_old_evac_reserve;
+ old_promo_reserve = new_promo_reserve;
+ old_evac_bytes += live_bytes;
+ consumed_by_old_evac = new_old_consumption;
+ consumed_by_promo = new_promo_consumption;
+ cur_garbage = new_garbage;
+ old_regions_evacuated++;
+ } else {
+ // We failed to sufficiently expand old so unwind proposed expansion
+ committed_from_shared_reserves -= proposed_old_region_expansion * region_size_bytes;
+ }
+ } else if (heap->is_tenurable(r)) {
+ size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahPromoEvacWaste);
+ size_t new_promo_consumption = consumed_by_promo + anticipated_consumption;
+ size_t new_promo_reserve = old_promo_reserve;
+ size_t proposed_old_region_expansion = 0;
+ while ((new_promo_consumption > new_promo_reserve) && (committed_from_shared_reserves < shared_reserves)) {
+ committed_from_shared_reserves += region_size_bytes;
+ proposed_old_region_expansion++;
+ new_promo_reserve += region_size_bytes;
+ }
+ if (new_promo_consumption <= new_promo_reserve) {
+ add_region = true;
+ old_promo_reserve = new_promo_reserve;
+ promo_bytes += live_bytes;
+ consumed_by_promo = new_promo_consumption;
+ cur_garbage = new_garbage;
+ aged_regions_promoted++;
+ } else {
+ // We failed to sufficiently expand old so unwind proposed expansion
+ committed_from_shared_reserves -= proposed_old_region_expansion * region_size_bytes;
+ }
+ } else {
+ assert(r->is_young() && !heap->is_tenurable(r), "DeMorgan's law (assuming r->is_affiliated)");
+ size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahEvacWaste);
+ size_t new_young_evac_consumption = consumed_by_young_evac + anticipated_consumption;
+ size_t new_young_evac_reserve = young_evac_reserve;
+ size_t proposed_young_region_expansion = 0;
+ while ((new_young_evac_consumption > new_young_evac_reserve) && (committed_from_shared_reserves < shared_reserves)) {
+ committed_from_shared_reserves += region_size_bytes;
+ proposed_young_region_expansion++;
+ new_young_evac_reserve += region_size_bytes;
+ }
+ if (new_young_evac_consumption <= new_young_evac_reserve) {
+ add_region = true;
+ young_evac_reserve = new_young_evac_reserve;
+ young_evac_bytes += live_bytes;
+ consumed_by_young_evac = new_young_evac_consumption;
+ cur_garbage = new_garbage;
+ young_regions_evacuated++;
+ } else {
+ // We failed to sufficiently expand young, so unwind the proposed expansion
+ committed_from_shared_reserves -= proposed_young_region_expansion * region_size_bytes;
}
- }
- if ((new_cset <= max_young_cset) && (add_regardless || (region_garbage > garbage_threshold))) {
- add_region = true;
- young_cur_cset = new_cset;
- cur_young_garbage = new_garbage;
}
}
if (add_region) {
cset->add_region(r);
}
}
- if (regions_transferred_to_old > 0) {
- assert(young_evac_reserve > regions_transferred_to_old * region_size_bytes, "young reserve cannot be negative");
- heap->young_generation()->set_evacuation_reserve(young_evac_reserve - regions_transferred_to_old * region_size_bytes);
- heap->old_generation()->set_evacuation_reserve(old_evac_reserve + regions_transferred_to_old * region_size_bytes);
+
+ if (committed_from_shared_reserves < shared_reserves) {
+ // Give all the rest to promotion
+ old_promo_reserve += (shared_reserves - committed_from_shared_reserves);
+ // dead code: committed_from_shared_reserves = shared_reserves;
}
+
+ // Consider the effects of round-off:
+ // 1. We know that the sum over each evacuation multiplied by Evacuation Waste is <= total evacuation reserve
+ // 2. However, the reserve for each individual evacuation may be rounded down. In the worst case, we will be over budget
+ // by the number of regions evacuated, since each region's reserve might be under-estimated by at most 1
+ // 3. Likewise, if we take the sum of bytes evacuated, multiply this by the Evacuation Waste, and then round down
+ // to the nearest integer, the calculated reserve will underestimate the true reserve needs by at most 1.
+ // 4. This explains the adjustments to subtotals in the assert statements below.
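+ //
+ // Worked example (illustrative): if three young regions are evacuated, each per-region anticipated consumption
+ // (live_bytes * ShenandoahEvacWaste, truncated to size_t) may under-count by just under one byte, so the exact
+ // product young_evac_bytes * ShenandoahEvacWaste can exceed the tracked reserve by up to 3 bytes; the
+ // "+ young_regions_evacuated" slack below allows for this.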
+ assert(young_evac_bytes * ShenandoahEvacWaste <= young_evac_reserve + young_regions_evacuated,
+ "budget: %zu <= %zu", (size_t) (young_evac_bytes * ShenandoahEvacWaste), young_evac_reserve);
+ assert(old_evac_bytes * ShenandoahOldEvacWaste <= old_evac_reserve + old_regions_evacuated,
+ "budget: %zu <= %zu", (size_t) (old_evac_bytes * ShenandoahOldEvacWaste), old_evac_reserve);
+ assert(promo_bytes * ShenandoahPromoEvacWaste <= old_promo_reserve + aged_regions_promoted,
+ "budget: %zu <= %zu", (size_t) (promo_bytes * ShenandoahPromoEvacWaste), old_promo_reserve);
+ assert(young_evac_reserve + old_evac_reserve + old_promo_reserve <=
+ heap->young_generation()->get_evacuation_reserve() + heap->old_generation()->get_evacuation_reserve() +
+ heap->old_generation()->get_promoted_reserve(), "Exceeded budget");
+
+ if (heap->young_generation()->get_evacuation_reserve() < young_evac_reserve) {
+ size_t delta_bytes = young_evac_reserve - heap->young_generation()->get_evacuation_reserve();
+ size_t delta_regions = delta_bytes / region_size_bytes;
+ size_t regions_to_transfer = MIN2(unaffiliated_old_regions, delta_regions);
+ log_info(gc)("Global GC moves %zu unaffiliated regions from old collector to young collector reserves", regions_to_transfer);
+ ssize_t negated_regions = -((ssize_t) regions_to_transfer);
+ heap->free_set()->move_unaffiliated_regions_from_collector_to_old_collector(negated_regions);
+ } else if (heap->young_generation()->get_evacuation_reserve() > young_evac_reserve) {
+ size_t delta_bytes = heap->young_generation()->get_evacuation_reserve() - young_evac_reserve;
+ size_t delta_regions = delta_bytes / region_size_bytes;
+ size_t regions_to_transfer = MIN2(unaffiliated_young_regions, delta_regions);
+ log_info(gc)("Global GC moves %zu unaffiliated regions from young collector to old collector reserves", regions_to_transfer);
+ heap->free_set()->move_unaffiliated_regions_from_collector_to_old_collector(regions_to_transfer);
+ }
+
+ heap->young_generation()->set_evacuation_reserve(young_evac_reserve);
+ heap->old_generation()->set_evacuation_reserve(old_evac_reserve);
+ heap->old_generation()->set_promoted_reserve(old_promo_reserve);
}
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp
index 1f95f75c521..e0513f60da9 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp
@@ -39,9 +39,9 @@ class ShenandoahGlobalHeuristics : public ShenandoahGenerationalHeuristics {
public:
ShenandoahGlobalHeuristics(ShenandoahGlobalGeneration* generation);
- void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
- RegionData* data, size_t size,
- size_t actual_free) override;
+ size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+ RegionData* data, size_t size,
+ size_t actual_free) override;
private:
void choose_global_collection_set(ShenandoahCollectionSet* cset,
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp
index eb740cfac61..aeb64b6f1df 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp
@@ -72,7 +72,7 @@ ShenandoahHeuristics::~ShenandoahHeuristics() {
FREE_C_HEAP_ARRAY(RegionGarbage, _region_data);
}
-void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
+size_t ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
assert(collection_set->is_empty(), "Must be empty");
@@ -153,8 +153,8 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec
if (immediate_percent <= ShenandoahImmediateThreshold) {
choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
}
-
collection_set->summarize(total_garbage, immediate_garbage, immediate_regions);
+ return 0;
}
void ShenandoahHeuristics::record_cycle_start() {
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
index fb8cfb36353..ae34a9743a9 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
@@ -129,6 +129,13 @@ protected:
#endif
}
+ inline void update_livedata(size_t live) {
+ _region_union._live_data = live;
+#ifdef ASSERT
+ _union_tag = is_live_data;
+#endif
+ }
+
inline ShenandoahHeapRegion* get_region() const {
assert(_union_tag != is_uninitialized, "Cannot fetch region from uninitialized RegionData");
return _region;
@@ -176,9 +183,12 @@ protected:
static int compare_by_garbage(RegionData a, RegionData b);
- virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
- RegionData* data, size_t data_size,
- size_t free) = 0;
+ // This is a helper function to choose_collection_set(), returning the number of regions that need to be transferred to
+ // the old reserve from the young reserve in order to effectively evacuate the chosen collection set. In non-generational
+ // mode, the return value is 0.
+ virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
+ RegionData* data, size_t data_size,
+ size_t free) = 0;
void adjust_penalty(intx step);
@@ -226,7 +236,9 @@ public:
virtual void record_requested_gc();
- virtual void choose_collection_set(ShenandoahCollectionSet* collection_set);
+ // Choose the collection set, returning the number of regions that need to be transferred to the old reserve from the young
+ // reserve in order to effectively evacuate the chosen collection set. In non-generational mode, the return value is 0.
+ virtual size_t choose_collection_set(ShenandoahCollectionSet* collection_set);
virtual bool can_unload_classes();
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
index 029a4dd98fb..f47d0cbe819 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
@@ -26,9 +26,11 @@
#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
+#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
#include "utilities/quickSort.hpp"
@@ -77,18 +79,31 @@ ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* genera
}
bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
- if (unprocessed_old_collection_candidates() == 0) {
- return false;
- }
+ _mixed_evac_cset = collection_set;
+ _included_old_regions = 0;
+ _evacuated_old_bytes = 0;
+ _collected_old_bytes = 0;
if (_old_generation->is_preparing_for_mark()) {
// We have unprocessed old collection candidates, but the heuristic has given up on evacuating them.
// This is most likely because they were _all_ pinned at the time of the last mixed evacuation (and
// this in turn is most likely because there are just one or two candidate regions remaining).
- log_info(gc, ergo)("Remaining " UINT32_FORMAT " old regions are being coalesced and filled", unprocessed_old_collection_candidates());
+ log_info(gc, ergo)("Remaining " UINT32_FORMAT
+ " old regions are being coalesced and filled", unprocessed_old_collection_candidates());
return false;
}
+ // Between consecutive mixed-evacuation cycles, the live data within each candidate region may change due to
+ // promotions and old-gen evacuations. Re-sort the candidate regions in order to first evacuate regions that have
+ // the smallest amount of live data. These require the least evacuation effort. Doing these first allows
+ // us to more quickly replenish free memory with empty regions.
+ for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; i++) {
+ ShenandoahHeapRegion* r = _region_data[i].get_region();
+ _region_data[i].update_livedata(r->get_mixed_candidate_live_data_bytes());
+ }
+ QuickSort::sort(_region_data + _next_old_collection_candidate, unprocessed_old_collection_candidates(),
+ compare_by_live);
+
_first_pinned_candidate = NOT_FOUND;
uint included_old_regions = 0;
@@ -100,150 +115,44 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
// of memory that can still be evacuated. We address this by reducing the evacuation budget by the amount
// of live memory in that region and by the amount of unallocated memory in that region if the evacuation
// budget is constrained by availability of free memory.
- const size_t old_evacuation_reserve = _old_generation->get_evacuation_reserve();
- const size_t old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste);
- size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
- size_t fragmented_available;
- size_t excess_fragmented_available;
+ _old_evacuation_reserve = _old_generation->get_evacuation_reserve();
+ _old_evacuation_budget = (size_t) ((double) _old_evacuation_reserve / ShenandoahOldEvacWaste);
- if (unfragmented_available > old_evacuation_budget) {
- unfragmented_available = old_evacuation_budget;
- fragmented_available = 0;
- excess_fragmented_available = 0;
+ // fragmented_available is the amount of memory within partially consumed old regions that may be required to
+ // hold the results of old evacuations. If all of the memory required by the old evacuation reserve is available
+ // in unfragmented regions (unaffiliated old regions), then fragmented_available is zero because we do not need
+ // to evacuate into the existing partially consumed old regions.
+
+ // If fragmented_available is non-zero, excess_fragmented_old_budget represents the amount of fragmented memory
+ // that is available within old, but is not required to hold the results of old evacuation. As old-gen regions
+ // are added into the collection set, their free memory is subtracted from excess_fragmented_old_budget until the
+ // excess is exhausted. For old-gen regions subsequently added to the collection set, their free memory is
+ // subtracted from fragmented_available and from the old_evacuation_budget (since the budget decreases when this
+ // fragmented_available memory decreases). After fragmented_available has been exhausted, any further old regions
+ // selected for the cset do not further decrease the old_evacuation_budget because all further evacuation is targeted
+ // to unfragmented regions.
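+ //
+ // Hypothetical illustration: if the old evacuation reserve is 100 MiB and old has 80 MiB of unaffiliated (empty)
+ // region memory plus 50 MiB of free memory inside affiliated regions, then 20 MiB of the reserve must be satisfied
+ // from fragmented memory and the remaining 30 MiB of fragmented free memory is treated as excess. The two unspent
+ // budgets are then scaled down by ShenandoahOldEvacWaste.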
+
+ size_t unaffiliated_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
+ if (unaffiliated_available > _old_evacuation_reserve) {
+ _unspent_unfragmented_old_budget = _old_evacuation_budget;
+ _unspent_fragmented_old_budget = 0;
+ _excess_fragmented_old_budget = 0;
} else {
- assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available");
- fragmented_available = _old_generation->available() - unfragmented_available;
- assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up");
- if (fragmented_available + unfragmented_available > old_evacuation_budget) {
- excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget;
- fragmented_available -= excess_fragmented_available;
+ assert(_old_generation->available() >= _old_evacuation_reserve, "Cannot reserve more than is available");
+ size_t affiliated_available = _old_generation->available() - unaffiliated_available;
+ assert(affiliated_available + unaffiliated_available >= _old_evacuation_reserve, "Budgets do not add up");
+ if (affiliated_available + unaffiliated_available > _old_evacuation_reserve) {
+ _excess_fragmented_old_budget = (affiliated_available + unaffiliated_available) - _old_evacuation_reserve;
+ affiliated_available -= _excess_fragmented_old_budget;
}
+ _unspent_fragmented_old_budget = (size_t) ((double) affiliated_available / ShenandoahOldEvacWaste);
+ _unspent_unfragmented_old_budget = (size_t) ((double) unaffiliated_available / ShenandoahOldEvacWaste);
}
- size_t remaining_old_evacuation_budget = old_evacuation_budget;
- log_debug(gc)("Choose old regions for mixed collection: old evacuation budget: %zu%s, candidates: %u",
- byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
+ log_debug(gc)("Choose old regions for mixed collection: old evacuation budget: " PROPERFMT ", candidates: %u",
+ PROPERFMTARGS(_old_evacuation_budget),
unprocessed_old_collection_candidates());
-
- size_t lost_evacuation_capacity = 0;
-
- // The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
- // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
- // Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to
- // evacuate region N, then there is no need to even consider evacuating region N+1.
- while (unprocessed_old_collection_candidates() > 0) {
- // Old collection candidates are sorted in order of decreasing garbage contained therein.
- ShenandoahHeapRegion* r = next_old_collection_candidate();
- if (r == nullptr) {
- break;
- }
- assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");
-
- // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
- // to decrease the capacity of the fragmented memory by the scaled loss.
-
- const size_t live_data_for_evacuation = r->get_live_data_bytes();
- size_t lost_available = r->free();
-
- if ((lost_available > 0) && (excess_fragmented_available > 0)) {
- if (lost_available < excess_fragmented_available) {
- excess_fragmented_available -= lost_available;
- lost_evacuation_capacity -= lost_available;
- lost_available = 0;
- } else {
- lost_available -= excess_fragmented_available;
- lost_evacuation_capacity -= excess_fragmented_available;
- excess_fragmented_available = 0;
- }
- }
- size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste);
- if ((lost_available > 0) && (fragmented_available > 0)) {
- if (scaled_loss + live_data_for_evacuation < fragmented_available) {
- fragmented_available -= scaled_loss;
- scaled_loss = 0;
- } else {
- // We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother
- // to decrement scaled_loss
- }
- }
- if (scaled_loss > 0) {
- // We were not able to account for the lost free memory within fragmented memory, so we need to take this
- // allocation out of unfragmented memory. Unfragmented memory does not need to account for loss of free.
- if (live_data_for_evacuation > unfragmented_available) {
- // There is no room to evacuate this region or any that come after it in within the candidates array.
- log_debug(gc, cset)("Not enough unfragmented memory (%zu) to hold evacuees (%zu) from region: (%zu)",
- unfragmented_available, live_data_for_evacuation, r->index());
- break;
- } else {
- unfragmented_available -= live_data_for_evacuation;
- }
- } else {
- // Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either
- // fragmented or unfragmented available memory. Use up the fragmented memory budget first.
- size_t evacuation_need = live_data_for_evacuation;
-
- if (evacuation_need > fragmented_available) {
- evacuation_need -= fragmented_available;
- fragmented_available = 0;
- } else {
- fragmented_available -= evacuation_need;
- evacuation_need = 0;
- }
- if (evacuation_need > unfragmented_available) {
- // There is no room to evacuate this region or any that come after it in within the candidates array.
- log_debug(gc, cset)("Not enough unfragmented memory (%zu) to hold evacuees (%zu) from region: (%zu)",
- unfragmented_available, live_data_for_evacuation, r->index());
- break;
- } else {
- unfragmented_available -= evacuation_need;
- // dead code: evacuation_need == 0;
- }
- }
- collection_set->add_region(r);
- included_old_regions++;
- evacuated_old_bytes += live_data_for_evacuation;
- collected_old_bytes += r->garbage();
- consume_old_collection_candidate();
- }
-
- if (_first_pinned_candidate != NOT_FOUND) {
- // Need to deal with pinned regions
- slide_pinned_regions_to_front();
- }
- decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes);
- if (included_old_regions > 0) {
- log_info(gc, ergo)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " PROPERFMT ", reclaiming: " PROPERFMT ")",
- included_old_regions, PROPERFMTARGS(evacuated_old_bytes), PROPERFMTARGS(collected_old_bytes));
- }
-
- if (unprocessed_old_collection_candidates() == 0) {
- // We have added the last of our collection candidates to a mixed collection.
- // Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate.
- clear_triggers();
-
- _old_generation->complete_mixed_evacuations();
- } else if (included_old_regions == 0) {
- // We have candidates, but none were included for evacuation - are they all pinned?
- // or did we just not have enough room for any of them in this collection set?
- // We don't want a region with a stuck pin to prevent subsequent old collections, so
- // if they are all pinned we transition to a state that will allow us to make these uncollected
- // (pinned) regions parsable.
- if (all_candidates_are_pinned()) {
- log_info(gc, ergo)("All candidate regions " UINT32_FORMAT " are pinned", unprocessed_old_collection_candidates());
- _old_generation->abandon_mixed_evacuations();
- } else {
- log_info(gc, ergo)("No regions selected for mixed collection. "
- "Old evacuation budget: " PROPERFMT ", Remaining evacuation budget: " PROPERFMT
- ", Lost capacity: " PROPERFMT
- ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
- PROPERFMTARGS(old_evacuation_reserve),
- PROPERFMTARGS(remaining_old_evacuation_budget),
- PROPERFMTARGS(lost_evacuation_capacity),
- _next_old_collection_candidate, _last_old_collection_candidate);
- }
- }
-
- return (included_old_regions > 0);
+ return add_old_regions_to_cset();
}
bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
@@ -317,6 +226,187 @@ void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
_next_old_collection_candidate = write_index + 1;
}
+bool ShenandoahOldHeuristics::add_old_regions_to_cset() {
+ if (unprocessed_old_collection_candidates() == 0) {
+ return false;
+ }
+ _first_pinned_candidate = NOT_FOUND;
+
+ // The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
+ // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
+ // Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to
+ // evacuate region N, then there is no need to even consider evacuating region N+1.
+ while (unprocessed_old_collection_candidates() > 0) {
+ // Old collection candidates are sorted in order of decreasing garbage contained therein.
+ ShenandoahHeapRegion* r = next_old_collection_candidate();
+ if (r == nullptr) {
+ break;
+ }
+ assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");
+
+ // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
+ // to decrease the capacity of the fragmented memory by the scaled loss.
+
+ const size_t live_data_for_evacuation = r->get_live_data_bytes();
+ size_t lost_available = r->free();
+
+ ssize_t fragmented_delta = 0;
+ ssize_t unfragmented_delta = 0;
+ ssize_t excess_delta = 0;
+
+ // We must decrease our mixed-evacuation budgets in proportion to the lost available memory. This memory that is no
+ // longer available was likely "promised" to promotions, so we must decrease our mixed evacuations now.
+ // (e.g. if we lose 14 bytes of available old memory, we must decrease the evacuation budget by 10 bytes.)
+ size_t scaled_loss = (size_t) (((double) lost_available) / ShenandoahOldEvacWaste);
+ if (lost_available > 0) {
+ // We need to subtract lost_available from our working evacuation budgets
+ if (scaled_loss < _excess_fragmented_old_budget) {
+ excess_delta -= scaled_loss;
+ _excess_fragmented_old_budget -= scaled_loss;
+ } else {
+ excess_delta -= _excess_fragmented_old_budget;
+ _excess_fragmented_old_budget = 0;
+ }
+
+ if (scaled_loss < _unspent_fragmented_old_budget) {
+ _unspent_fragmented_old_budget -= scaled_loss;
+ fragmented_delta = -scaled_loss;
+ scaled_loss = 0;
+ } else {
+ scaled_loss -= _unspent_fragmented_old_budget;
+ fragmented_delta = -_unspent_fragmented_old_budget;
+ _unspent_fragmented_old_budget = 0;
+ }
+
+ if (scaled_loss < _unspent_unfragmented_old_budget) {
+ _unspent_unfragmented_old_budget -= scaled_loss;
+ unfragmented_delta = -scaled_loss;
+ scaled_loss = 0;
+ } else {
+ scaled_loss -= _unspent_unfragmented_old_budget;
+ unfragmented_delta = -_unspent_unfragmented_old_budget;
+ _unspent_unfragmented_old_budget = 0;
+ }
+ }
+
+ // Allocate replica from unfragmented memory if that exists
+ size_t evacuation_need = live_data_for_evacuation;
+ if (evacuation_need < _unspent_unfragmented_old_budget) {
+ _unspent_unfragmented_old_budget -= evacuation_need;
+ } else {
+ if (_unspent_unfragmented_old_budget > 0) {
+ evacuation_need -= _unspent_unfragmented_old_budget;
+ unfragmented_delta -= _unspent_unfragmented_old_budget;
+ _unspent_unfragmented_old_budget = 0;
+ }
+ // Take the remaining allocation out of fragmented available
+ if (_unspent_fragmented_old_budget > evacuation_need) {
+ _unspent_fragmented_old_budget -= evacuation_need;
+ } else {
+ // We cannot add this region into the collection set. We're done. Undo the adjustments to available.
+ _unspent_fragmented_old_budget -= fragmented_delta;
+ _unspent_unfragmented_old_budget -= unfragmented_delta;
+ _excess_fragmented_old_budget -= excess_delta;
+ break;
+ }
+ }
+ _mixed_evac_cset->add_region(r);
+ _included_old_regions++;
+ _evacuated_old_bytes += live_data_for_evacuation;
+ _collected_old_bytes += r->garbage();
+ consume_old_collection_candidate();
+ }
+ return true;
+}
+
+bool ShenandoahOldHeuristics::finalize_mixed_evacs() {
+ if (_first_pinned_candidate != NOT_FOUND) {
+ // Need to deal with pinned regions
+ slide_pinned_regions_to_front();
+ }
+ decrease_unprocessed_old_collection_candidates_live_memory(_evacuated_old_bytes);
+ if (_included_old_regions > 0) {
+ log_info(gc)("Old-gen mixed evac (%zu regions, evacuating %zu%s, reclaiming: %zu%s)",
+ _included_old_regions,
+ byte_size_in_proper_unit(_evacuated_old_bytes), proper_unit_for_byte_size(_evacuated_old_bytes),
+ byte_size_in_proper_unit(_collected_old_bytes), proper_unit_for_byte_size(_collected_old_bytes));
+ }
+
+ if (unprocessed_old_collection_candidates() == 0) {
+ // We have added the last of our collection candidates to a mixed collection.
+ // Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate.
+ clear_triggers();
+ _old_generation->complete_mixed_evacuations();
+ } else if (_included_old_regions == 0) {
+ // We have candidates, but none were included for evacuation - are they all pinned?
+ // or did we just not have enough room for any of them in this collection set?
+ // We don't want a region with a stuck pin to prevent subsequent old collections, so
+ // if they are all pinned we transition to a state that will allow us to make these uncollected
+ // (pinned) regions parsable.
+ if (all_candidates_are_pinned()) {
+ log_info(gc)("All candidate regions " UINT32_FORMAT " are pinned", unprocessed_old_collection_candidates());
+ _old_generation->abandon_mixed_evacuations();
+ } else {
+ log_info(gc)("No regions selected for mixed collection. "
+ "Old evacuation budget: " PROPERFMT ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
+ PROPERFMTARGS(_old_evacuation_reserve),
+ _next_old_collection_candidate, _last_old_collection_candidate);
+ }
+ }
+ return (_included_old_regions > 0);
+}
+
+bool ShenandoahOldHeuristics::top_off_collection_set(size_t &add_regions_to_old) {
+ if (unprocessed_old_collection_candidates() == 0) {
+ add_regions_to_old = 0;
+ return false;
+ } else {
+ ShenandoahYoungGeneration* young_generation = _heap->young_generation();
+ size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();
+ size_t max_young_cset = young_generation->get_evacuation_reserve();
+
+ // We have budgeted to assure that live bytes in tenurable regions are evacuated into the old generation. The young
+ // reserve is needed only for evacuations of untenurable regions.
+ size_t planned_young_evac = _mixed_evac_cset->get_live_bytes_in_untenurable_regions();
+ size_t consumed_from_young_cset = (size_t) (planned_young_evac * ShenandoahEvacWaste);
+
+ size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+ size_t regions_required_for_collector_reserve = (consumed_from_young_cset + region_size_bytes - 1) / region_size_bytes;
+
+ assert(consumed_from_young_cset <= max_young_cset, "sanity");
+ assert(max_young_cset <= young_unaffiliated_regions * region_size_bytes, "sanity");
+
+ size_t regions_for_old_expansion;
+ if (consumed_from_young_cset < max_young_cset) {
+ size_t excess_young_reserves = max_young_cset - consumed_from_young_cset;
+ // We can only transfer empty regions from young to old. Furthermore, we must be careful to assure that the young
+ // Collector reserve that remains after transfer is comprised entirely of empty (unaffiliated) regions.
+ size_t consumed_unaffiliated_regions = (consumed_from_young_cset + region_size_bytes - 1) / region_size_bytes;
+ size_t available_unaffiliated_regions = ((young_unaffiliated_regions > consumed_unaffiliated_regions)?
+ young_unaffiliated_regions - consumed_unaffiliated_regions: 0);
+ regions_for_old_expansion = MIN2(available_unaffiliated_regions, excess_young_reserves / region_size_bytes);
+ } else {
+ regions_for_old_expansion = 0;
+ }
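+ // Illustrative example (hypothetical sizes): with 4 MiB regions, a 40 MiB young evacuation reserve, and a planned
+ // young evacuation consuming 25 MiB (after waste), the 15 MiB excess permits up to 3 whole regions to be
+ // repurposed, capped by the number of unaffiliated young regions not needed to back the consumed portion of the
+ // young reserve.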
+ if (regions_for_old_expansion > 0) {
+ log_info(gc)("Augmenting old-gen evacuation budget from unexpended young-generation reserve by %zu regions",
+ regions_for_old_expansion);
+ add_regions_to_old = regions_for_old_expansion;
+ size_t budget_supplement = region_size_bytes * regions_for_old_expansion;
+ size_t supplement_without_waste = (size_t) (((double) budget_supplement) / ShenandoahOldEvacWaste);
+ _old_evacuation_budget += supplement_without_waste;
+ _unspent_unfragmented_old_budget += supplement_without_waste;
+ _old_generation->augment_evacuation_reserve(budget_supplement);
+ young_generation->set_evacuation_reserve(max_young_cset - budget_supplement);
+
+ return add_old_regions_to_cset();
+ } else {
+ add_regions_to_old = 0;
+ return false;
+ }
+ }
+}
+
void ShenandoahOldHeuristics::prepare_for_old_collections() {
ShenandoahHeap* heap = ShenandoahHeap::heap();
@@ -325,7 +415,6 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
size_t immediate_garbage = 0;
size_t immediate_regions = 0;
size_t live_data = 0;
-
RegionData* candidates = _region_data;
for (size_t i = 0; i < num_regions; i++) {
ShenandoahHeapRegion* region = heap->get_region(i);
@@ -344,10 +433,10 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
// else, regions that were promoted in place had 0 old live data at mark start
if (region->is_regular() || region->is_regular_pinned()) {
- // Only place regular or pinned regions with live data into the candidate set.
- // Pinned regions cannot be evacuated, but we are not actually choosing candidates
- // for the collection set here. That happens later during the next young GC cycle,
- // by which time, the pinned region may no longer be pinned.
+ // Only place regular or pinned regions with live data into the candidate set.
+ // Pinned regions cannot be evacuated, but we are not actually choosing candidates
+ // for the collection set here. That happens later during the next young GC cycle,
+ // by which time, the pinned region may no longer be pinned.
if (!region->has_live()) {
assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
region->make_trash_immediate();
@@ -414,6 +503,8 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
ShenandoahHeapRegion* r = candidates[i].get_region();
size_t region_garbage = r->garbage();
size_t region_free = r->free();
+
+ r->capture_mixed_candidate_garbage();
candidates_garbage += region_garbage;
unfragmented += region_free;
}
@@ -456,6 +547,8 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
r->index(), ShenandoahHeapRegion::region_state_to_string(r->state()));
const size_t region_garbage = r->garbage();
const size_t region_free = r->free();
+
+ r->capture_mixed_candidate_garbage();
candidates_garbage += region_garbage;
unfragmented += region_free;
defrag_count++;
@@ -546,6 +639,7 @@ unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(Shenandoa
void ShenandoahOldHeuristics::abandon_collection_candidates() {
_last_old_collection_candidate = 0;
_next_old_collection_candidate = 0;
+ _live_bytes_in_unprocessed_candidates = 0;
_last_old_region = 0;
}
@@ -790,8 +884,9 @@ bool ShenandoahOldHeuristics::is_experimental() {
return true;
}
-void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
- ShenandoahHeuristics::RegionData* data,
- size_t data_size, size_t free) {
+size_t ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
+ ShenandoahHeuristics::RegionData* data,
+ size_t data_size, size_t free) {
ShouldNotReachHere();
+ return 0;
}
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp
index f38194c1ee7..97a5b1ebf24 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp
@@ -102,6 +102,30 @@ private:
size_t _fragmentation_first_old_region;
size_t _fragmentation_last_old_region;
+ // State variables involved in construction of a mixed-evacuation collection set. These variables are initialized
+ // when client code invokes prime_collection_set(). They are consulted, and sometimes modified, when client code
+ // calls top_off_collection_set() to possibly expand the number of old-gen regions in a mixed evacuation cset, and by
+ // finalize_mixed_evacs(), which prepares the way for mixed evacuations to begin.
+ ShenandoahCollectionSet* _mixed_evac_cset;
+ size_t _evacuated_old_bytes;
+ size_t _collected_old_bytes;
+ size_t _included_old_regions;
+ size_t _old_evacuation_reserve;
+ size_t _old_evacuation_budget;
+
+ // This represents the amount of memory that can be evacuated from old into initially empty regions during a mixed evacuation.
+ // This is the total amount of unfragmented free memory in old divided by ShenandoahOldEvacWaste.
+ size_t _unspent_unfragmented_old_budget;
+
+ // This represents the amount of memory that can be evacuated from old into initially non-empty regions during a mixed
+ // evacuation. This is the total amount of initially fragmented free memory in old divided by ShenandoahOldEvacWaste.
+ size_t _unspent_fragmented_old_budget;
+
+ // If there is more available memory in old than is required by the intended mixed evacuation, the amount of excess
+ // memory is represented by _excess_fragmented_old_budget. To convert this value into a promotion budget, multiply by
+ // ShenandoahOldEvacWaste and divide by ShenandoahPromoEvacWaste.
+ size_t _excess_fragmented_old_budget;
+
// The value of command-line argument ShenandoahOldGarbageThreshold represents the percent of garbage that must
// be present within an old-generation region before that region is considered a good candidate for inclusion in
// the collection set under normal circumstances. For our purposes, normal circustances are when the memory consumed
@@ -131,7 +155,15 @@ private:
void set_trigger_if_old_is_overgrown();
protected:
- void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override;
+ size_t
+ choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override;
+
+ // This internal helper routine adds as many mixed evacuation candidate regions as fit within the old-gen evacuation budget
+ // to the collection set. This may be called twice to prepare for any given mixed evacuation cycle, the first time with
+ // a conservative old evacuation budget, and the second time with a larger more aggressive old evacuation budget. Returns
+ // true iff we need to finalize mixed evacs. (If no regions are added to the collection set, there is no need to finalize
+ // mixed evacuations.)
+ bool add_old_regions_to_cset();
public:
explicit ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap);
@@ -139,8 +171,22 @@ public:
// Prepare for evacuation of old-gen regions by capturing the mark results of a recently completed concurrent mark pass.
void prepare_for_old_collections();
- // Return true iff the collection set is primed with at least one old-gen region.
- bool prime_collection_set(ShenandoahCollectionSet* set);
+ // Initialize instance variables to support the preparation of a mixed-evacuation collection set. Adds as many
+ // old candidate regions into the collection set as can fit within the initial conservative old evacuation budget.
+ // Returns true iff we need to finalize mixed evacs.
+ bool prime_collection_set(ShenandoahCollectionSet* collection_set);
+
+ // If young evacuation did not consume all of its available evacuation reserve, add as many additional mixed-
+ // evacuation candidate regions into the collection set as will fit within this excess repurposed reserve.
+ // Returns true iff we need to finalize mixed evacs. Upon return, the out parameter add_regions_to_old holds the
+ // number of regions to transfer from young to old.
+ bool top_off_collection_set(size_t &add_regions_to_old);
+
+ // Having added all eligible mixed-evacuation candidates to the collection set, this function updates the total count
+ // of how much old-gen memory remains to be evacuated and adjusts the representation of old-gen regions that remain to
+ // be evacuated, giving special attention to regions that are currently pinned. It outputs relevant log messages and
+ // returns true iff the collection set holds at least one unpinned mixed evacuation candidate.
+ bool finalize_mixed_evacs();
// How many old-collection candidates have not yet been processed?
uint unprocessed_old_collection_candidates() const;
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp
index b5e9cc433ea..d4a38278161 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp
@@ -50,9 +50,9 @@ bool ShenandoahPassiveHeuristics::should_degenerate_cycle() {
return ShenandoahDegeneratedGC;
}
-void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
- RegionData* data, size_t size,
- size_t actual_free) {
+size_t ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+ RegionData* data, size_t size,
+ size_t actual_free) {
assert(ShenandoahDegeneratedGC, "This path is only taken for Degenerated GC");
// Do not select too large CSet that would overflow the available free space.
@@ -76,4 +76,5 @@ void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(Shenando
cset->add_region(r);
}
}
+ return 0;
}
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp
index be4e91b1800..7a64fad7cc9 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp
@@ -46,9 +46,9 @@ public:
virtual bool should_degenerate_cycle();
- virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
- RegionData* data, size_t data_size,
- size_t free);
+ virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
+ RegionData* data, size_t data_size,
+ size_t free);
virtual const char* name() { return "Passive"; }
virtual bool is_diagnostic() { return true; }
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp
index d4d66fef6a1..3843e434781 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp
@@ -59,9 +59,9 @@ bool ShenandoahStaticHeuristics::should_start_gc() {
return ShenandoahHeuristics::should_start_gc();
}
-void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
- RegionData* data, size_t size,
- size_t free) {
+size_t ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+ RegionData* data, size_t size,
+ size_t free) {
size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
for (size_t idx = 0; idx < size; idx++) {
@@ -70,4 +70,5 @@ void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(Shenandoa
cset->add_region(r);
}
}
+ return 0;
}
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp
index 24cb5547921..27dc3c8e0ae 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp
@@ -40,9 +40,9 @@ public:
virtual bool should_start_gc();
- virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
- RegionData* data, size_t size,
- size_t free);
+ virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+ RegionData* data, size_t size,
+ size_t free);
virtual const char* name() { return "Static"; }
virtual bool is_diagnostic() { return false; }
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp
index 15d1058d7cd..01c3873df72 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp
@@ -33,11 +33,11 @@
#include "utilities/quickSort.hpp"
ShenandoahYoungHeuristics::ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation)
- : ShenandoahGenerationalHeuristics(generation) {
+ : ShenandoahGenerationalHeuristics(generation) {
}
-void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+size_t ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
// See comments in ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata():
@@ -48,6 +48,8 @@ void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenandoah
// array before younger regions that typically contain more garbage. This is one reason why,
// for example, we continue examining regions even after rejecting a region that has
// more live data than we can evacuate.
+ ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
+ bool need_to_finalize_mixed = heap->old_generation()->heuristics()->prime_collection_set(cset);
// Better select garbage-first regions
QuickSort::sort(data, (int) size, compare_by_garbage);
@@ -55,6 +57,17 @@ void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenandoah
size_t cur_young_garbage = add_preselected_regions_to_collection_set(cset, data, size);
choose_young_collection_set(cset, data, size, actual_free, cur_young_garbage);
+
+ // Especially when young-gen trigger is expedited in order to finish mixed evacuations, there may not be
+ // enough consolidated garbage to make effective use of young-gen evacuation reserve. If there is still
+ // young-gen reserve available following selection of the young-gen collection set, see if we can use
+ // this memory to expand the old-gen evacuation collection set.
+ size_t add_regions_to_old;
+ need_to_finalize_mixed |= heap->old_generation()->heuristics()->top_off_collection_set(add_regions_to_old);
+ if (need_to_finalize_mixed) {
+ heap->old_generation()->heuristics()->finalize_mixed_evacs();
+ }
+ return add_regions_to_old;
}
void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollectionSet* cset,
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp
index b9d64059680..85587887663 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp
@@ -38,9 +38,9 @@ public:
explicit ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation);
- void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
- RegionData* data, size_t size,
- size_t actual_free) override;
+ size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+ RegionData* data, size_t size,
+ size_t actual_free) override;
bool should_start_gc() override;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
index e58a7f40796..c1c6b876d90 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
@@ -50,6 +50,8 @@ ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedS
_region_count(0),
_old_garbage(0),
_preselected_regions(nullptr),
+ _young_available_bytes_collected(0),
+ _old_available_bytes_collected(0),
_current_index(0) {
// The collection set map is reserved to cover the entire heap *and* zero addresses.
@@ -104,6 +106,7 @@ void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
}
} else if (r->is_old()) {
_old_bytes_to_evacuate += live;
+ _old_available_bytes_collected += free;
_old_garbage += garbage;
}
@@ -140,6 +143,7 @@ void ShenandoahCollectionSet::clear() {
_old_bytes_to_evacuate = 0;
_young_available_bytes_collected = 0;
+ _old_available_bytes_collected = 0;
_has_old_regions = false;
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
index a1b77baa2d3..c99271de1fb 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
@@ -75,6 +75,10 @@ private:
// should be subtracted from what's available.
size_t _young_available_bytes_collected;
+ // When a region having memory available to be allocated is added to the collection set, the region's available memory
+ // should be subtracted from what's available.
+ size_t _old_available_bytes_collected;
+
shenandoah_padding(0);
volatile size_t _current_index;
shenandoah_padding(1);
@@ -121,6 +125,9 @@ public:
// Returns the amount of free bytes in young regions in the collection set.
size_t get_young_available_bytes_collected() const { return _young_available_bytes_collected; }
+ // Returns the amount of free bytes in old regions in the collection set.
+ size_t get_old_available_bytes_collected() const { return _old_available_bytes_collected; }
+
// Returns the amount of garbage in old regions in the collection set.
inline size_t get_old_garbage() const;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
index cee8727a3f4..364279deafe 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
@@ -204,9 +204,8 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
return false;
}
- entry_concurrent_update_refs_prepare(heap);
-
// Perform update-refs phase.
+ entry_concurrent_update_refs_prepare(heap);
if (ShenandoahVerify) {
vmop_entry_init_update_refs();
}
@@ -227,6 +226,7 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
// Update references freed up collection set, kick the cleanup to reclaim the space.
entry_cleanup_complete();
} else {
+ _abbreviated = true;
if (!entry_final_roots()) {
assert(_degen_point != _degenerated_unset, "Need to know where to start degenerated cycle");
return false;
@@ -235,7 +235,6 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
if (VerifyAfterGC) {
vmop_entry_verify_final_roots();
}
- _abbreviated = true;
}
// We defer generation resizing actions until after cset regions have been recycled. We do this even following an
@@ -282,7 +281,6 @@ bool ShenandoahConcurrentGC::complete_abbreviated_cycle() {
return true;
}
-
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
ShenandoahHeap* const heap = ShenandoahHeap::heap();
TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
@@ -536,6 +534,12 @@ void ShenandoahConcurrentGC::entry_cleanup_early() {
// This phase does not use workers, no need for setup
heap->try_inject_alloc_failure();
op_cleanup_early();
+ if (!heap->is_evacuation_in_progress()) {
+ // This is an abbreviated cycle. Rebuild the freeset in order to establish reserves for the next GC cycle. Doing
+ // the rebuild ASAP also expedites availability of immediate trash, reducing the likelihood that we will degenerate
+ // during promote-in-place processing.
+ heap->rebuild_free_set(true /*concurrent*/);
+ }
}
void ShenandoahConcurrentGC::entry_evacuate() {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
index a8c97801824..c4fe9103fcb 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
@@ -326,7 +326,7 @@ void ShenandoahRegionPartitions::initialize_old_collector() {
}
void ShenandoahRegionPartitions::make_all_regions_unavailable() {
- shenandoah_assert_heaplocked();
+ shenandoah_assert_heaplocked_or_safepoint();
for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
_membership[partition_id].clear_all();
_leftmosts[partition_id] = _max;
@@ -439,6 +439,13 @@ void ShenandoahRegionPartitions::set_capacity_of(ShenandoahFreeSetPartitionId wh
_available[int(which_partition)] = value - _used[int(which_partition)];
}
+void ShenandoahRegionPartitions::set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
+ shenandoah_assert_heaplocked();
+ assert (which_partition < NumPartitions, "selected free set must be valid");
+ _used[int(which_partition)] = value;
+ _available[int(which_partition)] = _capacity[int(which_partition)] - value;
+}
+
void ShenandoahRegionPartitions::increase_capacity(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
shenandoah_assert_heaplocked();
@@ -900,7 +907,7 @@ idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId w
#ifdef ASSERT
-void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
+void ShenandoahRegionPartitions::assert_bounds() {
size_t capacities[UIntNumPartitions];
size_t used[UIntNumPartitions];
@@ -936,7 +943,7 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
switch (partition) {
case ShenandoahFreeSetPartitionId::NotFree:
{
- assert(!validate_totals || (capacity != _region_size_bytes), "Should not be retired if empty");
+ assert(capacity != _region_size_bytes, "Should not be retired if empty");
ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i);
if (r->is_humongous()) {
if (r->is_old()) {
@@ -976,12 +983,12 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
case ShenandoahFreeSetPartitionId::Collector:
case ShenandoahFreeSetPartitionId::OldCollector:
{
+ ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i);
assert(capacity > 0, "free regions must have allocation capacity");
bool is_empty = (capacity == _region_size_bytes);
regions[int(partition)]++;
used[int(partition)] += _region_size_bytes - capacity;
capacities[int(partition)] += _region_size_bytes;
-
if (i < leftmosts[int(partition)]) {
leftmosts[int(partition)] = i;
}
@@ -1020,20 +1027,20 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
idx_t beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
idx_t end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Mutator),
- "Mutator free regions before the leftmost: %zd, bound %zd",
+ "Mutator free region before the leftmost: %zd, bound %zd",
beg_off, leftmost(ShenandoahFreeSetPartitionId::Mutator));
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Mutator),
- "Mutator free regions past the rightmost: %zd, bound %zd",
+ "Mutator free region past the rightmost: %zd, bound %zd",
end_off, rightmost(ShenandoahFreeSetPartitionId::Mutator));
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
- assert (beg_off >= leftmost_empty(ShenandoahFreeSetPartitionId::Mutator),
- "Mutator free empty regions before the leftmost: %zd, bound %zd",
- beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Mutator));
- assert (end_off <= rightmost_empty(ShenandoahFreeSetPartitionId::Mutator),
- "Mutator free empty regions past the rightmost: %zd, bound %zd",
- end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
+ assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
+ "free empty region (%zd) before the leftmost bound %zd",
+ beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)]);
+ assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
+ "free empty region (%zd) past the rightmost bound %zd",
+ end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)]);
// Performance invariants. Failing these would not break the free partition, but performance would suffer.
assert (leftmost(ShenandoahFreeSetPartitionId::Collector) <= _max, "leftmost in bounds: %zd < %zd",
@@ -1053,20 +1060,20 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
- "Collector free regions before the leftmost: %zd, bound %zd",
+ "Collector free region before the leftmost: %zd, bound %zd",
beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
- "Collector free regions past the rightmost: %zd, bound %zd",
+ "Collector free region past the rightmost: %zd, bound %zd",
end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
- "Collector free empty regions before the leftmost: %zd, bound %zd",
- beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
+ "Collector free empty region before the leftmost: %zd, bound %zd",
+ beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);
assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
- "Collector free empty regions past the rightmost: %zd, bound %zd",
- end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
+ "Collector free empty region past the rightmost: %zd, bound %zd",
+ end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);
// Performance invariants. Failing these would not break the free partition, but performance would suffer.
assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "OldCollector leftmost in bounds: %zd < %zd",
@@ -1083,106 +1090,109 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
ShenandoahFreeSetPartitionId::OldCollector),
"OldCollector rightmost region should be free: %zd", rightmost(ShenandoahFreeSetPartitionId::OldCollector));
+ // Concurrent recycling of trash recycles a region (changing its state from is_trash to is_empty) without holding the
+ // heap lock, which is why the assertion messages below report whether the region at the violated bound is trash.
+
// If OldCollector partition is empty, leftmosts will both equal max, rightmosts will both equal zero.
// Likewise for empty region partitions.
beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
end_off = rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
- assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector),
- "OldCollector free regions before the leftmost: %zd, bound %zd",
+ assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector), "free regions before the leftmost: %zd, bound %zd",
beg_off, leftmost(ShenandoahFreeSetPartitionId::OldCollector));
- assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector),
- "OldCollector free regions past the rightmost: %zd, bound %zd",
+ assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector), "free regions past the rightmost: %zd, bound %zd",
end_off, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
- "OldCollector free empty regions before the leftmost: %zd, bound %zd",
- beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
+ "free empty region (%zd) before the leftmost bound %zd, region %s trash",
+ beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
+ ((beg_off >= _max)? "out of bounds is not":
+ (ShenandoahHeap::heap()->get_region(_leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)])->is_trash()?
+ "is": "is not")));
assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
- "OldCollector free empty regions past the rightmost: %zd, bound %zd",
- end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
+ "free empty region (%zd) past the rightmost bound %zd, region %s trash",
+ end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
+ ((end_off < 0)? "out of bounds is not" :
+ (ShenandoahHeap::heap()->get_region(_rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)])->is_trash()?
+ "is": "is not")));
- if (validate_totals) {
- // young_retired_regions need to be added to either Mutator or Collector partitions, 100% used.
- // Give enough of young_retired_regions, young_retired_capacity, young_retired_user
- // to the Mutator partition to top it off so that it matches the running totals.
- //
- // Give any remnants to the Collector partition. After topping off the Collector partition, its values
- // should also match running totals.
+ // young_retired_regions need to be added to either Mutator or Collector partitions, 100% used.
+ // Give enough of young_retired_regions, young_retired_capacity, young_retired_used
+ // to the Mutator partition to top it off so that it matches the running totals.
+ //
+ // Give any remnants to the Collector partition. After topping off the Collector partition, its values
+ // should also match running totals.
+ assert(young_retired_regions * _region_size_bytes == young_retired_capacity, "sanity");
+ assert(young_retired_capacity == young_retired_used, "sanity");
- assert(young_retired_regions * _region_size_bytes == young_retired_capacity, "sanity");
- assert(young_retired_capacity == young_retired_used, "sanity");
+ assert(capacities[int(ShenandoahFreeSetPartitionId::OldCollector)]
+ == _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector capacities must match (%zu != %zu)",
+ capacities[int(ShenandoahFreeSetPartitionId::OldCollector)],
+ _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]);
+ assert(used[int(ShenandoahFreeSetPartitionId::OldCollector)]
+ == _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector used must match");
+ assert(regions[int(ShenandoahFreeSetPartitionId::OldCollector)]
+ == _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] / _region_size_bytes, "Old collector regions must match");
+ assert(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]
+ >= _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector capacity must be >= used");
+ assert(_available[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
+ (_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]),
+ "Old Collector available must equal capacity minus used");
+ assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
+ humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector humongous waste must match");
+ assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= capacities[int(ShenandoahFreeSetPartitionId::Mutator)],
+ "Capacity total must be >= counted tally");
+ size_t mutator_capacity_shortfall =
+ _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - capacities[int(ShenandoahFreeSetPartitionId::Mutator)];
+ assert(mutator_capacity_shortfall <= young_retired_capacity, "sanity");
+ capacities[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_capacity_shortfall;
+ young_retired_capacity -= mutator_capacity_shortfall;
+ capacities[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_capacity;
- assert(capacities[int(ShenandoahFreeSetPartitionId::OldCollector)]
- == _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector capacities must match");
- assert(used[int(ShenandoahFreeSetPartitionId::OldCollector)]
- == _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector used must match");
- assert(regions[int(ShenandoahFreeSetPartitionId::OldCollector)]
- == _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] / _region_size_bytes, "Old collector regions must match");
- assert(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]
- >= _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector capacity must be >= used");
- assert(_available[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
- (_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]),
- "Old Collector available must equal capacity minus used");
- assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
- humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector humongous waste must match");
+ assert(_used[int(ShenandoahFreeSetPartitionId::Mutator)] >= used[int(ShenandoahFreeSetPartitionId::Mutator)],
+ "Used total must be >= counted tally");
+ size_t mutator_used_shortfall =
+ _used[int(ShenandoahFreeSetPartitionId::Mutator)] - used[int(ShenandoahFreeSetPartitionId::Mutator)];
+ assert(mutator_used_shortfall <= young_retired_used, "sanity");
+ used[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_used_shortfall;
+ young_retired_used -= mutator_used_shortfall;
+ used[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_used;
- assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= capacities[int(ShenandoahFreeSetPartitionId::Mutator)],
- "Capacity total must be >= counted tally");
- size_t mutator_capacity_shortfall =
- _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - capacities[int(ShenandoahFreeSetPartitionId::Mutator)];
- assert(mutator_capacity_shortfall <= young_retired_capacity, "sanity");
- capacities[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_capacity_shortfall;
- young_retired_capacity -= mutator_capacity_shortfall;
- capacities[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_capacity;
+ assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
+ >= regions[int(ShenandoahFreeSetPartitionId::Mutator)], "Region total must be >= counted tally");
+ size_t mutator_regions_shortfall = (_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
+ - regions[int(ShenandoahFreeSetPartitionId::Mutator)]);
+ assert(mutator_regions_shortfall <= young_retired_regions, "sanity");
+ regions[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_regions_shortfall;
+ young_retired_regions -= mutator_regions_shortfall;
+ regions[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_regions;
+ assert(capacities[int(ShenandoahFreeSetPartitionId::Collector)] == _capacity[int(ShenandoahFreeSetPartitionId::Collector)],
+ "Collector capacities must match");
+ assert(used[int(ShenandoahFreeSetPartitionId::Collector)] == _used[int(ShenandoahFreeSetPartitionId::Collector)],
+ "Collector used must match");
+ assert(regions[int(ShenandoahFreeSetPartitionId::Collector)]
+ == _capacity[int(ShenandoahFreeSetPartitionId::Collector)] / _region_size_bytes, "Collector regions must match");
+ assert(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] >= _used[int(ShenandoahFreeSetPartitionId::Collector)],
+ "Collector Capacity must be >= used");
+ assert(_available[int(ShenandoahFreeSetPartitionId::Collector)] ==
+ (_capacity[int(ShenandoahFreeSetPartitionId::Collector)] - _used[int(ShenandoahFreeSetPartitionId::Collector)]),
+ "Collector Available must equal capacity minus used");
- assert(_used[int(ShenandoahFreeSetPartitionId::Mutator)] >= used[int(ShenandoahFreeSetPartitionId::Mutator)],
- "Used total must be >= counted tally");
- size_t mutator_used_shortfall =
- _used[int(ShenandoahFreeSetPartitionId::Mutator)] - used[int(ShenandoahFreeSetPartitionId::Mutator)];
- assert(mutator_used_shortfall <= young_retired_used, "sanity");
- used[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_used_shortfall;
- young_retired_used -= mutator_used_shortfall;
- used[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_used;
-
- assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
- >= regions[int(ShenandoahFreeSetPartitionId::Mutator)], "Region total must be >= counted tally");
- size_t mutator_regions_shortfall = (_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
- - regions[int(ShenandoahFreeSetPartitionId::Mutator)]);
- assert(mutator_regions_shortfall <= young_retired_regions, "sanity");
- regions[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_regions_shortfall;
- young_retired_regions -= mutator_regions_shortfall;
- regions[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_regions;
-
- assert(capacities[int(ShenandoahFreeSetPartitionId::Collector)] == _capacity[int(ShenandoahFreeSetPartitionId::Collector)],
- "Collector capacities must match");
- assert(used[int(ShenandoahFreeSetPartitionId::Collector)] == _used[int(ShenandoahFreeSetPartitionId::Collector)],
- "Collector used must match");
- assert(regions[int(ShenandoahFreeSetPartitionId::Collector)]
- == _capacity[int(ShenandoahFreeSetPartitionId::Collector)] / _region_size_bytes, "Collector regions must match");
- assert(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] >= _used[int(ShenandoahFreeSetPartitionId::Collector)],
- "Collector Capacity must be >= used");
- assert(_available[int(ShenandoahFreeSetPartitionId::Collector)] ==
- (_capacity[int(ShenandoahFreeSetPartitionId::Collector)] - _used[int(ShenandoahFreeSetPartitionId::Collector)]),
- "Collector Available must equal capacity minus used");
-
- assert(capacities[int(ShenandoahFreeSetPartitionId::Mutator)] == _capacity[int(ShenandoahFreeSetPartitionId::Mutator)],
- "Mutator capacities must match");
- assert(used[int(ShenandoahFreeSetPartitionId::Mutator)] == _used[int(ShenandoahFreeSetPartitionId::Mutator)],
- "Mutator used must match");
- assert(regions[int(ShenandoahFreeSetPartitionId::Mutator)]
- == _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes, "Mutator regions must match");
- assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= _used[int(ShenandoahFreeSetPartitionId::Mutator)],
- "Mutator capacity must be >= used");
- assert(_available[int(ShenandoahFreeSetPartitionId::Mutator)] ==
- (_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]),
- "Mutator available must equal capacity minus used");
- assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] == young_humongous_waste,
- "Mutator humongous waste must match");
- }
+ assert(capacities[int(ShenandoahFreeSetPartitionId::Mutator)] == _capacity[int(ShenandoahFreeSetPartitionId::Mutator)],
+ "Mutator capacities must match");
+ assert(used[int(ShenandoahFreeSetPartitionId::Mutator)] == _used[int(ShenandoahFreeSetPartitionId::Mutator)],
+ "Mutator used must match");
+ assert(regions[int(ShenandoahFreeSetPartitionId::Mutator)]
+ == _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes, "Mutator regions must match");
+ assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= _used[int(ShenandoahFreeSetPartitionId::Mutator)],
+ "Mutator capacity must be >= used");
+ assert(_available[int(ShenandoahFreeSetPartitionId::Mutator)] ==
+ (_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]),
+ "Mutator available must equal capacity minus used");
+ assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] == young_humongous_waste,
+ "Mutator humongous waste must match");
}
#endif
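The totals validation above skips retired (fully used) young regions during the per-region walk, then reconciles the counted tallies against the running `_capacity`/`_used` totals by topping off the Mutator figures first and handing any remnant to the Collector figures. A small standalone sketch of that top-off arithmetic with made-up numbers and hypothetical names, not HotSpot code:

```
// Sketch of the "top off Mutator, give the remnant to Collector" reconciliation.
#include <cassert>
#include <cstddef>

int main() {
  const size_t region_size = 1024;

  // Running totals kept by the partitions (what _capacity would claim).
  size_t mutator_capacity_total   = 10 * region_size;
  size_t collector_capacity_total = 4 * region_size;

  // Tallies counted by walking regions; retired (100% used) young regions
  // were skipped, so the counted tallies fall short of the running totals.
  size_t mutator_capacity_counted   = 7 * region_size;
  size_t collector_capacity_counted = 3 * region_size;
  size_t young_retired_capacity     = 4 * region_size;   // 4 retired young regions

  // Top off the Mutator tally first ...
  size_t mutator_shortfall = mutator_capacity_total - mutator_capacity_counted;
  assert(mutator_shortfall <= young_retired_capacity);
  mutator_capacity_counted += mutator_shortfall;
  young_retired_capacity   -= mutator_shortfall;

  // ... then give whatever remains to the Collector tally.
  collector_capacity_counted += young_retired_capacity;

  assert(mutator_capacity_counted == mutator_capacity_total);
  assert(collector_capacity_counted == collector_capacity_total);
  return 0;
}
```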
@@ -1206,6 +1216,36 @@ ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
clear_internal();
}
+void ShenandoahFreeSet::move_unaffiliated_regions_from_collector_to_old_collector(ssize_t count) {
+ shenandoah_assert_heaplocked();
+ size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+
+ size_t old_capacity = _partitions.get_capacity(ShenandoahFreeSetPartitionId::OldCollector);
+ size_t collector_capacity = _partitions.get_capacity(ShenandoahFreeSetPartitionId::Collector);
+ if (count > 0) {
+ size_t ucount = count;
+ size_t bytes_moved = ucount * region_size_bytes;
+ assert(collector_capacity >= bytes_moved, "Cannot transfer");
+ assert(_partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector) >= ucount,
+ "Cannot transfer %zu of %zu", ucount, _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector));
+ _partitions.decrease_empty_region_counts(ShenandoahFreeSetPartitionId::Collector, ucount);
+ _partitions.set_capacity_of(ShenandoahFreeSetPartitionId::Collector, collector_capacity - bytes_moved);
+ _partitions.set_capacity_of(ShenandoahFreeSetPartitionId::OldCollector, old_capacity + bytes_moved);
+ _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector, ucount);
+ } else if (count < 0) {
+ size_t ucount = -count;
+ size_t bytes_moved = ucount * region_size_bytes;
+ assert(old_capacity >= bytes_moved, "Cannot transfer");
+ assert(_partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector) >= ucount,
+ "Cannot transfer %zu of %zu", ucount, _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector));
+ _partitions.decrease_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector, ucount);
+ _partitions.set_capacity_of(ShenandoahFreeSetPartitionId::OldCollector, old_capacity - bytes_moved);
+ _partitions.set_capacity_of(ShenandoahFreeSetPartitionId::Collector, collector_capacity + bytes_moved);
+ _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Collector, ucount);
+ }
+ // else, do nothing
+}
+
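The new mover encodes the transfer direction in the sign of its argument: a positive count moves empty regions from Collector to OldCollector, a negative count moves them back. A minimal standalone sketch of that signed-transfer convention, using a hypothetical `Reserve` struct rather than the real partition types:

```
// Sketch of a signed-count transfer of empty ("unaffiliated") regions between
// two reserves: positive moves Collector -> OldCollector, negative the reverse.
#include <cassert>
#include <cstddef>

struct Reserve {
  size_t capacity_bytes;
  size_t empty_regions;
};

void move_empty_regions(Reserve& collector, Reserve& old_collector,
                        ptrdiff_t count, size_t region_size_bytes) {
  if (count == 0) return;
  Reserve& from = (count > 0) ? collector : old_collector;
  Reserve& to   = (count > 0) ? old_collector : collector;
  size_t n = (count > 0) ? static_cast<size_t>(count) : static_cast<size_t>(-count);
  size_t bytes = n * region_size_bytes;
  assert(from.empty_regions >= n && from.capacity_bytes >= bytes);
  from.empty_regions -= n;  from.capacity_bytes -= bytes;
  to.empty_regions   += n;  to.capacity_bytes   += bytes;
}

int main() {
  const size_t region_size = 1024;
  Reserve collector{8 * region_size, 8};
  Reserve old_collector{2 * region_size, 2};
  move_empty_regions(collector, old_collector, +3, region_size);  // young -> old
  move_empty_regions(collector, old_collector, -1, region_size);  // old -> young
  assert(collector.empty_regions == 6 && old_collector.empty_regions == 4);
  return 0;
}
```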
// was pip_pad_bytes
void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) {
shenandoah_assert_heaplocked();
@@ -1261,7 +1301,7 @@ void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(Shenandoah
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
- _partitions.assert_bounds(true);
+ _partitions.assert_bounds();
}
template
@@ -1496,9 +1536,12 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
return nullptr;
}
HeapWord* result = nullptr;
+ // We must call try_recycle_under_lock() even if !r->is_trash(). The reason is that if r is being recycled at this
+ // moment by a GC worker thread, it may appear to be not trash even though it has not yet been fully recycled. If
+ // we proceed without waiting for the worker to finish recycling the region, the worker thread may overwrite the
+ // region's affiliation with FREE after we set the region's affiliation to req.affiliation() below.
r->try_recycle_under_lock();
in_new_region = r->is_empty();
-
if (in_new_region) {
log_debug(gc, free)("Using new region (%zu) for %s (" PTR_FORMAT ").",
r->index(), req.type_string(), p2i(&req));
@@ -1668,7 +1711,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
default:
assert(false, "won't happen");
}
- _partitions.assert_bounds(true);
+ _partitions.assert_bounds();
return result;
}
@@ -1799,6 +1842,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo
increase_bytes_allocated(waste_bytes);
}
}
+
_partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_used);
increase_bytes_allocated(total_used);
req.set_actual_size(words_size);
@@ -1819,14 +1863,16 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ false,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
- _partitions.assert_bounds(true);
+ _partitions.assert_bounds();
return _heap->get_region(beg)->bottom();
}
class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionClosure {
public:
void heap_region_do(ShenandoahHeapRegion* r) {
- r->try_recycle();
+ if (r->is_trash()) {
+ r->try_recycle();
+ }
}
bool is_thread_safe() {
@@ -1861,7 +1907,7 @@ bool ShenandoahFreeSet::transfer_one_region_from_mutator_to_old_collector(size_t
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
- _partitions.assert_bounds(true);
+ _partitions.assert_bounds();
return true;
} else {
return false;
@@ -1914,7 +1960,7 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
- _partitions.assert_bounds(true);
+ _partitions.assert_bounds();
// 4. Do not adjust capacities for generations, we just swapped the regions that have already
// been accounted for. However, we should adjust the evacuation reserves as those may have changed.
shenandoah_assert_heaplocked();
@@ -1945,7 +1991,7 @@ void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ false,
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
- _partitions.assert_bounds(true);
+ _partitions.assert_bounds();
// We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
// to recycle trash before attempting to allocate anything in the region.
}
@@ -2025,16 +2071,23 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
for (size_t idx = 0; idx < num_regions; idx++) {
ShenandoahHeapRegion* region = _heap->get_region(idx);
if (region->is_trash()) {
- // Trashed regions represent immediate garbage identified by final mark and regions that had been in the collection
- // partition but have not yet been "cleaned up" following update refs.
+ // Trashed regions represent regions that had been in the collection set (or may have been identified as immediate garbage)
+ // but have not yet been "cleaned up". The cset regions are not "trashed" until we have finished update refs.
if (region->is_old()) {
+ // We're going to place this region into the Mutator set. We increment old_trashed_regions because this count represents
+ // regions that the old generation is entitled to without any transfer from young. We do not place this region into
+ // the OldCollector partition at this time. Instead, we let reserve_regions() decide whether to place this region
+ // into the OldCollector partition. Deferring the decision allows reserve_regions() to more effectively pack the
+ // OldCollector regions into high-address memory. We do not adjust capacities of old and young generations at this
+ // time. At the end of finish_rebuild(), the capacities are adjusted based on the results of reserve_regions().
old_trashed_regions++;
} else {
assert(region->is_young(), "Trashed region should be old or young");
young_trashed_regions++;
}
} else if (region->is_old()) {
- // count both humongous and regular regions, but don't count trash (cset) regions.
+ // We count humongous and regular regions as "old regions". We do not count trashed regions that are old. Those
+ // are counted (above) as old_trashed_regions.
old_region_count++;
if (first_old_region > idx) {
first_old_region = idx;
@@ -2048,7 +2101,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
size_t ac = alloc_capacity(region);
if (ac >= PLAB::min_size() * HeapWordSize) {
if (region->is_trash() || !region->is_old()) {
- // Both young and old collected regions (trashed) are placed into the Mutator set
+ // Both young and old collected regions (trashed), including immediate garbage, are placed into the Mutator set
_partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
if (idx < mutator_leftmost) {
mutator_leftmost = idx;
@@ -2111,10 +2164,19 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Region should have been retired");
size_t humongous_waste_bytes = 0;
if (region->is_humongous_start()) {
- oop obj = cast_to_oop(region->bottom());
- size_t byte_size = obj->size() * HeapWordSize;
- size_t region_span = ShenandoahHeapRegion::required_regions(byte_size);
- humongous_waste_bytes = region_span * ShenandoahHeapRegion::region_size_bytes() - byte_size;
+ // Since rebuild does not necessarily happen at a safepoint, a newly allocated humongous object may not have been
+ // fully initialized. Therefore, we cannot safely consult its header.
+ ShenandoahHeapRegion* last_of_humongous_continuation = region;
+ size_t next_idx;
+ for (next_idx = idx + 1; next_idx < num_regions; next_idx++) {
+ ShenandoahHeapRegion* humongous_cont_candidate = _heap->get_region(next_idx);
+ if (!humongous_cont_candidate->is_humongous_continuation()) {
+ break;
+ }
+ last_of_humongous_continuation = humongous_cont_candidate;
+ }
+ // For humongous regions, used() is established while holding the global heap lock so it is reliable here
+ humongous_waste_bytes = ShenandoahHeapRegion::region_size_bytes() - last_of_humongous_continuation->used();
}
if (region->is_old()) {
old_collector_used += region_size_bytes;
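Because a rebuild may run while a freshly allocated humongous object is still being initialized, the code above derives humongous waste from region metadata instead of the object header: it walks the trailing continuation regions and charges only the unused tail of the last one. A standalone sketch of that calculation, using a hypothetical `Region` struct rather than the HotSpot sources:

```
// Sketch of the header-free humongous-waste computation: waste is the unused
// tail of the last continuation region; all earlier regions are fully used.
#include <cassert>
#include <cstddef>
#include <vector>

struct Region {
  bool   humongous_start;
  bool   humongous_continuation;
  size_t used_bytes;              // reliable: established under the heap lock
};

size_t humongous_waste_bytes(const std::vector<Region>& regions, size_t start_idx,
                             size_t region_size_bytes) {
  assert(regions[start_idx].humongous_start);
  size_t last = start_idx;
  for (size_t i = start_idx + 1; i < regions.size(); i++) {
    if (!regions[i].humongous_continuation) break;
    last = i;
  }
  return region_size_bytes - regions[last].used_bytes;
}

int main() {
  const size_t region_size = 4096;
  std::vector<Region> regions(4);               // all false / zero by default
  regions[1] = {true,  false, region_size};     // start region, fully used
  regions[2] = {false, true,  region_size};     // continuation, fully used
  regions[3] = {false, true,  1000};            // last continuation, partly used
  assert(humongous_waste_bytes(regions, 1, region_size) == region_size - 1000);
  return 0;
}
```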
@@ -2183,7 +2245,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
- _partitions.assert_bounds(true);
+ _partitions.assert_bounds();
#ifdef ASSERT
if (_heap->mode()->is_generational()) {
assert(young_affiliated_regions() == _heap->young_generation()->get_affiliated_region_count(), "sanity");
@@ -2221,7 +2283,7 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
- _partitions.assert_bounds(true);
+ _partitions.assert_bounds();
// global_used is unaffected by this transfer
// No need to adjust ranges because humongous regions are not allocatable
@@ -2303,7 +2365,7 @@ void ShenandoahFreeSet::transfer_empty_regions_from_to(ShenandoahFreeSetPartitio
/* UnaffiliatedChangesAreYoungNeutral */ true>();
}
}
- _partitions.assert_bounds(true);
+ _partitions.assert_bounds();
}
// Returns number of regions transferred, adds transferred bytes to var argument bytes_transferred
@@ -2370,7 +2432,7 @@ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_s
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
}
- _partitions.assert_bounds(true);
+ _partitions.assert_bounds();
return transferred_regions;
}
@@ -2445,7 +2507,7 @@ transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPa
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
}
- _partitions.assert_bounds(true);
+ _partitions.assert_bounds();
return transferred_regions;
}
@@ -2507,14 +2569,13 @@ void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_trashed_regions, size_t
first_old_region, last_old_region, old_region_count);
}
-void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_trashed_regions, size_t old_region_count,
- bool have_evacuation_reserves) {
+
+void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count) {
shenandoah_assert_heaplocked();
size_t young_reserve(0), old_reserve(0);
if (_heap->mode()->is_generational()) {
- compute_young_and_old_reserves(young_trashed_regions, old_trashed_regions, have_evacuation_reserves,
- young_reserve, old_reserve);
+ compute_young_and_old_reserves(young_cset_regions, old_cset_regions, young_reserve, old_reserve);
} else {
young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
old_reserve = 0;
@@ -2531,8 +2592,41 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_
// Release the rebuild lock now. What remains in this function is read-only
rebuild_lock()->unlock();
- _partitions.assert_bounds(true);
+ _partitions.assert_bounds();
log_status();
+ if (_heap->mode()->is_generational()) {
+ // Clear the region balance until it is adjusted in preparation for a subsequent GC cycle.
+ _heap->old_generation()->set_region_balance(0);
+ }
+}
+
+
+// Reduce old reserve (when there are insufficient resources to satisfy the original request).
+void ShenandoahFreeSet::reduce_old_reserve(size_t adjusted_old_reserve, size_t requested_old_reserve) {
+ ShenandoahOldGeneration* const old_generation = _heap->old_generation();
+ size_t requested_promoted_reserve = old_generation->get_promoted_reserve();
+ size_t requested_old_evac_reserve = old_generation->get_evacuation_reserve();
+ assert(adjusted_old_reserve < requested_old_reserve, "Only allow reduction");
+ assert(requested_promoted_reserve + requested_old_evac_reserve >= adjusted_old_reserve, "Sanity");
+ size_t delta = requested_old_reserve - adjusted_old_reserve;
+
+ if (requested_promoted_reserve >= delta) {
+ requested_promoted_reserve -= delta;
+ old_generation->set_promoted_reserve(requested_promoted_reserve);
+ } else {
+ delta -= requested_promoted_reserve;
+ requested_promoted_reserve = 0;
+ requested_old_evac_reserve -= delta;
+ old_generation->set_promoted_reserve(requested_promoted_reserve);
+ old_generation->set_evacuation_reserve(requested_old_evac_reserve);
+ }
+}
+
+// Reduce young reserve (when there are insufficient resources to satisfy the original request).
+void ShenandoahFreeSet::reduce_young_reserve(size_t adjusted_young_reserve, size_t requested_young_reserve) {
+ ShenandoahYoungGeneration* const young_generation = _heap->young_generation();
+ assert(adjusted_young_reserve < requested_young_reserve, "Only allow reduction");
+ young_generation->set_evacuation_reserve(adjusted_young_reserve);
}
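reduce_old_reserve() spreads a shortfall across the two old-generation budgets in a fixed order: the promotion reserve absorbs as much of the delta as it can, and only the remainder is taken from the old evacuation reserve. A standalone sketch of that policy with made-up sizes and a hypothetical signature, not the HotSpot code:

```
// Sketch of the reduction policy: promotion reserve absorbs the shortfall
// first, the old evacuation reserve only covers what is left over.
#include <cassert>
#include <cstddef>

void reduce_old_reserve(size_t adjusted, size_t requested,
                        size_t& promoted_reserve, size_t& old_evac_reserve) {
  assert(adjusted < requested);
  size_t delta = requested - adjusted;
  if (promoted_reserve >= delta) {
    promoted_reserve -= delta;
  } else {
    delta -= promoted_reserve;
    promoted_reserve = 0;
    assert(old_evac_reserve >= delta);
    old_evac_reserve -= delta;
  }
}

int main() {
  size_t promoted = 6 /*MB*/, old_evac = 10 /*MB*/;
  // We wanted 16 MB of old reserve but could only set aside 8 MB.
  reduce_old_reserve(8, 16, promoted, old_evac);
  assert(promoted == 0 && old_evac == 8);   // promotion absorbed 6, evac absorbed 2
  return 0;
}
```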
/**
@@ -2549,7 +2643,6 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_
* this value should computed by ShenandoahGenerationalHeap::compute_old_generation_balance().
*/
void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regions, size_t old_trashed_regions,
- bool have_evacuation_reserves,
size_t& young_reserve_result, size_t& old_reserve_result) const {
shenandoah_assert_generational();
shenandoah_assert_heaplocked();
@@ -2566,6 +2659,15 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
old_available += old_trashed_regions * region_size_bytes;
young_unaffiliated_regions += young_trashed_regions;
+ assert(young_capacity >= young_generation->used(),
+ "Young capacity (%zu) must exceed used (%zu)", young_capacity, young_generation->used());
+
+ size_t young_available = young_capacity - young_generation->used();
+ young_available += young_trashed_regions * region_size_bytes;
+
+ assert(young_available >= young_unaffiliated_regions * region_size_bytes, "sanity");
+ assert(old_available >= old_unaffiliated_regions * region_size_bytes, "sanity");
+
// Consult old-region balance to make adjustments to current generation capacities and availability.
// The generation region transfers take place after we rebuild. old_region_balance represents number of regions
// to transfer from old to young.
@@ -2585,6 +2687,7 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
ssize_t xfer_bytes = old_region_balance * checked_cast(region_size_bytes);
old_available -= xfer_bytes;
old_unaffiliated_regions -= old_region_balance;
+ young_available += xfer_bytes;
young_capacity += xfer_bytes;
young_unaffiliated_regions += old_region_balance;
}
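The added `young_available += xfer_bytes;` keeps young availability in step with the old-to-young region transfer implied by the region balance, alongside the existing capacity and unaffiliated-region adjustments. A small standalone sketch of that bookkeeping with made-up numbers, not the HotSpot sources:

```
// Sketch of applying an old-to-young region balance before computing reserves:
// moving N unaffiliated regions shifts capacity and availability by N regions.
#include <cassert>
#include <cstddef>

int main() {
  const size_t region_size = 1024;
  ptrdiff_t old_region_balance = 2;     // transfer 2 regions from old to young

  size_t old_available      = 6 * region_size, old_unaffiliated = 3;
  size_t young_capacity     = 10 * region_size;
  size_t young_available    = 4 * region_size, young_unaffiliated = 2;

  size_t xfer_bytes = static_cast<size_t>(old_region_balance) * region_size;
  old_available      -= xfer_bytes;
  old_unaffiliated   -= static_cast<size_t>(old_region_balance);
  young_capacity     += xfer_bytes;
  young_available    += xfer_bytes;     // the bookkeeping added by this change
  young_unaffiliated += static_cast<size_t>(old_region_balance);

  assert(old_available == 4 * region_size && young_available == 6 * region_size);
  assert(old_unaffiliated == 1 && young_unaffiliated == 4);
  assert(young_capacity == 12 * region_size);
  return 0;
}
```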
@@ -2593,41 +2696,22 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
// promotions and evacuations. The partition between which old memory is reserved for evacuation and
// which is reserved for promotion is enforced using thread-local variables that prescribe intentions for
// each PLAB's available memory.
- if (have_evacuation_reserves) {
- // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
- const size_t promoted_reserve = old_generation->get_promoted_reserve();
- const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
- young_reserve_result = young_generation->get_evacuation_reserve();
- old_reserve_result = promoted_reserve + old_evac_reserve;
- if (old_reserve_result > old_available) {
- // Try to transfer memory from young to old.
- size_t old_deficit = old_reserve_result - old_available;
- size_t old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
- if (young_unaffiliated_regions < old_region_deficit) {
- old_region_deficit = young_unaffiliated_regions;
- }
- young_unaffiliated_regions -= old_region_deficit;
- old_unaffiliated_regions += old_region_deficit;
- old_region_balance -= old_region_deficit;
- old_generation->set_region_balance(old_region_balance);
- }
- } else {
- // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
- young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100;
- // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
- // Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of
- // unaffiliated regions.
- old_reserve_result = old_available;
- }
+ const size_t promoted_reserve = old_generation->get_promoted_reserve();
+ const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
+ young_reserve_result = young_generation->get_evacuation_reserve();
+ old_reserve_result = promoted_reserve + old_evac_reserve;
+ assert(old_reserve_result + young_reserve_result <= old_available + young_available,
+ "Cannot reserve (%zu + %zu + %zu) more than is available: %zu + %zu",
+ promoted_reserve, old_evac_reserve, young_reserve_result, old_available, young_available);
// Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
// free set. Because of this, old_available may not have enough memory to represent the intended reserve. Adjust
// the reserve downward to account for this possibility. This loss is part of the reason why the original budget
// was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
if (old_reserve_result >
- _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
+ _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
old_reserve_result =
- _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
+ _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
}
if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
@@ -2791,19 +2875,17 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
ShenandoahFreeSetPartitionId p = _partitions.membership(idx);
size_t ac = alloc_capacity(r);
assert(ac != region_size_bytes, "Empty regions should be in Mutator partion at entry to reserve_regions");
- if (p == ShenandoahFreeSetPartitionId::Collector) {
- if (ac != region_size_bytes) {
- young_used_regions++;
- young_used_bytes = region_size_bytes - ac;
- }
- // else, unaffiliated region has no used
- } else if (p == ShenandoahFreeSetPartitionId::OldCollector) {
- if (ac != region_size_bytes) {
- old_used_regions++;
- old_used_bytes = region_size_bytes - ac;
- }
- // else, unaffiliated region has no used
- } else if (p == ShenandoahFreeSetPartitionId::NotFree) {
+ assert(p != ShenandoahFreeSetPartitionId::Collector, "Collector regions must be converted from Mutator regions");
+ if (p == ShenandoahFreeSetPartitionId::OldCollector) {
+ assert(!r->is_empty(), "Empty regions should be in Mutator partition at entry to reserve_regions");
+ old_used_regions++;
+ old_used_bytes = region_size_bytes - ac;
+ // This region is within the range for OldCollector partition, as established by find_regions_with_alloc_capacity()
+ assert((_partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= idx) &&
+ (_partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector) >= idx),
+ "find_regions_with_alloc_capacity() should have established this is in range");
+ } else {
+ assert(p == ShenandoahFreeSetPartitionId::NotFree, "sanity");
// This region has been retired
if (r->is_old()) {
old_used_regions++;
@@ -2813,21 +2895,6 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
young_used_regions++;
young_used_bytes += region_size_bytes - ac;
}
- } else {
- assert(p == ShenandoahFreeSetPartitionId::OldCollector, "Not mutator and not NotFree, so must be OldCollector");
- assert(!r->is_empty(), "Empty regions should be in Mutator partition at entry to reserve_regions");
- if (idx < old_collector_low_idx) {
- old_collector_low_idx = idx;
- }
- if (idx > old_collector_high_idx) {
- old_collector_high_idx = idx;
- }
- if (idx < old_collector_empty_low_idx) {
- old_collector_empty_low_idx = idx;
- }
- if (idx > old_collector_empty_high_idx) {
- old_collector_empty_high_idx = idx;
- }
}
}
}
@@ -2856,14 +2923,14 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
_partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, used_to_old_collector);
}
- _partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Collector,
- collector_low_idx, collector_high_idx,
- collector_empty_low_idx, collector_empty_high_idx);
+ _partitions.establish_interval(ShenandoahFreeSetPartitionId::Mutator,
+ mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx);
+ _partitions.establish_interval(ShenandoahFreeSetPartitionId::Collector,
+ collector_low_idx, collector_high_idx, collector_empty_low_idx, collector_empty_high_idx);
+
_partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::OldCollector,
old_collector_low_idx, old_collector_high_idx,
old_collector_empty_low_idx, old_collector_empty_high_idx);
- _partitions.establish_interval(ShenandoahFreeSetPartitionId::Mutator,
- mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx);
recompute_total_used</* UsedByMutatorChanged */ true,
/* UsedByCollectorChanged */ true, /* UsedByOldCollectorChanged */ true>();
@@ -2872,17 +2939,22 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
- _partitions.assert_bounds(true);
+ _partitions.assert_bounds();
if (LogTarget(Info, gc, free)::is_enabled()) {
size_t old_reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);
if (old_reserve < to_reserve_old) {
log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
+ assert(_heap->mode()->is_generational(), "to_old_reserve > 0 implies generational mode");
+ reduce_old_reserve(old_reserve, to_reserve_old);
}
size_t reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector);
if (reserve < to_reserve) {
+ if (_heap->mode()->is_generational()) {
+ reduce_young_reserve(reserve, to_reserve);
+ }
log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
- PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
+ PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
}
}
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
index 364637740f2..4e0aea80a9b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
@@ -224,6 +224,10 @@ public:
void transfer_used_capacity_from_to(ShenandoahFreeSetPartitionId from_partition, ShenandoahFreeSetPartitionId to_partition,
size_t regions);
+ // For recycled region r in the OldCollector partition but possibly not within the interval for empty OldCollector regions,
+ // expand the empty interval to include this region.
+ inline void adjust_interval_for_recycled_old_region_under_lock(ShenandoahHeapRegion* r);
+
const char* partition_membership_name(idx_t idx) const;
// Return the index of the next available region >= start_index, or maximum_regions if not found.
@@ -373,12 +377,7 @@ public:
inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value);
- inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
- shenandoah_assert_heaplocked();
- assert (which_partition < NumPartitions, "selected free set must be valid");
- _used[int(which_partition)] = value;
- _available[int(which_partition)] = _capacity[int(which_partition)] - value;
- }
+ inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value);
inline size_t count(ShenandoahFreeSetPartitionId which_partition) const { return _region_counts[int(which_partition)]; }
@@ -402,7 +401,7 @@ public:
// idx >= leftmost &&
// idx <= rightmost
// }
- void assert_bounds(bool validate_totals) NOT_DEBUG_RETURN;
+ void assert_bounds() NOT_DEBUG_RETURN;
};
// Publicly, ShenandoahFreeSet represents memory that is available to mutator threads. The public capacity(), used(),
@@ -634,7 +633,11 @@ private:
void establish_old_collector_alloc_bias();
size_t get_usable_free_words(size_t free_bytes) const;
+ void reduce_young_reserve(size_t adjusted_young_reserve, size_t requested_young_reserve);
+ void reduce_old_reserve(size_t adjusted_old_reserve, size_t requested_old_reserve);
+
void log_freeset_stats(ShenandoahFreeSetPartitionId partition_id, LogStream& ls);
+
// log status, assuming lock has already been acquired by the caller.
void log_status();
@@ -685,35 +688,46 @@ public:
return _total_global_used;
}
- size_t global_unaffiliated_regions() {
+ // A negative argument results in moving from old_collector to collector
+ void move_unaffiliated_regions_from_collector_to_old_collector(ssize_t regions);
+
+ inline size_t global_unaffiliated_regions() {
return _global_unaffiliated_regions;
}
- size_t young_unaffiliated_regions() {
+ inline size_t young_unaffiliated_regions() {
return _young_unaffiliated_regions;
}
- size_t old_unaffiliated_regions() {
+ inline size_t collector_unaffiliated_regions() {
+ return _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector);
+ }
+
+ inline size_t old_collector_unaffiliated_regions() {
return _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector);
}
- size_t young_affiliated_regions() {
+ inline size_t old_unaffiliated_regions() {
+ return _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector);
+ }
+
+ inline size_t young_affiliated_regions() {
return _young_affiliated_regions;
}
- size_t old_affiliated_regions() {
+ inline size_t old_affiliated_regions() {
return _old_affiliated_regions;
}
- size_t global_affiliated_regions() {
+ inline size_t global_affiliated_regions() {
return _global_affiliated_regions;
}
- size_t total_young_regions() {
+ inline size_t total_young_regions() {
return _total_young_regions;
}
- size_t total_old_regions() {
+ inline size_t total_old_regions() {
return _partitions.get_capacity(ShenandoahFreeSetPartitionId::OldCollector) / ShenandoahHeapRegion::region_size_bytes();
}
@@ -725,36 +739,27 @@ public:
// Examine the existing free set representation, capturing the current state into var arguments:
//
- // young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
- // old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
+ // young_trashed_regions is the number of trashed regions (immediate garbage at final mark, cset regions after update refs)
+ // old_trashed_regions is the number of trashed regions
+ // (immediate garbage at final old mark, cset regions after update refs for mixed evac)
// first_old_region is the index of the first region that is part of the OldCollector set
// last_old_region is the index of the last region that is part of the OldCollector set
// old_region_count is the number of regions in the OldCollector set that have memory available to be allocated
- void prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
+ void prepare_to_rebuild(size_t &young_trashed_regions, size_t &old_trashed_regions,
size_t &first_old_region, size_t &last_old_region, size_t &old_region_count);
// At the end of final mark, but before we begin evacuating, heuristics calculate how much memory is required to
- // hold the results of evacuating to young-gen and to old-gen, and have_evacuation_reserves should be true.
- // These quantities, stored as reserves for their respective generations, are consulted prior to rebuilding
- // the free set (ShenandoahFreeSet) in preparation for evacuation. When the free set is rebuilt, we make sure
- // to reserve sufficient memory in the collector and old_collector sets to hold evacuations.
+ // hold the results of evacuating to young-gen and to old-gen. These quantities, stored in reserves for their
+ // respective generations, are consulted prior to rebuilding the free set (ShenandoahFreeSet) in preparation for
+ // evacuation. When the free set is rebuilt, we make sure to reserve sufficient memory in the collector and
+ // old_collector sets to hold evacuations. Likewise, at the end of update refs, we rebuild the free set in order
+ // to set aside reserves to be consumed during the next GC cycle.
//
- // We also rebuild the free set at the end of GC, as we prepare to idle GC until the next trigger. In this case,
- // have_evacuation_reserves is false because we don't yet know how much memory will need to be evacuated in the
- // next GC cycle. When have_evacuation_reserves is false, the free set rebuild operation reserves for the collector
- // and old_collector sets based on alternative mechanisms, such as ShenandoahEvacReserve, ShenandoahOldEvacReserve, and
- // ShenandoahOldCompactionReserve. In a future planned enhancement, the reserve for old_collector set when the
- // evacuation reserves are unknown, is based in part on anticipated promotion as determined by analysis of live data
- // found during the previous GC pass which is one less than the current tenure age.
- //
- // young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
- // old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
+ // young_trashed_regions is the number of trashed regions (immediate garbage at final mark, cset regions after update refs)
+ // old_trashed_regions is the number of trashed regions
+ // (immediate garbage at final old mark, cset regions after update refs for mixed evac)
// num_old_regions is the number of old-gen regions that have available memory for further allocations (excluding old cset)
- // have_evacuation_reserves is true iff the desired values of young-gen and old-gen evacuation reserves and old-gen
- // promotion reserve have been precomputed (and can be obtained by invoking
- // ->get_evacuation_reserve() or old_gen->get_promoted_reserve()
- void finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t num_old_regions,
- bool have_evacuation_reserves = false);
+ void finish_rebuild(size_t young_trashed_regions, size_t old_trashed_regions, size_t num_old_regions);
// When a region is promoted in place, we add the region's available memory if it is greater than plab_min_size()
// into the old collector partition by invoking this method.
@@ -806,9 +811,18 @@ public:
return _partitions.available_in_locked_for_rebuild(ShenandoahFreeSetPartitionId::Mutator);
}
+ // Use this version of available() if the heap lock is held.
+ inline size_t available_locked() const {
+ return _partitions.available_in(ShenandoahFreeSetPartitionId::Mutator);
+ }
+
inline size_t total_humongous_waste() const { return _total_humongous_waste; }
- inline size_t humongous_waste_in_mutator() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator); }
- inline size_t humongous_waste_in_old() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::OldCollector); }
+ inline size_t humongous_waste_in_mutator() const {
+ return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator);
+ }
+ inline size_t humongous_waste_in_old() const {
+ return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::OldCollector);
+ }
void decrease_humongous_waste_for_regular_bypass(ShenandoahHeapRegion* r, size_t waste);
@@ -874,7 +888,7 @@ public:
// Reserve space for evacuations, with regions reserved for old evacuations placed to the right
// of regions reserved of young evacuations.
- void compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves,
+ void compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions,
size_t &young_reserve_result, size_t &old_reserve_result) const;
};
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
index fa3a7a42209..3c92750cc0c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
@@ -522,6 +522,7 @@ public:
void heap_region_do(ShenandoahHeapRegion* r) override {
if (r->is_trash()) {
r->try_recycle_under_lock();
+ // No need to adjust_interval_for_recycled_old_region. That will be taken care of during freeset rebuild.
}
if (r->is_cset()) {
// Leave affiliation unchanged
@@ -966,6 +967,7 @@ public:
if (r->is_trash()) {
live = 0;
r->try_recycle_under_lock();
+ // No need to adjust_interval_for_recycled_old_region. That will be taken care of during freeset rebuild.
} else {
if (r->is_old()) {
ShenandoahGenerationalFullGC::account_for_region(r, _old_regions, _old_usage, _old_humongous_waste);
@@ -1113,16 +1115,16 @@ void ShenandoahFullGC::phase5_epilog() {
ShenandoahPostCompactClosure post_compact;
heap->heap_region_iterate(&post_compact);
heap->collection_set()->clear();
- size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
- ShenandoahFreeSet* free_set = heap->free_set();
{
- free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
+ ShenandoahFreeSet* free_set = heap->free_set();
+ size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old;
+ free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old);
// We also do not expand old generation size following Full GC because we have scrambled age populations and
// no longer have objects separated by age into distinct regions.
if (heap->mode()->is_generational()) {
ShenandoahGenerationalFullGC::compute_balances();
}
- free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
+ free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old);
}
// Set mark incomplete because the marking bitmaps have been reset except pinned regions.
_generation->set_mark_incomplete();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
index a5d8cca458d..cdc7e1a328a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
@@ -250,6 +250,7 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
ShenandoahOldGeneration* const old_generation = heap->old_generation();
ShenandoahYoungGeneration* const young_generation = heap->young_generation();
+ const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
// During initialization and phase changes, it is more likely that fewer objects die young and old-gen
// memory is not yet full (or is in the process of being replaced). During these times especially, it
@@ -263,15 +264,15 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// First priority is to reclaim the easy garbage out of young-gen.
- // maximum_young_evacuation_reserve is upper bound on memory to be evacuated out of young
- const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
- size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve());
+ // maximum_young_evacuation_reserve is an upper bound on memory to be evacuated into the young Collector Reserve. This is
+ // bounded at the end of the previous GC cycle, based on available memory and the balancing of evacuation to old and young.
+ size_t maximum_young_evacuation_reserve = young_generation->get_evacuation_reserve();
// maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted),
// clamped by the old generation space available.
//
// Here's the algebra.
- // Let SOEP = ShenandoahOldEvacRatioPercent,
+ // Let SOEP = ShenandoahOldEvacPercent,
// OE = old evac,
// YE = young evac, and
// TE = total evac = OE + YE
@@ -283,12 +284,14 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// => OE = YE*SOEP/(100-SOEP)
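+ // (For example, with SOEP = 75, OE may be up to 75/(100-75) = 3 times YE.)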
// We have to be careful in the event that SOEP is set to 100 by the user.
- assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
+ assert(ShenandoahOldEvacPercent <= 100, "Error");
const size_t old_available = old_generation->available();
- const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacRatioPercent == 100) ?
- old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent),
+ const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacPercent == 100) ?
+ old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacPercent) / (100 - ShenandoahOldEvacPercent),
old_available);
+ // In some cases, maximum_old_evacuation_reserve < old_available (when limited by ShenandoahOldEvacPercent).
+ // This limit affects mixed evacuations, but does not affect promotions.
// Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates. Third priority
// is to promote as much as we have room to promote. However, if old-gen memory is in short supply, this means young
@@ -305,10 +308,8 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// evacuation and update-refs, we give emphasis to reclaiming garbage first, wherever that garbage is found.
// Global GC will adjust generation sizes to accommodate the collection set it chooses.
- // Set old_promo_reserve to enforce that no regions are preselected for promotion. Such regions typically
- // have relatively high memory utilization. We still call select_aged_regions() because this will prepare for
- // promotions in place, if relevant.
- old_promo_reserve = 0;
+ // Use remnant of old_available to hold promotions.
+ old_promo_reserve = old_available - maximum_old_evacuation_reserve;
// Dedicate all available old memory to old_evacuation reserve. This may be small, because old-gen is only
// expanded based on an existing mixed evacuation workload at the end of the previous GC cycle. We'll expand
@@ -319,43 +320,48 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote. Prioritize compaction
// over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
old_evacuation_reserve = maximum_old_evacuation_reserve;
- old_promo_reserve = 0;
+ old_promo_reserve = old_available - maximum_old_evacuation_reserve;
} else {
// Make all old-evacuation memory for promotion, but if we can't use it all for promotion, we'll allow some evacuation.
- old_evacuation_reserve = 0;
+ old_evacuation_reserve = old_available - maximum_old_evacuation_reserve;
old_promo_reserve = maximum_old_evacuation_reserve;
}
assert(old_evacuation_reserve <= old_available, "Error");
+
// We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
// So we limit the old-evacuation reserve to unfragmented memory. Even so, old-evacuation is free to fill in nooks and
// crannies within existing partially used regions and it generally tries to do so.
- const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
+ const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * region_size_bytes;
if (old_evacuation_reserve > old_free_unfragmented) {
const size_t delta = old_evacuation_reserve - old_free_unfragmented;
old_evacuation_reserve -= delta;
- // Let promo consume fragments of old-gen memory if not global
- if (!is_global()) {
- old_promo_reserve += delta;
- }
+ // Let promo consume fragments of old-gen memory
+ old_promo_reserve += delta;
}
- // Preselect regions for promotion by evacuation (obtaining the live data to seed promoted_reserve),
- // and identify regions that will promote in place. These use the tenuring threshold.
- const size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve);
- assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");
+ // If is_global(), we let the garbage-first heuristic determine cset membership. Otherwise, we give priority
+ // to tenurable regions by preselecting regions for promotion by evacuation (obtaining the live data to seed promoted_reserve).
+ // This also identifies regions that will be promoted in place. These use the tenuring threshold.
+ const size_t consumed_by_advance_promotion = select_aged_regions(is_global()? 0: old_promo_reserve);
+ assert(consumed_by_advance_promotion <= old_promo_reserve, "Do not promote more than budgeted");
+
+ // The young evacuation reserve can be no larger than young_unaffiliated. Planning to evacuate into partially consumed
+ // young regions is doomed to failure if any of those partially consumed regions is selected for the collection set.
+ size_t young_unaffiliated = young_generation->free_unaffiliated_regions() * region_size_bytes;
// If any regions have been selected for promotion in place, this has the effect of decreasing available within mutator
// and collector partitions, due to padding of remnant memory within each promoted in place region. This will affect
// young_evacuation_reserve but not old_evacuation_reserve or consumed_by_advance_promotion. So recompute.
- young_evacuation_reserve = MIN2(young_evacuation_reserve, young_generation->available_with_reserve());
+ size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_unaffiliated);
// Note that unused old_promo_reserve might not be entirely consumed_by_advance_promotion. Do not transfer this
// to old_evacuation_reserve because this memory is likely very fragmented, and we do not want to increase the likelihood
- // of old evacuation failure.
+ // of old evacuation failure. Leave this memory in the promoted reserve as it may be targeted by opportunistic
+ // promotions (found during evacuation of young regions).
young_generation->set_evacuation_reserve(young_evacuation_reserve);
old_generation->set_evacuation_reserve(old_evacuation_reserve);
- old_generation->set_promoted_reserve(consumed_by_advance_promotion);
+ old_generation->set_promoted_reserve(old_promo_reserve);
// There is no need to expand OLD because all memory used here was set aside at end of previous GC, except in the
// case of a GLOBAL gc. During choose_collection_set() of GLOBAL, old will be expanded on demand.
@@ -363,8 +369,8 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// Having chosen the collection set, adjust the budgets for generational mode based on its composition. Note
// that young_generation->available() now knows about recently discovered immediate garbage.
-//
-void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set) {
+void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
+ ShenandoahCollectionSet* const collection_set, size_t add_regions_to_old) {
shenandoah_assert_generational();
// We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may
// be able to increase regions_available_to_loan
@@ -398,7 +404,8 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
// Leave old_evac_reserve as previously configured
} else if (old_evacuated_committed < old_evacuation_reserve) {
// This happens if the old-gen collection consumes less than full budget.
- log_debug(gc, cset)("Shrinking old evac reserve to match old_evac_commited: " PROPERFMT, PROPERFMTARGS(old_evacuated_committed));
+ log_debug(gc, cset)("Shrinking old evac reserve to match old_evac_commited: " PROPERFMT,
+ PROPERFMTARGS(old_evacuated_committed));
old_evacuation_reserve = old_evacuated_committed;
old_generation->set_evacuation_reserve(old_evacuation_reserve);
}
@@ -409,11 +416,17 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
size_t young_evacuated = collection_set->get_live_bytes_in_untenurable_regions();
size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * double(young_evacuated));
- size_t total_young_available = young_generation->available_with_reserve();
- assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young");
+ size_t total_young_available = young_generation->available_with_reserve() - add_regions_to_old * region_size_bytes;
+ assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate (%zu) more than is available in young (%zu)",
+ young_evacuated_reserve_used, total_young_available);
young_generation->set_evacuation_reserve(young_evacuated_reserve_used);
- size_t old_available = old_generation->available();
+ // We have not yet rebuilt the free set. Some of the memory that is thought to be available within old may no
+ // longer be available if that memory had been free within regions that were selected for the collection set.
+ // Make the necessary adjustments to old_available.
+ size_t old_available =
+ old_generation->available() + add_regions_to_old * region_size_bytes - collection_set->get_old_available_bytes_collected();
+
// Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
// and promotion reserves. Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
// evac and update phases.
@@ -422,21 +435,27 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
if (old_available < old_consumed) {
// This can happen due to round-off errors when adding the results of truncated integer arithmetic.
// We've already truncated old_evacuated_committed. Truncate young_advance_promoted_reserve_used here.
+
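+ // (The 33/32 factor in the assertion below allows up to 1/32 = 3.125% of slack for integer truncation.)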
assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32,
"Round-off errors should be less than 3.125%%, committed: %zu, reserved: %zu",
young_advance_promoted_reserve_used, old_available - old_evacuated_committed);
- young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
+ if (old_available > old_evacuated_committed) {
+ young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
+ } else {
+ young_advance_promoted_reserve_used = 0;
+ old_evacuated_committed = old_available;
+ }
+ // TODO: reserve for full promotion reserve, not just for advance (preselected) promotion
old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
}
assert(old_available >= old_consumed, "Cannot consume (%zu) more than is available (%zu)",
old_consumed, old_available);
size_t excess_old = old_available - old_consumed;
- size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
+ size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions() + add_regions_to_old;
size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
- assert(old_available >= unaffiliated_old,
- "Unaffiliated old (%zu is %zu * %zu) is a subset of old available (%zu)",
- unaffiliated_old, unaffiliated_old_regions, region_size_bytes, old_available);
+ assert(unaffiliated_old >= old_evacuated_committed, "Do not evacuate (%zu) more than unaffiliated old (%zu)",
+ old_evacuated_committed, unaffiliated_old);
// Make sure old_evac_committed is unaffiliated
if (old_evacuated_committed > 0) {
@@ -454,20 +473,22 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
}
// If we find that OLD has excess regions, give them back to YOUNG now to reduce likelihood we run out of allocation
- // runway during evacuation and update-refs.
- size_t regions_to_xfer = 0;
+ // runway during evacuation and update-refs. We may make further adjustments to balance.
+ ssize_t add_regions_to_young = 0;
if (excess_old > unaffiliated_old) {
// we can give back unaffiliated_old (all of unaffiliated is excess)
if (unaffiliated_old_regions > 0) {
- regions_to_xfer = unaffiliated_old_regions;
+ add_regions_to_young = unaffiliated_old_regions;
}
} else if (unaffiliated_old_regions > 0) {
// excess_old < unaffiliated old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
size_t excess_regions = excess_old / region_size_bytes;
- regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
+ add_regions_to_young = MIN2(excess_regions, unaffiliated_old_regions);
}
- if (regions_to_xfer > 0) {
- excess_old -= regions_to_xfer * region_size_bytes;
+
+ if (add_regions_to_young > 0) {
+ assert(excess_old >= add_regions_to_young * region_size_bytes, "Cannot xfer more than excess old");
+ excess_old -= add_regions_to_young * region_size_bytes;
log_debug(gc, ergo)("Before start of evacuation, total_promotion reserve is young_advance_promoted_reserve: %zu "
"plus excess: old: %zu", young_advance_promoted_reserve_used, excess_old);
}
@@ -475,6 +496,7 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
// Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated
// promotions than fit in reserved memory, they will be deferred until a future GC pass.
size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
+
old_generation->set_promoted_reserve(total_promotion_reserve);
old_generation->reset_promoted_expended();
}
@@ -782,17 +804,13 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
ShenandoahCollectionSetPreselector preselector(collection_set, heap->num_regions());
// Find the amount that will be promoted, regions that will be promoted in
- // place, and preselect older regions that will be promoted by evacuation.
+ // place, and preselected older regions that will be promoted by evacuation.
compute_evacuation_budgets(heap);
- // Choose the collection set, including the regions preselected above for
- // promotion into the old generation.
- _heuristics->choose_collection_set(collection_set);
- if (!collection_set->is_empty()) {
- // only make use of evacuation budgets when we are evacuating
- adjust_evacuation_budgets(heap, collection_set);
- }
-
+ // Choose the collection set, including the regions preselected above for promotion into the old generation.
+ size_t add_regions_to_old = _heuristics->choose_collection_set(collection_set);
+ // Even if collection_set->is_empty(), we want to adjust budgets, making reserves available to the mutator.
+ adjust_evacuation_budgets(heap, collection_set, add_regions_to_old);
if (is_global()) {
// We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so
// the remembered set scan can use that to avoid walking into garbage. When the next old mark begins, we will
@@ -816,17 +834,16 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
ShenandoahHeapLocker locker(heap->lock());
- // We are preparing for evacuation. At this time, we ignore cset region tallies.
- size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
- _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
-
+ // We are preparing for evacuation.
+ size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old;
+ _free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old);
if (heap->mode()->is_generational()) {
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
- gen_heap->compute_old_generation_balance(young_cset_regions, old_cset_regions);
+ size_t allocation_runway =
+ gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trashed_regions);
+ gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
}
-
- // Free set construction uses reserve quantities, because they are known to be valid here
- _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true);
+ _free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old);
}
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
index 06cf132f946..d49e3bed5f8 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
@@ -63,9 +63,10 @@ private:
// Compute evacuation budgets prior to choosing collection set.
void compute_evacuation_budgets(ShenandoahHeap* heap);
- // Adjust evacuation budgets after choosing collection set.
+ // Adjust evacuation budgets after choosing collection set. The argument regions_to_xfer represents regions to be
+ // transferred to old based on decisions made in top_off_collection_set().
void adjust_evacuation_budgets(ShenandoahHeap* heap,
- ShenandoahCollectionSet* collection_set);
+ ShenandoahCollectionSet* collection_set, size_t regions_to_xfer);
// Preselect for possible inclusion into the collection set exactly the most
// garbage-dense regions, including those that satisfy criteria 1 & 2 below,
@@ -144,6 +145,22 @@ private:
virtual void prepare_gc();
// Called during final mark, chooses collection set, rebuilds free set.
+ // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
+ // evacuation efforts that are about to begin. In particular:
+ //
+ // old_generation->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
+ // been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage
+ // of the live young-gen memory within the collection set. If there is more data ready to be promoted than
+ // can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
+ // pass.
+ //
+ // old_generation->get_evacuation_reserve() represents the amount of memory within old-gen's available memory that has been
+ // set aside to hold objects evacuated from the old-gen collection set.
+ //
+ // young_generation->get_evacuation_reserve() represents the amount of memory within young-gen's available memory that has
+ // been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value
+ // equals the entire amount of live young-gen memory within the collection set, even though some of this memory
+ // will likely be promoted.
virtual void prepare_regions_and_collection_set(bool concurrent);
// Cancel marking (used by Full collect and when cancelling cycle).
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp
index 78672ee10a5..1b11c696d18 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp
@@ -55,9 +55,6 @@ void ShenandoahGenerationalFullGC::prepare() {
// Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
heap->set_active_generation(heap->global_generation());
- // No need for old_gen->increase_used() as this was done when plabs were allocated.
- heap->reset_generation_reserves();
-
// Full GC supersedes any marking or coalescing in old generation.
heap->old_generation()->cancel_gc();
}
@@ -156,8 +153,11 @@ void ShenandoahGenerationalFullGC::compute_balances() {
// In case this Full GC resulted from degeneration, clear the tally on anticipated promotion.
heap->old_generation()->set_promotion_potential(0);
- // Invoke this in case we are able to transfer memory from OLD to YOUNG.
- heap->compute_old_generation_balance(0, 0);
+
+ // Invoke this in case we are able to transfer memory from OLD to YOUNG
+ size_t allocation_runway =
+ heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0L);
+ heap->compute_old_generation_balance(allocation_runway, 0, 0);
}
ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks,
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
index fa78e02e6af..36ea0b9e497 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
@@ -299,9 +299,9 @@ oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, uint
alloc_from_lab = false;
}
// else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
- // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
+ // We choose not to promote objects smaller than size_threshold by way of shared allocations as this is too
// costly. Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
- // evacuation pass. This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
+ // evacuation pass. This condition is denoted by: is_promotion && has_plab && (size <= size_threshold).
}
#ifdef ASSERT
}
@@ -576,19 +576,18 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {
// Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
// and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
-// xfer_limit, and any surplus is transferred to the young generation.
-//
-// xfer_limit is the maximum we're able to transfer from young to old based on either:
-// 1. an assumption that we will be able to replenish memory "borrowed" from young at the end of collection, or
-// 2. there is sufficient excess in the allocation runway during GC idle cycles
-void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) {
-
+// mutator_xfer_limit, and any surplus is transferred to the young generation. mutator_xfer_limit is
+// the maximum we're able to transfer from young to old. This is called at the end of GC, as we prepare
+// for the idle span that precedes the next GC.
+void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_xfer_limit,
+ size_t old_trashed_regions, size_t young_trashed_regions) {
+ shenandoah_assert_heaplocked();
// We can limit the old reserve to the size of anticipated promotions:
// max_old_reserve is an upper bound on memory evacuated from old and promoted to old,
// clamped by the old generation space available.
//
// Here's the algebra.
- // Let SOEP = ShenandoahOldEvacRatioPercent,
+ // Let SOEP = ShenandoahOldEvacPercent,
// OE = old evac,
// YE = young evac, and
// TE = total evac = OE + YE
@@ -600,81 +599,171 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_
// => OE = YE*SOEP/(100-SOEP)
// We have to be careful in the event that SOEP is set to 100 by the user.
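+ // (When SOEP == 100, the divisor (100 - SOEP) would be zero, so that case is special-cased below.)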
- assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
- const size_t old_available = old_generation()->available();
- // The free set will reserve this amount of memory to hold young evacuations
- const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;
-
- // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit.
-
- const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve;
- const double max_old_reserve = ((ShenandoahOldEvacRatioPercent == 100)? bound_on_old_reserve:
- MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent)
- / double(100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve));
-
+ assert(ShenandoahOldEvacPercent <= 100, "Error");
const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+ ShenandoahOldGeneration* old_gen = old_generation();
+ size_t old_capacity = old_gen->max_capacity();
+ size_t old_usage = old_gen->used(); // includes humongous waste
+ size_t old_available = ((old_capacity >= old_usage)? old_capacity - old_usage: 0) + old_trashed_regions * region_size_bytes;
+
+ ShenandoahYoungGeneration* young_gen = young_generation();
+ size_t young_capacity = young_gen->max_capacity();
+ size_t young_usage = young_gen->used(); // includes humongous waste
+ size_t young_available = ((young_capacity >= young_usage)? young_capacity - young_usage: 0);
+ size_t freeset_available = free_set()->available_locked();
+ if (young_available > freeset_available) {
+ young_available = freeset_available;
+ }
+ young_available += young_trashed_regions * region_size_bytes;
+
+ // The free set will reserve this amount of memory to hold young evacuations (initialized to the ideal reserve)
+ size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;
+
+ // If ShenandoahOldEvacPercent equals 100, max_old_reserve is limited only by mutator_xfer_limit and young_reserve
+ const size_t bound_on_old_reserve = ((old_available + mutator_xfer_limit + young_reserve) * ShenandoahOldEvacPercent) / 100;
+ size_t proposed_max_old = ((ShenandoahOldEvacPercent == 100)?
+ bound_on_old_reserve:
+ MIN2((young_reserve * ShenandoahOldEvacPercent) / (100 - ShenandoahOldEvacPercent),
+ bound_on_old_reserve));
+ if (young_reserve > young_available) {
+ young_reserve = young_available;
+ }
+
// Decide how much old space we should reserve for a mixed collection
- double reserve_for_mixed = 0;
- if (old_generation()->has_unprocessed_collection_candidates()) {
+ size_t reserve_for_mixed = 0;
+ const size_t old_fragmented_available =
+ old_available - (old_generation()->free_unaffiliated_regions() + old_trashed_regions) * region_size_bytes;
+
+ if (old_fragmented_available > proposed_max_old) {
+ // After we've promoted regions in place, there may be an abundance of old-fragmented available memory,
+ // even more than the desired percentage for old reserve. We cannot transfer these fragmented regions back
+ // to young. Instead we make the best of the situation by using this fragmented memory for both promotions
+ // and evacuations.
+ proposed_max_old = old_fragmented_available;
+ }
+ size_t reserve_for_promo = old_fragmented_available;
+ const size_t max_old_reserve = proposed_max_old;
+ const size_t mixed_candidate_live_memory = old_generation()->unprocessed_collection_candidates_live_memory();
+ const bool doing_mixed = (mixed_candidate_live_memory > 0);
+ if (doing_mixed) {
// We want this much memory to be unfragmented in order to reliably evacuate old. This is conservative because we
// may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
- const double max_evac_need =
- (double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste);
+ const size_t max_evac_need = (size_t) (mixed_candidate_live_memory * ShenandoahOldEvacWaste);
assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
"Unaffiliated available must be less than total available");
- const double old_fragmented_available =
- double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes);
- reserve_for_mixed = max_evac_need + old_fragmented_available;
- if (reserve_for_mixed > max_old_reserve) {
- reserve_for_mixed = max_old_reserve;
+
+ // We prefer to evacuate all of mixed into unfragmented memory, and will expand old in order to do so, unless
+ // we already have too much fragmented available memory in old.
+ reserve_for_mixed = max_evac_need;
+ if (reserve_for_mixed + reserve_for_promo > max_old_reserve) {
+ // In this case, we'll allow old-evac to target some of the fragmented old memory.
+ size_t excess_reserves = (reserve_for_mixed + reserve_for_promo) - max_old_reserve;
+ if (reserve_for_promo > excess_reserves) {
+ reserve_for_promo -= excess_reserves;
+ } else {
+ excess_reserves -= reserve_for_promo;
+ reserve_for_promo = 0;
+ reserve_for_mixed -= excess_reserves;
+ }
}
}
- // Decide how much space we should reserve for promotions from young
- size_t reserve_for_promo = 0;
+ // Decide how much additional space we should reserve for promotions from young. We give priority to mixed evacuations
+ // over promotions.
const size_t promo_load = old_generation()->get_promotion_potential();
const bool doing_promotions = promo_load > 0;
if (doing_promotions) {
- // We're promoting and have a bound on the maximum amount that can be promoted
- assert(max_old_reserve >= reserve_for_mixed, "Sanity");
- const size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
- reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions);
+ // We've already set aside all of the fragmented available memory within old-gen to represent old objects
+ // to be promoted from young generation. promo_load represents the memory that we anticipate to be promoted
+ // from regions that have reached tenure age. Ideally, we will always use fragmented old-gen memory
+ // to hold individually promoted objects and will use unfragmented old-gen memory to represent the old-gen
+ // evacuation workload.
+
+ // We're promoting and have an estimate of memory to be promoted from aged regions
+ assert(max_old_reserve >= (reserve_for_mixed + reserve_for_promo), "Sanity");
+ const size_t available_for_additional_promotions = max_old_reserve - (reserve_for_mixed + reserve_for_promo);
+ size_t promo_need = (size_t)(promo_load * ShenandoahPromoEvacWaste);
+ if (promo_need > reserve_for_promo) {
+ reserve_for_promo += MIN2(promo_need - reserve_for_promo, available_for_additional_promotions);
+ }
+ // We've already reserved all the memory required for the promo_load, and possibly more. The excess
+ // can be consumed by objects promoted from regions that have not yet reached tenure age.
}
- // This is the total old we want to ideally reserve
- const size_t old_reserve = reserve_for_mixed + reserve_for_promo;
- assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");
+ // This is the total old we want to reserve (initialized to the ideal reserve)
+ size_t old_reserve = reserve_for_mixed + reserve_for_promo;
// We now check if the old generation is running a surplus or a deficit.
- const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
- if (max_old_available >= old_reserve) {
- // We are running a surplus, so the old region surplus can go to young
- const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes;
- const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
- const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions);
- old_generation()->set_region_balance(checked_cast(old_region_surplus));
- } else {
- // We are running a deficit which we'd like to fill from young.
- // Ignore that this will directly impact young_generation()->max_capacity(),
- // indirectly impacting young_reserve and old_reserve. These computations are conservative.
- // Note that deficit is rounded up by one region.
- const size_t old_need = (old_reserve - max_old_available + region_size_bytes - 1) / region_size_bytes;
- const size_t max_old_region_xfer = old_xfer_limit / region_size_bytes;
+ size_t old_region_deficit = 0;
+ size_t old_region_surplus = 0;
- // Round down the regions we can transfer from young to old. If we're running short
- // on young-gen memory, we restrict the xfer. Old-gen collection activities will be
- // curtailed if the budget is restricted.
- const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer);
+ size_t mutator_region_xfer_limit = mutator_xfer_limit / region_size_bytes;
+ // align the mutator_xfer_limit on region size
+ mutator_xfer_limit = mutator_region_xfer_limit * region_size_bytes;
+
+ if (old_available >= old_reserve) {
+ // We are running a surplus, so the old region surplus can go to young
+ const size_t old_surplus = old_available - old_reserve;
+ old_region_surplus = old_surplus / region_size_bytes;
+ const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_trashed_regions;
+ old_region_surplus = MIN2(old_region_surplus, unaffiliated_old_regions);
+ old_generation()->set_region_balance(checked_cast(old_region_surplus));
+ } else if (old_available + mutator_xfer_limit >= old_reserve) {
+ // Mutator's xfer limit is sufficient to satisfy our need: transfer all memory from there
+ size_t old_deficit = old_reserve - old_available;
+ old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
+ old_generation()->set_region_balance(0 - checked_cast(old_region_deficit));
+ } else {
+ // We'll try to xfer from both mutator excess and from young collector reserve
+ size_t available_reserves = old_available + young_reserve + mutator_xfer_limit;
+ size_t old_entitlement = (available_reserves * ShenandoahOldEvacPercent) / 100;
+
+ // Round old_entitlement down to nearest multiple of regions to be transferred to old
+ size_t entitled_xfer = old_entitlement - old_available;
+ entitled_xfer = region_size_bytes * (entitled_xfer / region_size_bytes);
+ size_t unaffiliated_young_regions = young_generation()->free_unaffiliated_regions();
+ size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
+ if (entitled_xfer > unaffiliated_young_memory) {
+ entitled_xfer = unaffiliated_young_memory;
+ }
+ old_entitlement = old_available + entitled_xfer;
+ if (old_entitlement < old_reserve) {
+ // There's not enough memory to satisfy our desire. Scale back our old-gen intentions.
+ size_t budget_overrun = old_reserve - old_entitlement;
+ if (reserve_for_promo > budget_overrun) {
+ reserve_for_promo -= budget_overrun;
+ old_reserve -= budget_overrun;
+ } else {
+ budget_overrun -= reserve_for_promo;
+ reserve_for_promo = 0;
+ reserve_for_mixed = (reserve_for_mixed > budget_overrun)? reserve_for_mixed - budget_overrun: 0;
+ old_reserve = reserve_for_promo + reserve_for_mixed;
+ }
+ }
+
+ // Because of adjustments above, old_reserve may be smaller now than it was when we tested the branch
+ // condition above: "(old_available + mutator_xfer_limit >= old_reserve)
+ // Therefore, we do NOT know that: mutator_xfer_limit < old_reserve - old_available
+
+ size_t old_deficit = old_reserve - old_available;
+ old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
+
+ // Shrink young_reserve to account for loan to old reserve
+ const size_t reserve_xfer_regions = old_region_deficit - mutator_region_xfer_limit;
+ young_reserve -= reserve_xfer_regions * region_size_bytes;
old_generation()->set_region_balance(0 - checked_cast(old_region_deficit));
}
-}
-void ShenandoahGenerationalHeap::reset_generation_reserves() {
- ShenandoahHeapLocker locker(lock());
- young_generation()->set_evacuation_reserve(0);
- old_generation()->set_evacuation_reserve(0);
- old_generation()->set_promoted_reserve(0);
+ assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both");
+ assert(young_reserve + reserve_for_mixed + reserve_for_promo <= old_available + young_available,
+ "Cannot reserve more memory than is available: %zu + %zu + %zu <= %zu + %zu",
+ young_reserve, reserve_for_mixed, reserve_for_promo, old_available, young_available);
+
+ // deficit/surplus adjustments to generation sizes will precede rebuild
+ young_generation()->set_evacuation_reserve(young_reserve);
+ old_generation()->set_evacuation_reserve(reserve_for_mixed);
+ old_generation()->set_promoted_reserve(reserve_for_promo);
}
void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {
@@ -1015,10 +1104,6 @@ void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {
void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
shenandoah_assert_heaplocked_or_safepoint();
- // In case degeneration interrupted concurrent evacuation or update references, we need to clean up
- // transient state. Otherwise, these actions have no effect.
- reset_generation_reserves();
-
if (!old_generation()->is_parsable()) {
ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
coalesce_and_fill_old_regions(false);
@@ -1036,7 +1121,6 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
// throw off the heuristics.
entry_global_coalesce_and_fill();
}
- reset_generation_reserves();
}
void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp
index a2ae4a68cd0..719bae52a83 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp
@@ -136,7 +136,7 @@ public:
void reset_generation_reserves();
// Computes the optimal size for the old generation, represented as a surplus or deficit of old regions
- void compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions);
+ void compute_old_generation_balance(size_t old_xfer_limit, size_t old_trashed_regions, size_t young_trashed_regions);
// Balances generations, coalesces and fills old regions if necessary
void complete_degenerated_cycle();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index 683e2959a92..ef99bd98c93 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -425,20 +425,29 @@ jint ShenandoahHeap::initialize() {
_affiliations[i] = ShenandoahAffiliation::FREE;
}
+
+ if (mode()->is_generational()) {
+ size_t young_reserve = (soft_max_capacity() * ShenandoahEvacReserve) / 100;
+ young_generation()->set_evacuation_reserve(young_reserve);
+ old_generation()->set_evacuation_reserve((size_t) 0);
+ old_generation()->set_promoted_reserve((size_t) 0);
+ }
+
_free_set = new ShenandoahFreeSet(this, _num_regions);
post_initialize_heuristics();
+
// We are initializing free set. We ignore cset region tallies.
- size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
- _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
+ size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old;
+ _free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old);
if (mode()->is_generational()) {
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
// We cannot call
// gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions)
// until after the heap is fully initialized. So we make up a safe value here.
size_t allocation_runway = InitialHeapSize / 2;
- gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
+ gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
}
- _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
+ _free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old);
}
if (AlwaysPreTouch) {
@@ -2521,13 +2530,10 @@ void ShenandoahHeap::final_update_refs_update_region_states() {
parallel_heap_region_iterate(&cl);
}
-void ShenandoahHeap::rebuild_free_set(bool concurrent) {
- ShenandoahGCPhase phase(concurrent ?
- ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
- ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
+void ShenandoahHeap::rebuild_free_set_within_phase() {
ShenandoahHeapLocker locker(lock());
- size_t young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count;
- _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
+ size_t young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count;
+ _free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count);
// If there are no old regions, first_old_region will be greater than last_old_region
assert((first_old_region > last_old_region) ||
((last_old_region + 1 - first_old_region >= old_region_count) &&
@@ -2546,19 +2552,11 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
// available for transfer to old. Note that transfer of humongous regions does not impact available.
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
size_t allocation_runway =
- gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
- gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
-
- // Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available
- // memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular
- // regions in place when many of these regular regions have an abundant amount of available memory within them.
- // Fragmentation will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
- //
- // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
- // within partially consumed regions of memory.
+ gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trashed_regions);
+ gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
}
// Rebuild free set based on adjusted generation sizes.
- _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
+ _free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, old_region_count);
if (mode()->is_generational()) {
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
@@ -2567,6 +2565,13 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
}
}
+void ShenandoahHeap::rebuild_free_set(bool concurrent) {
+ ShenandoahGCPhase phase(concurrent ?
+ ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
+ ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
+ rebuild_free_set_within_phase();
+}
+
bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
size_t slice = r->index() / _bitmap_regions_per_slice;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index 65e3803627c..174001170f4 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -481,7 +481,9 @@ private:
void rendezvous_threads(const char* name);
void recycle_trash();
public:
+ // The following two functions rebuild the free set at the end of GC, in preparation for an idle phase.
void rebuild_free_set(bool concurrent);
+ void rebuild_free_set_within_phase();
void notify_gc_progress();
void notify_gc_no_progress();
size_t get_gc_no_progress_count() const;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
index 3cd5cdd2ec3..6bb8382de0a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
@@ -75,6 +75,7 @@ ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool c
_plab_allocs(0),
_live_data(0),
_critical_pins(0),
+ _mixed_candidate_garbage_words(0),
_update_watermark(start),
_age(0),
#ifdef SHENANDOAH_CENSUS_NOISE
@@ -565,6 +566,7 @@ void ShenandoahHeapRegion::recycle_internal() {
assert(_recycling.is_set() && is_trash(), "Wrong state");
ShenandoahHeap* heap = ShenandoahHeap::heap();
+ _mixed_candidate_garbage_words = 0;
set_top(bottom());
clear_live_data();
reset_alloc_metadata();
@@ -593,6 +595,8 @@ void ShenandoahHeapRegion::try_recycle_under_lock() {
_recycling.unset();
} else {
// Ensure recycling is unset before returning to mutator to continue memory allocation.
+ // Otherwise, the mutator might see the region as fully recycled and might change its affiliation only to have
+ // the racing GC worker thread overwrite its affiliation to FREE.
while (_recycling.is_set()) {
if (os::is_MP()) {
SpinPause();
@@ -603,6 +607,8 @@ void ShenandoahHeapRegion::try_recycle_under_lock() {
}
}
+// Note that return from try_recycle() does not mean the region has been recycled. It only means that
+// some GC worker thread has taken responsibility to recycle the region, eventually.
void ShenandoahHeapRegion::try_recycle() {
shenandoah_assert_not_heaplocked();
if (is_trash() && _recycling.try_set()) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
index cf0dc5476d0..9da2816e2c9 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
@@ -43,6 +43,7 @@ class ShenandoahHeapRegion {
friend class VMStructs;
friend class ShenandoahHeapRegionStateConstant;
private:
+
/*
Region state is described by a state machine. Transitions are guarded by
heap lock, which allows changing the state of several regions atomically.
@@ -259,6 +260,8 @@ private:
volatile size_t _live_data;
volatile size_t _critical_pins;
+ size_t _mixed_candidate_garbage_words;
+
HeapWord* volatile _update_watermark;
uint _age;
@@ -398,6 +401,14 @@ public:
// above TAMS.
inline size_t get_live_data_words() const;
+ inline size_t get_mixed_candidate_live_data_bytes() const;
+ inline size_t get_mixed_candidate_live_data_words() const;
+
+ inline void capture_mixed_candidate_garbage();
+
+ // Returns garbage by calculating the difference between used() and the live data. The value returned is only
+ // meaningful immediately following completion of marking. If there have been subsequent allocations in this region,
+ // use a different approach to determine garbage, such as (used() - get_mixed_candidate_live_data_bytes())
inline size_t garbage() const;
void print_on(outputStream* st) const;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
index b9304ee9daa..be982433885 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
@@ -163,6 +163,23 @@ inline size_t ShenandoahHeapRegion::get_live_data_bytes() const {
return get_live_data_words() * HeapWordSize;
}
+inline size_t ShenandoahHeapRegion::get_mixed_candidate_live_data_bytes() const {
+ shenandoah_assert_heaplocked_or_safepoint();
+ assert(used() >= _mixed_candidate_garbage_words * HeapWordSize, "used must be at least garbage");
+ return used() - _mixed_candidate_garbage_words * HeapWordSize;
+}
+
+inline size_t ShenandoahHeapRegion::get_mixed_candidate_live_data_words() const {
+ shenandoah_assert_heaplocked_or_safepoint();
+ assert(used() >= _mixed_candidate_garbage_words * HeapWordSize, "used must be at least garbage");
+ return used() / HeapWordSize - _mixed_candidate_garbage_words;
+}
+
+inline void ShenandoahHeapRegion::capture_mixed_candidate_garbage() {
+ shenandoah_assert_heaplocked_or_safepoint();
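+ // garbage() reports bytes; store the captured value as words.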
+ _mixed_candidate_garbage_words = garbage() / HeapWordSize;
+}
+
inline bool ShenandoahHeapRegion::has_live() const {
return get_live_data_words() != 0;
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
index a44a831ef3d..ff441a0c868 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
@@ -128,8 +128,6 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) {
// the space. This would be the last action if there is nothing to evacuate.
entry_cleanup_early();
- heap->free_set()->log_status_under_lock();
-
assert(!heap->is_concurrent_strong_root_in_progress(), "No evacuations during old gc.");
// We must execute this vm operation if we completed final mark. We cannot
@@ -138,7 +136,10 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) {
// collection.
heap->concurrent_final_roots();
- size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0);
- heap->compute_old_generation_balance(allocation_runway, 0);
+ // After concurrent old marking finishes, we reclaim immediate garbage. Further, we may also want to expand OLD in order
+ // to make room for anticipated promotions and/or for mixed evacuations. Mixed evacuations are especially likely to
+ // follow the end of OLD marking.
+ heap->rebuild_free_set_within_phase();
+ heap->free_set()->log_status_under_lock();
return true;
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
index c795eda3d96..aed768b9db1 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
@@ -427,8 +427,7 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
size_t allocation_runway =
gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trash_regions);
- gen_heap->compute_old_generation_balance(allocation_runway, old_trash_regions);
-
+ gen_heap->compute_old_generation_balance(allocation_runway, old_trash_regions, young_trash_regions);
heap->free_set()->finish_rebuild(young_trash_regions, old_trash_regions, num_old);
}
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
index 90c1458ac97..633d2c9f617 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
@@ -66,8 +66,8 @@ private:
// remaining in a PLAB when it is retired.
size_t _promoted_expended;
- // Represents the quantity of live bytes we expect to promote during the next evacuation
- // cycle. This value is used by the young heuristic to trigger mixed collections.
+ // Represents the quantity of live bytes we expect to promote during the next GC cycle, either by
+ // evacuation or by promote-in-place. This value is used by the young heuristic to trigger mixed collections.
// It is also used when computing the optimum size for the old generation.
size_t _promotion_potential;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
index a3c96a7d53b..05af25f13ad 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
@@ -243,8 +243,7 @@ HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, con
#ifdef ASSERT
assert(ShenandoahHeap::heap()->mode()->is_generational(), "Do not use in non-generational mode");
assert(region->is_old(), "Do not use for young regions");
- // For HumongousRegion:s it's more efficient to jump directly to the
- // start region.
+ // For humongous regions it's more efficient to jump directly to the start region.
assert(!region->is_humongous(), "Use region->humongous_start_region() instead");
#endif
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
index 543df2422c0..0cc6d4c6ed4 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
@@ -420,7 +420,14 @@ public:
// span is the total memory affiliated with these stats (some of which is in use and other is available)
size_t span() const { return _regions * ShenandoahHeapRegion::region_size_bytes(); }
- size_t non_trashed_span() const { return (_regions - _trashed_regions) * ShenandoahHeapRegion::region_size_bytes(); }
+ size_t non_trashed_span() const {
+ assert(_regions >= _trashed_regions, "sanity");
+ return (_regions - _trashed_regions) * ShenandoahHeapRegion::region_size_bytes();
+ }
+ size_t non_trashed_committed() const {
+ assert(_committed >= _trashed_regions * ShenandoahHeapRegion::region_size_bytes(), "sanity");
+ return _committed - (_trashed_regions * ShenandoahHeapRegion::region_size_bytes());
+ }
};
class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
index 254483d1923..3eb1a06a911 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
@@ -400,27 +400,20 @@
"reserve/waste is incorrect, at the risk that application " \
"runs out of memory too early.") \
\
- product(uintx, ShenandoahOldEvacRatioPercent, 75, EXPERIMENTAL, \
- "The maximum proportion of evacuation from old-gen memory, " \
- "expressed as a percentage. The default value 75 denotes that " \
- "no more than 75% of the collection set evacuation workload may " \
- "be towards evacuation of old-gen heap regions. This limits both "\
- "the promotion of aged regions and the compaction of existing " \
- "old regions. A value of 75 denotes that the total evacuation " \
- "work may increase to up to four times the young gen evacuation " \
- "work. A larger value allows quicker promotion and allows " \
- "a smaller number of mixed evacuations to process " \
- "the entire list of old-gen collection candidates at the cost " \
- "of an increased disruption of the normal cadence of young-gen " \
- "collections. A value of 100 allows a mixed evacuation to " \
- "focus entirely on old-gen memory, allowing no young-gen " \
- "regions to be collected, likely resulting in subsequent " \
- "allocation failures because the allocation pool is not " \
- "replenished. A value of 0 allows a mixed evacuation to " \
- "focus entirely on young-gen memory, allowing no old-gen " \
- "regions to be collected, likely resulting in subsequent " \
- "promotion failures and triggering of stop-the-world full GC " \
- "events.") \
+ product(uintx, ShenandoahOldEvacPercent, 75, EXPERIMENTAL, \
+ "The maximum evacuation to old-gen expressed as a percent of " \
+ "the total live memory within the collection set. With the " \
+          "default setting, if the collection set evacuates X, no more than " \
+ "75% of X may hold objects evacuated from old or promoted to " \
+ "old from young. A value of 100 allows the entire collection " \
+          "set to be composed of old-gen regions and young regions that " \
+ "have reached the tenure age. Larger values allow fewer mixed " \
+ "evacuations to reclaim all the garbage from old. Smaller " \
+ "values result in less variation in GC cycle times between " \
+          "young and mixed cycles. A value of 0 prevents mixed " \
+          "evacuations from running and blocks promotion of aged regions " \
+ "by evacuation. Setting the value to 0 does not prevent " \
+ "regions from being promoted in place.") \
range(0,100) \
\
product(bool, ShenandoahEvacTracking, false, DIAGNOSTIC, \
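To make the percent semantics of the renamed flag concrete, here is a minimal worked example, not the collector's actual budgeting code; the function and variable names are hypothetical. With the default of 75, old-gen evacuation (old-to-old compaction plus promotion by copying) is capped at 75% of the total live bytes selected for evacuation.

```
#include <cstddef>
#include <cstdio>

// Hypothetical helper, for illustration only: cap old-gen evacuation at
// old_evac_percent percent of the live bytes in the collection set.
// Integer division truncates, which is acceptable for a cap.
static std::size_t max_old_evac_bytes(std::size_t total_cset_live_bytes,
                                      unsigned old_evac_percent /* 0..100 */) {
  return total_cset_live_bytes / 100 * old_evac_percent;
}

int main() {
  // A 400 MB collection set with the default of 75 allows at most 300 MB
  // of old-gen evacuation; the rest must be young-to-young evacuation.
  const std::size_t cset = std::size_t(400) * 1024 * 1024;
  std::printf("old-gen evacuation cap: %zu bytes\n",
              max_old_evac_bytes(cset, 75));
  return 0;
}
```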
diff --git a/src/hotspot/share/gc/z/vmStructs_z.hpp b/src/hotspot/share/gc/z/vmStructs_z.hpp
index a88dae188d3..de32b964a51 100644
--- a/src/hotspot/share/gc/z/vmStructs_z.hpp
+++ b/src/hotspot/share/gc/z/vmStructs_z.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -111,7 +111,6 @@ typedef ZValue ZPerNUMAZPartition;
\
nonstatic_field(ZForwarding, _virtual, const ZVirtualMemory) \
nonstatic_field(ZForwarding, _object_alignment_shift, const size_t) \
- volatile_nonstatic_field(ZForwarding, _ref_count, int) \
nonstatic_field(ZForwarding, _entries, const ZAttachedArrayForForwarding) \
nonstatic_field(ZForwardingEntry, _entry, uint64_t) \
nonstatic_field(ZAttachedArrayForForwarding, _length, const size_t)
diff --git a/src/hotspot/share/gc/z/zAbort.cpp b/src/hotspot/share/gc/z/zAbort.cpp
index 3310793f730..82523ddebe8 100644
--- a/src/hotspot/share/gc/z/zAbort.cpp
+++ b/src/hotspot/share/gc/z/zAbort.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,10 +22,9 @@
*/
#include "gc/z/zAbort.hpp"
-#include "runtime/atomicAccess.hpp"
-volatile bool ZAbort::_should_abort = false;
+Atomic<bool> ZAbort::_should_abort{};
void ZAbort::abort() {
- AtomicAccess::store(&_should_abort, true);
+ _should_abort.store_relaxed(true);
}
diff --git a/src/hotspot/share/gc/z/zAbort.hpp b/src/hotspot/share/gc/z/zAbort.hpp
index 925b0a79ac3..64633752d5d 100644
--- a/src/hotspot/share/gc/z/zAbort.hpp
+++ b/src/hotspot/share/gc/z/zAbort.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,11 @@
#define SHARE_GC_Z_ZABORT_HPP
#include "memory/allStatic.hpp"
+#include "runtime/atomic.hpp"
class ZAbort : public AllStatic {
private:
- static volatile bool _should_abort;
+  static Atomic<bool> _should_abort;
public:
static bool should_abort();
diff --git a/src/hotspot/share/gc/z/zAbort.inline.hpp b/src/hotspot/share/gc/z/zAbort.inline.hpp
index 37503e25f70..856179e9d2a 100644
--- a/src/hotspot/share/gc/z/zAbort.inline.hpp
+++ b/src/hotspot/share/gc/z/zAbort.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,8 @@
#include "gc/z/zAbort.hpp"
-#include "runtime/atomicAccess.hpp"
-
inline bool ZAbort::should_abort() {
- return AtomicAccess::load(&_should_abort);
+ return _should_abort.load_relaxed();
}
#endif // SHARE_GC_Z_ZABORT_INLINE_HPP
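The three zAbort hunks above replace a raw `volatile bool` accessed through `AtomicAccess::load`/`store` with an `Atomic<bool>` member whose accessors name their memory ordering. As a rough sketch only (HotSpot's real `Atomic<T>` in runtime/atomic.hpp is not reproduced here), a wrapper exposing just the operations visible in this patch could look like this in portable C++:

```
#include <atomic>
#include <cstdint>

// Sketch of an Atomic<T>-style wrapper mirroring the calls used in the diff:
// load_relaxed, load_acquire, store_relaxed, compare_set, fetch_then_add.
template <typename T>
class Atomic {
  std::atomic<T> _value{};

public:
  T load_relaxed() const { return _value.load(std::memory_order_relaxed); }
  T load_acquire() const { return _value.load(std::memory_order_acquire); }
  void store_relaxed(T value) { _value.store(value, std::memory_order_relaxed); }

  // Returns true iff the value was changed from 'expected' to 'desired'.
  bool compare_set(T expected, T desired) {
    return _value.compare_exchange_strong(expected, desired);
  }

  // Returns the previous value; T must be integral for fetch_add.
  T fetch_then_add(T add) { return _value.fetch_add(add); }
};

// Usage in the style of the zAbort change:
static Atomic<bool> _should_abort{};

void abort_collection() { _should_abort.store_relaxed(true); }
bool should_abort()     { return _should_abort.load_relaxed(); }
```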
diff --git a/src/hotspot/share/gc/z/zArray.hpp b/src/hotspot/share/gc/z/zArray.hpp
index 2c2f8a5dbfb..d39def4096e 100644
--- a/src/hotspot/share/gc/z/zArray.hpp
+++ b/src/hotspot/share/gc/z/zArray.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#include "cppstdlib/type_traits.hpp"
#include "memory/allocation.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "utilities/growableArray.hpp"
@@ -78,7 +78,9 @@ public:
template <typename T, bool Parallel>
class ZArrayIteratorImpl : public StackObj {
private:
- size_t _next;
+  using NextType = std::conditional_t<Parallel, Atomic<size_t>, size_t>;
+
+ NextType _next;
const size_t _end;
const T* const _array;
diff --git a/src/hotspot/share/gc/z/zArray.inline.hpp b/src/hotspot/share/gc/z/zArray.inline.hpp
index 9e2bc19118e..15caa54ebab 100644
--- a/src/hotspot/share/gc/z/zArray.inline.hpp
+++ b/src/hotspot/share/gc/z/zArray.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,6 @@
#include "gc/z/zArray.hpp"
#include "gc/z/zLock.inline.hpp"
-#include "runtime/atomicAccess.hpp"
template <typename T>
ZArraySlice<T>::ZArraySlice(T* data, int len)
@@ -130,7 +129,7 @@ inline bool ZArrayIteratorImpl<T, Parallel>::next_serial(size_t* index) {
template <typename T, bool Parallel>
inline bool ZArrayIteratorImpl<T, Parallel>::next_parallel(size_t* index) {
- const size_t claimed_index = AtomicAccess::fetch_then_add(&_next, 1u, memory_order_relaxed);
+ const size_t claimed_index = _next.fetch_then_add(1u, memory_order_relaxed);
if (claimed_index < _end) {
*index = claimed_index;
@@ -177,7 +176,7 @@ inline bool ZArrayIteratorImpl<T, Parallel>::next_if(T* elem, Function predicate
template <typename T, bool Parallel>
inline bool ZArrayIteratorImpl<T, Parallel>::next_index(size_t* index) {
- if (Parallel) {
+ if constexpr (Parallel) {
return next_parallel(index);
} else {
return next_serial(index);
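The zArray hunks combine two compile-time tools: `std::conditional_t` gives the iterator an `Atomic<size_t>` cursor only when `Parallel` is true, and `if constexpr` selects the parallel or serial claiming path without instantiating the unused branch. A self-contained sketch of the same pattern, using `std::atomic` and hypothetical names rather than the JDK classes:

```
#include <atomic>
#include <cstddef>
#include <type_traits>

template <typename T, bool Parallel>
class IteratorSketch {
  // Parallel iterators need an atomic claim cursor; serial ones do not.
  using NextType =
      std::conditional_t<Parallel, std::atomic<std::size_t>, std::size_t>;

  NextType _next{0};
  const std::size_t _end;
  const T* const _array;

public:
  IteratorSketch(const T* array, std::size_t length)
    : _end(length), _array(array) {}

  bool next(T* elem) {
    std::size_t index;
    if constexpr (Parallel) {
      // Claim the next index; relaxed ordering suffices for work claiming.
      index = _next.fetch_add(1, std::memory_order_relaxed);
    } else {
      index = _next++;
    }
    if (index >= _end) {
      return false;
    }
    *elem = _array[index];
    return true;
  }
};
```

With `Parallel == false` the `fetch_add` branch is never instantiated, which is what the switch from a runtime `if` to `if constexpr` buys in the patch.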
diff --git a/src/hotspot/share/gc/z/zForwarding.cpp b/src/hotspot/share/gc/z/zForwarding.cpp
index 820bb9dbc35..92fc1bc89df 100644
--- a/src/hotspot/share/gc/z/zForwarding.cpp
+++ b/src/hotspot/share/gc/z/zForwarding.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "logging/log.hpp"
-#include "runtime/atomicAccess.hpp"
#include "utilities/align.hpp"
//
@@ -50,7 +49,7 @@
//
bool ZForwarding::claim() {
- return AtomicAccess::cmpxchg(&_claimed, false, true) == false;
+ return _claimed.compare_set(false, true);
}
void ZForwarding::in_place_relocation_start(zoffset relocated_watermark) {
@@ -60,7 +59,7 @@ void ZForwarding::in_place_relocation_start(zoffset relocated_watermark) {
// Support for ZHeap::is_in checks of from-space objects
// in a page that is in-place relocating
- AtomicAccess::store(&_in_place_thread, Thread::current());
+ _in_place_thread.store_relaxed(Thread::current());
_in_place_top_at_start = _page->top();
}
@@ -76,17 +75,17 @@ void ZForwarding::in_place_relocation_finish() {
}
// Disable relaxed ZHeap::is_in checks
- AtomicAccess::store(&_in_place_thread, (Thread*)nullptr);
+ _in_place_thread.store_relaxed(nullptr);
}
bool ZForwarding::in_place_relocation_is_below_top_at_start(zoffset offset) const {
// Only the relocating thread is allowed to know about the old relocation top.
- return AtomicAccess::load(&_in_place_thread) == Thread::current() && offset < _in_place_top_at_start;
+ return _in_place_thread.load_relaxed() == Thread::current() && offset < _in_place_top_at_start;
}
bool ZForwarding::retain_page(ZRelocateQueue* queue) {
for (;;) {
- const int32_t ref_count = AtomicAccess::load_acquire(&_ref_count);
+ const int32_t ref_count = _ref_count.load_acquire();
if (ref_count == 0) {
// Released
@@ -101,7 +100,7 @@ bool ZForwarding::retain_page(ZRelocateQueue* queue) {
return false;
}
- if (AtomicAccess::cmpxchg(&_ref_count, ref_count, ref_count + 1) == ref_count) {
+ if (_ref_count.compare_set(ref_count, ref_count + 1)) {
// Retained
return true;
}
@@ -110,11 +109,11 @@ bool ZForwarding::retain_page(ZRelocateQueue* queue) {
void ZForwarding::in_place_relocation_claim_page() {
for (;;) {
- const int32_t ref_count = AtomicAccess::load(&_ref_count);
+ const int32_t ref_count = _ref_count.load_relaxed();
assert(ref_count > 0, "Invalid state");
// Invert reference count
- if (AtomicAccess::cmpxchg(&_ref_count, ref_count, -ref_count) != ref_count) {
+ if (!_ref_count.compare_set(ref_count, -ref_count)) {
continue;
}
@@ -122,7 +121,7 @@ void ZForwarding::in_place_relocation_claim_page() {
// and we have now claimed the page. Otherwise we wait until it is claimed.
if (ref_count != 1) {
ZLocker<ZConditionLock> locker(&_ref_lock);
- while (AtomicAccess::load_acquire(&_ref_count) != -1) {
+ while (_ref_count.load_acquire() != -1) {
_ref_lock.wait();
}
}
@@ -134,12 +133,12 @@ void ZForwarding::in_place_relocation_claim_page() {
void ZForwarding::release_page() {
for (;;) {
- const int32_t ref_count = AtomicAccess::load(&_ref_count);
+ const int32_t ref_count = _ref_count.load_relaxed();
assert(ref_count != 0, "Invalid state");
if (ref_count > 0) {
// Decrement reference count
- if (AtomicAccess::cmpxchg(&_ref_count, ref_count, ref_count - 1) != ref_count) {
+ if (!_ref_count.compare_set(ref_count, ref_count - 1)) {
continue;
}
@@ -152,7 +151,7 @@ void ZForwarding::release_page() {
}
} else {
// Increment reference count
- if (AtomicAccess::cmpxchg(&_ref_count, ref_count, ref_count + 1) != ref_count) {
+ if (!_ref_count.compare_set(ref_count, ref_count + 1)) {
continue;
}
@@ -171,9 +170,9 @@ void ZForwarding::release_page() {
ZPage* ZForwarding::detach_page() {
// Wait until released
- if (AtomicAccess::load_acquire(&_ref_count) != 0) {
+ if (_ref_count.load_acquire() != 0) {
ZLocker<ZConditionLock> locker(&_ref_lock);
- while (AtomicAccess::load_acquire(&_ref_count) != 0) {
+ while (_ref_count.load_acquire() != 0) {
_ref_lock.wait();
}
}
@@ -182,16 +181,16 @@ ZPage* ZForwarding::detach_page() {
}
ZPage* ZForwarding::page() {
- assert(AtomicAccess::load(&_ref_count) != 0, "The page has been released/detached");
+ assert(_ref_count.load_relaxed() != 0, "The page has been released/detached");
return _page;
}
void ZForwarding::mark_done() {
- AtomicAccess::store(&_done, true);
+ _done.store_relaxed(true);
}
bool ZForwarding::is_done() const {
- return AtomicAccess::load(&_done);
+ return _done.load_relaxed();
}
//
@@ -288,7 +287,7 @@ void ZForwarding::relocated_remembered_fields_publish() {
// used to have remembered set entries. Now publish the fields to
// the YC.
- const ZPublishState res = AtomicAccess::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::none, ZPublishState::published);
+ const ZPublishState res = _relocated_remembered_fields_state.compare_exchange(ZPublishState::none, ZPublishState::published);
// none: OK to publish
// published: Not possible - this operation makes this transition
@@ -319,7 +318,7 @@ void ZForwarding::relocated_remembered_fields_notify_concurrent_scan_of() {
// Invariant: The page is being retained
assert(ZGeneration::young()->is_phase_mark(), "Only called when");
- const ZPublishState res = AtomicAccess::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::none, ZPublishState::reject);
+ const ZPublishState res = _relocated_remembered_fields_state.compare_exchange(ZPublishState::none, ZPublishState::reject);
// none: OC has not completed relocation
// published: OC has completed and published all relocated remembered fields
@@ -340,7 +339,7 @@ void ZForwarding::relocated_remembered_fields_notify_concurrent_scan_of() {
// OC relocation already collected and published fields
// Still notify concurrent scanning and reject the collected data from the OC
- const ZPublishState res2 = AtomicAccess::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::published, ZPublishState::reject);
+ const ZPublishState res2 = _relocated_remembered_fields_state.compare_exchange(ZPublishState::published, ZPublishState::reject);
assert(res2 == ZPublishState::published, "Should not fail");
log_debug(gc, remset)("Forwarding remset eager and reject: " PTR_FORMAT " " PTR_FORMAT, untype(start()), untype(end()));
@@ -368,7 +367,7 @@ bool ZForwarding::relocated_remembered_fields_published_contains(volatile zpoint
}
void ZForwarding::verify() const {
- guarantee(_ref_count != 0, "Invalid reference count");
+ guarantee(_ref_count.load_relaxed() != 0, "Invalid reference count");
guarantee(_page != nullptr, "Invalid page");
uint32_t live_objects = 0;
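The `_ref_count` changes above keep the existing retain/release protocol and only swap the primitive: each operation loads the current count and retries a `compare_set` until it wins. A simplified sketch of that idiom with `std::atomic` (names are illustrative; the real code also encodes in-place-relocation claiming by negating the count, which is omitted here):

```
#include <atomic>
#include <cstdint>

class PageRefCountSketch {
  std::atomic<int32_t> _ref_count{1};

public:
  // Try to take another reference; fails once the count has dropped to zero.
  bool retain() {
    for (;;) {
      int32_t count = _ref_count.load(std::memory_order_acquire);
      if (count == 0) {
        return false;                 // Already released
      }
      if (_ref_count.compare_exchange_weak(count, count + 1)) {
        return true;                  // Retained
      }
      // Lost the race; re-read the count and retry.
    }
  }

  // Drop one reference.
  void release() {
    for (;;) {
      int32_t count = _ref_count.load(std::memory_order_relaxed);
      if (_ref_count.compare_exchange_weak(count, count - 1)) {
        return;
      }
    }
  }
};
```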
diff --git a/src/hotspot/share/gc/z/zForwarding.hpp b/src/hotspot/share/gc/z/zForwarding.hpp
index 29b5cf4aabe..72319bd2ebb 100644
--- a/src/hotspot/share/gc/z/zForwarding.hpp
+++ b/src/hotspot/share/gc/z/zForwarding.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
#include "gc/z/zPageAge.hpp"
#include "gc/z/zPageType.hpp"
#include "gc/z/zVirtualMemory.hpp"
+#include "runtime/atomic.hpp"
class ObjectClosure;
class ZForwardingAllocator;
@@ -62,13 +63,13 @@ private:
const uint32_t _partition_id;
const ZPageAge _from_age;
const ZPageAge _to_age;
- volatile bool _claimed;
+  Atomic<bool> _claimed;
mutable ZConditionLock _ref_lock;
- volatile int32_t _ref_count;
- volatile bool _done;
+  Atomic<int32_t> _ref_count;
+  Atomic<bool> _done;
// Relocated remembered set fields support
- volatile ZPublishState _relocated_remembered_fields_state;
+  Atomic<ZPublishState> _relocated_remembered_fields_state;
PointerArray _relocated_remembered_fields_array;
uint32_t _relocated_remembered_fields_publish_young_seqnum;
@@ -77,7 +78,7 @@ private:
zoffset_end _in_place_top_at_start;
// Debugging
- volatile Thread* _in_place_thread;
+  Atomic<Thread*> _in_place_thread;
ZForwardingEntry* entries() const;
ZForwardingEntry at(ZForwardingCursor* cursor) const;
diff --git a/src/hotspot/share/gc/z/zForwarding.inline.hpp b/src/hotspot/share/gc/z/zForwarding.inline.hpp
index 45b5d495e79..02f61eb5b05 100644
--- a/src/hotspot/share/gc/z/zForwarding.inline.hpp
+++ b/src/hotspot/share/gc/z/zForwarding.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -196,7 +196,7 @@ void ZForwarding::oops_do_in_forwarded_via_table(Function function) {
}
inline bool ZForwarding::in_place_relocation() const {
- assert(AtomicAccess::load(&_ref_count) != 0, "The page has been released/detached");
+ assert(_ref_count.load_relaxed() != 0, "The page has been released/detached");
return _in_place;
}
@@ -307,7 +307,7 @@ inline void ZForwarding::relocated_remembered_fields_register(volatile zpointer*
// Invariant: Page is being retained
assert(ZGeneration::young()->is_phase_mark(), "Only called when");
- const ZPublishState res = AtomicAccess::load(&_relocated_remembered_fields_state);
+ const ZPublishState res = _relocated_remembered_fields_state.load_relaxed();
// none: Gather remembered fields
// published: Have already published fields - not possible since they haven't been
@@ -327,7 +327,7 @@ inline void ZForwarding::relocated_remembered_fields_register(volatile zpointer*
// Returns true iff the page is being (or about to be) relocated by the OC
// while the YC gathered the remembered fields of the "from" page.
inline bool ZForwarding::relocated_remembered_fields_is_concurrently_scanned() const {
- return AtomicAccess::load(&_relocated_remembered_fields_state) == ZPublishState::reject;
+ return _relocated_remembered_fields_state.load_relaxed() == ZPublishState::reject;
}
template