diff --git a/doc/hotspot-style.html b/doc/hotspot-style.html
index 362245cd00a..c7126622c7d 100644
--- a/doc/hotspot-style.html
+++ b/doc/hotspot-style.html
@@ -965,9 +965,8 @@ rather than NULL. See the paper for reasons to avoid
NULL.
Don't use (constant expression or literal) 0 for pointers. Note that
C++14 removed non-literal 0 constants from null pointer
-constants, though some compilers continue to treat them as such.
-For historical reasons there may be lingering uses of 0 as a
-pointer.
+constants, though some compilers continue to treat them as
+such.
<atomic>
Do not use facilities provided by the <atomic>
header (> LogOfHRGrainBytes)
+ __ branch_optimized(Assembler::bcondEqual, done);
+
+ // Crosses regions, storing null?
+ if (new_val_may_be_null) {
+ __ z_ltgr(new_val, new_val);
+ __ z_bre(done);
+ } else {
+#ifdef ASSERT
+ __ z_ltgr(new_val, new_val);
+ __ asm_assert(Assembler::bcondNotZero, "null oop not allowed (G1 post)", 0x322); // Checked by caller.
+#endif
+ }
+
+ __ z_srag(tmp1, store_addr, CardTable::card_shift());
+
+ Address card_table_addr(thread, in_bytes(G1ThreadLocalData::card_table_base_offset()));
+ __ z_alg(tmp1, card_table_addr); // tmp1 := card address
+
+ if(UseCondCardMark) {
+ __ z_cli(0, tmp1, G1CardTable::clean_card_val());
+ __ branch_optimized(Assembler::bcondNotEqual, done);
+ }
+
+ static_assert(G1CardTable::dirty_card_val() == 0, "must be to use z_mvi");
+ __ z_mvi(0, tmp1, G1CardTable::dirty_card_val()); // *(card address) := dirty_card_val
+
+ __ block_comment("} generate_post_barrier");
+}
+
#if defined(COMPILER2)
#undef __
@@ -204,57 +255,6 @@ void G1BarrierSetAssembler::generate_c2_pre_barrier_stub(MacroAssembler* masm,
BLOCK_COMMENT("} generate_c2_pre_barrier_stub");
}
-static void generate_post_barrier(MacroAssembler* masm,
- const Register store_addr,
- const Register new_val,
- const Register thread,
- const Register tmp1,
- const Register tmp2,
- Label& done,
- bool new_val_may_be_null) {
-
- __ block_comment("generate_post_barrier {");
-
- assert(thread == Z_thread, "must be");
- assert_different_registers(store_addr, new_val, thread, tmp1, tmp2, noreg);
-
- // Does store cross heap regions?
- if (VM_Version::has_DistinctOpnds()) {
- __ z_xgrk(tmp1, store_addr, new_val); // tmp1 := store address ^ new value
- } else {
- __ z_lgr(tmp1, store_addr);
- __ z_xgr(tmp1, new_val);
- }
- __ z_srag(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes); // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
- __ branch_optimized(Assembler::bcondEqual, done);
-
- // Crosses regions, storing null?
- if (new_val_may_be_null) {
- __ z_ltgr(new_val, new_val);
- __ z_bre(done);
- } else {
-#ifdef ASSERT
- __ z_ltgr(new_val, new_val);
- __ asm_assert(Assembler::bcondNotZero, "null oop not allowed (G1 post)", 0x322); // Checked by caller.
-#endif
- }
-
- __ z_srag(tmp1, store_addr, CardTable::card_shift());
-
- Address card_table_addr(thread, in_bytes(G1ThreadLocalData::card_table_base_offset()));
- __ z_alg(tmp1, card_table_addr); // tmp1 := card address
-
- if(UseCondCardMark) {
- __ z_cli(0, tmp1, G1CardTable::clean_card_val());
- __ branch_optimized(Assembler::bcondNotEqual, done);
- }
-
- static_assert(G1CardTable::dirty_card_val() == 0, "must be to use z_mvi");
- __ z_mvi(0, tmp1, G1CardTable::dirty_card_val()); // *(card address) := dirty_card_val
-
- __ block_comment("} generate_post_barrier");
-}
-
void G1BarrierSetAssembler::g1_write_barrier_post_c2(MacroAssembler* masm,
Register store_addr,
Register new_val,
diff --git a/src/hotspot/cpu/s390/globals_s390.hpp b/src/hotspot/cpu/s390/globals_s390.hpp
index 07987ea3469..d110443adf8 100644
--- a/src/hotspot/cpu/s390/globals_s390.hpp
+++ b/src/hotspot/cpu/s390/globals_s390.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -43,7 +43,7 @@ define_pd_global(size_t, CodeCacheSegmentSize, 256);
// Ideally, this is 256 (cache line size). This keeps code end data
// on separate lines. But we reduced it to 64 since 256 increased
// code size significantly by padding nops between IVC and second UEP.
-define_pd_global(intx, CodeEntryAlignment, 64);
+define_pd_global(uint, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 2);
define_pd_global(intx, InlineSmallCode, 2000);
diff --git a/src/hotspot/cpu/x86/globals_x86.hpp b/src/hotspot/cpu/x86/globals_x86.hpp
index 103e22d0185..4f5b6d31e75 100644
--- a/src/hotspot/cpu/x86/globals_x86.hpp
+++ b/src/hotspot/cpu/x86/globals_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,9 +46,9 @@ define_pd_global(size_t, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRES
// the uep and the vep doesn't get real alignment but just slops on by
// only assured that the entry instruction meets the 5 byte size requirement.
#if COMPILER2_OR_JVMCI
-define_pd_global(intx, CodeEntryAlignment, 32);
+define_pd_global(uint, CodeEntryAlignment, 32);
#else
-define_pd_global(intx, CodeEntryAlignment, 16);
+define_pd_global(uint, CodeEntryAlignment, 16);
#endif // COMPILER2_OR_JVMCI
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineSmallCode, 1000);
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index d4d3ec85bcf..83169df3456 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -765,7 +765,7 @@ void MacroAssembler::align32() {
void MacroAssembler::align(uint modulus) {
// 8273459: Ensure alignment is possible with current segment alignment
- assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
+ assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
align(modulus, offset());
}
diff --git a/src/hotspot/cpu/x86/vm_version_x86.cpp b/src/hotspot/cpu/x86/vm_version_x86.cpp
index c65c1c7d219..78d6dec08cf 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp
@@ -48,7 +48,7 @@ int VM_Version::_stepping;
bool VM_Version::_has_intel_jcc_erratum;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };
-#define DECLARE_CPU_FEATURE_NAME(id, name, bit) name,
+#define DECLARE_CPU_FEATURE_NAME(id, name, bit) XSTR(name),
const char* VM_Version::_features_names[] = { CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_NAME)};
#undef DECLARE_CPU_FEATURE_NAME
@@ -1659,41 +1659,40 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
}
-#ifdef COMPILER2
- if (UseAVX > 2) {
- if (FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) ||
- (!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) &&
- ArrayOperationPartialInlineSize != 0 &&
- ArrayOperationPartialInlineSize != 16 &&
- ArrayOperationPartialInlineSize != 32 &&
- ArrayOperationPartialInlineSize != 64)) {
- int inline_size = 0;
- if (MaxVectorSize >= 64 && AVX3Threshold == 0) {
- inline_size = 64;
- } else if (MaxVectorSize >= 32) {
- inline_size = 32;
- } else if (MaxVectorSize >= 16) {
- inline_size = 16;
- }
- if(!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize)) {
- warning("Setting ArrayOperationPartialInlineSize as %d", inline_size);
- }
- ArrayOperationPartialInlineSize = inline_size;
- }
-
- if (ArrayOperationPartialInlineSize > MaxVectorSize) {
- ArrayOperationPartialInlineSize = MaxVectorSize >= 16 ? MaxVectorSize : 0;
- if (ArrayOperationPartialInlineSize) {
- warning("Setting ArrayOperationPartialInlineSize as MaxVectorSize=%zd", MaxVectorSize);
- } else {
- warning("Setting ArrayOperationPartialInlineSize as %zd", ArrayOperationPartialInlineSize);
- }
- }
- }
-#endif
}
#ifdef COMPILER2
+ if (UseAVX > 2) {
+ if (FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) ||
+ (!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) &&
+ ArrayOperationPartialInlineSize != 0 &&
+ ArrayOperationPartialInlineSize != 16 &&
+ ArrayOperationPartialInlineSize != 32 &&
+ ArrayOperationPartialInlineSize != 64)) {
+ int inline_size = 0;
+ if (MaxVectorSize >= 64 && AVX3Threshold == 0) {
+ inline_size = 64;
+ } else if (MaxVectorSize >= 32) {
+ inline_size = 32;
+ } else if (MaxVectorSize >= 16) {
+ inline_size = 16;
+ }
+ if(!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize)) {
+ warning("Setting ArrayOperationPartialInlineSize as %d", inline_size);
+ }
+ ArrayOperationPartialInlineSize = inline_size;
+ }
+
+ if (ArrayOperationPartialInlineSize > MaxVectorSize) {
+ ArrayOperationPartialInlineSize = MaxVectorSize >= 16 ? MaxVectorSize : 0;
+ if (ArrayOperationPartialInlineSize) {
+ warning("Setting ArrayOperationPartialInlineSize as MaxVectorSize=%zd", MaxVectorSize);
+ } else {
+ warning("Setting ArrayOperationPartialInlineSize as %zd", ArrayOperationPartialInlineSize);
+ }
+ }
+ }
+
if (FLAG_IS_DEFAULT(OptimizeFill)) {
if (MaxVectorSize < 32 || (!EnableX86ECoreOpts && !VM_Version::supports_avx512vlbw())) {
OptimizeFill = false;
@@ -3298,12 +3297,50 @@ bool VM_Version::is_intrinsic_supported(vmIntrinsicID id) {
void VM_Version::insert_features_names(VM_Version::VM_Features features, stringStream& ss) {
int i = 0;
ss.join([&]() {
- while (i < MAX_CPU_FEATURES) {
- if (_features.supports_feature((VM_Version::Feature_Flag)i)) {
- return _features_names[i++];
+ const char* str = nullptr;
+ while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
+ if (features.supports_feature((VM_Version::Feature_Flag)i)) {
+ str = _features_names[i];
}
i += 1;
}
- return (const char*)nullptr;
+ return str;
}, ", ");
}
+
+void VM_Version::get_cpu_features_name(void* features_buffer, stringStream& ss) {
+ VM_Features* features = (VM_Features*)features_buffer;
+ insert_features_names(*features, ss);
+}
+
+void VM_Version::get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss) {
+ VM_Features* vm_features_set1 = (VM_Features*)features_set1;
+ VM_Features* vm_features_set2 = (VM_Features*)features_set2;
+ int i = 0;
+ ss.join([&]() {
+ const char* str = nullptr;
+ while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
+ Feature_Flag flag = (Feature_Flag)i;
+ if (vm_features_set1->supports_feature(flag) && !vm_features_set2->supports_feature(flag)) {
+ str = _features_names[i];
+ }
+ i += 1;
+ }
+ return str;
+ }, ", ");
+}
+
+int VM_Version::cpu_features_size() {
+ return sizeof(VM_Features);
+}
+
+void VM_Version::store_cpu_features(void* buf) {
+ VM_Features copy = _features;
+ copy.clear_feature(CPU_HT); // HT does not result in incompatibility of aot code cache
+  memcpy(buf, &copy, sizeof(VM_Features));
+}
+
+bool VM_Version::supports_features(void* features_buffer) {
+ VM_Features* features_to_test = (VM_Features*)features_buffer;
+ return _features.supports_features(features_to_test);
+}
diff --git a/src/hotspot/cpu/x86/vm_version_x86.hpp b/src/hotspot/cpu/x86/vm_version_x86.hpp
index a3f2a801198..e0a895737b7 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -377,84 +377,84 @@ protected:
*/
enum Feature_Flag {
#define CPU_FEATURE_FLAGS(decl) \
- decl(CX8, "cx8", 0) /* next bits are from cpuid 1 (EDX) */ \
- decl(CMOV, "cmov", 1) \
- decl(FXSR, "fxsr", 2) \
- decl(HT, "ht", 3) \
+ decl(CX8, cx8, 0) /* next bits are from cpuid 1 (EDX) */ \
+ decl(CMOV, cmov, 1) \
+ decl(FXSR, fxsr, 2) \
+ decl(HT, ht, 3) \
\
- decl(MMX, "mmx", 4) \
- decl(3DNOW_PREFETCH, "3dnowpref", 5) /* Processor supports 3dnow prefetch and prefetchw instructions */ \
+ decl(MMX, mmx, 4) \
+ decl(3DNOW_PREFETCH, 3dnowpref, 5) /* Processor supports 3dnow prefetch and prefetchw instructions */ \
/* may not necessarily support other 3dnow instructions */ \
- decl(SSE, "sse", 6) \
- decl(SSE2, "sse2", 7) \
+ decl(SSE, sse, 6) \
+ decl(SSE2, sse2, 7) \
\
- decl(SSE3, "sse3", 8 ) /* SSE3 comes from cpuid 1 (ECX) */ \
- decl(SSSE3, "ssse3", 9 ) \
- decl(SSE4A, "sse4a", 10) \
- decl(SSE4_1, "sse4.1", 11) \
+ decl(SSE3, sse3, 8 ) /* SSE3 comes from cpuid 1 (ECX) */ \
+ decl(SSSE3, ssse3, 9 ) \
+ decl(SSE4A, sse4a, 10) \
+ decl(SSE4_1, sse4.1, 11) \
\
- decl(SSE4_2, "sse4.2", 12) \
- decl(POPCNT, "popcnt", 13) \
- decl(LZCNT, "lzcnt", 14) \
- decl(TSC, "tsc", 15) \
+ decl(SSE4_2, sse4.2, 12) \
+ decl(POPCNT, popcnt, 13) \
+ decl(LZCNT, lzcnt, 14) \
+ decl(TSC, tsc, 15) \
\
- decl(TSCINV_BIT, "tscinvbit", 16) \
- decl(TSCINV, "tscinv", 17) \
- decl(AVX, "avx", 18) \
- decl(AVX2, "avx2", 19) \
+ decl(TSCINV_BIT, tscinvbit, 16) \
+ decl(TSCINV, tscinv, 17) \
+ decl(AVX, avx, 18) \
+ decl(AVX2, avx2, 19) \
\
- decl(AES, "aes", 20) \
- decl(ERMS, "erms", 21) /* enhanced 'rep movsb/stosb' instructions */ \
- decl(CLMUL, "clmul", 22) /* carryless multiply for CRC */ \
- decl(BMI1, "bmi1", 23) \
+ decl(AES, aes, 20) \
+ decl(ERMS, erms, 21) /* enhanced 'rep movsb/stosb' instructions */ \
+ decl(CLMUL, clmul, 22) /* carryless multiply for CRC */ \
+ decl(BMI1, bmi1, 23) \
\
- decl(BMI2, "bmi2", 24) \
- decl(RTM, "rtm", 25) /* Restricted Transactional Memory instructions */ \
- decl(ADX, "adx", 26) \
- decl(AVX512F, "avx512f", 27) /* AVX 512bit foundation instructions */ \
+ decl(BMI2, bmi2, 24) \
+ decl(RTM, rtm, 25) /* Restricted Transactional Memory instructions */ \
+ decl(ADX, adx, 26) \
+ decl(AVX512F, avx512f, 27) /* AVX 512bit foundation instructions */ \
\
- decl(AVX512DQ, "avx512dq", 28) \
- decl(AVX512PF, "avx512pf", 29) \
- decl(AVX512ER, "avx512er", 30) \
- decl(AVX512CD, "avx512cd", 31) \
+ decl(AVX512DQ, avx512dq, 28) \
+ decl(AVX512PF, avx512pf, 29) \
+ decl(AVX512ER, avx512er, 30) \
+ decl(AVX512CD, avx512cd, 31) \
\
- decl(AVX512BW, "avx512bw", 32) /* Byte and word vector instructions */ \
- decl(AVX512VL, "avx512vl", 33) /* EVEX instructions with smaller vector length */ \
- decl(SHA, "sha", 34) /* SHA instructions */ \
- decl(FMA, "fma", 35) /* FMA instructions */ \
+ decl(AVX512BW, avx512bw, 32) /* Byte and word vector instructions */ \
+ decl(AVX512VL, avx512vl, 33) /* EVEX instructions with smaller vector length */ \
+ decl(SHA, sha, 34) /* SHA instructions */ \
+ decl(FMA, fma, 35) /* FMA instructions */ \
\
- decl(VZEROUPPER, "vzeroupper", 36) /* Vzeroupper instruction */ \
- decl(AVX512_VPOPCNTDQ, "avx512_vpopcntdq", 37) /* Vector popcount */ \
- decl(AVX512_VPCLMULQDQ, "avx512_vpclmulqdq", 38) /* Vector carryless multiplication */ \
- decl(AVX512_VAES, "avx512_vaes", 39) /* Vector AES instruction */ \
+ decl(VZEROUPPER, vzeroupper, 36) /* Vzeroupper instruction */ \
+ decl(AVX512_VPOPCNTDQ, avx512_vpopcntdq, 37) /* Vector popcount */ \
+ decl(AVX512_VPCLMULQDQ, avx512_vpclmulqdq, 38) /* Vector carryless multiplication */ \
+ decl(AVX512_VAES, avx512_vaes, 39) /* Vector AES instruction */ \
\
- decl(AVX512_VNNI, "avx512_vnni", 40) /* Vector Neural Network Instructions */ \
- decl(FLUSH, "clflush", 41) /* flush instruction */ \
- decl(FLUSHOPT, "clflushopt", 42) /* flusopth instruction */ \
- decl(CLWB, "clwb", 43) /* clwb instruction */ \
+ decl(AVX512_VNNI, avx512_vnni, 40) /* Vector Neural Network Instructions */ \
+ decl(FLUSH, clflush, 41) /* flush instruction */ \
+  decl(FLUSHOPT, clflushopt, 42) /* clflushopt instruction */ \
+ decl(CLWB, clwb, 43) /* clwb instruction */ \
\
- decl(AVX512_VBMI2, "avx512_vbmi2", 44) /* VBMI2 shift left double instructions */ \
- decl(AVX512_VBMI, "avx512_vbmi", 45) /* Vector BMI instructions */ \
- decl(HV, "hv", 46) /* Hypervisor instructions */ \
- decl(SERIALIZE, "serialize", 47) /* CPU SERIALIZE */ \
- decl(RDTSCP, "rdtscp", 48) /* RDTSCP instruction */ \
- decl(RDPID, "rdpid", 49) /* RDPID instruction */ \
- decl(FSRM, "fsrm", 50) /* Fast Short REP MOV */ \
- decl(GFNI, "gfni", 51) /* Vector GFNI instructions */ \
- decl(AVX512_BITALG, "avx512_bitalg", 52) /* Vector sub-word popcount and bit gather instructions */\
- decl(F16C, "f16c", 53) /* Half-precision and single precision FP conversion instructions*/ \
- decl(PKU, "pku", 54) /* Protection keys for user-mode pages */ \
- decl(OSPKE, "ospke", 55) /* OS enables protection keys */ \
- decl(CET_IBT, "cet_ibt", 56) /* Control Flow Enforcement - Indirect Branch Tracking */ \
- decl(CET_SS, "cet_ss", 57) /* Control Flow Enforcement - Shadow Stack */ \
- decl(AVX512_IFMA, "avx512_ifma", 58) /* Integer Vector FMA instructions*/ \
- decl(AVX_IFMA, "avx_ifma", 59) /* 256-bit VEX-coded variant of AVX512-IFMA*/ \
- decl(APX_F, "apx_f", 60) /* Intel Advanced Performance Extensions*/ \
- decl(SHA512, "sha512", 61) /* SHA512 instructions*/ \
- decl(AVX512_FP16, "avx512_fp16", 62) /* AVX512 FP16 ISA support*/ \
- decl(AVX10_1, "avx10_1", 63) /* AVX10 512 bit vector ISA Version 1 support*/ \
- decl(AVX10_2, "avx10_2", 64) /* AVX10 512 bit vector ISA Version 2 support*/ \
- decl(HYBRID, "hybrid", 65) /* Hybrid architecture */
+ decl(AVX512_VBMI2, avx512_vbmi2, 44) /* VBMI2 shift left double instructions */ \
+ decl(AVX512_VBMI, avx512_vbmi, 45) /* Vector BMI instructions */ \
+ decl(HV, hv, 46) /* Hypervisor instructions */ \
+ decl(SERIALIZE, serialize, 47) /* CPU SERIALIZE */ \
+ decl(RDTSCP, rdtscp, 48) /* RDTSCP instruction */ \
+ decl(RDPID, rdpid, 49) /* RDPID instruction */ \
+ decl(FSRM, fsrm, 50) /* Fast Short REP MOV */ \
+ decl(GFNI, gfni, 51) /* Vector GFNI instructions */ \
+ decl(AVX512_BITALG, avx512_bitalg, 52) /* Vector sub-word popcount and bit gather instructions */\
+ decl(F16C, f16c, 53) /* Half-precision and single precision FP conversion instructions*/ \
+ decl(PKU, pku, 54) /* Protection keys for user-mode pages */ \
+ decl(OSPKE, ospke, 55) /* OS enables protection keys */ \
+ decl(CET_IBT, cet_ibt, 56) /* Control Flow Enforcement - Indirect Branch Tracking */ \
+ decl(CET_SS, cet_ss, 57) /* Control Flow Enforcement - Shadow Stack */ \
+ decl(AVX512_IFMA, avx512_ifma, 58) /* Integer Vector FMA instructions*/ \
+ decl(AVX_IFMA, avx_ifma, 59) /* 256-bit VEX-coded variant of AVX512-IFMA*/ \
+ decl(APX_F, apx_f, 60) /* Intel Advanced Performance Extensions*/ \
+ decl(SHA512, sha512, 61) /* SHA512 instructions*/ \
+ decl(AVX512_FP16, avx512_fp16, 62) /* AVX512 FP16 ISA support*/ \
+ decl(AVX10_1, avx10_1, 63) /* AVX10 512 bit vector ISA Version 1 support*/ \
+ decl(AVX10_2, avx10_2, 64) /* AVX10 512 bit vector ISA Version 2 support*/ \
+ decl(HYBRID, hybrid, 65) /* Hybrid architecture */
#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = (bit),
CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_FLAG)
@@ -516,6 +516,15 @@ protected:
int idx = index(feature);
return (_features_bitmap[idx] & bit_mask(feature)) != 0;
}
+
+ bool supports_features(VM_Features* features_to_test) {
+ for (int i = 0; i < features_bitmap_element_count(); i++) {
+ if ((_features_bitmap[i] & features_to_test->_features_bitmap[i]) != features_to_test->_features_bitmap[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
};
// CPU feature flags vector, can be affected by VM settings.
@@ -1103,6 +1112,20 @@ public:
static bool supports_tscinv_ext(void);
static void initialize_cpu_information(void);
+
+ static void get_cpu_features_name(void* features_buffer, stringStream& ss);
+
+ // Returns names of features present in features_set1 but not in features_set2
+ static void get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss);
+
+ // Returns number of bytes required to store cpu features representation
+ static int cpu_features_size();
+
+ // Stores cpu features representation in the provided buffer. This representation is arch dependent.
+ // Size of the buffer must be same as returned by cpu_features_size()
+ static void store_cpu_features(void* buf);
+
+ static bool supports_features(void* features_to_test);
};
#endif // CPU_X86_VM_VERSION_X86_HPP
diff --git a/src/hotspot/cpu/zero/globals_zero.hpp b/src/hotspot/cpu/zero/globals_zero.hpp
index 6b6c6ea983c..6dc7d81275c 100644
--- a/src/hotspot/cpu/zero/globals_zero.hpp
+++ b/src/hotspot/cpu/zero/globals_zero.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -39,7 +39,7 @@ define_pd_global(bool, UncommonNullCast, true);
define_pd_global(bool, DelayCompilerStubsGeneration, false); // Don't have compiler's stubs
define_pd_global(size_t, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
-define_pd_global(intx, CodeEntryAlignment, 32);
+define_pd_global(uint, CodeEntryAlignment, 32);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineSmallCode, 1000);
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index 0a8efbece8d..327508e1118 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -1753,10 +1753,9 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return true;
}
-bool os::remove_stack_guard_pages(char* addr, size_t size) {
+void os::remove_stack_guard_pages(char* addr, size_t size) {
// Do not call this; no need to commit stack pages on AIX.
ShouldNotReachHere();
- return true;
}
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
diff --git a/src/hotspot/os/aix/os_perf_aix.cpp b/src/hotspot/os/aix/os_perf_aix.cpp
index aa8819d035f..cbf78083483 100644
--- a/src/hotspot/os/aix/os_perf_aix.cpp
+++ b/src/hotspot/os/aix/os_perf_aix.cpp
@@ -143,12 +143,6 @@ static OSReturn get_jvm_load(double* jvm_uload, double* jvm_sload) {
return OS_OK;
}
-static void update_prev_time(jvm_time_store_t* from, jvm_time_store_t* to) {
- if (from && to) {
- memcpy(to, from, sizeof(jvm_time_store_t));
- }
-}
-
static void update_prev_ticks(cpu_tick_store_t* from, cpu_tick_store_t* to) {
if (from && to) {
memcpy(to, from, sizeof(cpu_tick_store_t));
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index 81320b4f1aa..0ed5335adc3 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -1782,10 +1782,8 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size, !ExecMem);
}
-// If this is a growable mapping, remove the guard pages entirely by
-// munmap()ping them. If not, just call uncommit_memory().
-bool os::remove_stack_guard_pages(char* addr, size_t size) {
- return os::uncommit_memory(addr, size);
+void os::remove_stack_guard_pages(char* addr, size_t size) {
+ os::uncommit_memory(addr, size);
}
// 'requested_addr' is only treated as a hint, the return value may or
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index 7190845a8ba..09c514e3d05 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -3523,6 +3523,9 @@ bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
log_trace(os, map)("mmap failed: " RANGEFMT " errno=(%s)",
RANGEFMTARGS(addr, size),
os::strerror(ep.saved_errno()));
+ if (ep.saved_errno() == ENOMEM) {
+ fatal("Failed to uncommit " RANGEFMT ". It is possible that the process's maximum number of mappings would have been exceeded. Try increasing the limit.", RANGEFMTARGS(addr, size));
+ }
return false;
}
return true;
@@ -3633,14 +3636,16 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
// It's safe to always unmap guard pages for primordial thread because we
// always place it right after end of the mapped region.
-bool os::remove_stack_guard_pages(char* addr, size_t size) {
- uintptr_t stack_extent, stack_base;
+void os::remove_stack_guard_pages(char* addr, size_t size) {
if (os::is_primordial_thread()) {
- return ::munmap(addr, size) == 0;
+ if (::munmap(addr, size) != 0) {
+ fatal("Failed to munmap " RANGEFMT, RANGEFMTARGS(addr, size));
+ }
+ return;
}
- return os::uncommit_memory(addr, size);
+ os::uncommit_memory(addr, size);
}
// 'requested_addr' is only treated as a hint, the return value may or
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index b0b7ae18106..2e819e26e37 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -3281,11 +3281,10 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
// Do manual alignment
aligned_base = align_up(extra_base, alignment);
- bool rc = (file_desc != -1) ? os::unmap_memory(extra_base, extra_size) :
- os::release_memory(extra_base, extra_size);
- assert(rc, "release failed");
- if (!rc) {
- return nullptr;
+ if (file_desc != -1) {
+ os::unmap_memory(extra_base, extra_size);
+ } else {
+ os::release_memory(extra_base, extra_size);
}
// Attempt to map, into the just vacated space, the slightly smaller aligned area.
@@ -3681,8 +3680,8 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size, !ExecMem);
}
-bool os::remove_stack_guard_pages(char* addr, size_t size) {
- return os::uncommit_memory(addr, size);
+void os::remove_stack_guard_pages(char* addr, size_t size) {
+ os::uncommit_memory(addr, size);
}
static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
diff --git a/src/hotspot/share/adlc/adlArena.cpp b/src/hotspot/share/adlc/adlArena.cpp
index ebd1f74911d..e3ae60e91a9 100644
--- a/src/hotspot/share/adlc/adlArena.cpp
+++ b/src/hotspot/share/adlc/adlArena.cpp
@@ -136,9 +136,9 @@ void *AdlArena::Acalloc( size_t items, size_t x ) {
}
//------------------------------realloc----------------------------------------
-static size_t pointer_delta(const void *left, const void *right) {
- assert(left >= right, "pointer delta underflow");
- return (uintptr_t)left - (uintptr_t)right;
+static size_t pointer_delta(const void* high, const void* low) {
+ assert(high >= low, "pointer delta underflow");
+ return (uintptr_t)high - (uintptr_t)low;
}
// Reallocate storage in AdlArena.
diff --git a/src/hotspot/share/asm/codeBuffer.cpp b/src/hotspot/share/asm/codeBuffer.cpp
index d94f52c18f6..ba525588f32 100644
--- a/src/hotspot/share/asm/codeBuffer.cpp
+++ b/src/hotspot/share/asm/codeBuffer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -468,9 +468,7 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
assert(!_finalize_stubs, "non-finalized stubs");
{
- // not sure why this is here, but why not...
- int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
- assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
+ assert( (dest->_total_start - _insts.start()) % CodeEntryAlignment == 0, "copy must preserve alignment");
}
const CodeSection* prev_cs = nullptr;
diff --git a/src/hotspot/share/cds/aotClassLocation.cpp b/src/hotspot/share/cds/aotClassLocation.cpp
index f77aac3540c..9275b914ef9 100644
--- a/src/hotspot/share/cds/aotClassLocation.cpp
+++ b/src/hotspot/share/cds/aotClassLocation.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -426,7 +426,8 @@ bool AOTClassLocation::check(const char* runtime_path, bool has_aot_linked_class
bool size_differs = _filesize != st.st_size;
bool time_differs = _check_time && (_timestamp != st.st_mtime);
if (size_differs || time_differs) {
- aot_log_warning(aot)("This file is not the one used while building the shared archive file: '%s'%s%s",
+ aot_log_warning(aot)("This file is not the one used while building the %s: '%s'%s%s",
+ CDSConfig::type_of_archive_being_loaded(),
runtime_path,
time_differs ? ", timestamp has changed" : "",
size_differs ? ", size has changed" : "");
@@ -448,6 +449,13 @@ void AOTClassLocationConfig::dumptime_init(JavaThread* current) {
java_lang_Throwable::print(current->pending_exception(), tty);
vm_exit_during_initialization("AOTClassLocationConfig::dumptime_init_helper() failed unexpectedly");
}
+
+ if (CDSConfig::is_dumping_final_static_archive()) {
+ // The _max_used_index is usually updated by ClassLoader::record_result(). However,
+ // when dumping the final archive, the classes are loaded from their images in
+ // the AOT config file, so we don't go through ClassLoader::record_result().
+ dumptime_update_max_used_index(runtime()->_max_used_index); // Same value as recorded in the training run.
+ }
}
void AOTClassLocationConfig::dumptime_init_helper(TRAPS) {
diff --git a/src/hotspot/share/cds/aotCompressedPointers.cpp b/src/hotspot/share/cds/aotCompressedPointers.cpp
new file mode 100644
index 00000000000..c3efa7a7185
--- /dev/null
+++ b/src/hotspot/share/cds/aotCompressedPointers.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "cds/aotCompressedPointers.hpp"
+#include "cds/archiveBuilder.hpp"
+
+size_t AOTCompressedPointers::compute_byte_offset(address p) {
+ return ArchiveBuilder::current()->any_to_offset(p);
+}
diff --git a/src/hotspot/share/cds/aotCompressedPointers.hpp b/src/hotspot/share/cds/aotCompressedPointers.hpp
new file mode 100644
index 00000000000..ead48ef9948
--- /dev/null
+++ b/src/hotspot/share/cds/aotCompressedPointers.hpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CDS_AOTCOMPRESSEDPOINTERS_HPP
+#define SHARE_CDS_AOTCOMPRESSEDPOINTERS_HPP
+
+#include "cds/cds_globals.hpp"
+#include "memory/allStatic.hpp"
+#include "memory/metaspace.hpp"
+#include "metaprogramming/enableIf.hpp"
+#include "utilities/align.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+class AOTCompressedPointers: public AllStatic {
+public:
+ // For space saving, we can encode the location of metadata objects in the "rw" and "ro"
+ // regions using a 32-bit offset from the bottom of the mapped AOT metaspace.
+ // Currently we allow only up to 2GB total size in the rw and ro regions (which are
+ // contiguous to each other).
+ enum class narrowPtr : u4;
+ static constexpr size_t MaxMetadataOffsetBytes = 0x7FFFFFFF;
+
+ // In the future, this could return a different numerical value than
+ // narrowp if the encoding contains shifts.
+ inline static size_t get_byte_offset(narrowPtr narrowp) {
+ return checked_cast<size_t>(narrowp);
+ }
+
+ inline static narrowPtr null() {
+ return static_cast<narrowPtr>(0);
+ }
+
+ // Encoding ------
+
+ // ptr can point to one of the following
+ // - an object in the ArchiveBuilder's buffer.
+ // - an object in the currently mapped AOT cache rw/ro regions.
+ // - an object that has been copied into the ArchiveBuilder's buffer.
+ template <typename T>
+ static narrowPtr encode_not_null(T ptr) {
+ address p = reinterpret_cast<address>(ptr);
+ return encode_byte_offset(compute_byte_offset(p));
+ }
+
+ template <typename T>
+ static narrowPtr encode(T ptr) { // may be null
+ if (ptr == nullptr) {
+ return null();
+ } else {
+ return encode_not_null(ptr);
+ }
+ }
+
+ // ptr must be in the currently mapped AOT cache rw/ro regions.
+ template <typename T>
+ static narrowPtr encode_address_in_cache(T ptr) {
+ assert(Metaspace::in_aot_cache(ptr), "must be");
+ address p = reinterpret_cast<address>(ptr);
+ address base = reinterpret_cast<address>(SharedBaseAddress);
+ return encode_byte_offset(pointer_delta(p, base, 1));
+ }
+
+ template <typename T>
+ static narrowPtr encode_address_in_cache_or_null(T ptr) {
+ if (ptr == nullptr) {
+ return null();
+ } else {
+ return encode_address_in_cache(ptr);
+ }
+ }
+
+ // Decoding -----
+
+ // If base_address is null, decode an address within the mapped aot cache range.
+ template <typename T>
+ static T decode_not_null(narrowPtr narrowp, address base_address = nullptr) {
+ assert(narrowp != null(), "sanity");
+ if (base_address == nullptr) {
+ T p = reinterpret_cast<T>(reinterpret_cast<address>(SharedBaseAddress) + get_byte_offset(narrowp));
+ assert(Metaspace::in_aot_cache(p), "must be");
+ return p;
+ } else {
+ // This is usually called before the cache is fully mapped.
+ return reinterpret_cast<T>(base_address + get_byte_offset(narrowp));
+ }
+ }
+
+ template <typename T>
+ static T decode(narrowPtr narrowp, address base_address = nullptr) { // may be null
+ if (narrowp == null()) {
+ return nullptr;
+ } else {
+ return decode_not_null(narrowp, base_address);
+ }
+ }
+
+private:
+ static size_t compute_byte_offset(address p);
+
+ static narrowPtr encode_byte_offset(size_t offset) {
+ assert(offset != 0, "offset 0 is in protection zone");
+ precond(offset <= MaxMetadataOffsetBytes);
+ return checked_cast<narrowPtr>(offset);
+ }
+};
+
+// Type casts -- declared as global functions to save a few keystrokes
+
+// A simple type cast. No change in numerical value.
+inline AOTCompressedPointers::narrowPtr cast_from_u4(u4 narrowp) {
+ return checked_cast<AOTCompressedPointers::narrowPtr>(narrowp);
+}
+
+// A simple type cast. No change in numerical value.
+// !!!DO NOT CALL THIS if you want a byte offset!!!
+inline u4 cast_to_u4(AOTCompressedPointers::narrowPtr narrowp) {
+ return checked_cast<u4>(narrowp);
+}
+
+#endif // SHARE_CDS_AOTCOMPRESSEDPOINTERS_HPP
diff --git a/src/hotspot/share/cds/aotMetaspace.cpp b/src/hotspot/share/cds/aotMetaspace.cpp
index 8bb8387f1ab..544eaa07a4d 100644
--- a/src/hotspot/share/cds/aotMetaspace.cpp
+++ b/src/hotspot/share/cds/aotMetaspace.cpp
@@ -2106,7 +2106,7 @@ MapArchiveResult AOTMetaspace::map_archive(FileMapInfo* mapinfo, char* mapped_ba
// Currently, only static archive uses early serialized data.
char* buffer = mapinfo->early_serialized_data();
intptr_t* array = (intptr_t*)buffer;
- ReadClosure rc(&array, (intptr_t)mapped_base_address);
+ ReadClosure rc(&array, (address)mapped_base_address);
early_serialize(&rc);
}
@@ -2152,7 +2152,7 @@ void AOTMetaspace::initialize_shared_spaces() {
// shared string/symbol tables.
char* buffer = static_mapinfo->serialized_data();
intptr_t* array = (intptr_t*)buffer;
- ReadClosure rc(&array, (intptr_t)SharedBaseAddress);
+ ReadClosure rc(&array, (address)SharedBaseAddress);
serialize(&rc);
// Finish initializing the heap dump mode used in the archive
@@ -2164,7 +2164,7 @@ void AOTMetaspace::initialize_shared_spaces() {
if (dynamic_mapinfo != nullptr) {
intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data();
- ReadClosure rc(&buffer, (intptr_t)SharedBaseAddress);
+ ReadClosure rc(&buffer, (address)SharedBaseAddress);
DynamicArchive::serialize(&rc);
}
diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp
index e65bd3985ac..cb9459172b3 100644
--- a/src/hotspot/share/cds/archiveBuilder.cpp
+++ b/src/hotspot/share/cds/archiveBuilder.cpp
@@ -24,6 +24,7 @@
#include "cds/aotArtifactFinder.hpp"
#include "cds/aotClassLinker.hpp"
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMapLogger.hpp"
#include "cds/aotMetaspace.hpp"
@@ -175,10 +176,10 @@ ArchiveBuilder::ArchiveBuilder() :
_mapped_static_archive_bottom(nullptr),
_mapped_static_archive_top(nullptr),
_buffer_to_requested_delta(0),
- _pz_region("pz", MAX_SHARED_DELTA), // protection zone -- used only during dumping; does NOT exist in cds archive.
- _rw_region("rw", MAX_SHARED_DELTA),
- _ro_region("ro", MAX_SHARED_DELTA),
- _ac_region("ac", MAX_SHARED_DELTA),
+ _pz_region("pz"), // protection zone -- used only during dumping; does NOT exist in cds archive.
+ _rw_region("rw"),
+ _ro_region("ro"),
+ _ac_region("ac"),
_ptrmap(mtClassShared),
_rw_ptrmap(mtClassShared),
_ro_ptrmap(mtClassShared),
@@ -990,16 +991,15 @@ void ArchiveBuilder::make_training_data_shareable() {
_src_obj_table.iterate_all(clean_td);
}
-uintx ArchiveBuilder::buffer_to_offset(address p) const {
+size_t ArchiveBuilder::buffer_to_offset(address p) const {
address requested_p = to_requested(p);
- assert(requested_p >= _requested_static_archive_bottom, "must be");
- return requested_p - _requested_static_archive_bottom;
+ return pointer_delta(requested_p, _requested_static_archive_bottom, 1);
}
-uintx ArchiveBuilder::any_to_offset(address p) const {
+size_t ArchiveBuilder::any_to_offset(address p) const {
if (is_in_mapped_static_archive(p)) {
assert(CDSConfig::is_dumping_dynamic_archive(), "must be");
- return p - _mapped_static_archive_bottom;
+ return pointer_delta(p, _mapped_static_archive_bottom, 1);
}
if (!is_in_buffer_space(p)) {
// p must be a "source" address
@@ -1008,7 +1008,7 @@ uintx ArchiveBuilder::any_to_offset(address p) const {
return buffer_to_offset(p);
}
-address ArchiveBuilder::offset_to_buffered_address(u4 offset) const {
+address ArchiveBuilder::offset_to_buffered_address(size_t offset) const {
address requested_addr = _requested_static_archive_bottom + offset;
address buffered_addr = requested_addr - _buffer_to_requested_delta;
assert(is_in_buffer_space(buffered_addr), "bad offset");
diff --git a/src/hotspot/share/cds/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp
index 2284dbf70f8..b3667ea11b4 100644
--- a/src/hotspot/share/cds/archiveBuilder.hpp
+++ b/src/hotspot/share/cds/archiveBuilder.hpp
@@ -329,49 +329,22 @@ public:
return current()->buffer_to_requested_delta();
}
- inline static u4 to_offset_u4(uintx offset) {
- guarantee(offset <= MAX_SHARED_DELTA, "must be 32-bit offset " INTPTR_FORMAT, offset);
- return (u4)offset;
- }
-
public:
- static const uintx MAX_SHARED_DELTA = ArchiveUtils::MAX_SHARED_DELTA;;
-
// The address p points to an object inside the output buffer. When the archive is mapped
// at the requested address, what's the offset of this object from _requested_static_archive_bottom?
- uintx buffer_to_offset(address p) const;
+ size_t buffer_to_offset(address p) const;
- // Same as buffer_to_offset, except that the address p points to either (a) an object
- // inside the output buffer, or (b), an object in the currently mapped static archive.
- uintx any_to_offset(address p) const;
+ // Same as buffer_to_offset, except that the address p points to one of the following:
+ // - an object in the ArchiveBuilder's buffer.
+ // - an object in the currently mapped AOT cache rw/ro regions.
+ // - an object that has been copied into the ArchiveBuilder's buffer.
+ size_t any_to_offset(address p) const;
// The reverse of buffer_to_offset()
- address offset_to_buffered_address(u4 offset) const;
+ address offset_to_buffered_address(size_t offset) const;
template <typename T>
- u4 buffer_to_offset_u4(T p) const {
- uintx offset = buffer_to_offset((address)p);
- return to_offset_u4(offset);
- }
-
- template <typename T>
- u4 any_to_offset_u4(T p) const {
- assert(p != nullptr, "must not be null");
- uintx offset = any_to_offset((address)p);
- return to_offset_u4(offset);
- }
-
- template <typename T>
- u4 any_or_null_to_offset_u4(T p) const {
- if (p == nullptr) {
- return 0;
- } else {
- return any_to_offset_u4(p);
- }
- }
-
- template <typename T>
- T offset_to_buffered(u4 offset) const {
+ T offset_to_buffered(size_t offset) const {
return (T)offset_to_buffered_address(offset);
}
diff --git a/src/hotspot/share/cds/archiveUtils.cpp b/src/hotspot/share/cds/archiveUtils.cpp
index 842668509cf..c13b447bb87 100644
--- a/src/hotspot/share/cds/archiveUtils.cpp
+++ b/src/hotspot/share/cds/archiveUtils.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
*
*/
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
@@ -201,13 +202,13 @@ char* DumpRegion::expand_top_to(char* newtop) {
commit_to(newtop);
_top = newtop;
- if (_max_delta > 0) {
+ if (ArchiveBuilder::is_active() && ArchiveBuilder::current()->is_in_buffer_space(_base)) {
uintx delta = ArchiveBuilder::current()->buffer_to_offset((address)(newtop-1));
- if (delta > _max_delta) {
+ if (delta > AOTCompressedPointers::MaxMetadataOffsetBytes) {
// This is just a sanity check and should not appear in any real world usage. This
// happens only if you allocate more than 2GB of shared objects and would require
// millions of shared classes.
- aot_log_error(aot)("Out of memory in the CDS archive: Please reduce the number of shared classes.");
+ aot_log_error(aot)("Out of memory in the %s: Please reduce the number of shared classes.", CDSConfig::type_of_archive_being_written());
AOTMetaspace::unrecoverable_writing_error();
}
}
@@ -331,9 +332,8 @@ void WriteClosure::do_ptr(void** p) {
void ReadClosure::do_ptr(void** p) {
assert(*p == nullptr, "initializing previous initialized pointer.");
- intptr_t obj = nextPtr();
- assert(obj >= 0, "sanity.");
- *p = (obj != 0) ? (void*)(_base_address + obj) : (void*)obj;
+ u4 narrowp = checked_cast<u4>(nextPtr());
+ *p = AOTCompressedPointers::decode(cast_from_u4(narrowp), _base_address);
}
void ReadClosure::do_u4(u4* p) {
diff --git a/src/hotspot/share/cds/archiveUtils.hpp b/src/hotspot/share/cds/archiveUtils.hpp
index 79d894f0144..e5d1efa5eab 100644
--- a/src/hotspot/share/cds/archiveUtils.hpp
+++ b/src/hotspot/share/cds/archiveUtils.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -153,7 +153,6 @@ private:
char* _base;
char* _top;
char* _end;
- uintx _max_delta;
bool _is_packed;
ReservedSpace* _rs;
VirtualSpace* _vs;
@@ -161,9 +160,9 @@ private:
void commit_to(char* newtop);
public:
- DumpRegion(const char* name, uintx max_delta = 0)
+ DumpRegion(const char* name)
: _name(name), _base(nullptr), _top(nullptr), _end(nullptr),
- _max_delta(max_delta), _is_packed(false),
+ _is_packed(false),
_rs(nullptr), _vs(nullptr) {}
char* expand_top_to(char* newtop);
@@ -237,13 +236,13 @@ public:
class ReadClosure : public SerializeClosure {
private:
intptr_t** _ptr_array;
- intptr_t _base_address;
+ address _base_address;
inline intptr_t nextPtr() {
return *(*_ptr_array)++;
}
public:
- ReadClosure(intptr_t** ptr_array, intptr_t base_address) :
+ ReadClosure(intptr_t** ptr_array, address base_address) :
_ptr_array(ptr_array), _base_address(base_address) {}
void do_ptr(void** p);
@@ -260,7 +259,6 @@ class ArchiveUtils {
template <typename T> static Array<T>* archive_ptr_array(GrowableArray<T>* tmp_array);
public:
- static const uintx MAX_SHARED_DELTA = 0x7FFFFFFF;
static void log_to_classlist(BootstrapInfo* bootstrap_specifier, TRAPS) NOT_CDS_RETURN;
static bool has_aot_initialized_mirror(InstanceKlass* src_ik);
@@ -273,50 +271,6 @@ public:
static Array<T>* archive_array(GrowableArray<T>* tmp_array) {
return archive_ptr_array(tmp_array);
}
-
- // The following functions translate between a u4 offset and an address in the
- // the range of the mapped CDS archive (e.g., Metaspace::in_aot_cache()).
- // Since the first 16 bytes in this range are dummy data (see ArchiveBuilder::reserve_buffer()),
- // we know that offset 0 never represents a valid object. As a result, an offset of 0
- // is used to encode a nullptr.
- //
- // Use the "archived_address_or_null" variants if a nullptr may be encoded.
-
- // offset must represent an object of type T in the mapped shared space. Return
- // a direct pointer to this object.
- template <typename T> T static offset_to_archived_address(u4 offset) {
- assert(offset != 0, "sanity");
- T p = (T)(SharedBaseAddress + offset);
- assert(Metaspace::in_aot_cache(p), "must be");
- return p;
- }
-
- template <typename T> T static offset_to_archived_address_or_null(u4 offset) {
- if (offset == 0) {
- return nullptr;
- } else {
- return offset_to_archived_address(offset);
- }
- }
-
- // p must be an archived object. Get its offset from SharedBaseAddress
- template <typename T> static u4 archived_address_to_offset(T p) {
- uintx pn = (uintx)p;
- uintx base = (uintx)SharedBaseAddress;
- assert(Metaspace::in_aot_cache(p), "must be");
- assert(pn > base, "sanity"); // No valid object is stored at 0 offset from SharedBaseAddress
- uintx offset = pn - base;
- assert(offset <= MAX_SHARED_DELTA, "range check");
- return static_cast<u4>(offset);
- }
-
- template <typename T> static u4 archived_address_or_null_to_offset(T p) {
- if (p == nullptr) {
- return 0;
- } else {
- return archived_address_to_offset(p);
- }
- }
};
class HeapRootSegments {
diff --git a/src/hotspot/share/cds/dynamicArchive.cpp b/src/hotspot/share/cds/dynamicArchive.cpp
index d39cf3775e4..cd6890555d3 100644
--- a/src/hotspot/share/cds/dynamicArchive.cpp
+++ b/src/hotspot/share/cds/dynamicArchive.cpp
@@ -25,6 +25,7 @@
#include "cds/aotArtifactFinder.hpp"
#include "cds/aotClassLinker.hpp"
#include "cds/aotClassLocation.hpp"
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
@@ -75,13 +76,13 @@ public:
return 0;
}
- u4 a_offset = ArchiveBuilder::current()->any_to_offset_u4(a_name);
- u4 b_offset = ArchiveBuilder::current()->any_to_offset_u4(b_name);
+ u4 a_narrowp = cast_to_u4(AOTCompressedPointers::encode_not_null(a_name));
+ u4 b_narrowp = cast_to_u4(AOTCompressedPointers::encode_not_null(b_name));
- if (a_offset < b_offset) {
+ if (a_narrowp < b_narrowp) {
return -1;
} else {
- assert(a_offset > b_offset, "must be");
+ assert(a_narrowp > b_narrowp, "must be");
return 1;
}
}
diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp
index 7cd736885ad..da2d4f6dac2 100644
--- a/src/hotspot/share/cds/filemap.cpp
+++ b/src/hotspot/share/cds/filemap.cpp
@@ -298,11 +298,11 @@ void FileMapHeader::print(outputStream* st) {
st->print_cr("- compressed_class_ptrs: %d", _compressed_class_ptrs);
st->print_cr("- narrow_klass_pointer_bits: %d", _narrow_klass_pointer_bits);
st->print_cr("- narrow_klass_shift: %d", _narrow_klass_shift);
- st->print_cr("- cloned_vtables_offset: 0x%zx", _cloned_vtables_offset);
- st->print_cr("- early_serialized_data_offset: 0x%zx", _early_serialized_data_offset);
- st->print_cr("- serialized_data_offset: 0x%zx", _serialized_data_offset);
+ st->print_cr("- cloned_vtables: %u", cast_to_u4(_cloned_vtables));
+ st->print_cr("- early_serialized_data: %u", cast_to_u4(_early_serialized_data));
+ st->print_cr("- serialized_data: %u", cast_to_u4(_serialized_data));
st->print_cr("- jvm_ident: %s", _jvm_ident);
- st->print_cr("- class_location_config_offset: 0x%zx", _class_location_config_offset);
+ st->print_cr("- class_location_config: %d", cast_to_u4(_class_location_config));
st->print_cr("- verify_local: %d", _verify_local);
st->print_cr("- verify_remote: %d", _verify_remote);
st->print_cr("- has_platform_or_app_classes: %d", _has_platform_or_app_classes);
@@ -1325,9 +1325,7 @@ char* FileMapInfo::map_auxiliary_region(int region_index, bool read_only) {
if (VerifySharedSpaces && !r->check_region_crc(mapped_base)) {
aot_log_error(aot)("region %d CRC error", region_index);
- if (!os::unmap_memory(mapped_base, r->used_aligned())) {
- fatal("os::unmap_memory of region %d failed", region_index);
- }
+ os::unmap_memory(mapped_base, r->used_aligned());
return nullptr;
}
@@ -1654,9 +1652,7 @@ void FileMapInfo::unmap_region(int i) {
// is released. Zero it so that we don't accidentally read its content.
aot_log_info(aot)("Region #%d (%s) is in a reserved space, it will be freed when the space is released", i, shared_region_name[i]);
} else {
- if (!os::unmap_memory(mapped_base, size)) {
- fatal("os::unmap_memory failed");
- }
+ os::unmap_memory(mapped_base, size);
}
}
r->set_mapped_base(nullptr);
@@ -1767,10 +1763,6 @@ void FileMapInfo::print(outputStream* st) const {
}
}
-void FileMapHeader::set_as_offset(char* p, size_t *offset) {
- *offset = ArchiveBuilder::current()->any_to_offset((address)p);
-}
-
int FileMapHeader::compute_crc() {
char* start = (char*)this;
// start computing from the field after _header_size to end of base archive name.
diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp
index ec7b58a6d19..56b88df378a 100644
--- a/src/hotspot/share/cds/filemap.hpp
+++ b/src/hotspot/share/cds/filemap.hpp
@@ -25,6 +25,7 @@
#ifndef SHARE_CDS_FILEMAP_HPP
#define SHARE_CDS_FILEMAP_HPP
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotMappedHeap.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/aotStreamedHeap.hpp"
@@ -104,7 +105,7 @@ public:
class FileMapHeader: private CDSFileMapHeaderBase {
friend class CDSConstants;
friend class VMStructs;
-
+ using narrowPtr = AOTCompressedPointers::narrowPtr;
private:
// The following fields record the states of the VM during dump time.
// They are compared with the runtime states to see if the archive
@@ -122,16 +123,16 @@ private:
bool _compressed_class_ptrs; // save the flag UseCompressedClassPointers
int _narrow_klass_pointer_bits; // save number of bits in narrowKlass
int _narrow_klass_shift; // save shift width used to pre-compute narrowKlass IDs in archived heap objects
- size_t _cloned_vtables_offset; // The address of the first cloned vtable
- size_t _early_serialized_data_offset; // Data accessed using {ReadClosure,WriteClosure}::serialize()
- size_t _serialized_data_offset; // Data accessed using {ReadClosure,WriteClosure}::serialize()
+ narrowPtr _cloned_vtables; // The address of the first cloned vtable
+ narrowPtr _early_serialized_data; // Data accessed using {ReadClosure,WriteClosure}::serialize()
+ narrowPtr _serialized_data; // Data accessed using {ReadClosure,WriteClosure}::serialize()
// The following fields are all sanity checks for whether this archive
// will function correctly with this JVM and the bootclasspath it's
// invoked with.
char _jvm_ident[JVM_IDENT_MAX]; // identifier string of the jvm that created this dump
- size_t _class_location_config_offset;
+ narrowPtr _class_location_config;
bool _verify_local; // BytecodeVerificationLocal setting
bool _verify_remote; // BytecodeVerificationRemote setting
@@ -160,12 +161,8 @@ private:
bool _type_profile_casts;
int _spec_trap_limit_extra_entries;
- template <typename T> T from_mapped_offset(size_t offset) const {
- return (T)(mapped_base_address() + offset);
- }
- void set_as_offset(char* p, size_t *offset);
- template <typename T> void set_as_offset(T p, size_t *offset) {
- set_as_offset((char*)p, offset);
+ template <typename T> T decode(narrowPtr narrowp) const {
+ return AOTCompressedPointers::decode_not_null<T>(narrowp, reinterpret_cast<address>(mapped_base_address()));
}
public:
@@ -193,9 +190,9 @@ public:
bool compact_headers() const { return _compact_headers; }
uintx max_heap_size() const { return _max_heap_size; }
CompressedOops::Mode narrow_oop_mode() const { return _narrow_oop_mode; }
- char* cloned_vtables() const { return from_mapped_offset<char*>(_cloned_vtables_offset); }
- char* early_serialized_data() const { return from_mapped_offset<char*>(_early_serialized_data_offset); }
- char* serialized_data() const { return from_mapped_offset<char*>(_serialized_data_offset); }
+ char* cloned_vtables() const { return decode<char*>(_cloned_vtables); }
+ char* early_serialized_data() const { return decode<char*>(_early_serialized_data); }
+ char* serialized_data() const { return decode<char*>(_serialized_data); }
bool object_streaming_mode() const { return _object_streaming_mode; }
const char* jvm_ident() const { return _jvm_ident; }
char* requested_base_address() const { return _requested_base_address; }
@@ -218,9 +215,9 @@ public:
void set_mapped_heap_header(AOTMappedHeapHeader header) { _mapped_heap_header = header; }
void set_has_platform_or_app_classes(bool v) { _has_platform_or_app_classes = v; }
- void set_cloned_vtables(char* p) { set_as_offset(p, &_cloned_vtables_offset); }
- void set_early_serialized_data(char* p) { set_as_offset(p, &_early_serialized_data_offset); }
- void set_serialized_data(char* p) { set_as_offset(p, &_serialized_data_offset); }
+ void set_cloned_vtables(char* p) { _cloned_vtables = AOTCompressedPointers::encode_not_null(p); }
+ void set_early_serialized_data(char* p) { _early_serialized_data = AOTCompressedPointers::encode_not_null(p); }
+ void set_serialized_data(char* p) { _serialized_data = AOTCompressedPointers::encode_not_null(p); }
void set_mapped_base_address(char* p) { _mapped_base_address = p; }
void set_rw_ptrmap_start_pos(size_t n) { _rw_ptrmap_start_pos = n; }
void set_ro_ptrmap_start_pos(size_t n) { _ro_ptrmap_start_pos = n; }
@@ -228,11 +225,11 @@ public:
void copy_base_archive_name(const char* name);
void set_class_location_config(AOTClassLocationConfig* table) {
- set_as_offset(table, &_class_location_config_offset);
+ _class_location_config = AOTCompressedPointers::encode_not_null(table);
}
AOTClassLocationConfig* class_location_config() {
- return from_mapped_offset<AOTClassLocationConfig*>(_class_location_config_offset);
+ return decode<AOTClassLocationConfig*>(_class_location_config);
}
void set_requested_base(char* b) {
diff --git a/src/hotspot/share/cds/heapShared.cpp b/src/hotspot/share/cds/heapShared.cpp
index 69a098c67b3..c01e6ded25a 100644
--- a/src/hotspot/share/cds/heapShared.cpp
+++ b/src/hotspot/share/cds/heapShared.cpp
@@ -25,6 +25,7 @@
#include "cds/aotArtifactFinder.hpp"
#include "cds/aotClassInitializer.hpp"
#include "cds/aotClassLocation.hpp"
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMappedHeapLoader.hpp"
#include "cds/aotMappedHeapWriter.hpp"
@@ -1148,8 +1149,7 @@ public:
ArchivedKlassSubGraphInfoRecord* record = HeapShared::archive_subgraph_info(&info);
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
- u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
- _writer->add(hash, delta);
+ _writer->add(hash, AOTCompressedPointers::encode_not_null(record));
}
return true; // keep on iterating
}
diff --git a/src/hotspot/share/cds/lambdaFormInvokers.cpp b/src/hotspot/share/cds/lambdaFormInvokers.cpp
index 19dae28c5b5..3ff5705b79d 100644
--- a/src/hotspot/share/cds/lambdaFormInvokers.cpp
+++ b/src/hotspot/share/cds/lambdaFormInvokers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "cds/aotClassFilter.hpp"
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/cdsConfig.hpp"
@@ -52,7 +53,7 @@
#include "runtime/mutexLocker.hpp"
GrowableArrayCHeap<char*, mtClassShared>* LambdaFormInvokers::_lambdaform_lines = nullptr;
-Array<u4>* LambdaFormInvokers::_static_archive_invokers = nullptr;
+Array<LambdaFormInvokers::narrowPtr>* LambdaFormInvokers::_static_archive_invokers = nullptr;
static bool _stop_appending = false;
#define NUM_FILTER 4
@@ -252,7 +253,7 @@ void LambdaFormInvokers::dump_static_archive_invokers() {
}
}
if (count > 0) {
- _static_archive_invokers = ArchiveBuilder::new_ro_array<u4>(count);
+ _static_archive_invokers = ArchiveBuilder::new_ro_array<narrowPtr>(count);
int index = 0;
for (int i = 0; i < len; i++) {
char* str = _lambdaform_lines->at(i);
@@ -261,7 +262,7 @@ void LambdaFormInvokers::dump_static_archive_invokers() {
Array<char>* line = ArchiveBuilder::new_ro_array<char>((int)str_len);
strncpy(line->adr_at(0), str, str_len);
- _static_archive_invokers->at_put(index, ArchiveBuilder::current()->any_to_offset_u4(line));
+ _static_archive_invokers->at_put(index, AOTCompressedPointers::encode_not_null(line));
index++;
}
}
@@ -274,8 +275,8 @@ void LambdaFormInvokers::dump_static_archive_invokers() {
void LambdaFormInvokers::read_static_archive_invokers() {
if (_static_archive_invokers != nullptr) {
for (int i = 0; i < _static_archive_invokers->length(); i++) {
- u4 offset = _static_archive_invokers->at(i);
- Array<char>* line = ArchiveUtils::offset_to_archived_address<Array<char>*>(offset);
+ narrowPtr encoded = _static_archive_invokers->at(i);
+ Array<char>* line = AOTCompressedPointers::decode_not_null<Array<char>*>(encoded);
char* str = line->adr_at(0);
append(str);
}
diff --git a/src/hotspot/share/cds/lambdaFormInvokers.hpp b/src/hotspot/share/cds/lambdaFormInvokers.hpp
index 583a863a1c2..9b91850f5b1 100644
--- a/src/hotspot/share/cds/lambdaFormInvokers.hpp
+++ b/src/hotspot/share/cds/lambdaFormInvokers.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,8 @@
#ifndef SHARE_CDS_LAMBDAFORMINVOKERS_HPP
#define SHARE_CDS_LAMBDAFORMINVOKERS_HPP
+
+#include "cds/aotCompressedPointers.hpp"
#include "memory/allStatic.hpp"
#include "oops/oopHandle.hpp"
#include "runtime/handles.hpp"
@@ -35,10 +37,11 @@ class Array;
class SerializeClosure;
class LambdaFormInvokers : public AllStatic {
+ using narrowPtr = AOTCompressedPointers::narrowPtr;
private:
static GrowableArrayCHeap<char*, mtClassShared>* _lambdaform_lines;
// For storing LF form lines (LF_RESOLVE only) in read only table.
- static Array<u4>* _static_archive_invokers;
+ static Array<narrowPtr>* _static_archive_invokers;
static void regenerate_class(char* name, ClassFileStream& st, TRAPS);
public:
static void append(char* line);
diff --git a/src/hotspot/share/cds/lambdaProxyClassDictionary.cpp b/src/hotspot/share/cds/lambdaProxyClassDictionary.cpp
index d091067c116..4d212dbf2c2 100644
--- a/src/hotspot/share/cds/lambdaProxyClassDictionary.cpp
+++ b/src/hotspot/share/cds/lambdaProxyClassDictionary.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "cds/aotClassFilter.hpp"
+#include "cds/aotCompressedPointers.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cdsProtectionDomain.hpp"
@@ -49,11 +50,11 @@ unsigned int LambdaProxyClassKey::hash() const {
}
unsigned int RunTimeLambdaProxyClassKey::hash() const {
- return primitive_hash(_caller_ik) +
- primitive_hash(_invoked_name) +
- primitive_hash(_invoked_type) +
- primitive_hash(_method_type) +
- primitive_hash(_instantiated_method_type);
+ return primitive_hash(cast_to_u4(_caller_ik)) +
+ primitive_hash(cast_to_u4(_invoked_name)) +
+ primitive_hash(cast_to_u4(_invoked_type)) +
+ primitive_hash(cast_to_u4(_method_type)) +
+ primitive_hash(cast_to_u4(_instantiated_method_type));
}
#ifndef PRODUCT
@@ -71,12 +72,12 @@ void LambdaProxyClassKey::print_on(outputStream* st) const {
void RunTimeLambdaProxyClassKey::print_on(outputStream* st) const {
ResourceMark rm;
st->print_cr("LambdaProxyClassKey : " INTPTR_FORMAT " hash: %0x08x", p2i(this), hash());
- st->print_cr("_caller_ik : %d", _caller_ik);
- st->print_cr("_instantiated_method_type : %d", _instantiated_method_type);
- st->print_cr("_invoked_name : %d", _invoked_name);
- st->print_cr("_invoked_type : %d", _invoked_type);
- st->print_cr("_member_method : %d", _member_method);
- st->print_cr("_method_type : %d", _method_type);
+ st->print_cr("_caller_ik : %d", cast_to_u4(_caller_ik));
+ st->print_cr("_instantiated_method_type : %d", cast_to_u4(_instantiated_method_type));
+ st->print_cr("_invoked_name : %d", cast_to_u4(_invoked_name));
+ st->print_cr("_invoked_type : %d", cast_to_u4(_invoked_type));
+ st->print_cr("_member_method : %d", cast_to_u4(_member_method));
+ st->print_cr("_method_type : %d", cast_to_u4(_method_type));
}
void RunTimeLambdaProxyClassInfo::print_on(outputStream* st) const {
@@ -418,8 +419,7 @@ public:
(RunTimeLambdaProxyClassInfo*)ArchiveBuilder::ro_region_alloc(byte_size);
runtime_info->init(key, info);
unsigned int hash = runtime_info->hash();
- u4 delta = _builder->any_to_offset_u4((void*)runtime_info);
- _writer->add(hash, delta);
+ _writer->add(hash, AOTCompressedPointers::encode_not_null(runtime_info));
return true;
}
};
diff --git a/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp b/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp
index 91e508bfdc5..dfb75532917 100644
--- a/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp
+++ b/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,9 @@
#ifndef SHARE_CDS_LAMBDAPROXYCLASSINFO_HPP
#define SHARE_CDS_LAMBDAPROXYCLASSINFO_HPP
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotMetaspace.hpp"
-#include "cds/archiveBuilder.hpp"
+#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/metaspaceClosure.hpp"
#include "utilities/growableArray.hpp"
@@ -132,19 +133,20 @@ public:
};
class RunTimeLambdaProxyClassKey {
- u4 _caller_ik;
- u4 _invoked_name;
- u4 _invoked_type;
- u4 _method_type;
- u4 _member_method;
- u4 _instantiated_method_type;
+ using narrowPtr = AOTCompressedPointers::narrowPtr;
+ narrowPtr _caller_ik;
+ narrowPtr _invoked_name;
+ narrowPtr _invoked_type;
+ narrowPtr _method_type;
+ narrowPtr _member_method;
+ narrowPtr _instantiated_method_type;
- RunTimeLambdaProxyClassKey(u4 caller_ik,
- u4 invoked_name,
- u4 invoked_type,
- u4 method_type,
- u4 member_method,
- u4 instantiated_method_type) :
+ RunTimeLambdaProxyClassKey(narrowPtr caller_ik,
+ narrowPtr invoked_name,
+ narrowPtr invoked_type,
+ narrowPtr method_type,
+ narrowPtr member_method,
+ narrowPtr instantiated_method_type) :
_caller_ik(caller_ik),
_invoked_name(invoked_name),
_invoked_type(invoked_type),
@@ -154,15 +156,12 @@ class RunTimeLambdaProxyClassKey {
public:
static RunTimeLambdaProxyClassKey init_for_dumptime(LambdaProxyClassKey& key) {
- assert(ArchiveBuilder::is_active(), "sanity");
- ArchiveBuilder* b = ArchiveBuilder::current();
-
- u4 caller_ik = b->any_to_offset_u4(key.caller_ik());
- u4 invoked_name = b->any_to_offset_u4(key.invoked_name());
- u4 invoked_type = b->any_to_offset_u4(key.invoked_type());
- u4 method_type = b->any_to_offset_u4(key.method_type());
- u4 member_method = b->any_or_null_to_offset_u4(key.member_method()); // could be null
- u4 instantiated_method_type = b->any_to_offset_u4(key.instantiated_method_type());
+ narrowPtr caller_ik = AOTCompressedPointers::encode_not_null(key.caller_ik());
+ narrowPtr invoked_name = AOTCompressedPointers::encode_not_null(key.invoked_name());
+ narrowPtr invoked_type = AOTCompressedPointers::encode_not_null(key.invoked_type());
+ narrowPtr method_type = AOTCompressedPointers::encode_not_null(key.method_type());
+ narrowPtr member_method = AOTCompressedPointers::encode(key.member_method()); // could be null
+ narrowPtr instantiated_method_type = AOTCompressedPointers::encode_not_null(key.instantiated_method_type());
return RunTimeLambdaProxyClassKey(caller_ik, invoked_name, invoked_type, method_type,
member_method, instantiated_method_type);
@@ -176,12 +175,12 @@ public:
Symbol* instantiated_method_type) {
// All parameters must be in shared space, or else you'd get an assert in
// ArchiveUtils::to_offset().
- return RunTimeLambdaProxyClassKey(ArchiveUtils::archived_address_to_offset(caller_ik),
- ArchiveUtils::archived_address_to_offset(invoked_name),
- ArchiveUtils::archived_address_to_offset(invoked_type),
- ArchiveUtils::archived_address_to_offset(method_type),
- ArchiveUtils::archived_address_or_null_to_offset(member_method), // could be null
- ArchiveUtils::archived_address_to_offset(instantiated_method_type));
+ return RunTimeLambdaProxyClassKey(AOTCompressedPointers::encode_address_in_cache(caller_ik),
+ AOTCompressedPointers::encode_address_in_cache(invoked_name),
+ AOTCompressedPointers::encode_address_in_cache(invoked_type),
+ AOTCompressedPointers::encode_address_in_cache(method_type),
+ AOTCompressedPointers::encode_address_in_cache_or_null(member_method), // could be null
+ AOTCompressedPointers::encode_address_in_cache(instantiated_method_type));
}
unsigned int hash() const;
diff --git a/src/hotspot/share/cds/runTimeClassInfo.cpp b/src/hotspot/share/cds/runTimeClassInfo.cpp
index fe940ca6c18..a1f50ab4ffa 100644
--- a/src/hotspot/share/cds/runTimeClassInfo.cpp
+++ b/src/hotspot/share/cds/runTimeClassInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,15 +22,15 @@
*
*/
+#include "cds/aotCompressedPointers.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/dumpTimeClassInfo.hpp"
#include "cds/runTimeClassInfo.hpp"
#include "classfile/systemDictionaryShared.hpp"
void RunTimeClassInfo::init(DumpTimeClassInfo& info) {
- ArchiveBuilder* builder = ArchiveBuilder::current();
InstanceKlass* k = info._klass;
- _klass_offset = builder->any_to_offset_u4(k);
+ _klass = AOTCompressedPointers::encode_not_null(k);
if (!SystemDictionaryShared::is_builtin(k)) {
CrcInfo* c = crc();
@@ -50,8 +50,8 @@ void RunTimeClassInfo::init(DumpTimeClassInfo& info) {
RTVerifierConstraint* vf_constraints = verifier_constraints();
char* flags = verifier_constraint_flags();
for (i = 0; i < _num_verifier_constraints; i++) {
- vf_constraints[i]._name = builder->any_to_offset_u4(info._verifier_constraints->at(i).name());
- vf_constraints[i]._from_name = builder->any_or_null_to_offset_u4(info._verifier_constraints->at(i).from_name());
+ vf_constraints[i]._name = AOTCompressedPointers::encode_not_null(info._verifier_constraints->at(i).name());
+ vf_constraints[i]._from_name = AOTCompressedPointers::encode(info._verifier_constraints->at(i).from_name());
}
for (i = 0; i < _num_verifier_constraints; i++) {
flags[i] = info._verifier_constraint_flags->at(i);
@@ -61,14 +61,14 @@ void RunTimeClassInfo::init(DumpTimeClassInfo& info) {
if (_num_loader_constraints > 0) {
RTLoaderConstraint* ld_constraints = loader_constraints();
for (i = 0; i < _num_loader_constraints; i++) {
- ld_constraints[i]._name = builder->any_to_offset_u4(info._loader_constraints->at(i).name());
+ ld_constraints[i]._name = AOTCompressedPointers::encode_not_null(info._loader_constraints->at(i).name());
ld_constraints[i]._loader_type1 = info._loader_constraints->at(i).loader_type1();
ld_constraints[i]._loader_type2 = info._loader_constraints->at(i).loader_type2();
}
}
if (k->is_hidden() && info.nest_host() != nullptr) {
- _nest_host_offset = builder->any_to_offset_u4(info.nest_host());
+ _nest_host = AOTCompressedPointers::encode_not_null(info.nest_host());
}
if (k->has_archived_enum_objs()) {
int num = info.num_enum_klass_static_fields();
@@ -83,11 +83,12 @@ void RunTimeClassInfo::init(DumpTimeClassInfo& info) {
InstanceKlass* RunTimeClassInfo::klass() const {
if (AOTMetaspace::in_aot_cache(this)) {
// is inside a mmaped CDS archive.
- return ArchiveUtils::offset_to_archived_address(_klass_offset);
+ return AOTCompressedPointers::decode_not_null(_klass);
} else {
// is a temporary copy of a RunTimeClassInfo that's being initialized
// by the ArchiveBuilder.
- return ArchiveBuilder::current()->offset_to_buffered(_klass_offset);
+ size_t byte_offset = AOTCompressedPointers::get_byte_offset(_klass);
+ return ArchiveBuilder::current()->offset_to_buffered(byte_offset);
}
}
diff --git a/src/hotspot/share/cds/runTimeClassInfo.hpp b/src/hotspot/share/cds/runTimeClassInfo.hpp
index 371924f9065..d63a04698bb 100644
--- a/src/hotspot/share/cds/runTimeClassInfo.hpp
+++ b/src/hotspot/share/cds/runTimeClassInfo.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_CDS_RUNTIMECLASSINFO_HPP
#define SHARE_CDS_RUNTIMECLASSINFO_HPP
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
@@ -41,8 +42,10 @@ class Method;
class Symbol;
class RunTimeClassInfo {
- public:
- enum : char {
+ using narrowPtr = AOTCompressedPointers::narrowPtr;
+
+public:
+ enum : char {
FROM_FIELD_IS_PROTECTED = 1 << 0,
FROM_IS_ARRAY = 1 << 1,
FROM_IS_OBJECT = 1 << 2
@@ -56,19 +59,19 @@ class RunTimeClassInfo {
// This is different than DumpTimeClassInfo::DTVerifierConstraint. We use
// u4 instead of Symbol* to save space on 64-bit CPU.
struct RTVerifierConstraint {
- u4 _name;
- u4 _from_name;
- Symbol* name() { return ArchiveUtils::offset_to_archived_address(_name); }
+ narrowPtr _name;
+ narrowPtr _from_name;
+ Symbol* name() { return AOTCompressedPointers::decode_not_null(_name); }
Symbol* from_name() {
- return (_from_name == 0) ? nullptr : ArchiveUtils::offset_to_archived_address(_from_name);
+ return AOTCompressedPointers::decode(_from_name);
}
};
struct RTLoaderConstraint {
- u4 _name;
+ narrowPtr _name;
char _loader_type1;
char _loader_type2;
- Symbol* constraint_name() { return ArchiveUtils::offset_to_archived_address(_name); }
+ Symbol* constraint_name() { return AOTCompressedPointers::decode_not_null(_name); }
};
struct RTEnumKlassStaticFields {
int _num;
@@ -76,8 +79,8 @@ class RunTimeClassInfo {
};
private:
- u4 _klass_offset;
- u4 _nest_host_offset;
+ narrowPtr _klass;
+ narrowPtr _nest_host;
int _num_verifier_constraints;
int _num_loader_constraints;
@@ -185,7 +188,7 @@ public:
InstanceKlass* nest_host() {
assert(!ArchiveBuilder::is_active(), "not called when dumping archive");
- return ArchiveUtils::offset_to_archived_address_or_null(_nest_host_offset);
+ return AOTCompressedPointers::decode(_nest_host); // may be null
}
RTLoaderConstraint* loader_constraints() {
diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp
index 817d0c64d11..c1f00cbe536 100644
--- a/src/hotspot/share/classfile/classFileParser.cpp
+++ b/src/hotspot/share/classfile/classFileParser.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1017,7 +1017,8 @@ public:
};
-static int skip_annotation_value(const u1*, int, int); // fwd decl
+static int skip_annotation_value(const u1* buffer, int limit, int index, int recursion_depth); // fwd decl
+static const int max_recursion_depth = 5;
// Safely increment index by val if does not pass limit
#define SAFE_ADD(index, limit, val) \
@@ -1025,23 +1026,29 @@ if (index >= limit - val) return limit; \
index += val;
// Skip an annotation. Return >=limit if there is any problem.
-static int skip_annotation(const u1* buffer, int limit, int index) {
+static int skip_annotation(const u1* buffer, int limit, int index, int recursion_depth = 0) {
assert(buffer != nullptr, "invariant");
+ if (recursion_depth > max_recursion_depth) {
+ return limit;
+ }
// annotation := atype:u2 do(nmem:u2) {member:u2 value}
// value := switch (tag:u1) { ... }
SAFE_ADD(index, limit, 4); // skip atype and read nmem
int nmem = Bytes::get_Java_u2((address)buffer + index - 2);
while (--nmem >= 0 && index < limit) {
SAFE_ADD(index, limit, 2); // skip member
- index = skip_annotation_value(buffer, limit, index);
+ index = skip_annotation_value(buffer, limit, index, recursion_depth + 1);
}
return index;
}
// Skip an annotation value. Return >=limit if there is any problem.
-static int skip_annotation_value(const u1* buffer, int limit, int index) {
+static int skip_annotation_value(const u1* buffer, int limit, int index, int recursion_depth) {
assert(buffer != nullptr, "invariant");
+ if (recursion_depth > max_recursion_depth) {
+ return limit;
+ }
// value := switch (tag:u1) {
// case B, C, I, S, Z, D, F, J, c: con:u2;
// case e: e_class:u2 e_name:u2;
@@ -1073,12 +1080,12 @@ static int skip_annotation_value(const u1* buffer, int limit, int index) {
SAFE_ADD(index, limit, 2); // read nval
int nval = Bytes::get_Java_u2((address)buffer + index - 2);
while (--nval >= 0 && index < limit) {
- index = skip_annotation_value(buffer, limit, index);
+ index = skip_annotation_value(buffer, limit, index, recursion_depth + 1);
}
}
break;
case '@':
- index = skip_annotation(buffer, limit, index);
+ index = skip_annotation(buffer, limit, index, recursion_depth + 1);
break;
default:
return limit; // bad tag byte
diff --git a/src/hotspot/share/classfile/compactHashtable.hpp b/src/hotspot/share/classfile/compactHashtable.hpp
index 944fb876521..2fe92be0f6d 100644
--- a/src/hotspot/share/classfile/compactHashtable.hpp
+++ b/src/hotspot/share/classfile/compactHashtable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_CLASSFILE_COMPACTHASHTABLE_HPP
#define SHARE_CLASSFILE_COMPACTHASHTABLE_HPP
+#include "cds/aotCompressedPointers.hpp"
#include "cds/cds_globals.hpp"
#include "oops/array.hpp"
#include "oops/symbol.hpp"
@@ -123,6 +124,9 @@ public:
~CompactHashtableWriter();
void add(unsigned int hash, u4 encoded_value);
+ void add(unsigned int hash, AOTCompressedPointers::narrowPtr encoded_value) {
+ add(hash, cast_to_u4(encoded_value));
+ }
void dump(SimpleCompactHashtable *cht, const char* table_name);
private:
@@ -371,11 +375,11 @@ public:
//
// OffsetCompactHashtable -- This is used to store many types of objects
// in the CDS archive. On 64-bit platforms, we save space by using a 32-bit
-// offset from the CDS base address.
+// narrowPtr from the CDS base address.
template <typename V>
-inline V read_value_from_compact_hashtable(address base_address, u4 offset) {
- return (V)(base_address + offset);
+inline V read_value_from_compact_hashtable(address base_address, u4 narrowp) {
+ return AOTCompressedPointers::decode_not_null(cast_from_u4(narrowp), base_address);
}
template <
diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp
index c49aa10fa0d..20aa7f0776d 100644
--- a/src/hotspot/share/classfile/symbolTable.cpp
+++ b/src/hotspot/share/classfile/symbolTable.cpp
@@ -22,6 +22,7 @@
*
*/
+#include "cds/aotCompressedPointers.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/dynamicArchive.hpp"
@@ -690,7 +691,7 @@ void SymbolTable::copy_shared_symbol_table(GrowableArray<Symbol*>* symbols,
assert(fixed_hash == hash_symbol((const char*)sym->bytes(), sym->utf8_length(), false),
"must not rehash during dumping");
sym->set_permanent();
- writer->add(fixed_hash, builder->buffer_to_offset_u4((address)sym));
+ writer->add(fixed_hash, AOTCompressedPointers::encode_not_null(sym));
}
}
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp
index afc190c36cf..cfb20412ab8 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "cds/aotClassFilter.hpp"
#include "cds/aotClassLocation.hpp"
+#include "cds/aotCompressedPointers.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
@@ -1282,11 +1283,10 @@ unsigned int SystemDictionaryShared::hash_for_shared_dictionary(address ptr) {
class CopySharedClassInfoToArchive : StackObj {
CompactHashtableWriter* _writer;
bool _is_builtin;
- ArchiveBuilder *_builder;
public:
CopySharedClassInfoToArchive(CompactHashtableWriter* writer,
bool is_builtin)
- : _writer(writer), _is_builtin(is_builtin), _builder(ArchiveBuilder::current()) {}
+ : _writer(writer), _is_builtin(is_builtin) {}
void do_entry(InstanceKlass* k, DumpTimeClassInfo& info) {
if (!info.is_excluded() && info.is_builtin() == _is_builtin) {
@@ -1299,11 +1299,10 @@ public:
Symbol* name = info._klass->name();
name = ArchiveBuilder::current()->get_buffered_addr(name);
hash = SystemDictionaryShared::hash_for_shared_dictionary((address)name);
- u4 delta = _builder->buffer_to_offset_u4((address)record);
if (_is_builtin && info._klass->is_hidden()) {
// skip
} else {
- _writer->add(hash, delta);
+ _writer->add(hash, AOTCompressedPointers::encode_not_null(record));
}
if (log_is_enabled(Trace, aot, hashtables)) {
ResourceMark rm;
diff --git a/src/hotspot/share/code/aotCodeCache.cpp b/src/hotspot/share/code/aotCodeCache.cpp
index f51c068f1e7..32a53691f3f 100644
--- a/src/hotspot/share/code/aotCodeCache.cpp
+++ b/src/hotspot/share/code/aotCodeCache.cpp
@@ -399,7 +399,7 @@ AOTCodeCache::~AOTCodeCache() {
}
}
-void AOTCodeCache::Config::record() {
+void AOTCodeCache::Config::record(uint cpu_features_offset) {
_flags = 0;
#ifdef ASSERT
_flags |= debugVM;
@@ -430,9 +430,50 @@ void AOTCodeCache::Config::record() {
_compressedKlassShift = CompressedKlassPointers::shift();
_contendedPaddingWidth = ContendedPaddingWidth;
_gc = (uint)Universe::heap()->kind();
+ _cpu_features_offset = cpu_features_offset;
}
-bool AOTCodeCache::Config::verify() const {
+bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
+ LogStreamHandle(Debug, aot, codecache, init) log;
+ uint offset = _cpu_features_offset;
+ uint cpu_features_size = *(uint *)cache->addr(offset);
+ assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
+ offset += sizeof(uint);
+
+ void* cached_cpu_features_buffer = (void *)cache->addr(offset);
+ if (log.is_enabled()) {
+ ResourceMark rm; // required for stringStream::as_string()
+ stringStream ss;
+ VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
+ log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
+ }
+
+ if (VM_Version::supports_features(cached_cpu_features_buffer)) {
+ if (log.is_enabled()) {
+ ResourceMark rm; // required for stringStream::as_string()
+ stringStream ss;
+ char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
+ VM_Version::store_cpu_features(runtime_cpu_features);
+ VM_Version::get_missing_features_name(runtime_cpu_features, cached_cpu_features_buffer, ss);
+ if (!ss.is_empty()) {
+ log.print_cr("Additional runtime CPU features: %s", ss.as_string());
+ }
+ }
+ } else {
+ if (log.is_enabled()) {
+ ResourceMark rm; // required for stringStream::as_string()
+ stringStream ss;
+ char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
+ VM_Version::store_cpu_features(runtime_cpu_features);
+ VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, ss);
+ log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
+ }
+ return false;
+ }
+ return true;
+}
+
+bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
// First checks affect all cached AOT code
#ifdef ASSERT
if ((_flags & debugVM) == 0) {
@@ -478,6 +519,9 @@ bool AOTCodeCache::Config::verify() const {
AOTStubCaching = false;
}
+ if (!verify_cpu_features(cache)) {
+ return false;
+ }
return true;
}
@@ -679,6 +723,17 @@ extern "C" {
}
}
+void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
+ uint* size_ptr = (uint *)buffer;
+ *size_ptr = buffer_size;
+ buffer += sizeof(uint);
+
+ VM_Version::store_cpu_features(buffer);
+ log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
+ buffer += buffer_size;
+ buffer = align_up(buffer, DATA_ALIGNMENT);
+}
+
bool AOTCodeCache::finish_write() {
if (!align_write()) {
return false;
@@ -698,23 +753,32 @@ bool AOTCodeCache::finish_write() {
uint store_count = _store_entries_cnt;
if (store_count > 0) {
- uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
+ uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
uint code_count = store_count;
uint search_count = code_count * 2;
uint search_size = search_count * sizeof(uint);
uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
// _write_position includes size of code and strings
uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
- uint total_size = header_size + _write_position + code_alignment + search_size + entries_size;
+ uint cpu_features_size = VM_Version::cpu_features_size();
+ uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
+ uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
+ align_up(total_cpu_features_size, DATA_ALIGNMENT);
assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
- // Create ordered search table for entries [id, index];
- uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
// Allocate in AOT Cache buffer
char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
char* start = align_up(buffer, DATA_ALIGNMENT);
char* current = start + header_size; // Skip header
+ uint cpu_features_offset = current - start;
+ store_cpu_features(current, cpu_features_size);
+ assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
+ assert(current < start + total_size, "sanity check");
+
+ // Create ordered search table for entries [id, index];
+ uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
+
AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
uint adapters_count = 0;
uint shared_blobs_count = 0;
@@ -790,7 +854,7 @@ bool AOTCodeCache::finish_write() {
header->init(size, (uint)strings_count, strings_offset,
entries_count, new_entries_offset,
adapters_count, shared_blobs_count,
- C1_blobs_count, C2_blobs_count);
+ C1_blobs_count, C2_blobs_count, cpu_features_offset);
log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
}
diff --git a/src/hotspot/share/code/aotCodeCache.hpp b/src/hotspot/share/code/aotCodeCache.hpp
index 778ad34e448..45b4a15510d 100644
--- a/src/hotspot/share/code/aotCodeCache.hpp
+++ b/src/hotspot/share/code/aotCodeCache.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -185,10 +185,12 @@ protected:
restrictContendedPadding = 128
};
uint _flags;
+ uint _cpu_features_offset; // offset in the cache where cpu features are stored
public:
- void record();
- bool verify() const;
+ void record(uint cpu_features_offset);
+ bool verify_cpu_features(AOTCodeCache* cache) const;
+ bool verify(AOTCodeCache* cache) const;
};
class Header : public CHeapObj<mtCode> {
@@ -206,14 +208,15 @@ protected:
uint _shared_blobs_count;
uint _C1_blobs_count;
uint _C2_blobs_count;
- Config _config;
+ Config _config; // must be the last element as there is trailing data stored immediately after Config
public:
void init(uint cache_size,
uint strings_count, uint strings_offset,
uint entries_count, uint entries_offset,
uint adapters_count, uint shared_blobs_count,
- uint C1_blobs_count, uint C2_blobs_count) {
+ uint C1_blobs_count, uint C2_blobs_count,
+ uint cpu_features_offset) {
_version = AOT_CODE_VERSION;
_cache_size = cache_size;
_strings_count = strings_count;
@@ -224,7 +227,7 @@ protected:
_shared_blobs_count = shared_blobs_count;
_C1_blobs_count = C1_blobs_count;
_C2_blobs_count = C2_blobs_count;
- _config.record();
+ _config.record(cpu_features_offset);
}
@@ -239,8 +242,8 @@ protected:
uint C2_blobs_count() const { return _C2_blobs_count; }
bool verify(uint load_size) const;
- bool verify_config() const { // Called after Universe initialized
- return _config.verify();
+ bool verify_config(AOTCodeCache* cache) const { // Called after Universe initialized
+ return _config.verify(cache);
}
};
@@ -320,6 +323,8 @@ public:
AOTCodeEntry* find_entry(AOTCodeEntry::Kind kind, uint id);
+ void store_cpu_features(char*& buffer, uint buffer_size);
+
bool finish_write();
bool write_relocations(CodeBlob& code_blob);
@@ -361,7 +366,7 @@ private:
static bool open_cache(bool is_dumping, bool is_using);
bool verify_config() {
if (for_use()) {
- return _load_header->verify_config();
+ return _load_header->verify_config(this);
}
return true;
}
diff --git a/src/hotspot/share/code/codeBlob.cpp b/src/hotspot/share/code/codeBlob.cpp
index 094b4f82cf0..fcc0b42a461 100644
--- a/src/hotspot/share/code/codeBlob.cpp
+++ b/src/hotspot/share/code/codeBlob.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -336,6 +336,7 @@ RuntimeBlob::RuntimeBlob(
void RuntimeBlob::free(RuntimeBlob* blob) {
assert(blob != nullptr, "caller must check for nullptr");
+ MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
blob->purge();
{
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
index 481eb51bd5c..2a0256cc316 100644
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -1139,7 +1139,7 @@ size_t CodeCache::freelists_length() {
void icache_init();
void CodeCache::initialize() {
- assert(CodeCacheSegmentSize >= (size_t)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
+ assert(CodeCacheSegmentSize >= CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
assert(CodeCacheSegmentSize >= (size_t)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
diff --git a/src/hotspot/share/compiler/compilerDefinitions.cpp b/src/hotspot/share/compiler/compilerDefinitions.cpp
index aed1edc0db5..75d84bedcc6 100644
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -406,7 +406,7 @@ void CompilerConfig::set_compilation_policy_flags() {
if (CompilerConfig::is_tiered() && CompilerConfig::is_c2_enabled()) {
#ifdef COMPILER2
// Some inlining tuning
-#if defined(X86) || defined(AARCH64) || defined(RISCV64)
+#if defined(X86) || defined(AARCH64) || defined(RISCV64) || defined(PPC64)
if (FLAG_IS_DEFAULT(InlineSmallCode)) {
FLAG_SET_DEFAULT(InlineSmallCode, 2500);
}
diff --git a/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp b/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp
index c695ad977fe..fd70796251d 100644
--- a/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp
+++ b/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp
@@ -24,10 +24,9 @@
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1HeapRegion.inline.hpp"
+#include "gc/g1/g1RegionToSpaceMapper.hpp"
+#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "logging/log.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/java.hpp"
#include "runtime/os.hpp"
size_t G1BlockOffsetTable::compute_size(size_t mem_region_words) {
@@ -52,6 +51,12 @@ void G1BlockOffsetTable::set_offset_array(Atomic<uint8_t>* addr, uint8_t offset)
addr->store_relaxed(offset);
}
+static void check_offset(size_t offset, const char* msg) {
+ assert(offset < CardTable::card_size_in_words(),
+ "%s - offset: %zu, N_words: %u",
+ msg, offset, CardTable::card_size_in_words());
+}
+
void G1BlockOffsetTable::set_offset_array(Atomic<uint8_t>* addr, HeapWord* high, HeapWord* low) {
assert(high >= low, "addresses out of order");
size_t offset = pointer_delta(high, low);
diff --git a/src/hotspot/share/gc/g1/g1BlockOffsetTable.hpp b/src/hotspot/share/gc/g1/g1BlockOffsetTable.hpp
index 89c68ce96d2..21d447549a6 100644
--- a/src/hotspot/share/gc/g1/g1BlockOffsetTable.hpp
+++ b/src/hotspot/share/gc/g1/g1BlockOffsetTable.hpp
@@ -37,19 +37,12 @@
// for each such subregion indicates how far back one must go to find the
// start of the chunk that includes the first word of the subregion.
class G1BlockOffsetTable : public CHeapObj<mtGC> {
-private:
// The reserved region covered by the table.
MemRegion _reserved;
// Biased array-start of BOT array for fast BOT entry translation
Atomic<uint8_t>* _offset_base;
- void check_offset(size_t offset, const char* msg) const {
- assert(offset < CardTable::card_size_in_words(),
- "%s - offset: %zu, N_words: %u",
- msg, offset, CardTable::card_size_in_words());
- }
-
// Bounds checking accessors:
// For performance these have to devolve to array accesses in product builds.
inline uint8_t offset_array(Atomic<uint8_t>* addr) const;
@@ -85,7 +78,6 @@ private:
}
public:
-
// Return the number of slots needed for an offset array
// that covers mem_region_words words.
static size_t compute_size(size_t mem_region_words);
@@ -99,22 +91,14 @@ public:
// in the heap parameter.
G1BlockOffsetTable(MemRegion heap, G1RegionToSpaceMapper* storage);
- static bool is_crossing_card_boundary(HeapWord* const obj_start,
- HeapWord* const obj_end) {
- HeapWord* cur_card_boundary = align_up_by_card_size(obj_start);
- // strictly greater-than
- return obj_end > cur_card_boundary;
- }
+ inline static bool is_crossing_card_boundary(HeapWord* const obj_start,
+ HeapWord* const obj_end);
// Returns the address of the start of the block reaching into the card containing
// "addr".
inline HeapWord* block_start_reaching_into_card(const void* addr) const;
- void update_for_block(HeapWord* blk_start, HeapWord* blk_end) {
- if (is_crossing_card_boundary(blk_start, blk_end)) {
- update_for_block_work(blk_start, blk_end);
- }
- }
+ inline void update_for_block(HeapWord* blk_start, HeapWord* blk_end);
};
#endif // SHARE_GC_G1_G1BLOCKOFFSETTABLE_HPP
diff --git a/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp b/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp
index 0d809b65526..b707e310781 100644
--- a/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp
@@ -27,10 +27,7 @@
#include "gc/g1/g1BlockOffsetTable.hpp"
-#include "gc/g1/g1HeapRegion.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/memset_with_concurrent_readers.hpp"
-#include "oops/oop.inline.hpp"
inline HeapWord* G1BlockOffsetTable::block_start_reaching_into_card(const void* addr) const {
assert(_reserved.contains(addr), "invalid address");
@@ -70,4 +67,17 @@ inline HeapWord* G1BlockOffsetTable::addr_for_entry(const Atomic<uint8_t>* const
return result;
}
+inline bool G1BlockOffsetTable::is_crossing_card_boundary(HeapWord* const obj_start,
+ HeapWord* const obj_end) {
+ HeapWord* cur_card_boundary = align_up_by_card_size(obj_start);
+ // strictly greater-than
+ return obj_end > cur_card_boundary;
+}
+
+inline void G1BlockOffsetTable::update_for_block(HeapWord* blk_start, HeapWord* blk_end) {
+ if (is_crossing_card_boundary(blk_start, blk_end)) {
+ update_for_block_work(blk_start, blk_end);
+ }
+}
+
#endif // SHARE_GC_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index 8f3cafe1f5b..c739907e2b4 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -24,6 +24,7 @@
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
+#include "cppstdlib/new.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BatchedTask.hpp"
#include "gc/g1/g1CardSetMemory.hpp"
@@ -519,8 +520,8 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
_max_concurrent_workers(0),
_region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_num_regions(), mtGC)),
- _top_at_mark_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_num_regions(), mtGC)),
- _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_num_regions(), mtGC)),
+ _top_at_mark_starts(NEW_C_HEAP_ARRAY(Atomic<HeapWord*>, _g1h->max_num_regions(), mtGC)),
+ _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(Atomic<HeapWord*>, _g1h->max_num_regions(), mtGC)),
_needs_remembered_set_rebuild(false)
{
assert(G1CGC_lock != nullptr, "CGC_lock must be initialized");
@@ -564,6 +565,12 @@ void G1ConcurrentMark::fully_initialize() {
_tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats);
}
+ for (uint i = 0; i < _g1h->max_num_regions(); i++) {
+ ::new (&_region_mark_stats[i]) G1RegionMarkStats{};
+ ::new (&_top_at_mark_starts[i]) Atomic<HeapWord*>{};
+ ::new (&_top_at_rebuild_starts[i]) Atomic<HeapWord*>{};
+ }
+
reset_at_marking_complete();
}
@@ -576,7 +583,7 @@ PartialArrayStateManager* G1ConcurrentMark::partial_array_state_manager() const
}
void G1ConcurrentMark::reset() {
- _has_aborted = false;
+ _has_aborted.store_relaxed(false);
reset_marking_for_restart();
@@ -588,7 +595,7 @@ void G1ConcurrentMark::reset() {
uint max_num_regions = _g1h->max_num_regions();
for (uint i = 0; i < max_num_regions; i++) {
- _top_at_rebuild_starts[i] = nullptr;
+ _top_at_rebuild_starts[i].store_relaxed(nullptr);
_region_mark_stats[i].clear();
}
@@ -600,7 +607,7 @@ void G1ConcurrentMark::clear_statistics(G1HeapRegion* r) {
for (uint j = 0; j < _max_num_tasks; ++j) {
_tasks[j]->clear_mark_stats_cache(region_idx);
}
- _top_at_rebuild_starts[region_idx] = nullptr;
+ _top_at_rebuild_starts[region_idx].store_relaxed(nullptr);
_region_mark_stats[region_idx].clear();
}
@@ -636,7 +643,7 @@ void G1ConcurrentMark::reset_marking_for_restart() {
}
clear_has_overflown();
- _finger = _heap.start();
+ _finger.store_relaxed(_heap.start());
for (uint i = 0; i < _max_num_tasks; ++i) {
_tasks[i]->reset_for_restart();
@@ -657,14 +664,14 @@ void G1ConcurrentMark::set_concurrency(uint active_tasks) {
void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
set_concurrency(active_tasks);
- _concurrent = concurrent;
+ _concurrent.store_relaxed(concurrent);
if (!concurrent) {
// At this point we should be in a STW phase, and completed marking.
assert_at_safepoint_on_vm_thread();
assert(out_of_regions(),
"only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
- p2i(_finger), p2i(_heap.end()));
+ p2i(finger()), p2i(_heap.end()));
}
}
@@ -695,8 +702,8 @@ void G1ConcurrentMark::reset_at_marking_complete() {
}
G1ConcurrentMark::~G1ConcurrentMark() {
- FREE_C_HEAP_ARRAY(HeapWord*, _top_at_mark_starts);
- FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
+ FREE_C_HEAP_ARRAY(Atomic<HeapWord*>, _top_at_mark_starts);
+ FREE_C_HEAP_ARRAY(Atomic<HeapWord*>, _top_at_rebuild_starts);
FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
// The G1ConcurrentMark instance is never freed.
ShouldNotReachHere();
@@ -921,6 +928,8 @@ public:
bool do_heap_region(G1HeapRegion* r) override {
if (r->is_old_or_humongous() && !r->is_collection_set_candidate() && !r->in_collection_set()) {
_cm->update_top_at_mark_start(r);
+ } else {
+ _cm->reset_top_at_mark_start(r);
}
return false;
}
@@ -1163,7 +1172,7 @@ void G1ConcurrentMark::concurrent_cycle_start() {
}
uint G1ConcurrentMark::completed_mark_cycles() const {
- return AtomicAccess::load(&_completed_mark_cycles);
+ return _completed_mark_cycles.load_relaxed();
}
void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
@@ -1172,7 +1181,7 @@ void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
_g1h->trace_heap_after_gc(_gc_tracer_cm);
if (mark_cycle_completed) {
- AtomicAccess::inc(&_completed_mark_cycles, memory_order_relaxed);
+ _completed_mark_cycles.add_then_fetch(1u, memory_order_relaxed);
}
if (has_aborted()) {
@@ -1186,7 +1195,7 @@ void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
}
void G1ConcurrentMark::mark_from_roots() {
- _restart_for_overflow = false;
+ _restart_for_overflow.store_relaxed(false);
uint active_workers = calc_active_marking_workers();
@@ -1355,7 +1364,7 @@ void G1ConcurrentMark::remark() {
}
} else {
// We overflowed. Restart concurrent marking.
- _restart_for_overflow = true;
+ _restart_for_overflow.store_relaxed(true);
verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyLocation::RemarkOverflow);
@@ -1784,44 +1793,45 @@ void G1ConcurrentMark::clear_bitmap_for_region(G1HeapRegion* hr) {
}
G1HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
- // "checkpoint" the finger
- HeapWord* finger = _finger;
+ // "Checkpoint" the finger.
+ HeapWord* local_finger = finger();
- while (finger < _heap.end()) {
- assert(_g1h->is_in_reserved(finger), "invariant");
+ while (local_finger < _heap.end()) {
+ assert(_g1h->is_in_reserved(local_finger), "invariant");
- G1HeapRegion* curr_region = _g1h->heap_region_containing_or_null(finger);
+ G1HeapRegion* curr_region = _g1h->heap_region_containing_or_null(local_finger);
// Make sure that the reads below do not float before loading curr_region.
OrderAccess::loadload();
// Above heap_region_containing may return null as we always scan claim
// until the end of the heap. In this case, just jump to the next region.
- HeapWord* end = curr_region != nullptr ? curr_region->end() : finger + G1HeapRegion::GrainWords;
+ HeapWord* end = curr_region != nullptr ? curr_region->end() : local_finger + G1HeapRegion::GrainWords;
// Is the gap between reading the finger and doing the CAS too long?
- HeapWord* res = AtomicAccess::cmpxchg(&_finger, finger, end);
- if (res == finger && curr_region != nullptr) {
- // we succeeded
+ HeapWord* res = _finger.compare_exchange(local_finger, end);
+ if (res == local_finger && curr_region != nullptr) {
+ // We succeeded.
HeapWord* bottom = curr_region->bottom();
HeapWord* limit = top_at_mark_start(curr_region);
log_trace(gc, marking)("Claim region %u bottom " PTR_FORMAT " tams " PTR_FORMAT, curr_region->hrm_index(), p2i(curr_region->bottom()), p2i(top_at_mark_start(curr_region)));
- // notice that _finger == end cannot be guaranteed here since,
- // someone else might have moved the finger even further
- assert(_finger >= end, "the finger should have moved forward");
+ // Notice that _finger == end cannot be guaranteed here since,
+ // someone else might have moved the finger even further.
+ assert(finger() >= end, "The finger should have moved forward");
if (limit > bottom) {
return curr_region;
} else {
assert(limit == bottom,
- "the region limit should be at bottom");
+ "The region limit should be at bottom");
// We return null and the caller should try calling
// claim_region() again.
return nullptr;
}
} else {
- assert(_finger > finger, "the finger should have moved forward");
- // read it again
- finger = _finger;
+ // Read the finger again.
+ HeapWord* next_finger = finger();
+ assert(next_finger > local_finger, "The finger should have moved forward " PTR_FORMAT " " PTR_FORMAT, p2i(local_finger), p2i(next_finger));
+ local_finger = next_finger;
}
}
@@ -1957,7 +1967,7 @@ bool G1ConcurrentMark::concurrent_cycle_abort() {
void G1ConcurrentMark::abort_marking_threads() {
assert(!_root_regions.scan_in_progress(), "still doing root region scan");
- _has_aborted = true;
+ _has_aborted.store_relaxed(true);
_first_overflow_barrier_sync.abort();
_second_overflow_barrier_sync.abort();
}
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
index 0271e6a4208..11da6dae5b3 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
@@ -368,7 +368,7 @@ class G1ConcurrentMark : public CHeapObj {
// For grey objects
G1CMMarkStack _global_mark_stack; // Grey objects behind global finger
- HeapWord* volatile _finger; // The global finger, region aligned,
+ Atomic<HeapWord*> _finger; // The global finger, region aligned,
// always pointing to the end of the
// last claimed region
@@ -395,19 +395,19 @@ class G1ConcurrentMark : public CHeapObj {
WorkerThreadsBarrierSync _second_overflow_barrier_sync;
// Number of completed mark cycles.
- volatile uint _completed_mark_cycles;
+ Atomic<uint> _completed_mark_cycles;
// This is set by any task, when an overflow on the global data
// structures is detected
- volatile bool _has_overflown;
+ Atomic<bool> _has_overflown;
// True: marking is concurrent, false: we're in remark
- volatile bool _concurrent;
+ Atomic<bool> _concurrent;
// Set at the end of a Full GC so that marking aborts
- volatile bool _has_aborted;
+ Atomic<bool> _has_aborted;
// Used when remark aborts due to an overflow to indicate that
// another concurrent marking phase should start
- volatile bool _restart_for_overflow;
+ Atomic<bool> _restart_for_overflow;
ConcurrentGCTimer* _gc_timer_cm;
@@ -461,8 +461,8 @@ class G1ConcurrentMark : public CHeapObj {
void print_and_reset_taskqueue_stats();
- HeapWord* finger() { return _finger; }
- bool concurrent() { return _concurrent; }
+ HeapWord* finger() { return _finger.load_relaxed(); }
+ bool concurrent() { return _concurrent.load_relaxed(); }
uint active_tasks() { return _num_active_tasks; }
TaskTerminator* terminator() { return &_terminator; }
@@ -487,7 +487,7 @@ class G1ConcurrentMark : public CHeapObj {
// to satisfy an allocation without doing a GC. This is fine, because all
// objects in those regions will be considered live anyway because of
// SATB guarantees (i.e. their TAMS will be equal to bottom).
- bool out_of_regions() { return _finger >= _heap.end(); }
+ bool out_of_regions() { return finger() >= _heap.end(); }
// Returns the task with the given id
G1CMTask* task(uint id) {
@@ -499,10 +499,10 @@ class G1ConcurrentMark : public CHeapObj {
// Access / manipulation of the overflow flag which is set to
// indicate that the global stack has overflown
- bool has_overflown() { return _has_overflown; }
- void set_has_overflown() { _has_overflown = true; }
- void clear_has_overflown() { _has_overflown = false; }
- bool restart_for_overflow() { return _restart_for_overflow; }
+ bool has_overflown() { return _has_overflown.load_relaxed(); }
+ void set_has_overflown() { _has_overflown.store_relaxed(true); }
+ void clear_has_overflown() { _has_overflown.store_relaxed(false); }
+ bool restart_for_overflow() { return _restart_for_overflow.load_relaxed(); }
// Methods to enter the two overflow sync barriers
void enter_first_sync_barrier(uint worker_id);
@@ -516,12 +516,12 @@ class G1ConcurrentMark : public CHeapObj {
G1RegionMarkStats* _region_mark_stats;
// Top pointer for each region at the start of marking. Must be valid for all committed
// regions.
- HeapWord* volatile* _top_at_mark_starts;
+ Atomic<HeapWord*>* _top_at_mark_starts;
// Top pointer for each region at the start of the rebuild remembered set process
// for regions which remembered sets need to be rebuilt. A null for a given region
// means that this region does not be scanned during the rebuilding remembered
// set phase at all.
- HeapWord* volatile* _top_at_rebuild_starts;
+ Atomic<HeapWord*>* _top_at_rebuild_starts;
// True when Remark pause selected regions for rebuilding.
bool _needs_remembered_set_rebuild;
public:
@@ -679,7 +679,7 @@ public:
uint completed_mark_cycles() const;
- bool has_aborted() { return _has_aborted; }
+ bool has_aborted() { return _has_aborted.load_relaxed(); }
void print_summary_info();
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
index 2f4824e4cae..21167d5cae9 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
@@ -194,11 +194,11 @@ inline void G1CMTask::process_array_chunk(objArrayOop obj, size_t start, size_t
inline void G1ConcurrentMark::update_top_at_mark_start(G1HeapRegion* r) {
uint const region = r->hrm_index();
assert(region < _g1h->max_num_regions(), "Tried to access TAMS for region %u out of bounds", region);
- _top_at_mark_starts[region] = r->top();
+ _top_at_mark_starts[region].store_relaxed(r->top());
}
inline void G1ConcurrentMark::reset_top_at_mark_start(G1HeapRegion* r) {
- _top_at_mark_starts[r->hrm_index()] = r->bottom();
+ _top_at_mark_starts[r->hrm_index()].store_relaxed(r->bottom());
}
inline HeapWord* G1ConcurrentMark::top_at_mark_start(const G1HeapRegion* r) const {
@@ -207,7 +207,7 @@ inline HeapWord* G1ConcurrentMark::top_at_mark_start(const G1HeapRegion* r) cons
inline HeapWord* G1ConcurrentMark::top_at_mark_start(uint region) const {
assert(region < _g1h->max_num_regions(), "Tried to access TARS for region %u out of bounds", region);
- return _top_at_mark_starts[region];
+ return _top_at_mark_starts[region].load_relaxed();
}
inline bool G1ConcurrentMark::obj_allocated_since_mark_start(oop obj) const {
@@ -217,7 +217,7 @@ inline bool G1ConcurrentMark::obj_allocated_since_mark_start(oop obj) const {
}
inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(G1HeapRegion* r) const {
- return _top_at_rebuild_starts[r->hrm_index()];
+ return _top_at_rebuild_starts[r->hrm_index()].load_relaxed();
}
inline void G1ConcurrentMark::update_top_at_rebuild_start(G1HeapRegion* r) {
@@ -225,10 +225,10 @@ inline void G1ConcurrentMark::update_top_at_rebuild_start(G1HeapRegion* r) {
uint const region = r->hrm_index();
assert(region < _g1h->max_num_regions(), "Tried to access TARS for region %u out of bounds", region);
- assert(_top_at_rebuild_starts[region] == nullptr,
+ assert(top_at_rebuild_start(r) == nullptr,
"TARS for region %u has already been set to " PTR_FORMAT " should be null",
- region, p2i(_top_at_rebuild_starts[region]));
- _top_at_rebuild_starts[region] = r->top();
+ region, p2i(top_at_rebuild_start(r)));
+ _top_at_rebuild_starts[region].store_relaxed(r->top());
}
inline void G1CMTask::update_liveness(oop const obj, const size_t obj_size) {
diff --git a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp
index 4dcdd33846e..b8f13f4553d 100644
--- a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp
+++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp
@@ -44,6 +44,8 @@ struct G1RegionMarkStats {
Atomic<size_t> _live_words;
Atomic<size_t> _incoming_refs;
+ G1RegionMarkStats() : _live_words(0), _incoming_refs(0) { }
+
// Clear all members.
void clear() {
_live_words.store_relaxed(0);
diff --git a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp
index 3f47d386015..557941981e4 100644
--- a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp
+++ b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp
@@ -497,10 +497,6 @@ class G1PostEvacuateCollectionSetCleanupTask2::ProcessEvacuationFailedRegionsTas
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1ConcurrentMark* cm = g1h->concurrent_mark();
- HeapWord* top_at_mark_start = cm->top_at_mark_start(r);
- assert(top_at_mark_start == r->bottom(), "TAMS must not have been set for region %u", r->hrm_index());
- assert(cm->live_bytes(r->hrm_index()) == 0, "Marking live bytes must not be set for region %u", r->hrm_index());
-
// Concurrent mark does not mark through regions that we retain (they are root
// regions wrt to marking), so we must clear their mark data (tams, bitmap, ...)
// set eagerly or during evacuation failure.
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp
index f257f4c9177..0f00b3ec2f9 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp
@@ -30,8 +30,11 @@
#include "gc/parallel/psScavenge.hpp"
inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
- const size_t eden_size = young_gen()->eden_space()->capacity_in_words();
- return size < eden_size / 2;
+ const size_t max_young_gen_bytes = young_gen()->max_gen_size();
+ const size_t survivor_size_bytes = young_gen()->from_space()->capacity_in_bytes();
+ const size_t max_eden_size_bytes = max_young_gen_bytes - survivor_size_bytes * 2;
+ const size_t max_eden_size_words = max_eden_size_bytes / HeapWordSize;
+ return size < max_eden_size_words / 2;
}
inline bool ParallelScavengeHeap::is_in_young(const void* p) const {
diff --git a/src/hotspot/share/gc/parallel/psVirtualspace.cpp b/src/hotspot/share/gc/parallel/psVirtualspace.cpp
index f4b24fa51af..93803cf38e1 100644
--- a/src/hotspot/share/gc/parallel/psVirtualspace.cpp
+++ b/src/hotspot/share/gc/parallel/psVirtualspace.cpp
@@ -78,12 +78,13 @@ bool PSVirtualSpace::shrink_by(size_t bytes) {
}
char* const base_addr = committed_high_addr() - bytes;
- bool result = special() || os::uncommit_memory(base_addr, bytes);
- if (result) {
- _committed_high_addr -= bytes;
+ if (!special()) {
+ os::uncommit_memory(base_addr, bytes);
}
- return result;
+ _committed_high_addr -= bytes;
+
+ return true;
}
#ifndef PRODUCT
diff --git a/src/hotspot/share/gc/shared/cardTable.cpp b/src/hotspot/share/gc/shared/cardTable.cpp
index 34f1847befe..e6e3fdf3d82 100644
--- a/src/hotspot/share/gc/shared/cardTable.cpp
+++ b/src/hotspot/share/gc/shared/cardTable.cpp
@@ -169,9 +169,7 @@ void CardTable::resize_covered_region(MemRegion new_region) {
// Shrink.
MemRegion delta = MemRegion(new_committed.end(),
old_committed.word_size() - new_committed.word_size());
- bool res = os::uncommit_memory((char*)delta.start(),
- delta.byte_size());
- assert(res, "uncommit should succeed");
+ os::uncommit_memory((char*)delta.start(), delta.byte_size());
}
log_trace(gc, barrier)("CardTable::resize_covered_region: ");
diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp
index e86881d3523..d99544c0573 100644
--- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp
+++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp
@@ -459,13 +459,16 @@ size_t ThreadLocalAllocBuffer::end_reserve() {
}
size_t ThreadLocalAllocBuffer::estimated_used_bytes() const {
+ // Data races due to unsynchronized access like the following reads of _start
+ // and _top are undefined behavior. Atomic would not provide any additional
+ // guarantees, so use AtomicAccess directly.
HeapWord* start = AtomicAccess::load(&_start);
HeapWord* top = AtomicAccess::load(&_top);
- // There has been a race when retrieving _top and _start. Return 0.
- if (_top < _start) {
+ // If there has been a race when retrieving _top and _start, return 0.
+ if (top < start) {
return 0;
}
- size_t used_bytes = pointer_delta(_top, _start, 1);
+ size_t used_bytes = pointer_delta(top, start, 1);
// Comparing diff with the maximum allowed size will ensure that we don't add
// the used bytes from a semi-initialized TLAB ending up with implausible values.
// In this case also just return 0.
diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp
index a50e7c9533c..61caac7ec51 100644
--- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp
+++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp
@@ -32,8 +32,10 @@
class ThreadLocalAllocStats;
-// ThreadLocalAllocBuffer: a descriptor for thread-local storage used by
-// the threads for allocation. It is thread-private at any time.
+// ThreadLocalAllocBuffer is a descriptor for thread-local storage used by
+// mutator threads for local/private allocation. As a TLAB is thread-private,
+// there is no concurrent/parallel access to its memory or its members,
+// other than by estimated_used_bytes().
//
// Heap sampling is performed via the end and allocation_end
// fields.
@@ -123,7 +125,7 @@ public:
// Due to races with concurrent allocations and/or resetting the TLAB the return
// value may be inconsistent with any other metrics (e.g. total allocated
// bytes), and may just incorrectly return 0.
- // Intented fo external inspection only where accuracy is not 100% required.
+ // Intended for external inspection only where accuracy is not 100% required.
size_t estimated_used_bytes() const;
// Allocate size HeapWords. The memory is NOT initialized to zero.
diff --git a/src/hotspot/share/gc/shared/vmStructs_gc.hpp b/src/hotspot/share/gc/shared/vmStructs_gc.hpp
index 9348fd980f4..5bd87e6adf7 100644
--- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp
+++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp
@@ -157,7 +157,7 @@
declare_toplevel_type(CollectedHeap*) \
declare_toplevel_type(ContiguousSpace*) \
declare_toplevel_type(HeapWord*) \
- declare_toplevel_type(HeapWord* volatile) \
+ declare_toplevel_type(Atomic<HeapWord*>) \
declare_toplevel_type(MemRegion*) \
declare_toplevel_type(ThreadLocalAllocBuffer*) \
\
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
index 80e6decf57d..029b917deab 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
@@ -30,6 +30,7 @@
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
+#include "gc/shenandoah/shenandoahInPlacePromoter.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahTrace.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
@@ -403,17 +404,9 @@ size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_pr
bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions();
ShenandoahMarkingContext* const ctx = heap->marking_context();
- const size_t old_garbage_threshold =
- (ShenandoahHeapRegion::region_size_bytes() * heap->old_generation()->heuristics()->get_old_garbage_threshold()) / 100;
-
- const size_t pip_used_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahGenerationalMinPIPUsage) / 100;
-
size_t promo_potential = 0;
size_t candidates = 0;
- // Tracks the padding of space above top in regions eligible for promotion in place
- size_t promote_in_place_pad = 0;
-
// Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that require
// less evacuation effort. This prioritizes garbage first, expanding the allocation pool early before we reclaim regions that
// have more live data.
@@ -422,20 +415,7 @@ size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_pr
ResourceMark rm;
AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions);
- ShenandoahFreeSet* freeset = heap->free_set();
-
- // Any region that is to be promoted in place needs to be retired from its Collector or Mutator partition.
- idx_t pip_low_collector_idx = freeset->max_regions();
- idx_t pip_high_collector_idx = -1;
- idx_t pip_low_mutator_idx = freeset->max_regions();
- idx_t pip_high_mutator_idx = -1;
- size_t collector_regions_to_pip = 0;
- size_t mutator_regions_to_pip = 0;
-
- size_t pip_mutator_regions = 0;
- size_t pip_collector_regions = 0;
- size_t pip_mutator_bytes = 0;
- size_t pip_collector_bytes = 0;
+ ShenandoahInPlacePromotionPlanner in_place_promotions(heap);
for (idx_t i = 0; i < num_regions; i++) {
ShenandoahHeapRegion* const r = heap->get_region(i);
@@ -444,77 +424,19 @@ size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_pr
continue;
}
if (heap->is_tenurable(r)) {
- if ((r->garbage() < old_garbage_threshold) && (r->used() > pip_used_threshold)) {
+ if (in_place_promotions.is_eligible(r)) {
// We prefer to promote this region in place because it has a small amount of garbage and a large usage.
- HeapWord* tams = ctx->top_at_mark_start(r);
- HeapWord* original_top = r->top();
- if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top) {
- // No allocations from this region have been made during concurrent mark. It meets all the criteria
- // for in-place-promotion. Though we only need the value of top when we fill the end of the region,
- // we use this field to indicate that this region should be promoted in place during the evacuation
- // phase.
- r->save_top_before_promote();
- size_t remnant_bytes = r->free();
- size_t remnant_words = remnant_bytes / HeapWordSize;
- assert(ShenandoahHeap::min_fill_size() <= PLAB::min_size(), "Implementation makes invalid assumptions");
- if (remnant_words >= ShenandoahHeap::min_fill_size()) {
- ShenandoahHeap::fill_with_object(original_top, remnant_words);
- // Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise,
- // newly allocated objects will not be parsable when promote in place tries to register them. Furthermore, any
- // new allocations would not necessarily be eligible for promotion. This addresses both issues.
- r->set_top(r->end());
- // The region r is either in the Mutator or Collector partition if remnant_words > heap()->plab_min_size.
- // Otherwise, the region is in the NotFree partition.
- ShenandoahFreeSetPartitionId p = free_set->membership(i);
- if (p == ShenandoahFreeSetPartitionId::Mutator) {
- mutator_regions_to_pip++;
- if (i < pip_low_mutator_idx) {
- pip_low_mutator_idx = i;
- }
- if (i > pip_high_mutator_idx) {
- pip_high_mutator_idx = i;
- }
- pip_mutator_regions++;
- pip_mutator_bytes += remnant_bytes;
- } else if (p == ShenandoahFreeSetPartitionId::Collector) {
- collector_regions_to_pip++;
- if (i < pip_low_collector_idx) {
- pip_low_collector_idx = i;
- }
- if (i > pip_high_collector_idx) {
- pip_high_collector_idx = i;
- }
- pip_collector_regions++;
- pip_collector_bytes += remnant_bytes;
- } else {
- assert((p == ShenandoahFreeSetPartitionId::NotFree) && (remnant_words < heap->plab_min_size()),
- "Should be NotFree if not in Collector or Mutator partitions");
- // In this case, the memory is already counted as used and the region has already been retired. There is
- // no need for further adjustments to used. Further, the remnant memory for this region will not be
- // unallocated or made available to OldCollector after pip.
- remnant_bytes = 0;
- }
- promote_in_place_pad += remnant_bytes;
- free_set->prepare_to_promote_in_place(i, remnant_bytes);
- } else {
- // Since the remnant is so small that this region has already been retired, we don't have to worry about any
- // accidental allocations occurring within this region before the region is promoted in place.
-
- // This region was already not in the Collector or Mutator set, so no need to remove it.
- assert(free_set->membership(i) == ShenandoahFreeSetPartitionId::NotFree, "sanity");
- }
- }
- // Else, we do not promote this region (either in place or by copy) because it has received new allocations.
-
- // During evacuation, we exclude from promotion regions for which age > tenure threshold, garbage < garbage-threshold,
- // used > pip_used_threshold, and get_top_before_promote() != tams
+ // Note that if this region has been used recently for allocation, it will not be promoted and it will
+ // not be selected for promotion by evacuation.
+ in_place_promotions.prepare(r);
} else {
// Record this promotion-eligible candidate region. After sorting and selecting the best candidates below,
// we may still decide to exclude this promotion-eligible region from the current collection set. If this
// happens, we will consider this region as part of the anticipated promotion potential for the next GC
// pass; see further below.
sorted_regions[candidates]._region = r;
- sorted_regions[candidates++]._live_data = r->get_live_data_bytes();
+ sorted_regions[candidates]._live_data = r->get_live_data_bytes();
+ candidates++;
}
} else {
// We only evacuate & promote objects from regular regions whose garbage() is above old-garbage-threshold.
@@ -533,7 +455,7 @@ size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_pr
// in the current cycle and we will anticipate that they will be promoted in the next cycle. This will cause
// us to reserve more old-gen memory so that these objects can be promoted in the subsequent cycle.
if (heap->is_aging_cycle() && heap->age_census()->is_tenurable(r->age() + 1)) {
- if (r->garbage() >= old_garbage_threshold) {
+ if (r->garbage() >= in_place_promotions.old_garbage_threshold()) {
promo_potential += r->get_live_data_bytes();
}
}
@@ -542,21 +464,7 @@ size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_pr
// Subsequent regions may be selected if they have smaller live data.
}
- if (pip_mutator_regions + pip_collector_regions > 0) {
- freeset->account_for_pip_regions(pip_mutator_regions, pip_mutator_bytes, pip_collector_regions, pip_collector_bytes);
- }
-
- // Retire any regions that have been selected for promote in place
- if (collector_regions_to_pip > 0) {
- freeset->shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Collector,
- pip_low_collector_idx, pip_high_collector_idx,
- collector_regions_to_pip);
- }
- if (mutator_regions_to_pip > 0) {
- freeset->shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Mutator,
- pip_low_mutator_idx, pip_high_mutator_idx,
- mutator_regions_to_pip);
- }
+ in_place_promotions.update_free_set();
// Sort in increasing order according to live data bytes. Note that candidates represents the number of regions
// that qualify to be promoted by evacuation.
@@ -589,8 +497,6 @@ size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_pr
}
log_info(gc, ergo)("Promotion potential of aged regions with sufficient garbage: " PROPERFMT, PROPERFMTARGS(promo_potential));
-
- heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad);
heap->old_generation()->set_promotion_potential(promo_potential);
return old_consumed;
}
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp
index fc7a35aa6c8..e657ac58ae4 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp
@@ -128,7 +128,7 @@ private:
// The value of command-line argument ShenandoahOldGarbageThreshold represents the percent of garbage that must
// be present within an old-generation region before that region is considered a good candidate for inclusion in
- // the collection set under normal circumstances. For our purposes, normal circustances are when the memory consumed
+ // the collection set under normal circumstances. For our purposes, normal circumstances are when the memory consumed
// by the old generation is less than 50% of the soft heap capacity. When the old generation grows beyond the 50%
// threshold, we dynamically adjust the old garbage threshold, allowing us to invest in packing the old generation
// more tightly so that more memory can be made available to the more frequent young GC cycles. This variable
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp
index c9b956f9c2f..6912750378e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp
@@ -24,14 +24,11 @@
*/
#include "gc/shenandoah/shenandoahAsserts.hpp"
-#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
-#include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
-#include "gc/shenandoah/shenandoahOldGeneration.hpp"
-#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
+#include "gc/shenandoah/shenandoahInPlacePromoter.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
-#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
class ShenandoahConcurrentEvacuator : public ObjectClosure {
private:
@@ -77,10 +74,10 @@ void ShenandoahGenerationalEvacuationTask::work(uint worker_id) {
void ShenandoahGenerationalEvacuationTask::do_work() {
if (_only_promote_regions) {
// No allocations will be made, do not enter oom-during-evac protocol.
- assert(ShenandoahHeap::heap()->collection_set()->is_empty(), "Should not have a collection set here");
+ assert(_heap->collection_set()->is_empty(), "Should not have a collection set here");
promote_regions();
} else {
- assert(!ShenandoahHeap::heap()->collection_set()->is_empty(), "Should have a collection set here");
+ assert(!_heap->collection_set()->is_empty(), "Should have a collection set here");
ShenandoahEvacOOMScope oom_evac_scope;
evacuate_and_promote_regions();
}
@@ -95,16 +92,16 @@ void log_region(const ShenandoahHeapRegion* r, LogStream* ls) {
}
void ShenandoahGenerationalEvacuationTask::promote_regions() {
- ShenandoahHeapRegion* r;
LogTarget(Debug, gc) lt;
-
+ ShenandoahInPlacePromoter promoter(_heap);
+ ShenandoahHeapRegion* r;
while ((r = _regions->next()) != nullptr) {
if (lt.is_enabled()) {
LogStream ls(lt);
log_region(r, &ls);
}
- maybe_promote_region(r);
+ promoter.maybe_promote_region(r);
if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
break;
@@ -115,6 +112,7 @@ void ShenandoahGenerationalEvacuationTask::promote_regions() {
void ShenandoahGenerationalEvacuationTask::evacuate_and_promote_regions() {
LogTarget(Debug, gc) lt;
ShenandoahConcurrentEvacuator cl(_heap);
+ ShenandoahInPlacePromoter promoter(_heap);
ShenandoahHeapRegion* r;
while ((r = _regions->next()) != nullptr) {
@@ -127,7 +125,7 @@ void ShenandoahGenerationalEvacuationTask::evacuate_and_promote_regions() {
assert(r->has_live(), "Region %zu should have been reclaimed early", r->index());
_heap->marked_object_iterate(r, &cl);
} else {
- maybe_promote_region(r);
+ promoter.maybe_promote_region(r);
}
if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
@@ -135,182 +133,3 @@ void ShenandoahGenerationalEvacuationTask::evacuate_and_promote_regions() {
}
}
}
-
-
-void ShenandoahGenerationalEvacuationTask::maybe_promote_region(ShenandoahHeapRegion* r) {
- if (r->is_young() && r->is_active() && _heap->is_tenurable(r)) {
- if (r->is_humongous_start()) {
- // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
- // doing this work during a safepoint. We cannot put humongous regions into the collection set because that
- // triggers the load-reference barrier (LRB) to copy on reference fetch.
- //
- // Aged humongous continuation regions are handled with their start region. If an aged regular region has
- // more garbage than the old garbage threshold, we'll promote by evacuation. If there is room for evacuation
- // in this cycle, the region will be in the collection set. If there is not room, the region will be promoted
- // by evacuation in some future GC cycle.
-
- // We do not promote primitive arrays because there's no performance penalty keeping them in young. When/if they
- // become garbage, reclaiming the memory from young is much quicker and more efficient than reclaiming them from old.
- oop obj = cast_to_oop(r->bottom());
- if (!obj->is_typeArray()) {
- promote_humongous(r);
- }
- } else if (r->is_regular() && (r->get_top_before_promote() != nullptr)) {
- // Likewise, we cannot put promote-in-place regions into the collection set because that would also trigger
- // the LRB to copy on reference fetch.
- //
- // If an aged regular region has received allocations during the current cycle, we do not promote because the
- // newly allocated objects do not have appropriate age; this region's age will be reset to zero at end of cycle.
- promote_in_place(r);
- }
- }
-}
-
-// When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
-// set scans of this region's content. The region will be coalesced and filled prior to the next old-gen marking effort.
-// We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
-// contained herein.
-void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion* region) {
- assert(!_generation->is_old(), "Sanity check");
- ShenandoahMarkingContext* const marking_context = _heap->young_generation()->complete_marking_context();
- HeapWord* const tams = marking_context->top_at_mark_start(region);
- size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
-
- {
- const size_t old_garbage_threshold =
- (region_size_bytes * _heap->old_generation()->heuristics()->get_old_garbage_threshold()) / 100;
- assert(!_heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking");
- assert(region->garbage_before_padded_for_promote() < old_garbage_threshold,
- "Region %zu has too much garbage for promotion", region->index());
- assert(region->is_young(), "Only young regions can be promoted");
- assert(region->is_regular(), "Use different service to promote humongous regions");
- assert(_heap->is_tenurable(region), "Only promote regions that are sufficiently aged");
- assert(region->get_top_before_promote() == tams, "Region %zu has been used for allocations before promotion", region->index());
- }
-
- ShenandoahOldGeneration* const old_gen = _heap->old_generation();
- ShenandoahYoungGeneration* const young_gen = _heap->young_generation();
-
- // Rebuild the remembered set information and mark the entire range as DIRTY. We do NOT scan the content of this
- // range to determine which cards need to be DIRTY. That would force us to scan the region twice, once now, and
- // once during the subsequent remembered set scan. Instead, we blindly (conservatively) mark everything as DIRTY
- // now and then sort out the CLEAN pages during the next remembered set scan.
- //
- // Rebuilding the remembered set consists of clearing all object registrations (reset_object_range()) here,
- // then registering every live object and every coalesced range of free objects in the loop that follows.
- ShenandoahScanRemembered* const scanner = old_gen->card_scan();
- scanner->reset_object_range(region->bottom(), region->end());
- scanner->mark_range_as_dirty(region->bottom(), region->get_top_before_promote() - region->bottom());
-
- HeapWord* obj_addr = region->bottom();
- while (obj_addr < tams) {
- oop obj = cast_to_oop(obj_addr);
- if (marking_context->is_marked(obj)) {
- assert(obj->klass() != nullptr, "klass should not be null");
- // This thread is responsible for registering all objects in this region. No need for lock.
- scanner->register_object_without_lock(obj_addr);
- obj_addr += obj->size();
- } else {
- HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams);
- assert(next_marked_obj <= tams, "next marked object cannot exceed tams");
- size_t fill_size = next_marked_obj - obj_addr;
- assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
- ShenandoahHeap::fill_with_object(obj_addr, fill_size);
- scanner->register_object_without_lock(obj_addr);
- obj_addr = next_marked_obj;
- }
- }
- // We do not need to scan above TAMS because restored top equals tams
- assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams");
-
- {
- ShenandoahHeapLocker locker(_heap->lock());
-
- HeapWord* update_watermark = region->get_update_watermark();
- // pip_unpadded is memory too small to be filled above original top
- size_t pip_unpadded = (region->end() - region->top()) * HeapWordSize;
- assert((region->top() == region->end())
- || (pip_unpadded == (size_t) ((region->end() - region->top()) * HeapWordSize)), "Invariant");
- assert(pip_unpadded < ShenandoahHeap::min_fill_size() * HeapWordSize, "Sanity");
- size_t pip_pad_bytes = (region->top() - region->get_top_before_promote()) * HeapWordSize;
- assert((pip_unpadded == 0) || (pip_pad_bytes == 0), "Only one of pip_unpadded and pip_pad_bytes is non-zero");
-
- // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the
- // is_collector_free range. We'll add it to that range below.
- region->restore_top_before_promote();
-#ifdef ASSERT
- size_t region_to_be_used_in_old = region->used();
- assert(region_to_be_used_in_old + pip_pad_bytes + pip_unpadded == region_size_bytes, "invariant");
-#endif
-
- // The update_watermark was likely established while we had the artificially high value of top. Make it sane now.
- assert(update_watermark >= region->top(), "original top cannot exceed preserved update_watermark");
- region->set_update_watermark(region->top());
-
- // Transfer this region from young to old, increasing promoted_reserve if available space exceeds plab_min_size()
- _heap->free_set()->add_promoted_in_place_region_to_old_collector(region);
- region->set_affiliation(OLD_GENERATION);
- region->set_promoted_in_place();
- }
-}
-
-void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegion* region) {
- ShenandoahMarkingContext* marking_context = _heap->marking_context();
- oop obj = cast_to_oop(region->bottom());
- assert(_generation->is_mark_complete(), "sanity");
- assert(region->is_young(), "Only young regions can be promoted");
- assert(region->is_humongous_start(), "Should not promote humongous continuation in isolation");
- assert(_heap->is_tenurable(region), "Only promote regions that are sufficiently aged");
- assert(marking_context->is_marked(obj), "promoted humongous object should be alive");
-
- const size_t used_bytes = obj->size() * HeapWordSize;
- const size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
- const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
- const size_t humongous_waste = spanned_regions * region_size_bytes - obj->size() * HeapWordSize;
- const size_t index_limit = region->index() + spanned_regions;
-
- ShenandoahOldGeneration* const old_gen = _heap->old_generation();
- ShenandoahGeneration* const young_gen = _heap->young_generation();
- {
- // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
- // young to old.
- ShenandoahHeapLocker locker(_heap->lock());
-
- // We promote humongous objects unconditionally, without checking for availability. We adjust
- // usage totals, including humongous waste, after evacuation is done.
- log_debug(gc)("promoting humongous region %zu, spanning %zu", region->index(), spanned_regions);
-
- // For this region and each humongous continuation region spanned by this humongous object, change
- // affiliation to OLD_GENERATION and adjust the generation-use tallies. The remnant of memory
- // in the last humongous region that is not spanned by obj is currently not used.
- for (size_t i = region->index(); i < index_limit; i++) {
- ShenandoahHeapRegion* r = _heap->get_region(i);
- log_debug(gc)("promoting humongous region %zu, from " PTR_FORMAT " to " PTR_FORMAT,
- r->index(), p2i(r->bottom()), p2i(r->top()));
- // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
- r->set_affiliation(OLD_GENERATION);
- r->set_promoted_in_place();
- }
-
- ShenandoahFreeSet* freeset = _heap->free_set();
- freeset->transfer_humongous_regions_from_mutator_to_old_collector(spanned_regions, humongous_waste);
- }
-
- // Since this region may have served previously as OLD, it may hold obsolete object range info.
- HeapWord* const humongous_bottom = region->bottom();
- ShenandoahScanRemembered* const scanner = old_gen->card_scan();
- scanner->reset_object_range(humongous_bottom, humongous_bottom + spanned_regions * ShenandoahHeapRegion::region_size_words());
- // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
- scanner->register_object_without_lock(humongous_bottom);
-
- if (obj->is_typeArray()) {
- // Primitive arrays don't need to be scanned.
- log_debug(gc)("Clean cards for promoted humongous object (Region %zu) from " PTR_FORMAT " to " PTR_FORMAT,
- region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
- scanner->mark_range_as_clean(humongous_bottom, obj->size());
- } else {
- log_debug(gc)("Dirty cards for promoted humongous object (Region %zu) from " PTR_FORMAT " to " PTR_FORMAT,
- region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
- scanner->mark_range_as_dirty(humongous_bottom, obj->size());
- }
-}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp
index de47184ffff..1ff58b42e8c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp
@@ -27,6 +27,7 @@
#include "gc/shared/workerThread.hpp"
+class ShenandoahGeneration;
class ShenandoahGenerationalHeap;
class ShenandoahHeapRegion;
class ShenandoahRegionIterator;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index ccfc1c036c2..9dd837b90d2 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -86,6 +86,7 @@
#include "nmt/memTracker.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@@ -201,9 +202,9 @@ jint ShenandoahHeap::initialize() {
assert(num_min_regions <= _num_regions, "sanity");
_minimum_size = num_min_regions * reg_size_bytes;
- _soft_max_size = clamp(SoftMaxHeapSize, min_capacity(), max_capacity());
+ _soft_max_size.store_relaxed(clamp(SoftMaxHeapSize, min_capacity(), max_capacity()));
- _committed = _initial_size;
+ _committed.store_relaxed(_initial_size);
size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
@@ -725,17 +726,17 @@ size_t ShenandoahHeap::used() const {
}
size_t ShenandoahHeap::committed() const {
- return AtomicAccess::load(&_committed);
+ return _committed.load_relaxed();
}
void ShenandoahHeap::increase_committed(size_t bytes) {
shenandoah_assert_heaplocked_or_safepoint();
- _committed += bytes;
+ _committed.fetch_then_add(bytes, memory_order_relaxed);
}
void ShenandoahHeap::decrease_committed(size_t bytes) {
shenandoah_assert_heaplocked_or_safepoint();
- _committed -= bytes;
+ _committed.fetch_then_sub(bytes, memory_order_relaxed);
}
size_t ShenandoahHeap::capacity() const {
@@ -747,7 +748,7 @@ size_t ShenandoahHeap::max_capacity() const {
}
size_t ShenandoahHeap::soft_max_capacity() const {
- size_t v = AtomicAccess::load(&_soft_max_size);
+ size_t v = _soft_max_size.load_relaxed();
assert(min_capacity() <= v && v <= max_capacity(),
"Should be in bounds: %zu <= %zu <= %zu",
min_capacity(), v, max_capacity());
@@ -758,7 +759,7 @@ void ShenandoahHeap::set_soft_max_capacity(size_t v) {
assert(min_capacity() <= v && v <= max_capacity(),
"Should be in bounds: %zu <= %zu <= %zu",
min_capacity(), v, max_capacity());
- AtomicAccess::store(&_soft_max_size, v);
+ _soft_max_size.store_relaxed(v);
}
size_t ShenandoahHeap::min_capacity() const {
@@ -1775,12 +1776,7 @@ void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_sta
void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
if (!_aux_bitmap_region_special) {
- bool success = os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size());
- if (!success) {
- log_warning(gc)("Auxiliary marking bitmap uncommit failed: " PTR_FORMAT " (%zu bytes)",
- p2i(_aux_bitmap_region.start()), _aux_bitmap_region.byte_size());
- assert(false, "Auxiliary marking bitmap uncommit should always succeed");
- }
+ os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size());
}
}
@@ -1946,7 +1942,7 @@ private:
size_t const _stride;
shenandoah_padding(0);
- volatile size_t _index;
+ Atomic _index;
shenandoah_padding(1);
public:
@@ -1959,8 +1955,8 @@ public:
size_t stride = _stride;
size_t max = _heap->num_regions();
- while (AtomicAccess::load(&_index) < max) {
- size_t cur = AtomicAccess::fetch_then_add(&_index, stride, memory_order_relaxed);
+ while (_index.load_relaxed() < max) {
+ size_t cur = _index.fetch_then_add(stride, memory_order_relaxed);
size_t start = cur;
size_t end = MIN2(cur + stride, max);
if (start >= max) break;
@@ -2626,11 +2622,7 @@ void ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
size_t len = _bitmap_bytes_per_slice;
char* addr = (char*) _bitmap_region.start() + off;
- bool success = os::uncommit_memory(addr, len);
- if (!success) {
- log_warning(gc)("Bitmap slice uncommit failed: " PTR_FORMAT " (%zu bytes)", p2i(addr), len);
- assert(false, "Bitmap slice uncommit should always succeed");
- }
+ os::uncommit_memory(addr, len);
}
void ShenandoahHeap::forbid_uncommit() {
@@ -2712,11 +2704,11 @@ ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
_index(0) {}
void ShenandoahRegionIterator::reset() {
- _index = 0;
+ _index.store_relaxed(0);
}
bool ShenandoahRegionIterator::has_next() const {
- return _index < _heap->num_regions();
+ return _index.load_relaxed() < _heap->num_regions();
}
ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index 9240091070b..4a4499667ff 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -88,7 +88,7 @@ private:
ShenandoahHeap* _heap;
shenandoah_padding(0);
- volatile size_t _index;
+ Atomic _index;
shenandoah_padding(1);
// No implicit copying: iterators should be passed by reference to capture the state
@@ -208,9 +208,9 @@ private:
size_t _initial_size;
size_t _minimum_size;
- volatile size_t _soft_max_size;
+ Atomic _soft_max_size;
shenandoah_padding(0);
- volatile size_t _committed;
+ Atomic _committed;
shenandoah_padding(1);
public:
@@ -340,7 +340,7 @@ private:
ShenandoahSharedFlag _full_gc_move_in_progress;
ShenandoahSharedFlag _concurrent_strong_root_in_progress;
- size_t _gc_no_progress_count;
+ Atomic _gc_no_progress_count;
// This updates the singular, global gc state. This call must happen on a safepoint.
void set_gc_state_at_safepoint(uint mask, bool value);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
index e35f116b843..02f2beaf4e0 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
@@ -49,7 +49,7 @@
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/prefetch.inline.hpp"
@@ -61,7 +61,7 @@ inline ShenandoahHeap* ShenandoahHeap::heap() {
}
inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
- size_t new_index = AtomicAccess::add(&_index, (size_t) 1, memory_order_relaxed);
+ size_t new_index = _index.add_then_fetch((size_t) 1, memory_order_relaxed);
// get_region() provides the bounds-check and returns null on OOB.
return _heap->get_region(new_index - 1);
}
@@ -75,15 +75,15 @@ inline WorkerThreads* ShenandoahHeap::safepoint_workers() {
}
inline void ShenandoahHeap::notify_gc_progress() {
- AtomicAccess::store(&_gc_no_progress_count, (size_t) 0);
+ _gc_no_progress_count.store_relaxed((size_t) 0);
}
inline void ShenandoahHeap::notify_gc_no_progress() {
- AtomicAccess::inc(&_gc_no_progress_count);
+ _gc_no_progress_count.add_then_fetch((size_t) 1);
}
inline size_t ShenandoahHeap::get_gc_no_progress_count() const {
- return AtomicAccess::load(&_gc_no_progress_count);
+ return _gc_no_progress_count.load_relaxed();
}
inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
index b0c13df6c4f..afc6b24e168 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
@@ -816,11 +816,7 @@ void ShenandoahHeapRegion::do_commit() {
void ShenandoahHeapRegion::do_uncommit() {
ShenandoahHeap* heap = ShenandoahHeap::heap();
if (!heap->is_heap_region_special()) {
- bool success = os::uncommit_memory((char *) bottom(), RegionSizeBytes);
- if (!success) {
- log_warning(gc)("Region uncommit failed: " PTR_FORMAT " (%zu bytes)", p2i(bottom()), RegionSizeBytes);
- assert(false, "Region uncommit should always succeed");
- }
+ os::uncommit_memory((char *) bottom(), RegionSizeBytes);
}
if (!heap->is_bitmap_region_special()) {
heap->uncommit_bitmap_slice(this);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahInPlacePromoter.cpp b/src/hotspot/share/gc/shenandoah/shenandoahInPlacePromoter.cpp
new file mode 100644
index 00000000000..83f4217df83
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahInPlacePromoter.cpp
@@ -0,0 +1,311 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "gc/shared/plab.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
+#include "gc/shenandoah/shenandoahInPlacePromoter.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.hpp"
+#include "gc/shenandoah/shenandoahOldGeneration.hpp"
+#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
+
+ShenandoahInPlacePromotionPlanner::RegionPromotions::RegionPromotions(ShenandoahFreeSet* free_set)
+ : _low_idx(free_set->max_regions())
+ , _high_idx(-1)
+ , _regions(0)
+ , _bytes(0)
+ , _free_set(free_set)
+{
+}
+
+void ShenandoahInPlacePromotionPlanner::RegionPromotions::increment(idx_t region_index, size_t remnant_bytes) {
+ if (region_index < _low_idx) {
+ _low_idx = region_index;
+ }
+ if (region_index > _high_idx) {
+ _high_idx = region_index;
+ }
+ _regions++;
+ _bytes += remnant_bytes;
+}
+
+void ShenandoahInPlacePromotionPlanner::RegionPromotions::update_free_set(ShenandoahFreeSetPartitionId partition_id) const {
+ if (_regions > 0) {
+ _free_set->shrink_interval_if_range_modifies_either_boundary(partition_id, _low_idx, _high_idx, _regions);
+ }
+}
+
+ShenandoahInPlacePromotionPlanner::ShenandoahInPlacePromotionPlanner(const ShenandoahGenerationalHeap* heap)
+ : _old_garbage_threshold(ShenandoahHeapRegion::region_size_bytes() * heap->old_generation()->heuristics()->get_old_garbage_threshold() / 100)
+ , _pip_used_threshold(ShenandoahHeapRegion::region_size_bytes() * ShenandoahGenerationalMinPIPUsage / 100)
+ , _heap(heap)
+ , _free_set(_heap->free_set())
+ , _marking_context(_heap->marking_context())
+ , _mutator_regions(_free_set)
+ , _collector_regions(_free_set)
+ , _pip_padding_bytes(0)
+{
+}
+
+bool ShenandoahInPlacePromotionPlanner::is_eligible(const ShenandoahHeapRegion* region) const {
+ return region->garbage() < _old_garbage_threshold && region->used() > _pip_used_threshold;
+}
+
+void ShenandoahInPlacePromotionPlanner::prepare(ShenandoahHeapRegion* r) {
+ HeapWord* tams = _marking_context->top_at_mark_start(r);
+ HeapWord* original_top = r->top();
+
+ if (_heap->is_concurrent_mark_in_progress() || tams != original_top) {
+ // We do not promote this region (either in place or by copy) because it has received new allocations.
+ // During evacuation, we exclude from promotion regions for which age > tenure threshold, garbage < garbage-threshold,
+ // used > pip_used_threshold, and get_top_before_promote() != tams.
+ // TODO: Such a region should have had its age reset to zero when it was used for allocation?
+ return;
+ }
+
+ // No allocations from this region have been made during concurrent mark. It meets all the criteria
+ // for in-place-promotion. Though we only need the value of top when we fill the end of the region,
+ // we use this field to indicate that this region should be promoted in place during the evacuation
+ // phase.
+ r->save_top_before_promote();
+ size_t remnant_bytes = r->free();
+ size_t remnant_words = remnant_bytes / HeapWordSize;
+ assert(ShenandoahHeap::min_fill_size() <= PLAB::min_size(), "Implementation makes invalid assumptions");
+ if (remnant_words >= ShenandoahHeap::min_fill_size()) {
+ ShenandoahHeap::fill_with_object(original_top, remnant_words);
+ // Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise,
+ // newly allocated objects will not be parsable when promote in place tries to register them. Furthermore, any
+ // new allocations would not necessarily be eligible for promotion. This addresses both issues.
+ r->set_top(r->end());
+ // The region r is either in the Mutator or Collector partition if remnant_words >= _heap->plab_min_size().
+ // Otherwise, the region is in the NotFree partition.
+ const idx_t i = r->index();
+ ShenandoahFreeSetPartitionId p = _free_set->membership(i);
+ if (p == ShenandoahFreeSetPartitionId::Mutator) {
+ _mutator_regions.increment(i, remnant_bytes);
+ } else if (p == ShenandoahFreeSetPartitionId::Collector) {
+ _collector_regions.increment(i, remnant_bytes);
+ } else {
+ assert((p == ShenandoahFreeSetPartitionId::NotFree) && (remnant_words < _heap->plab_min_size()),
+ "Should be NotFree if not in Collector or Mutator partitions");
+ // In this case, the memory is already counted as used and the region has already been retired. There is
+ // no need for further adjustments to used. Further, the remnant memory for this region will not be
+ // deallocated or made available to OldCollector after pip.
+ remnant_bytes = 0;
+ }
+
+ _pip_padding_bytes += remnant_bytes;
+ _free_set->prepare_to_promote_in_place(i, remnant_bytes);
+ } else {
+ // Since the remnant is so small that this region has already been retired, we don't have to worry about any
+ // accidental allocations occurring within this region before the region is promoted in place.
+
+ // This region was already not in the Collector or Mutator set, so no need to remove it.
+ assert(_free_set->membership(r->index()) == ShenandoahFreeSetPartitionId::NotFree, "sanity");
+ }
+}
+
+void ShenandoahInPlacePromotionPlanner::update_free_set() const {
+ _heap->old_generation()->set_pad_for_promote_in_place(_pip_padding_bytes);
+
+ if (_mutator_regions._regions + _collector_regions._regions > 0) {
+ _free_set->account_for_pip_regions(_mutator_regions._regions, _mutator_regions._bytes,
+ _collector_regions._regions, _collector_regions._bytes);
+ }
+
+ // Retire any regions that have been selected for promote in place
+ _mutator_regions.update_free_set(ShenandoahFreeSetPartitionId::Mutator);
+ _collector_regions.update_free_set(ShenandoahFreeSetPartitionId::Collector);
+}
+
+void ShenandoahInPlacePromoter::maybe_promote_region(ShenandoahHeapRegion* r) const {
+ if (r->is_young() && r->is_active() && _heap->is_tenurable(r)) {
+ if (r->is_humongous_start()) {
+ // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
+ // doing this work during a safepoint. We cannot put humongous regions into the collection set because that
+ // triggers the load-reference barrier (LRB) to copy on reference fetch.
+ //
+ // Aged humongous continuation regions are handled with their start region. If an aged regular region has
+ // more garbage than ShenandoahOldGarbageThreshold, we'll promote by evacuation. If there is room for evacuation
+ // in this cycle, the region will be in the collection set. If there is no room, the region will be promoted
+ // by evacuation in some future GC cycle.
+
+ // We do not promote primitive arrays because there's no performance penalty keeping them in young. When/if they
+ // become garbage, reclaiming the memory from young is much quicker and more efficient than reclaiming them from old.
+ oop obj = cast_to_oop(r->bottom());
+ if (!obj->is_typeArray()) {
+ promote_humongous(r);
+ }
+ } else if (r->is_regular() && (r->get_top_before_promote() != nullptr)) {
+ // Likewise, we cannot put promote-in-place regions into the collection set because that would also trigger
+ // the LRB to copy on reference fetch.
+ //
+ // If an aged regular region has received allocations during the current cycle, we do not promote because the
+ // newly allocated objects do not have appropriate age; this region's age will be reset to zero at end of cycle.
+ promote(r);
+ }
+ }
+}
+
+// When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
+// set scans of this region's content. The region will be coalesced and filled prior to the next old-gen marking effort.
+// We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
+// contained herein.
+void ShenandoahInPlacePromoter::promote(ShenandoahHeapRegion* region) const {
+
+  ShenandoahMarkingContext* const marking_context = _heap->young_generation()->complete_marking_context();
+  HeapWord* const tams = marking_context->top_at_mark_start(region);
+  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+
+  // Sanity checks: this service applies only to tenurable, regular young regions with garbage below the old-gen
+  // threshold that have received no allocations since top-before-promote was recorded (top_before_promote == tams).
+  {
+    const size_t old_garbage_threshold =
+        (region_size_bytes * _heap->old_generation()->heuristics()->get_old_garbage_threshold()) / 100;
+    assert(!_heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking");
+    assert(region->garbage_before_padded_for_promote() < old_garbage_threshold,
+           "Region %zu has too much garbage for promotion", region->index());
+    assert(region->is_young(), "Only young regions can be promoted");
+    assert(region->is_regular(), "Use different service to promote humongous regions");
+    assert(_heap->is_tenurable(region), "Only promote regions that are sufficiently aged");
+    assert(region->get_top_before_promote() == tams, "Region %zu has been used for allocations before promotion", region->index());
+  }
+
+  ShenandoahOldGeneration* const old_gen = _heap->old_generation();
+
+  // Rebuild the remembered set information and mark the entire range as DIRTY. We do NOT scan the content of this
+  // range to determine which cards need to be DIRTY. That would force us to scan the region twice, once now, and
+  // once during the subsequent remembered set scan. Instead, we blindly (conservatively) mark everything as DIRTY
+  // now and then sort out the CLEAN pages during the next remembered set scan.
+  //
+  // Rebuilding the remembered set consists of clearing all object registrations (reset_object_range()) here,
+  // then registering every live object and every coalesced range of free objects in the loop that follows.
+  ShenandoahScanRemembered* const scanner = old_gen->card_scan();
+  scanner->reset_object_range(region->bottom(), region->end());
+  scanner->mark_range_as_dirty(region->bottom(), region->get_top_before_promote() - region->bottom());
+
+  // Walk [bottom, tams): register each marked (live) object; fill each unmarked gap with a dummy object and
+  // register the filler, so the card scanner can parse the region from any card boundary.
+  HeapWord* obj_addr = region->bottom();
+  while (obj_addr < tams) {
+    oop obj = cast_to_oop(obj_addr);
+    if (marking_context->is_marked(obj)) {
+      assert(obj->klass() != nullptr, "klass should not be null");
+      // This thread is responsible for registering all objects in this region. No need for lock.
+      scanner->register_object_without_lock(obj_addr);
+      obj_addr += obj->size();
+    } else {
+      // Skip over the dead range [obj_addr, next_marked_obj) in one step, replacing it with a filler object.
+      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams);
+      assert(next_marked_obj <= tams, "next marked object cannot exceed tams");
+      size_t fill_size = next_marked_obj - obj_addr;
+      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
+      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
+      scanner->register_object_without_lock(obj_addr);
+      obj_addr = next_marked_obj;
+    }
+  }
+  // We do not need to scan above TAMS because restored top equals tams
+  assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams");
+
+
+  // The heap lock serializes the young->old affiliation change and the free-set update below.
+  {
+    ShenandoahHeapLocker locker(_heap->lock());
+#ifdef ASSERT
+    HeapWord* update_watermark = region->get_update_watermark();
+    // pip_unpadded is memory too small to be filled above original top
+    size_t pip_unpadded = (region->end() - region->top()) * HeapWordSize;
+    // NOTE(review): the second disjunct below restates the definition of pip_unpadded on the previous line and
+    // is trivially true unless top() changes between the two reads — confirm what race this intends to detect.
+    assert((region->top() == region->end())
+           || (pip_unpadded == (size_t) ((region->end() - region->top()) * HeapWordSize)), "Invariant");
+    assert(pip_unpadded < ShenandoahHeap::min_fill_size() * HeapWordSize, "Sanity");
+    size_t pip_pad_bytes = (region->top() - region->get_top_before_promote()) * HeapWordSize;
+    assert((pip_unpadded == 0) || (pip_pad_bytes == 0), "Only one of pip_unpadded and pip_bytes is non-zero");
+#endif
+
+    // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the
+    // is_collector_free range. We'll add it to that range below.
+    region->restore_top_before_promote();
+
+    assert(region->used() + pip_pad_bytes + pip_unpadded == region_size_bytes, "invariant");
+
+    // The update_watermark was likely established while we had the artificially high value of top. Make it sane now.
+    assert(update_watermark >= region->top(), "original top cannot exceed preserved update_watermark");
+    region->set_update_watermark(region->top());
+
+    // Transfer this region from young to old, increasing promoted_reserve if available space exceeds plab_min_size()
+    _heap->free_set()->add_promoted_in_place_region_to_old_collector(region);
+    region->set_affiliation(OLD_GENERATION);
+    region->set_promoted_in_place();
+  }
+}
+
+// Promote a humongous object in place: flip the affiliation of the start region and every spanned continuation
+// region from young to old (under the heap lock), transfer the regions from the mutator partition to the old
+// collector, then rebuild the remembered-set registration and conservatively dirty the object's full range.
+void ShenandoahInPlacePromoter::promote_humongous(ShenandoahHeapRegion* region) const {
+  oop obj = cast_to_oop(region->bottom());
+
+  assert(region->is_young(), "Only young regions can be promoted");
+  assert(region->is_humongous_start(), "Should not promote humongous continuation in isolation");
+  assert(_heap->is_tenurable(region), "Only promote regions that are sufficiently aged");
+  assert(_heap->marking_context()->is_marked(obj), "Promoted humongous object should be alive");
+  assert(!obj->is_typeArray(), "Don't promote humongous primitives");
+
+  const size_t used_bytes = obj->size() * HeapWordSize;
+  const size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
+  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+  // Waste is the unused tail of the last spanned region; reuse used_bytes rather than recomputing obj->size() * HeapWordSize.
+  const size_t humongous_waste = spanned_regions * region_size_bytes - used_bytes;
+  const size_t index_limit = region->index() + spanned_regions;
+
+  ShenandoahOldGeneration* const old_gen = _heap->old_generation();
+  {
+    // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
+    // young to old.
+    ShenandoahHeapLocker locker(_heap->lock());
+
+    // We promote humongous objects unconditionally, without checking for availability. We adjust
+    // usage totals, including humongous waste, after evacuation is done.
+    log_debug(gc)("promoting humongous region %zu, spanning %zu", region->index(), spanned_regions);
+
+    // For this region and each humongous continuation region spanned by this humongous object, change
+    // affiliation to OLD_GENERATION and adjust the generation-use tallies. The remnant of memory
+    // in the last humongous region that is not spanned by obj is currently not used.
+    for (size_t i = region->index(); i < index_limit; i++) {
+      ShenandoahHeapRegion* r = _heap->get_region(i);
+      log_debug(gc)("promoting humongous region %zu, from " PTR_FORMAT " to " PTR_FORMAT,
+              r->index(), p2i(r->bottom()), p2i(r->top()));
+      // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
+      r->set_affiliation(OLD_GENERATION);
+      r->set_promoted_in_place();
+    }
+
+    ShenandoahFreeSet* freeset = _heap->free_set();
+    freeset->transfer_humongous_regions_from_mutator_to_old_collector(spanned_regions, humongous_waste);
+  }
+
+  // Since this region may have served previously as OLD, it may hold obsolete object range info.
+  HeapWord* const humongous_bottom = region->bottom();
+  ShenandoahScanRemembered* const scanner = old_gen->card_scan();
+  scanner->reset_object_range(humongous_bottom, humongous_bottom + spanned_regions * ShenandoahHeapRegion::region_size_words());
+  // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
+  scanner->register_object_without_lock(humongous_bottom);
+
+  log_debug(gc)("Dirty cards for promoted humongous object (Region %zu) from " PTR_FORMAT " to " PTR_FORMAT,
+                region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
+  scanner->mark_range_as_dirty(humongous_bottom, obj->size());
+}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahInPlacePromoter.hpp b/src/hotspot/share/gc/shenandoah/shenandoahInPlacePromoter.hpp
new file mode 100644
index 00000000000..939107dd3ac
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahInPlacePromoter.hpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHINPLACEPROMOTER_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHINPLACEPROMOTER_HPP
+
+#include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
+
+class ShenandoahFreeSet;
+class ShenandoahMarkingContext;
+class ShenandoahGenerationalHeap;
+class ShenandoahHeapRegion;
+
+// Selects and prepares young regions for promotion in place, and reports the retired regions to the free set.
+class ShenandoahInPlacePromotionPlanner {
+  using idx_t = ShenandoahSimpleBitMap::idx_t;
+
+  // Accumulates, per free-set partition, the regions chosen for in-place promotion: an index interval plus
+  // running region/byte tallies, reported to the free set in a single update_free_set() call.
+  struct RegionPromotions {
+    idx_t _low_idx;            // lowest promoted region index accumulated so far
+    idx_t _high_idx;           // highest promoted region index accumulated so far
+    size_t _regions;           // count of promoted regions
+    size_t _bytes;             // presumably accumulated remnant bytes from increment() — confirm
+    ShenandoahFreeSet* _free_set;
+
+    explicit RegionPromotions(ShenandoahFreeSet* free_set);
+    void increment(idx_t region_index, size_t remnant_bytes);
+    // NOTE(review): ShenandoahFreeSetPartitionId is not declared or forward-declared in this header;
+    // presumably it arrives via shenandoahSimpleBitMap.hpp — confirm the include chain.
+    void update_free_set(ShenandoahFreeSetPartitionId partition_id) const;
+  };
+
+  // Eligibility thresholds: garbage must be below, usage above (see is_eligible()).
+  const size_t _old_garbage_threshold;
+  const size_t _pip_used_threshold;
+
+  const ShenandoahGenerationalHeap* _heap;
+  ShenandoahFreeSet* _free_set;
+  const ShenandoahMarkingContext* _marking_context;
+
+  // Any region that is to be promoted in place needs to be retired from its Collector or Mutator partition.
+  RegionPromotions _mutator_regions;
+  RegionPromotions _collector_regions;
+
+  // Tracks the padding of space above top in regions eligible for promotion in place
+  size_t _pip_padding_bytes;
+public:
+  explicit ShenandoahInPlacePromotionPlanner(const ShenandoahGenerationalHeap* heap);
+
+  // Returns true if this region has garbage below and usage above the configurable thresholds
+  bool is_eligible(const ShenandoahHeapRegion* region) const;
+
+  // Prepares the region for promotion by moving top to the end to prevent allocations
+  void prepare(ShenandoahHeapRegion* region);
+
+  // Notifies the free set of in place promotions
+  void update_free_set() const;
+
+  size_t old_garbage_threshold() const { return _old_garbage_threshold; }
+};
+
+// Executes promotion in place for regions that still satisfy the promotion criteria, handling both
+// regular regions (promote) and humongous starts (promote_humongous).
+class ShenandoahInPlacePromoter {
+  ShenandoahGenerationalHeap* _heap;
+public:
+  explicit ShenandoahInPlacePromoter(ShenandoahGenerationalHeap* heap) : _heap(heap) {}
+
+  // If the region still meets the criteria for promotion in place, it will be promoted
+  void maybe_promote_region(ShenandoahHeapRegion* region) const;
+
+private:
+  // Promote a regular young region in place (rebuilds remembered set, transfers region to old).
+  void promote(ShenandoahHeapRegion* region) const;
+  // Promote a humongous object's start region and all spanned continuation regions.
+  void promote_humongous(ShenandoahHeapRegion* region) const;
+};
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHINPLACEPROMOTER_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahLock.cpp b/src/hotspot/share/gc/shenandoah/shenandoahLock.cpp
index 7a3b33f5fd0..7eec0b9af64 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahLock.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahLock.cpp
@@ -24,7 +24,6 @@
#include "gc/shenandoah/shenandoahLock.hpp"
-#include "runtime/atomicAccess.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/os.hpp"
@@ -46,8 +45,8 @@ void ShenandoahLock::contended_lock_internal(JavaThread* java_thread) {
int ctr = os::is_MP() ? 0xFF : 0;
int yields = 0;
// Apply TTAS to avoid more expensive CAS calls if the lock is still held by other thread.
- while (AtomicAccess::load(&_state) == locked ||
- AtomicAccess::cmpxchg(&_state, unlocked, locked) != unlocked) {
+ while (_state.load_relaxed() == locked ||
+ _state.compare_exchange(unlocked, locked) != unlocked) {
if (ctr > 0 && !SafepointSynchronize::is_synchronizing()) {
// Lightly contended, spin a little if no safepoint is pending.
SpinPause();
@@ -113,11 +112,11 @@ ShenandoahReentrantLock::~ShenandoahReentrantLock() {
void ShenandoahReentrantLock::lock() {
Thread* const thread = Thread::current();
- Thread* const owner = AtomicAccess::load(&_owner);
+ Thread* const owner = _owner.load_relaxed();
if (owner != thread) {
ShenandoahSimpleLock::lock();
- AtomicAccess::store(&_owner, thread);
+ _owner.store_relaxed(thread);
}
_count++;
@@ -130,13 +129,13 @@ void ShenandoahReentrantLock::unlock() {
_count--;
if (_count == 0) {
- AtomicAccess::store(&_owner, (Thread*)nullptr);
+ _owner.store_relaxed((Thread*)nullptr);
ShenandoahSimpleLock::unlock();
}
}
bool ShenandoahReentrantLock::owned_by_self() const {
Thread* const thread = Thread::current();
- Thread* const owner = AtomicAccess::load(&_owner);
+ Thread* const owner = _owner.load_relaxed();
return owner == thread;
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp b/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp
index fbdf4971354..2e44810cd5d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp
@@ -27,6 +27,7 @@
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/safepoint.hpp"
@@ -35,9 +36,9 @@ private:
enum LockState { unlocked = 0, locked = 1 };
shenandoah_padding(0);
- volatile LockState _state;
+ Atomic _state;
shenandoah_padding(1);
- Thread* volatile _owner;
+ Atomic _owner;
shenandoah_padding(2);
template
@@ -48,33 +49,33 @@ public:
ShenandoahLock() : _state(unlocked), _owner(nullptr) {};
void lock(bool allow_block_for_safepoint) {
- assert(AtomicAccess::load(&_owner) != Thread::current(), "reentrant locking attempt, would deadlock");
+ assert(_owner.load_relaxed() != Thread::current(), "reentrant locking attempt, would deadlock");
if ((allow_block_for_safepoint && SafepointSynchronize::is_synchronizing()) ||
- (AtomicAccess::cmpxchg(&_state, unlocked, locked) != unlocked)) {
+ (_state.compare_exchange(unlocked, locked) != unlocked)) {
// 1. Java thread, and there is a pending safepoint. Dive into contended locking
// immediately without trying anything else, and block.
// 2. Fast lock fails, dive into contended lock handling.
contended_lock(allow_block_for_safepoint);
}
- assert(AtomicAccess::load(&_state) == locked, "must be locked");
- assert(AtomicAccess::load(&_owner) == nullptr, "must not be owned");
- DEBUG_ONLY(AtomicAccess::store(&_owner, Thread::current());)
+ assert(_state.load_relaxed() == locked, "must be locked");
+ assert(_owner.load_relaxed() == nullptr, "must not be owned");
+ DEBUG_ONLY(_owner.store_relaxed(Thread::current());)
}
void unlock() {
- assert(AtomicAccess::load(&_owner) == Thread::current(), "sanity");
- DEBUG_ONLY(AtomicAccess::store(&_owner, (Thread*)nullptr);)
+ assert(_owner.load_relaxed() == Thread::current(), "sanity");
+ DEBUG_ONLY(_owner.store_relaxed((Thread*)nullptr);)
OrderAccess::fence();
- AtomicAccess::store(&_state, unlocked);
+ _state.store_relaxed(unlocked);
}
void contended_lock(bool allow_block_for_safepoint);
bool owned_by_self() {
#ifdef ASSERT
- return _state == locked && _owner == Thread::current();
+ return _state.load_relaxed() == locked && _owner.load_relaxed() == Thread::current();
#else
ShouldNotReachHere();
return false;
@@ -111,7 +112,7 @@ public:
class ShenandoahReentrantLock : public ShenandoahSimpleLock {
private:
- Thread* volatile _owner;
+ Atomic _owner;
uint64_t _count;
public:
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp
index 55cec63f045..facaefd4b62 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp
@@ -433,8 +433,8 @@ void ShenandoahNMethodTableSnapshot::parallel_nmethods_do(NMethodClosure *f) {
ShenandoahNMethod** const list = _list->list();
size_t max = (size_t)_limit;
- while (_claimed < max) {
- size_t cur = AtomicAccess::fetch_then_add(&_claimed, stride, memory_order_relaxed);
+ while (_claimed.load_relaxed() < max) {
+ size_t cur = _claimed.fetch_then_add(stride, memory_order_relaxed);
size_t start = cur;
size_t end = MIN2(cur + stride, max);
if (start >= max) break;
@@ -457,8 +457,8 @@ void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl)
ShenandoahNMethod** list = _list->list();
size_t max = (size_t)_limit;
- while (_claimed < max) {
- size_t cur = AtomicAccess::fetch_then_add(&_claimed, stride, memory_order_relaxed);
+ while (_claimed.load_relaxed() < max) {
+ size_t cur = _claimed.fetch_then_add(stride, memory_order_relaxed);
size_t start = cur;
size_t end = MIN2(cur + stride, max);
if (start >= max) break;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.hpp b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.hpp
index 5387870c9dc..77faf6c0dcb 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.hpp
@@ -30,6 +30,7 @@
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/growableArray.hpp"
// ShenandoahNMethod tuple records the internal locations of oop slots within reclocation stream in
@@ -115,7 +116,7 @@ private:
int _limit;
shenandoah_padding(0);
- volatile size_t _claimed;
+ Atomic _claimed;
shenandoah_padding(1);
public:
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp b/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp
index 12c01ad5c90..e23187a5d3f 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp
@@ -27,7 +27,7 @@
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "memory/allocation.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
typedef int32_t ShenandoahSharedValue;
typedef struct ShenandoahSharedFlag {
@@ -37,7 +37,7 @@ typedef struct ShenandoahSharedFlag {
};
shenandoah_padding(0);
- volatile ShenandoahSharedValue value;
+ Atomic value;
shenandoah_padding(1);
ShenandoahSharedFlag() {
@@ -45,19 +45,19 @@ typedef struct ShenandoahSharedFlag {
}
void set() {
- AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)SET);
+ value.release_store_fence((ShenandoahSharedValue)SET);
}
void unset() {
- AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)UNSET);
+ value.release_store_fence((ShenandoahSharedValue)UNSET);
}
bool is_set() const {
- return AtomicAccess::load_acquire(&value) == SET;
+ return value.load_acquire() == SET;
}
bool is_unset() const {
- return AtomicAccess::load_acquire(&value) == UNSET;
+ return value.load_acquire() == UNSET;
}
void set_cond(bool val) {
@@ -72,7 +72,7 @@ typedef struct ShenandoahSharedFlag {
if (is_set()) {
return false;
}
- ShenandoahSharedValue old = AtomicAccess::cmpxchg(&value, (ShenandoahSharedValue)UNSET, (ShenandoahSharedValue)SET);
+ ShenandoahSharedValue old = value.compare_exchange((ShenandoahSharedValue)UNSET, (ShenandoahSharedValue)SET);
return old == UNSET; // success
}
@@ -80,17 +80,13 @@ typedef struct ShenandoahSharedFlag {
if (!is_set()) {
return false;
}
- ShenandoahSharedValue old = AtomicAccess::cmpxchg(&value, (ShenandoahSharedValue)SET, (ShenandoahSharedValue)UNSET);
+ ShenandoahSharedValue old = value.compare_exchange((ShenandoahSharedValue)SET, (ShenandoahSharedValue)UNSET);
return old == SET; // success
}
- volatile ShenandoahSharedValue* addr_of() {
- return &value;
- }
-
private:
volatile ShenandoahSharedValue* operator&() {
- fatal("Use addr_of() instead");
+ fatal("Not supported");
return nullptr;
}
@@ -105,7 +101,7 @@ private:
typedef struct ShenandoahSharedBitmap {
shenandoah_padding(0);
- volatile ShenandoahSharedValue value;
+ Atomic value;
shenandoah_padding(1);
ShenandoahSharedBitmap() {
@@ -116,7 +112,7 @@ typedef struct ShenandoahSharedBitmap {
assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask;
while (true) {
- ShenandoahSharedValue ov = AtomicAccess::load_acquire(&value);
+ ShenandoahSharedValue ov = value.load_acquire();
// We require all bits of mask_val to be set
if ((ov & mask_val) == mask_val) {
// already set
@@ -124,7 +120,7 @@ typedef struct ShenandoahSharedBitmap {
}
ShenandoahSharedValue nv = ov | mask_val;
- if (AtomicAccess::cmpxchg(&value, ov, nv) == ov) {
+ if (value.compare_exchange(ov, nv) == ov) {
// successfully set: if value returned from cmpxchg equals ov, then nv has overwritten value.
return;
}
@@ -135,14 +131,14 @@ typedef struct ShenandoahSharedBitmap {
assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask;
while (true) {
- ShenandoahSharedValue ov = AtomicAccess::load_acquire(&value);
+ ShenandoahSharedValue ov = value.load_acquire();
if ((ov & mask_val) == 0) {
// already unset
return;
}
ShenandoahSharedValue nv = ov & ~mask_val;
- if (AtomicAccess::cmpxchg(&value, ov, nv) == ov) {
+ if (value.compare_exchange(ov, nv) == ov) {
// successfully unset
return;
}
@@ -150,7 +146,7 @@ typedef struct ShenandoahSharedBitmap {
}
void clear() {
- AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)0);
+ value.release_store_fence((ShenandoahSharedValue)0);
}
// Returns true iff any bit set in mask is set in this.value.
@@ -161,18 +157,18 @@ typedef struct ShenandoahSharedBitmap {
// Returns true iff all bits set in mask are set in this.value.
bool is_set_exactly(uint mask) const {
assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
- uint uvalue = AtomicAccess::load_acquire(&value);
+ uint uvalue = value.load_acquire();
return (uvalue & mask) == mask;
}
// Returns true iff all bits set in mask are unset in this.value.
bool is_unset(uint mask) const {
assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
- return (AtomicAccess::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0;
+ return (value.load_acquire() & (ShenandoahSharedValue) mask) == 0;
}
bool is_clear() const {
- return (AtomicAccess::load_acquire(&value)) == 0;
+ return (value.load_acquire()) == 0;
}
void set_cond(uint mask, bool val) {
@@ -183,17 +179,13 @@ typedef struct ShenandoahSharedBitmap {
}
}
- volatile ShenandoahSharedValue* addr_of() {
- return &value;
- }
-
ShenandoahSharedValue raw_value() const {
- return value;
+ return value.load_relaxed();
}
private:
volatile ShenandoahSharedValue* operator&() {
- fatal("Use addr_of() instead");
+ fatal("Not supported");
return nullptr;
}
@@ -210,42 +202,36 @@ template
struct ShenandoahSharedEnumFlag {
typedef uint32_t EnumValueType;
shenandoah_padding(0);
- volatile EnumValueType value;
+ Atomic value;
shenandoah_padding(1);
- ShenandoahSharedEnumFlag() {
- value = 0;
- }
+ ShenandoahSharedEnumFlag() : value(0) {}
void set(T v) {
assert (v >= 0, "sanity");
assert (v < (sizeof(EnumValueType) * CHAR_MAX), "sanity");
- AtomicAccess::release_store_fence(&value, (EnumValueType)v);
+ value.release_store_fence((EnumValueType)v);
}
T get() const {
- return (T)AtomicAccess::load_acquire(&value);
+ return (T)value.load_acquire();
}
T cmpxchg(T new_value, T expected) {
assert (new_value >= 0, "sanity");
assert (new_value < (sizeof(EnumValueType) * CHAR_MAX), "sanity");
- return (T)AtomicAccess::cmpxchg(&value, (EnumValueType)expected, (EnumValueType)new_value);
+ return (T)value.compare_exchange((EnumValueType)expected, (EnumValueType)new_value);
}
T xchg(T new_value) {
assert (new_value >= 0, "sanity");
assert (new_value < (sizeof(EnumValueType) * CHAR_MAX), "sanity");
- return (T)AtomicAccess::xchg(&value, (EnumValueType)new_value);
- }
-
- volatile EnumValueType* addr_of() {
- return &value;
+ return (T)value.exchange((EnumValueType)new_value);
}
private:
volatile T* operator&() {
- fatal("Use addr_of() instead");
+ fatal("Not supported");
return nullptr;
}
@@ -260,7 +246,7 @@ private:
typedef struct ShenandoahSharedSemaphore {
shenandoah_padding(0);
- volatile ShenandoahSharedValue value;
+ Atomic value;
shenandoah_padding(1);
static uint max_tokens() {
@@ -269,17 +255,17 @@ typedef struct ShenandoahSharedSemaphore {
ShenandoahSharedSemaphore(uint tokens) {
assert(tokens <= max_tokens(), "sanity");
- AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)tokens);
+ value.release_store_fence((ShenandoahSharedValue)tokens);
}
bool try_acquire() {
while (true) {
- ShenandoahSharedValue ov = AtomicAccess::load_acquire(&value);
+ ShenandoahSharedValue ov = value.load_acquire();
if (ov == 0) {
return false;
}
ShenandoahSharedValue nv = ov - 1;
- if (AtomicAccess::cmpxchg(&value, ov, nv) == ov) {
+ if (value.compare_exchange(ov, nv) == ov) {
// successfully set
return true;
}
@@ -287,7 +273,7 @@ typedef struct ShenandoahSharedSemaphore {
}
void claim_all() {
- AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)0);
+ value.release_store_fence((ShenandoahSharedValue)0);
}
} ShenandoahSharedSemaphore;
diff --git a/src/hotspot/share/gc/shenandoah/vmStructs_shenandoah.hpp b/src/hotspot/share/gc/shenandoah/vmStructs_shenandoah.hpp
index e5e2b14a3a1..4299bdb8126 100644
--- a/src/hotspot/share/gc/shenandoah/vmStructs_shenandoah.hpp
+++ b/src/hotspot/share/gc/shenandoah/vmStructs_shenandoah.hpp
@@ -36,7 +36,7 @@
nonstatic_field(ShenandoahHeap, _regions, ShenandoahHeapRegion**) \
nonstatic_field(ShenandoahHeap, _log_min_obj_alignment_in_bytes, int) \
nonstatic_field(ShenandoahHeap, _free_set, ShenandoahFreeSet*) \
- volatile_nonstatic_field(ShenandoahHeap, _committed, size_t) \
+ volatile_nonstatic_field(ShenandoahHeap, _committed, Atomic) \
static_field(ShenandoahHeapRegion, RegionSizeBytes, size_t) \
static_field(ShenandoahHeapRegion, RegionSizeBytesShift, size_t) \
nonstatic_field(ShenandoahHeapRegion, _state, Atomic) \
diff --git a/src/hotspot/share/include/cds.h b/src/hotspot/share/include/cds.h
index a7bc896172c..76de42db755 100644
--- a/src/hotspot/share/include/cds.h
+++ b/src/hotspot/share/include/cds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@
#define CDS_DYNAMIC_ARCHIVE_MAGIC 0xf00baba8
#define CDS_PREIMAGE_ARCHIVE_MAGIC 0xcafea07c
#define CDS_GENERIC_HEADER_SUPPORTED_MIN_VERSION 13
-#define CURRENT_CDS_ARCHIVE_VERSION 19
+#define CURRENT_CDS_ARCHIVE_VERSION 20
typedef struct CDSFileMapRegion {
int _crc; // CRC checksum of this region.
diff --git a/src/hotspot/share/interpreter/interpreter.hpp b/src/hotspot/share/interpreter/interpreter.hpp
index 576146b344e..f7d42fcb4da 100644
--- a/src/hotspot/share/interpreter/interpreter.hpp
+++ b/src/hotspot/share/interpreter/interpreter.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,7 @@ class InterpreterCodelet: public Stub {
// General info/converters
int size() const { return _size; }
static int alignment() { return HeapWordSize; }
- static int code_alignment() { return CodeEntryAlignment; }
+ static uint code_alignment() { return CodeEntryAlignment; }
// Code info
address code_begin() const { return align_up((address)this + sizeof(InterpreterCodelet), code_alignment()); }
diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp
index 3e9d8c97b88..6214f6a2746 100644
--- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -355,7 +355,7 @@ JVMCIObjectArray CompilerToVM::initialize_intrinsics(JVMCI_TRAPS) {
return vmIntrinsics;
}
-#define PREDEFINED_CONFIG_FLAGS(do_bool_flag, do_int_flag, do_size_t_flag, do_intx_flag, do_uintx_flag) \
+#define PREDEFINED_CONFIG_FLAGS(do_bool_flag, do_uint_flag, do_int_flag, do_size_t_flag, do_intx_flag, do_uintx_flag) \
do_int_flag(AllocateInstancePrefetchLines) \
do_int_flag(AllocatePrefetchDistance) \
do_intx_flag(AllocatePrefetchInstr) \
@@ -367,7 +367,7 @@ JVMCIObjectArray CompilerToVM::initialize_intrinsics(JVMCI_TRAPS) {
do_bool_flag(CITime) \
do_bool_flag(CITimeEach) \
do_size_t_flag(CodeCacheSegmentSize) \
- do_intx_flag(CodeEntryAlignment) \
+ do_uint_flag(CodeEntryAlignment) \
do_int_flag(ContendedPaddingWidth) \
do_bool_flag(DontCompileHugeMethods) \
do_bool_flag(EagerJVMCI) \
@@ -554,16 +554,17 @@ jobjectArray readConfiguration0(JNIEnv *env, JVMCI_TRAPS) {
JVMCIENV->put_object_at(vmFlags, i++, vmFlagObj); \
}
#define ADD_BOOL_FLAG(name) ADD_FLAG(bool, name, BOXED_BOOLEAN)
+#define ADD_UINT_FLAG(name) ADD_FLAG(uint, name, BOXED_LONG)
#define ADD_INT_FLAG(name) ADD_FLAG(int, name, BOXED_LONG)
#define ADD_SIZE_T_FLAG(name) ADD_FLAG(size_t, name, BOXED_LONG)
#define ADD_INTX_FLAG(name) ADD_FLAG(intx, name, BOXED_LONG)
#define ADD_UINTX_FLAG(name) ADD_FLAG(uintx, name, BOXED_LONG)
- len = 0 + PREDEFINED_CONFIG_FLAGS(COUNT_FLAG, COUNT_FLAG, COUNT_FLAG, COUNT_FLAG, COUNT_FLAG);
+ len = 0 + PREDEFINED_CONFIG_FLAGS(COUNT_FLAG, COUNT_FLAG, COUNT_FLAG, COUNT_FLAG, COUNT_FLAG, COUNT_FLAG);
JVMCIObjectArray vmFlags = JVMCIENV->new_VMFlag_array(len, JVMCI_CHECK_NULL);
int i = 0;
JVMCIObject value;
- PREDEFINED_CONFIG_FLAGS(ADD_BOOL_FLAG, ADD_INT_FLAG, ADD_SIZE_T_FLAG, ADD_INTX_FLAG, ADD_UINTX_FLAG)
+ PREDEFINED_CONFIG_FLAGS(ADD_BOOL_FLAG, ADD_UINT_FLAG, ADD_INT_FLAG, ADD_SIZE_T_FLAG, ADD_INTX_FLAG, ADD_UINTX_FLAG)
JVMCIObjectArray vmIntrinsics = CompilerToVM::initialize_intrinsics(JVMCI_CHECK_NULL);
diff --git a/src/hotspot/share/memory/allocation.inline.hpp b/src/hotspot/share/memory/allocation.inline.hpp
index 5561cdbe6f7..1cbff9d0255 100644
--- a/src/hotspot/share/memory/allocation.inline.hpp
+++ b/src/hotspot/share/memory/allocation.inline.hpp
@@ -87,8 +87,7 @@ E* MmapArrayAllocator::allocate(size_t length, MemTag mem_tag) {
template
void MmapArrayAllocator::free(E* addr, size_t length) {
- bool result = os::release_memory((char*)addr, size_for(length));
- assert(result, "Failed to release memory");
+ os::release_memory((char*)addr, size_for(length));
}
template
diff --git a/src/hotspot/share/memory/arena.cpp b/src/hotspot/share/memory/arena.cpp
index 2de3f837c00..75cd909b028 100644
--- a/src/hotspot/share/memory/arena.cpp
+++ b/src/hotspot/share/memory/arena.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -330,6 +330,9 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
size_t len = MAX2(ARENA_ALIGN(x), (size_t) Chunk::size);
if (MemTracker::check_exceeds_limit(x, _mem_tag)) {
+ if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+ vm_exit_out_of_memory(x, OOM_MALLOC_ERROR, "MallocLimit in Arena::grow");
+ }
return nullptr;
}
diff --git a/src/hotspot/share/memory/memoryReserver.cpp b/src/hotspot/share/memory/memoryReserver.cpp
index e8d1887f59f..59e8f30707d 100644
--- a/src/hotspot/share/memory/memoryReserver.cpp
+++ b/src/hotspot/share/memory/memoryReserver.cpp
@@ -99,9 +99,7 @@ static char* reserve_memory_inner(char* requested_address,
}
// Base not aligned, retry.
- if (!os::release_memory(base, size)) {
- fatal("os::release_memory failed");
- }
+ os::release_memory(base, size);
// Map using the requested alignment.
return os::reserve_memory_aligned(size, alignment, mem_tag, exec);
@@ -231,13 +229,13 @@ ReservedSpace MemoryReserver::reserve(size_t size,
mem_tag);
}
-bool MemoryReserver::release(const ReservedSpace& reserved) {
+void MemoryReserver::release(const ReservedSpace& reserved) {
assert(reserved.is_reserved(), "Precondition");
if (reserved.special()) {
- return os::release_memory_special(reserved.base(), reserved.size());
+ os::release_memory_special(reserved.base(), reserved.size());
} else {
- return os::release_memory(reserved.base(), reserved.size());
+ os::release_memory(reserved.base(), reserved.size());
}
}
@@ -266,9 +264,7 @@ static char* map_memory_to_file(char* requested_address,
// Base not aligned, retry.
- if (!os::unmap_memory(base, size)) {
- fatal("os::unmap_memory failed");
- }
+ os::unmap_memory(base, size);
// Map using the requested alignment.
return os::map_memory_to_file_aligned(size, alignment, fd, mem_tag);
diff --git a/src/hotspot/share/memory/memoryReserver.hpp b/src/hotspot/share/memory/memoryReserver.hpp
index 9ef12650b55..d13d8d72512 100644
--- a/src/hotspot/share/memory/memoryReserver.hpp
+++ b/src/hotspot/share/memory/memoryReserver.hpp
@@ -70,7 +70,7 @@ public:
MemTag mem_tag);
// Release reserved memory
- static bool release(const ReservedSpace& reserved);
+ static void release(const ReservedSpace& reserved);
};
class CodeMemoryReserver : AllStatic {
diff --git a/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp b/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp
index d21c6546cf5..df4e507b104 100644
--- a/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp
@@ -190,10 +190,7 @@ void VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {
}
// Uncommit...
- if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) {
- // Note: this can actually happen, since uncommit may increase the number of mappings.
- fatal("Failed to uncommit metaspace.");
- }
+ os::uncommit_memory((char*)p, word_size * BytesPerWord);
ASAN_POISON_MEMORY_REGION((char*)p, word_size * BytesPerWord);
diff --git a/src/hotspot/share/memory/virtualspace.cpp b/src/hotspot/share/memory/virtualspace.cpp
index 92a168248d2..0627c0b9a8e 100644
--- a/src/hotspot/share/memory/virtualspace.cpp
+++ b/src/hotspot/share/memory/virtualspace.cpp
@@ -370,34 +370,22 @@ void VirtualSpace::shrink_by(size_t size) {
assert(middle_high_boundary() <= aligned_upper_new_high &&
aligned_upper_new_high + upper_needs <= upper_high_boundary(),
"must not shrink beyond region");
- if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) {
- DEBUG_ONLY(warning("os::uncommit_memory failed"));
- return;
- } else {
- _upper_high -= upper_needs;
- }
+ os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable);
+ _upper_high -= upper_needs;
}
if (middle_needs > 0) {
assert(lower_high_boundary() <= aligned_middle_new_high &&
aligned_middle_new_high + middle_needs <= middle_high_boundary(),
"must not shrink beyond region");
- if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) {
- DEBUG_ONLY(warning("os::uncommit_memory failed"));
- return;
- } else {
- _middle_high -= middle_needs;
- }
+ os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable);
+ _middle_high -= middle_needs;
}
if (lower_needs > 0) {
assert(low_boundary() <= aligned_lower_new_high &&
aligned_lower_new_high + lower_needs <= lower_high_boundary(),
"must not shrink beyond region");
- if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) {
- DEBUG_ONLY(warning("os::uncommit_memory failed"));
- return;
- } else {
- _lower_high -= lower_needs;
- }
+ os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable);
+ _lower_high -= lower_needs;
}
_high -= size;
diff --git a/src/hotspot/share/oops/trainingData.cpp b/src/hotspot/share/oops/trainingData.cpp
index 1a16fd70e44..f52c22ad38a 100644
--- a/src/hotspot/share/oops/trainingData.cpp
+++ b/src/hotspot/share/oops/trainingData.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
*
*/
+#include "cds/aotCompressedPointers.hpp"
#include "cds/cdsConfig.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciMetadata.hpp"
@@ -512,8 +513,7 @@ void TrainingData::dump_training_data() {
#endif // ASSERT
td = ArchiveBuilder::current()->get_buffered_addr(td);
uint hash = TrainingData::Key::cds_hash(td->key());
- u4 delta = ArchiveBuilder::current()->buffer_to_offset_u4((address)td);
- writer.add(hash, delta);
+ writer.add(hash, AOTCompressedPointers::encode_not_null(td));
}
writer.dump(&_archived_training_data_dictionary_for_dumping, "training data dictionary");
}
diff --git a/src/hotspot/share/opto/c2_globals.hpp b/src/hotspot/share/opto/c2_globals.hpp
index f470049bf7e..1662f808286 100644
--- a/src/hotspot/share/opto/c2_globals.hpp
+++ b/src/hotspot/share/opto/c2_globals.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -694,7 +694,9 @@
"Print progress during Iterative Global Value Numbering") \
\
develop(uint, VerifyIterativeGVN, 0, \
- "Verify Iterative Global Value Numbering =EDCBA, with:" \
+ "Verify Iterative Global Value Numbering =FEDCBA, with:" \
+ " F: verify Node::Ideal does not return nullptr if the node" \
+ " hash has changed" \
" E: verify node specific invariants" \
" D: verify Node::Identity did not miss opportunities" \
" C: verify Node::Ideal did not miss opportunities" \
diff --git a/src/hotspot/share/opto/cfgnode.cpp b/src/hotspot/share/opto/cfgnode.cpp
index efa0b55e2c2..c65bc391792 100644
--- a/src/hotspot/share/opto/cfgnode.cpp
+++ b/src/hotspot/share/opto/cfgnode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2243,7 +2243,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
PhaseIterGVN* igvn = phase->is_IterGVN();
if (wait_for_cast_input_igvn(igvn)) {
igvn->_worklist.push(this);
- return nullptr;
+ return progress;
}
uncasted = true;
uin = unique_input(phase, true);
@@ -2320,6 +2320,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
PhaseIterGVN* igvn = phase->is_IterGVN();
for (uint i = 1; i < req(); i++) {
set_req_X(i, cast, igvn);
+ progress = this;
}
uin = cast;
}
@@ -2338,7 +2339,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
#endif
// Identity may not return the expected uin, if it has to wait for the region, in irreducible case
assert(ident == uin || ident->is_top() || must_wait_for_region_in_irreducible_loop(phase), "Identity must clean this up");
- return nullptr;
+ return progress;
}
Node* opt = nullptr;
@@ -2529,7 +2530,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Phi references itself through all other inputs then splitting the
// Phi through memory merges would create dead loop at later stage.
if (ii == top) {
- return nullptr; // Delay optimization until graph is cleaned.
+ return progress; // Delay optimization until graph is cleaned.
}
if (ii->is_MergeMem()) {
MergeMemNode* n = ii->as_MergeMem();
diff --git a/src/hotspot/share/opto/constantTable.cpp b/src/hotspot/share/opto/constantTable.cpp
index aba71e45e1c..ae4ac336d93 100644
--- a/src/hotspot/share/opto/constantTable.cpp
+++ b/src/hotspot/share/opto/constantTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -158,7 +158,7 @@ void ConstantTable::calculate_offsets_and_size() {
// Align size up to the next section start (which is insts; see
// CodeBuffer::align_at_start).
assert(_size == -1, "already set?");
- _size = align_up(offset, (int)CodeEntryAlignment);
+ _size = align_up(offset, CodeEntryAlignment);
}
bool ConstantTable::emit(C2_MacroAssembler* masm) const {
diff --git a/src/hotspot/share/opto/intrinsicnode.cpp b/src/hotspot/share/opto/intrinsicnode.cpp
index 1397d31af53..c3e003ad8d3 100644
--- a/src/hotspot/share/opto/intrinsicnode.cpp
+++ b/src/hotspot/share/opto/intrinsicnode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,8 +46,8 @@ Node* StrIntrinsicNode::Ideal(PhaseGVN* phase, bool can_reshape) {
if (in(0) && in(0)->is_top()) return nullptr;
if (can_reshape) {
- Node* mem = phase->transform(in(MemNode::Memory));
- // If transformed to a MergeMem, get the desired slice
+ Node* mem = in(MemNode::Memory);
+ // If mem input is a MergeMem, get the desired slice
uint alias_idx = phase->C->get_alias_index(adr_type());
mem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(alias_idx) : mem;
if (mem != in(MemNode::Memory)) {
diff --git a/src/hotspot/share/opto/mulnode.cpp b/src/hotspot/share/opto/mulnode.cpp
index aa8d6cfce2e..ac7d1925667 100644
--- a/src/hotspot/share/opto/mulnode.cpp
+++ b/src/hotspot/share/opto/mulnode.cpp
@@ -1238,20 +1238,26 @@ Node* RShiftNode::IdentityIL(PhaseGVN* phase, BasicType bt) {
return in(1);
}
// Check for useless sign-masking
+ int lshift_count = 0;
if (in(1)->Opcode() == Op_LShift(bt) &&
in(1)->req() == 3 &&
- in(1)->in(2) == in(2)) {
+ // Compare shift counts by value, not by node pointer, to also match a not-yet-normalized
+ // negative constant (e.g. -1 vs 31)
+ const_shift_count(phase, in(1), &lshift_count)) {
count &= bits_per_java_integer(bt) - 1; // semantics of Java shifts
- // Compute masks for which this shifting doesn't change
- jlong lo = (CONST64(-1) << (bits_per_java_integer(bt) - ((uint)count)-1)); // FFFF8000
- jlong hi = ~lo; // 00007FFF
- const TypeInteger* t11 = phase->type(in(1)->in(1))->isa_integer(bt);
- if (t11 == nullptr) {
- return this;
- }
- // Does actual value fit inside of mask?
- if (lo <= t11->lo_as_long() && t11->hi_as_long() <= hi) {
- return in(1)->in(1); // Then shifting is a nop
+ lshift_count &= bits_per_java_integer(bt) - 1;
+ if (count == lshift_count) {
+ // Compute masks for which this shifting doesn't change
+ jlong lo = (CONST64(-1) << (bits_per_java_integer(bt) - ((uint)count)-1)); // FFFF8000
+ jlong hi = ~lo; // 00007FFF
+ const TypeInteger* t11 = phase->type(in(1)->in(1))->isa_integer(bt);
+ if (t11 == nullptr) {
+ return this;
+ }
+ // Does actual value fit inside of mask?
+ if (lo <= t11->lo_as_long() && t11->hi_as_long() <= hi) {
+ return in(1)->in(1); // Then shifting is a nop
+ }
}
}
}
@@ -1524,11 +1530,14 @@ Node* URShiftINode::Ideal(PhaseGVN* phase, bool can_reshape) {
// If Q is "X << z" the rounding is useless. Look for patterns like
// ((X<<Z) + Y) >>> Z and replace with (X + Y>>>Z) & Z-mask.
Node *add = in(1);
- const TypeInt *t2 = phase->type(in(2))->isa_int();
if (in1_op == Op_AddI) {
Node *lshl = add->in(1);
- if( lshl->Opcode() == Op_LShiftI &&
- phase->type(lshl->in(2)) == t2 ) {
+ // Compare shift counts by value, not by node pointer, to also match a not-yet-normalized
+ // negative constant (e.g. -1 vs 31)
+ int lshl_con = 0;
+ if (lshl->Opcode() == Op_LShiftI &&
+ const_shift_count(phase, lshl, &lshl_con) &&
+ (lshl_con & (BitsPerJavaInteger - 1)) == con) {
Node *y_z = phase->transform( new URShiftINode(add->in(2),in(2)) );
Node *sum = phase->transform( new AddINode( lshl->in(1), y_z ) );
return new AndINode( sum, phase->intcon(mask) );
@@ -1555,11 +1564,16 @@ Node* URShiftINode::Ideal(PhaseGVN* phase, bool can_reshape) {
// Check for "(X << z ) >>> z" which simply zero-extends
Node *shl = in(1);
- if( in1_op == Op_LShiftI &&
- phase->type(shl->in(2)) == t2 )
- return new AndINode( shl->in(1), phase->intcon(mask) );
+ // Compare shift counts by value, not by node pointer, to also match a not-yet-normalized
+ // negative constant (e.g. -1 vs 31)
+ int shl_con = 0;
+ if (in1_op == Op_LShiftI &&
+ const_shift_count(phase, shl, &shl_con) &&
+ (shl_con & (BitsPerJavaInteger - 1)) == con)
+ return new AndINode(shl->in(1), phase->intcon(mask));
// Check for (x >> n) >>> 31. Replace with (x >>> 31)
+ const TypeInt* t2 = phase->type(in(2))->isa_int();
Node *shr = in(1);
if ( in1_op == Op_RShiftI ) {
Node *in11 = shr->in(1);
@@ -1677,11 +1691,15 @@ Node* URShiftLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
const TypeInt *t2 = phase->type(in(2))->isa_int();
if (add->Opcode() == Op_AddL) {
Node *lshl = add->in(1);
- if( lshl->Opcode() == Op_LShiftL &&
- phase->type(lshl->in(2)) == t2 ) {
- Node *y_z = phase->transform( new URShiftLNode(add->in(2),in(2)) );
- Node *sum = phase->transform( new AddLNode( lshl->in(1), y_z ) );
- return new AndLNode( sum, phase->longcon(mask) );
+ // Compare shift counts by value, not by node pointer, to also match a not-yet-normalized
+ // negative constant (e.g. -1 vs 63)
+ int lshl_con = 0;
+ if (lshl->Opcode() == Op_LShiftL &&
+ const_shift_count(phase, lshl, &lshl_con) &&
+ (lshl_con & (BitsPerJavaLong - 1)) == con) {
+ Node* y_z = phase->transform(new URShiftLNode(add->in(2), in(2)));
+ Node* sum = phase->transform(new AddLNode(lshl->in(1), y_z));
+ return new AndLNode(sum, phase->longcon(mask));
}
}
@@ -1701,9 +1719,14 @@ Node* URShiftLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
// Check for "(X << z ) >>> z" which simply zero-extends
Node *shl = in(1);
- if( shl->Opcode() == Op_LShiftL &&
- phase->type(shl->in(2)) == t2 )
- return new AndLNode( shl->in(1), phase->longcon(mask) );
+ // Compare shift counts by value, not by node pointer, to also match a not-yet-normalized
+ // negative constant (e.g. -1 vs 63)
+ int shl_con = 0;
+ if (shl->Opcode() == Op_LShiftL &&
+ const_shift_count(phase, shl, &shl_con) &&
+ (shl_con & (BitsPerJavaLong - 1)) == con) {
+ return new AndLNode(shl->in(1), phase->longcon(mask));
+ }
// Check for (x >> n) >>> 63. Replace with (x >>> 63)
Node *shr = in(1);
diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp
index 832727a424e..cb5795a1250 100644
--- a/src/hotspot/share/opto/node.cpp
+++ b/src/hotspot/share/opto/node.cpp
@@ -1151,15 +1151,19 @@ const Type* Node::Value(PhaseGVN* phase) const {
// 'Idealize' the graph rooted at this Node.
//
// In order to be efficient and flexible there are some subtle invariants
-// these Ideal calls need to hold. Running with '-XX:VerifyIterativeGVN=1' checks
-// these invariants, although its too slow to have on by default. If you are
-// hacking an Ideal call, be sure to test with '-XX:VerifyIterativeGVN=1'
+// these Ideal calls need to hold. Some of the flag bits for '-XX:VerifyIterativeGVN'
+// can help with validating these invariants, although they are too slow to have on by default:
+// - '-XX:VerifyIterativeGVN=1' checks the def-use info
+// - '-XX:VerifyIterativeGVN=100000' checks the return value
+// If you are hacking an Ideal call, be sure to use these.
//
// The Ideal call almost arbitrarily reshape the graph rooted at the 'this'
// pointer. If ANY change is made, it must return the root of the reshaped
// graph - even if the root is the same Node. Example: swapping the inputs
// to an AddINode gives the same answer and same root, but you still have to
-// return the 'this' pointer instead of null.
+// return the 'this' pointer instead of null. If the node was already dead
+// before the Ideal call, this rule does not apply, and it is fine to return
+// nullptr even if modifications were made.
//
// You cannot return an OLD Node, except for the 'this' pointer. Use the
// Identity call to return an old Node; basically if Identity can find
diff --git a/src/hotspot/share/opto/phaseX.cpp b/src/hotspot/share/opto/phaseX.cpp
index ce24c46590d..868dfc03047 100644
--- a/src/hotspot/share/opto/phaseX.cpp
+++ b/src/hotspot/share/opto/phaseX.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1722,11 +1722,6 @@ void PhaseIterGVN::verify_Ideal_for(Node* n, bool can_reshape) {
case Op_MergeMem:
return;
- // URShiftINode::Ideal
- // Found in tier1-3. Did not investigate further yet.
- case Op_URShiftI:
- return;
-
// CMoveINode::Ideal
// Found in tier1-3. Did not investigate further yet.
case Op_CMoveI:
@@ -2167,9 +2162,15 @@ Node *PhaseIterGVN::transform_old(Node* n) {
DEBUG_ONLY(dead_loop_check(k);)
DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
C->remove_modified_node(k);
+#ifndef PRODUCT
+ uint hash_before = is_verify_Ideal_return() ? k->hash() : 0;
+#endif
Node* i = apply_ideal(k, /*can_reshape=*/true);
assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
#ifndef PRODUCT
+ if (is_verify_Ideal_return()) {
+ assert(k->outcnt() == 0 || i != nullptr || hash_before == k->hash(), "hash changed after Ideal returned nullptr for %s", k->Name());
+ }
verify_step(k);
#endif
@@ -2193,9 +2194,15 @@ Node *PhaseIterGVN::transform_old(Node* n) {
// Try idealizing again
DEBUG_ONLY(is_new = (k->outcnt() == 0);)
C->remove_modified_node(k);
+#ifndef PRODUCT
+ uint hash_before = is_verify_Ideal_return() ? k->hash() : 0;
+#endif
i = apply_ideal(k, /*can_reshape=*/true);
assert(i != k || is_new || (i->outcnt() > 0), "don't return dead nodes");
#ifndef PRODUCT
+ if (is_verify_Ideal_return()) {
+ assert(k->outcnt() == 0 || i != nullptr || hash_before == k->hash(), "hash changed after Ideal returned nullptr for %s", k->Name());
+ }
verify_step(k);
#endif
DEBUG_ONLY(loop_count++;)
@@ -2582,12 +2589,15 @@ void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_
auto is_boundary = [](Node* n){ return !n->is_ConstraintCast(); };
use->visit_uses(push_the_uses_to_worklist, is_boundary);
}
- // If changed LShift inputs, check RShift users for useless sign-ext
+ // If changed LShift inputs, check RShift/URShift users for
+ // "(X << C) >> C" sign-ext and "(X << C) >>> C" zero-ext optimizations.
if (use_op == Op_LShiftI || use_op == Op_LShiftL) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
Node* u = use->fast_out(i2);
- if (u->Opcode() == Op_RShiftI || u->Opcode() == Op_RShiftL)
+ if (u->Opcode() == Op_RShiftI || u->Opcode() == Op_RShiftL ||
+ u->Opcode() == Op_URShiftI || u->Opcode() == Op_URShiftL) {
worklist.push(u);
+ }
}
}
// If changed LShift inputs, check And users for shift and mask (And) operation
@@ -2668,7 +2678,7 @@ void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_
if (use->is_MinMax()) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
Node* u = use->fast_out(i2);
- if (u->Opcode() == use->Opcode()) {
+ if (u->is_MinMax()) {
worklist.push(u);
}
}
diff --git a/src/hotspot/share/opto/phaseX.hpp b/src/hotspot/share/opto/phaseX.hpp
index ce02f456c00..cc0d47bd634 100644
--- a/src/hotspot/share/opto/phaseX.hpp
+++ b/src/hotspot/share/opto/phaseX.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -627,6 +627,10 @@ public:
// '-XX:VerifyIterativeGVN=10000'
return ((VerifyIterativeGVN % 100000) / 10000) == 1;
}
+ static bool is_verify_Ideal_return() {
+ // '-XX:VerifyIterativeGVN=100000'
+ return ((VerifyIterativeGVN % 1000000) / 100000) == 1;
+ }
protected:
// Sub-quadratic implementation of '-XX:VerifyIterativeGVN=1' (Use-Def verification).
julong _verify_counter;
diff --git a/src/hotspot/share/runtime/abstract_vm_version.hpp b/src/hotspot/share/runtime/abstract_vm_version.hpp
index 5a6b41506c7..17ade2c068d 100644
--- a/src/hotspot/share/runtime/abstract_vm_version.hpp
+++ b/src/hotspot/share/runtime/abstract_vm_version.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,6 +42,7 @@ typedef enum {
} VirtualizationType;
class outputStream;
+class stringStream;
enum class vmIntrinsicID;
// Abstract_VM_Version provides information about the VM.
@@ -226,6 +227,21 @@ class Abstract_VM_Version: AllStatic {
static const char* cpu_name(void);
static const char* cpu_description(void);
+
+ static void get_cpu_features_name(void* features_buffer, stringStream& ss) { return; }
+
+ // Returns names of features present in features_set1 but not in features_set2
+ static void get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss) { return; }
+
+ // Returns number of bytes required to store cpu features representation
+ static int cpu_features_size() { return 0; }
+
+ // Stores arch dependent cpu features representation in the provided buffer.
+ // Size of the buffer must be same as returned by cpu_features_size()
+ static void store_cpu_features(void* buf) { return; }
+
+ // features_to_test is an opaque object that stores arch specific representation of cpu features
+ static bool supports_features(void* features_to_test) { return false; };
};
#endif // SHARE_RUNTIME_ABSTRACT_VM_VERSION_HPP
diff --git a/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp b/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp
index 63d3424b3ed..444ce321759 100644
--- a/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp
+++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -163,10 +163,10 @@ JVMFlag::Error CodeCacheSegmentSizeConstraintFunc(size_t value, bool verbose) {
return JVMFlag::VIOLATES_CONSTRAINT;
}
- if (CodeCacheSegmentSize < (size_t)CodeEntryAlignment) {
+ if (CodeCacheSegmentSize < CodeEntryAlignment) {
JVMFlag::printError(verbose,
"CodeCacheSegmentSize (%zu) must be "
- "larger than or equal to CodeEntryAlignment (%zd) "
+ "larger than or equal to CodeEntryAlignment (%u) "
"to align entry points\n",
CodeCacheSegmentSize, CodeEntryAlignment);
return JVMFlag::VIOLATES_CONSTRAINT;
@@ -194,25 +194,25 @@ JVMFlag::Error CodeCacheSegmentSizeConstraintFunc(size_t value, bool verbose) {
return JVMFlag::SUCCESS;
}
-JVMFlag::Error CodeEntryAlignmentConstraintFunc(intx value, bool verbose) {
+JVMFlag::Error CodeEntryAlignmentConstraintFunc(uint value, bool verbose) {
if (!is_power_of_2(value)) {
JVMFlag::printError(verbose,
- "CodeEntryAlignment (%zd) must be "
+ "CodeEntryAlignment (%u) must be "
"a power of two\n", CodeEntryAlignment);
return JVMFlag::VIOLATES_CONSTRAINT;
}
if (CodeEntryAlignment < 16) {
JVMFlag::printError(verbose,
- "CodeEntryAlignment (%zd) must be "
+ "CodeEntryAlignment (%u) must be "
"greater than or equal to %d\n",
CodeEntryAlignment, 16);
return JVMFlag::VIOLATES_CONSTRAINT;
}
- if ((size_t)CodeEntryAlignment > CodeCacheSegmentSize) {
+ if (CodeEntryAlignment > CodeCacheSegmentSize) {
JVMFlag::printError(verbose,
- "CodeEntryAlignment (%zd) must be "
+ "CodeEntryAlignment (%u) must be "
"less than or equal to CodeCacheSegmentSize (%zu) "
"to align entry points\n",
CodeEntryAlignment, CodeCacheSegmentSize);
@@ -241,10 +241,10 @@ JVMFlag::Error OptoLoopAlignmentConstraintFunc(intx value, bool verbose) {
return JVMFlag::VIOLATES_CONSTRAINT;
}
- if (OptoLoopAlignment > CodeEntryAlignment) {
+ if (checked_cast<uint>(OptoLoopAlignment) > CodeEntryAlignment) {
JVMFlag::printError(verbose,
"OptoLoopAlignment (%zd) must be "
- "less or equal to CodeEntryAlignment (%zd)\n",
+ "less or equal to CodeEntryAlignment (%u)\n",
value, CodeEntryAlignment);
return JVMFlag::VIOLATES_CONSTRAINT;
}
@@ -306,7 +306,7 @@ JVMFlag::Error TypeProfileLevelConstraintFunc(uint value, bool verbose) {
}
JVMFlag::Error VerifyIterativeGVNConstraintFunc(uint value, bool verbose) {
- const int max_modes = 5;
+ const int max_modes = 6;
uint original_value = value;
for (int i = 0; i < max_modes; i++) {
if (value % 10 > 1) {
@@ -339,10 +339,10 @@ JVMFlag::Error InitArrayShortSizeConstraintFunc(intx value, bool verbose) {
#ifdef COMPILER2
JVMFlag::Error InteriorEntryAlignmentConstraintFunc(intx value, bool verbose) {
- if (InteriorEntryAlignment > CodeEntryAlignment) {
+ if (checked_cast<uint>(InteriorEntryAlignment) > CodeEntryAlignment) {
JVMFlag::printError(verbose,
"InteriorEntryAlignment (%zd) must be "
- "less than or equal to CodeEntryAlignment (%zd)\n",
+ "less than or equal to CodeEntryAlignment (%u)\n",
InteriorEntryAlignment, CodeEntryAlignment);
return JVMFlag::VIOLATES_CONSTRAINT;
}
diff --git a/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.hpp b/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.hpp
index 4544ad706fd..cf785800cfc 100644
--- a/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.hpp
+++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
f(intx, CompileThresholdConstraintFunc) \
f(intx, OnStackReplacePercentageConstraintFunc) \
f(size_t, CodeCacheSegmentSizeConstraintFunc) \
- f(intx, CodeEntryAlignmentConstraintFunc) \
+ f(uint, CodeEntryAlignmentConstraintFunc) \
f(intx, OptoLoopAlignmentConstraintFunc) \
f(uintx, ArraycopyDstPrefetchDistanceConstraintFunc) \
f(uintx, ArraycopySrcPrefetchDistanceConstraintFunc) \
diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp
index 6a2bbc9c648..6d4b9908e1c 100644
--- a/src/hotspot/share/runtime/globals.hpp
+++ b/src/hotspot/share/runtime/globals.hpp
@@ -1508,8 +1508,9 @@ const int ObjectAlignmentInBytes = 8;
range(1, 1024) \
constraint(CodeCacheSegmentSizeConstraintFunc, AfterErgo) \
\
- product_pd(intx, CodeEntryAlignment, EXPERIMENTAL, \
- "Code entry alignment for generated code (in bytes)") \
+ product_pd(uint, CodeEntryAlignment, EXPERIMENTAL, \
+ "Code entry alignment for generated code" \
+ " (in bytes, power of two)") \
constraint(CodeEntryAlignmentConstraintFunc, AfterErgo) \
\
product_pd(intx, OptoLoopAlignment, \
diff --git a/src/hotspot/share/runtime/javaThread.cpp b/src/hotspot/share/runtime/javaThread.cpp
index a891e333d4c..77a567d4a09 100644
--- a/src/hotspot/share/runtime/javaThread.cpp
+++ b/src/hotspot/share/runtime/javaThread.cpp
@@ -514,7 +514,7 @@ JavaThread::JavaThread(MemTag mem_tag) :
#ifdef MACOS_AARCH64
_cur_wx_enable(nullptr),
- _cur_wx_mode(0),
+ _cur_wx_mode(nullptr),
#endif
_lock_stack(this),
diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp
index 3a5a5745095..58774e59a34 100644
--- a/src/hotspot/share/runtime/os.cpp
+++ b/src/hotspot/share/runtime/os.cpp
@@ -2293,7 +2293,7 @@ void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
// We do not have the same lock protection for pd_commit_memory and record_virtual_memory_commit.
// We assume that there is some external synchronization that prevents a region from being uncommitted
// before it is finished being committed.
-bool os::uncommit_memory(char* addr, size_t bytes, bool executable) {
+void os::uncommit_memory(char* addr, size_t bytes, bool executable) {
assert_nonempty_range(addr, bytes);
bool res;
if (MemTracker::enabled()) {
@@ -2306,13 +2306,10 @@ bool os::uncommit_memory(char* addr, size_t bytes, bool executable) {
res = pd_uncommit_memory(addr, bytes, executable);
}
- if (res) {
- log_debug(os, map)("Uncommitted " RANGEFMT, RANGEFMTARGS(addr, bytes));
- } else {
- log_info(os, map)("Failed to uncommit " RANGEFMT, RANGEFMTARGS(addr, bytes));
+ if (!res) {
+ fatal("Failed to uncommit " RANGEFMT, RANGEFMTARGS(addr, bytes));
}
-
- return res;
+ log_debug(os, map)("Uncommitted " RANGEFMT, RANGEFMTARGS(addr, bytes));
}
// The scope of NmtVirtualMemoryLocker covers both pd_release_memory and record_virtual_memory_release because
@@ -2320,7 +2317,7 @@ bool os::uncommit_memory(char* addr, size_t bytes, bool executable) {
// We do not have the same lock protection for pd_reserve_memory and record_virtual_memory_reserve.
// We assume that there is some external synchronization that prevents a region from being released
// before it is finished being reserved.
-bool os::release_memory(char* addr, size_t bytes) {
+void os::release_memory(char* addr, size_t bytes) {
assert_nonempty_range(addr, bytes);
bool res;
if (MemTracker::enabled()) {
@@ -2333,11 +2330,9 @@ bool os::release_memory(char* addr, size_t bytes) {
res = pd_release_memory(addr, bytes);
}
if (!res) {
- log_info(os, map)("Failed to release " RANGEFMT, RANGEFMTARGS(addr, bytes));
- } else {
- log_debug(os, map)("Released " RANGEFMT, RANGEFMTARGS(addr, bytes));
+ fatal("Failed to release " RANGEFMT, RANGEFMTARGS(addr, bytes));
}
- return res;
+ log_debug(os, map)("Released " RANGEFMT, RANGEFMTARGS(addr, bytes));
}
// Prints all mappings
@@ -2406,7 +2401,7 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset,
return result;
}
-bool os::unmap_memory(char *addr, size_t bytes) {
+void os::unmap_memory(char *addr, size_t bytes) {
bool result;
if (MemTracker::enabled()) {
MemTracker::NmtVirtualMemoryLocker nvml;
@@ -2417,7 +2412,9 @@ bool os::unmap_memory(char *addr, size_t bytes) {
} else {
result = pd_unmap_memory(addr, bytes);
}
- return result;
+ if (!result) {
+ fatal("Failed to unmap memory " RANGEFMT, RANGEFMTARGS(addr, bytes));
+ }
}
void os::disclaim_memory(char *addr, size_t bytes) {
@@ -2445,7 +2442,7 @@ char* os::reserve_memory_special(size_t size, size_t alignment, size_t page_size
return result;
}
-bool os::release_memory_special(char* addr, size_t bytes) {
+void os::release_memory_special(char* addr, size_t bytes) {
bool res;
if (MemTracker::enabled()) {
MemTracker::NmtVirtualMemoryLocker nvml;
@@ -2456,7 +2453,9 @@ bool os::release_memory_special(char* addr, size_t bytes) {
} else {
res = pd_release_memory_special(addr, bytes);
}
- return res;
+ if (!res) {
+ fatal("Failed to release memory special " RANGEFMT, RANGEFMTARGS(addr, bytes));
+ }
}
// Convenience wrapper around naked_short_sleep to allow for longer sleep
diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp
index c6a1d670926..e773b40cb14 100644
--- a/src/hotspot/share/runtime/os.hpp
+++ b/src/hotspot/share/runtime/os.hpp
@@ -536,8 +536,8 @@ class os: AllStatic {
static void commit_memory_or_exit(char* addr, size_t size,
size_t alignment_hint,
bool executable, const char* mesg);
- static bool uncommit_memory(char* addr, size_t bytes, bool executable = false);
- static bool release_memory(char* addr, size_t bytes);
+ static void uncommit_memory(char* addr, size_t bytes, bool executable = false);
+ static void release_memory(char* addr, size_t bytes);
// Does the platform support trimming the native heap?
static bool can_trim_native_heap();
@@ -566,7 +566,7 @@ class os: AllStatic {
static bool unguard_memory(char* addr, size_t bytes);
static bool create_stack_guard_pages(char* addr, size_t bytes);
static bool pd_create_stack_guard_pages(char* addr, size_t bytes);
- static bool remove_stack_guard_pages(char* addr, size_t bytes);
+ static void remove_stack_guard_pages(char* addr, size_t bytes);
// Helper function to create a new file with template jvmheap.XXXXXX.
// Returns a valid fd on success or else returns -1
static int create_file_for_heap(const char* dir);
@@ -582,7 +582,7 @@ class os: AllStatic {
static char* map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, MemTag mem_tag, bool read_only = false,
bool allow_exec = false);
- static bool unmap_memory(char *addr, size_t bytes);
+ static void unmap_memory(char *addr, size_t bytes);
static void disclaim_memory(char *addr, size_t bytes);
static void realign_memory(char *addr, size_t bytes, size_t alignment_hint);
@@ -605,7 +605,7 @@ class os: AllStatic {
// reserve, commit and pin the entire memory region
static char* reserve_memory_special(size_t size, size_t alignment, size_t page_size,
char* addr, bool executable);
- static bool release_memory_special(char* addr, size_t bytes);
+ static void release_memory_special(char* addr, size_t bytes);
static void large_page_init();
static size_t large_page_size();
static bool can_commit_large_page_memory();
diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp
index d426fd31a43..ae3835dd344 100644
--- a/src/hotspot/share/runtime/sharedRuntime.cpp
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,9 +22,11 @@
*
*/
+#include "cds/aotCompressedPointers.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.inline.hpp"
#include "classfile/classLoader.hpp"
+#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/vmClasses.hpp"
@@ -2933,8 +2935,7 @@ public:
assert(buffered_entry != nullptr,"sanity check");
uint hash = fp->compute_hash();
- u4 delta = _builder->buffer_to_offset_u4((address)buffered_entry);
- _writer->add(hash, delta);
+ _writer->add(hash, AOTCompressedPointers::encode_not_null(buffered_entry));
if (lsh.is_enabled()) {
address fp_runtime_addr = (address)buffered_fp + ArchiveBuilder::current()->buffer_to_requested_delta();
address entry_runtime_addr = (address)buffered_entry + ArchiveBuilder::current()->buffer_to_requested_delta();
diff --git a/src/hotspot/share/runtime/sharedRuntime.hpp b/src/hotspot/share/runtime/sharedRuntime.hpp
index 11bd39c839f..a026a4b7d69 100644
--- a/src/hotspot/share/runtime/sharedRuntime.hpp
+++ b/src/hotspot/share/runtime/sharedRuntime.hpp
@@ -25,7 +25,6 @@
#ifndef SHARE_RUNTIME_SHAREDRUNTIME_HPP
#define SHARE_RUNTIME_SHAREDRUNTIME_HPP
-#include "classfile/compactHashtable.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.hpp"
#include "interpreter/linkResolver.hpp"
@@ -38,6 +37,7 @@
class AdapterHandlerEntry;
class AdapterFingerPrint;
class MetaspaceClosure;
+class SerializeClosure;
class vframeStream;
// Runtime is the base class for various runtime interfaces
diff --git a/src/hotspot/share/runtime/stackOverflow.cpp b/src/hotspot/share/runtime/stackOverflow.cpp
index e2bd157c555..61dbce8c5b4 100644
--- a/src/hotspot/share/runtime/stackOverflow.cpp
+++ b/src/hotspot/share/runtime/stackOverflow.cpp
@@ -116,13 +116,8 @@ void StackOverflow::remove_stack_guard_pages() {
size_t len = stack_guard_zone_size();
if (os::must_commit_stack_guard_pages()) {
- if (os::remove_stack_guard_pages((char *) low_addr, len)) {
- _stack_guard_state = stack_guard_unused;
- } else {
- log_warning(os, thread)("Attempt to deallocate stack guard pages failed ("
- PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
- return;
- }
+ os::remove_stack_guard_pages((char *) low_addr, len);
+ _stack_guard_state = stack_guard_unused;
} else {
if (_stack_guard_state == stack_guard_unused) return;
if (os::unguard_memory((char *) low_addr, len)) {
diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp
index 355dc70ae39..f27d355f56e 100644
--- a/src/hotspot/share/runtime/thread.cpp
+++ b/src/hotspot/share/runtime/thread.cpp
@@ -485,7 +485,7 @@ void Thread::print_on(outputStream* st, bool print_extended_info) const {
(double)_statistical_info.getElapsedTime() / 1000.0
);
if (is_Java_thread() && (PrintExtendedThreadInfo || print_extended_info)) {
- size_t allocated_bytes = (size_t) const_cast(this)->cooked_allocated_bytes();
+ size_t allocated_bytes = checked_cast(cooked_allocated_bytes());
st->print("allocated=%zu%s ",
byte_size_in_proper_unit(allocated_bytes),
proper_unit_for_byte_size(allocated_bytes)
diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp
index dc09d4c68c2..0225fa808cb 100644
--- a/src/hotspot/share/runtime/thread.hpp
+++ b/src/hotspot/share/runtime/thread.hpp
@@ -405,13 +405,14 @@ class Thread: public ThreadShadow {
// Thread-Local Allocation Buffer (TLAB) support
ThreadLocalAllocBuffer& tlab() { return _tlab; }
+ const ThreadLocalAllocBuffer& tlab() const { return _tlab; }
void initialize_tlab();
void retire_tlab(ThreadLocalAllocStats* stats = nullptr);
void fill_tlab(HeapWord* start, size_t pre_reserved, size_t new_size);
jlong allocated_bytes() { return _allocated_bytes; }
void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
- inline jlong cooked_allocated_bytes();
+ inline jlong cooked_allocated_bytes() const;
ThreadHeapSampler& heap_sampler() { return _heap_sampler; }
diff --git a/src/hotspot/share/runtime/thread.inline.hpp b/src/hotspot/share/runtime/thread.inline.hpp
index b194f5e2a7f..d2ac34c3e46 100644
--- a/src/hotspot/share/runtime/thread.inline.hpp
+++ b/src/hotspot/share/runtime/thread.inline.hpp
@@ -36,7 +36,7 @@
#include "runtime/os.hpp"
#endif
-inline jlong Thread::cooked_allocated_bytes() {
+inline jlong Thread::cooked_allocated_bytes() const {
jlong allocated_bytes = AtomicAccess::load_acquire(&_allocated_bytes);
size_t used_bytes = 0;
if (UseTLAB) {
diff --git a/src/hotspot/share/runtime/vmOperations.cpp b/src/hotspot/share/runtime/vmOperations.cpp
index c5539fd5e50..ef480f04c57 100644
--- a/src/hotspot/share/runtime/vmOperations.cpp
+++ b/src/hotspot/share/runtime/vmOperations.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,7 @@
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"
+#include "utilities/growableArray.hpp"
#include "utilities/ticks.hpp"
#define VM_OP_NAME_INITIALIZE(name) #name,
@@ -286,42 +287,27 @@ class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
}
private:
- class ObjectMonitorLinkedList :
- public LinkedListImpl {};
+ using ObjectMonitorList = GrowableArrayCHeap;
// HashTable SIZE is specified at compile time so we
// use 1031 which is the first prime after 1024.
- typedef HashTable PtrTable;
PtrTable* _ptrs;
size_t _key_count;
size_t _om_count;
- void add_list(int64_t key, ObjectMonitorLinkedList* list) {
- _ptrs->put(key, list);
- _key_count++;
- }
-
- ObjectMonitorLinkedList* get_list(int64_t key) {
- ObjectMonitorLinkedList** listpp = _ptrs->get(key);
- return (listpp == nullptr) ? nullptr : *listpp;
- }
-
void add(ObjectMonitor* monitor) {
int64_t key = monitor->owner();
- ObjectMonitorLinkedList* list = get_list(key);
- if (list == nullptr) {
- // Create new list and add it to the hash table:
- list = new (mtThread) ObjectMonitorLinkedList;
- _ptrs->put(key, list);
+ bool created = false;
+ ObjectMonitorList* list = _ptrs->put_if_absent(key, &created);
+ if (created) {
_key_count++;
}
- assert(list->find(monitor) == nullptr, "Should not contain duplicates");
- list->add(monitor); // Add the ObjectMonitor to the list.
+ assert(list->find(monitor) == -1, "Should not contain duplicates");
+ list->push(monitor); // Add the ObjectMonitor to the list.
_om_count++;
}
@@ -332,17 +318,7 @@ class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
ObjectMonitorsDump() : _ptrs(new (mtThread) PtrTable), _key_count(0), _om_count(0) {}
~ObjectMonitorsDump() {
- class CleanupObjectMonitorsDump: StackObj {
- public:
- bool do_entry(int64_t& key, ObjectMonitorLinkedList*& list) {
- list->clear(); // clear the LinkListNodes
- delete list; // then delete the LinkedList
- return true;
- }
- } cleanup;
-
- _ptrs->unlink(&cleanup); // cleanup the LinkedLists
- delete _ptrs; // then delete the hash table
+ delete _ptrs;
}
// Implements MonitorClosure used to collect all owned monitors in the system
@@ -368,11 +344,12 @@ class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
// Implements the ObjectMonitorsView interface
void visit(MonitorClosure* closure, JavaThread* thread) override {
int64_t key = ObjectMonitor::owner_id_from(thread);
- ObjectMonitorLinkedList* list = get_list(key);
- LinkedListIterator iter(list != nullptr ? list->head() : nullptr);
- while (!iter.is_empty()) {
- ObjectMonitor* monitor = *iter.next();
- closure->do_monitor(monitor);
+ ObjectMonitorList* list = _ptrs->get(key);
+ if (list == nullptr) {
+ return;
+ }
+ for (int i = 0; i < list->length(); i++) {
+ closure->do_monitor(list->at(i));
}
}
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
index f65a3441bf4..36c55bd2ecc 100644
--- a/src/hotspot/share/runtime/vmStructs.cpp
+++ b/src/hotspot/share/runtime/vmStructs.cpp
@@ -22,6 +22,7 @@
*
*/
+#include "cds/aotCompressedPointers.hpp"
#include "cds/filemap.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.hpp"
@@ -762,7 +763,7 @@
CDS_ONLY(nonstatic_field(FileMapInfo, _header, FileMapHeader*)) \
CDS_ONLY( static_field(FileMapInfo, _current_info, FileMapInfo*)) \
CDS_ONLY(nonstatic_field(FileMapHeader, _regions[0], CDSFileMapRegion)) \
- CDS_ONLY(nonstatic_field(FileMapHeader, _cloned_vtables_offset, size_t)) \
+ CDS_ONLY(nonstatic_field(FileMapHeader, _cloned_vtables, AOTCompressedPointers::narrowPtr)) \
CDS_ONLY(nonstatic_field(FileMapHeader, _mapped_base_address, char*)) \
CDS_ONLY(nonstatic_field(CDSFileMapRegion, _mapped_base, char*)) \
CDS_ONLY(nonstatic_field(CDSFileMapRegion, _used, size_t)) \
@@ -903,7 +904,6 @@
/*****************************/ \
\
declare_toplevel_type(void*) \
- declare_toplevel_type(Atomic) \
declare_toplevel_type(int*) \
declare_toplevel_type(char*) \
declare_toplevel_type(char**) \
@@ -1203,6 +1203,7 @@
\
/* all enum types */ \
\
+ declare_integer_type(AOTCompressedPointers::narrowPtr) \
declare_integer_type(Bytecodes::Code) \
declare_integer_type(InstanceKlass::ClassState) \
declare_integer_type(JavaThreadState) \
diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
index 54602297759..ea4f6f99ae6 100644
--- a/src/hotspot/share/utilities/globalDefinitions.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp
@@ -433,7 +433,8 @@ typedef unsigned char u_char;
typedef u_char* address;
typedef const u_char* const_address;
-// Pointer subtraction.
+// Pointer subtraction, calculating high - low. Asserts on underflow.
+//
// The idea here is to avoid ptrdiff_t, which is signed and so doesn't have
// the range we might need to find differences from one end of the heap
// to the other.
@@ -444,28 +445,28 @@ typedef const u_char* const_address;
// and then additions like
// ... top() + size ...
// are safe because we know that top() is at least size below end().
-inline size_t pointer_delta(const volatile void* left,
- const volatile void* right,
+inline size_t pointer_delta(const volatile void* high,
+ const volatile void* low,
size_t element_size) {
- assert(left >= right, "avoid underflow - left: " PTR_FORMAT " right: " PTR_FORMAT, p2i(left), p2i(right));
- return (((uintptr_t) left) - ((uintptr_t) right)) / element_size;
+ assert(high >= low, "avoid underflow - high address: " PTR_FORMAT " low address: " PTR_FORMAT, p2i(high), p2i(low));
+ return (((uintptr_t) high) - ((uintptr_t) low)) / element_size;
}
// A version specialized for HeapWord*'s.
-inline size_t pointer_delta(const HeapWord* left, const HeapWord* right) {
- return pointer_delta(left, right, sizeof(HeapWord));
+inline size_t pointer_delta(const HeapWord* high, const HeapWord* low) {
+ return pointer_delta(high, low, sizeof(HeapWord));
}
// A version specialized for MetaWord*'s.
-inline size_t pointer_delta(const MetaWord* left, const MetaWord* right) {
- return pointer_delta(left, right, sizeof(MetaWord));
+inline size_t pointer_delta(const MetaWord* high, const MetaWord* low) {
+ return pointer_delta(high, low, sizeof(MetaWord));
}
// pointer_delta_as_int is called to do pointer subtraction for nearby pointers that
// returns a non-negative int, usually used as a size of a code buffer range.
// This scales to sizeof(T).
template
-inline int pointer_delta_as_int(const volatile T* left, const volatile T* right) {
- size_t delta = pointer_delta(left, right, sizeof(T));
+inline int pointer_delta_as_int(const volatile T* high, const volatile T* low) {
+ size_t delta = pointer_delta(high, low, sizeof(T));
assert(delta <= size_t(INT_MAX), "pointer delta out of range: %zu", delta);
return static_cast(delta);
}
diff --git a/src/hotspot/share/utilities/rbTree.hpp b/src/hotspot/share/utilities/rbTree.hpp
index a2f84ac2373..89f9eb256bf 100644
--- a/src/hotspot/share/utilities/rbTree.hpp
+++ b/src/hotspot/share/utilities/rbTree.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,9 +26,8 @@
#define SHARE_UTILITIES_RBTREE_HPP
#include "cppstdlib/type_traits.hpp"
-#include "metaprogramming/enableIf.hpp"
+#include "memory/allocation.hpp"
#include "nmt/memTag.hpp"
-#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
// An intrusive red-black tree is constructed with two template parameters:
@@ -64,8 +63,9 @@ enum class RBTreeOrdering : int { LT, EQ, GT };
template
class AbstractRBTree;
-
+class Arena;
class outputStream;
+class ResourceArea;
class IntrusiveRBNode {
template
@@ -81,7 +81,7 @@ class IntrusiveRBNode {
DEBUG_ONLY(mutable bool _visited);
public:
- IntrusiveRBNode() : _parent(0), _left(nullptr), _right(nullptr) DEBUG_ONLY(COMMA _visited(false)) {}
+ IntrusiveRBNode();
// Gets the previous in-order node in the tree.
// nullptr is returned if there is no previous node.
@@ -96,22 +96,18 @@ public:
void print_on(outputStream* st, int depth = 0) const;
private:
- bool is_black() const { return (_parent & 0x1) != 0; }
- bool is_red() const { return (_parent & 0x1) == 0; }
+ bool is_black() const;
+ bool is_red() const;
- void set_black() { _parent |= 0x1; }
- void set_red() { _parent &= ~0x1; }
+ void set_black();
+ void set_red();
- IntrusiveRBNode* parent() const { return (IntrusiveRBNode*)(_parent & ~0x1); }
- void set_parent(IntrusiveRBNode* new_parent) { _parent = (_parent & 0x1) | (uintptr_t)new_parent; }
+ IntrusiveRBNode* parent() const;
+ void set_parent(IntrusiveRBNode* new_parent);
- bool is_right_child() const {
- return parent() != nullptr && parent()->_right == this;
- }
+ bool is_right_child() const;
- bool is_left_child() const {
- return parent() != nullptr && parent()->_left == this;
- }
+ bool is_left_child() const;
void replace_child(IntrusiveRBNode* old_child, IntrusiveRBNode* new_child);
@@ -142,20 +138,20 @@ private:
V _value;
public:
- const K& key() const { return _key; }
+ const K& key() const;
- V& val() { return _value; }
- const V& val() const { return _value; }
- void set_val(const V& v) { _value = v; }
+ V& val();
+ const V& val() const;
+ void set_val(const V& v);
- RBNode() {}
- RBNode(const K& key) : IntrusiveRBNode(), _key(key) {}
- RBNode(const K& key, const V& val) : IntrusiveRBNode(), _key(key), _value(val) {}
+ RBNode();
+ RBNode(const K& key);
+ RBNode(const K& key, const V& val);
- const RBNode* prev() const { return (RBNode*)IntrusiveRBNode::prev(); }
- const RBNode* next() const { return (RBNode*)IntrusiveRBNode::next(); }
- RBNode* prev() { return (RBNode*)IntrusiveRBNode::prev(); }
- RBNode* next() { return (RBNode*)IntrusiveRBNode::next(); }
+ const RBNode* prev() const;
+ const RBNode* next() const;
+ RBNode* prev();
+ RBNode* next();
void print_on(outputStream* st, int depth = 0) const;
@@ -176,17 +172,15 @@ public:
friend AbstractRBTree;
NodeType** _insert_location;
NodeType* _parent;
- Cursor() : _insert_location(nullptr), _parent(nullptr) {}
- Cursor(NodeType** insert_location, NodeType* parent)
- : _insert_location(insert_location), _parent(parent) {}
- Cursor(NodeType* const* insert_location, NodeType* parent)
- : _insert_location((NodeType**)insert_location), _parent(parent) {}
+ Cursor();
+ Cursor(NodeType** insert_location, NodeType* parent);
+ Cursor(NodeType* const* insert_location, NodeType* parent);
public:
- bool valid() const { return _insert_location != nullptr; }
- bool found() const { return *_insert_location != nullptr; }
- NodeType* node() { return _insert_location == nullptr ? nullptr : *_insert_location; }
- NodeType* node() const { return _insert_location == nullptr ? nullptr : *_insert_location; }
+ bool valid() const;
+ bool found() const;
+ NodeType* node();
+ NodeType* node() const;
};
protected:
@@ -212,36 +206,16 @@ private:
static constexpr bool HasNodeVerifier = HasNodeVerifierImpl::value;
- RBTreeOrdering cmp(const K& a, const NodeType* b) const {
- if constexpr (HasNodeComparator) {
- return COMPARATOR::cmp(a, b);
- } else if constexpr (HasKeyComparator) {
- return COMPARATOR::cmp(a, b->key());
- }
- }
+ RBTreeOrdering cmp(const K& a, const NodeType* b) const;
- bool less_than(const NodeType* a, const NodeType* b) const {
- if constexpr (HasNodeVerifier) {
- return COMPARATOR::less_than(a, b);
- } else {
- return true;
- }
- }
+ bool less_than(const NodeType* a, const NodeType* b) const;
- void assert_key_leq(K a, K b) const {
- if constexpr (HasKeyComparator) { // Cannot assert if no key comparator exist.
- assert(COMPARATOR::cmp(a, b) != RBTreeOrdering::GT, "key a must be less or equal to key b");
- }
- }
+ void assert_key_leq(K a, K b) const;
// True if node is black (nil nodes count as black)
- static inline bool is_black(const IntrusiveRBNode* node) {
- return node == nullptr || node->is_black();
- }
+ static inline bool is_black(const IntrusiveRBNode* node);
- static inline bool is_red(const IntrusiveRBNode* node) {
- return node != nullptr && node->is_red();
- }
+ static inline bool is_red(const IntrusiveRBNode* node);
void fix_insert_violations(IntrusiveRBNode* node);
@@ -251,18 +225,14 @@ private:
void remove_from_tree(IntrusiveRBNode* node);
struct empty_verifier {
- bool operator()(const NodeType* n) const {
- return true;
- }
+ bool operator()(const NodeType* n) const;
};
template
void verify_self(NODE_VERIFIER verifier, const USER_VERIFIER& extra_verifier) const;
struct default_printer {
- void operator()(outputStream* st, const NodeType* n, int depth) const {
- n->print_on(st, depth);
- }
+ void operator()(outputStream* st, const NodeType* n, int depth) const;
};
template
@@ -271,12 +241,9 @@ private:
public:
NONCOPYABLE(AbstractRBTree);
- AbstractRBTree() : _num_nodes(0), _root(nullptr) DEBUG_ONLY(COMMA _expected_visited(false)) {
- static_assert(std::is_trivially_destructible::value, "key type must be trivially destructable");
- static_assert(HasKeyComparator || HasNodeComparator, "comparator must be of correct type");
- }
+ AbstractRBTree();
- size_t size() const { return _num_nodes; }
+ size_t size() const;
// Gets the cursor associated with the given node or key.
Cursor cursor(const K& key, const NodeType* hint_node = nullptr);
@@ -311,87 +278,39 @@ public:
void replace_at_cursor(NodeType* new_node, const Cursor& node_cursor);
// Finds the node associated with the given key.
- NodeType* find_node(const K& key, const NodeType* hint_node = nullptr) const {
- Cursor node_cursor = cursor(key, hint_node);
- return node_cursor.node();
- }
-
- NodeType* find_node(const K& key, const NodeType* hint_node = nullptr) {
- Cursor node_cursor = cursor(key, hint_node);
- return node_cursor.node();
- }
+ NodeType* find_node(const K& key, const NodeType* hint_node = nullptr);
+ NodeType* find_node(const K& key, const NodeType* hint_node = nullptr) const;
// Inserts the given node into the tree.
- void insert(const K& key, NodeType* node, const NodeType* hint_node = nullptr) {
- Cursor node_cursor = cursor(key, hint_node);
- insert_at_cursor(node, node_cursor);
- }
+ void insert(const K& key, NodeType* node, const NodeType* hint_node = nullptr);
- void remove(NodeType* node) {
- Cursor node_cursor = cursor(node);
- remove_at_cursor(node_cursor);
- }
+ // Removes the given node from the tree.
+ void remove(NodeType* node);
// Finds the node with the closest key <= the given key.
// If no node is found, null is returned instead.
- NodeType* closest_leq(const K& key) const {
- Cursor node_cursor = cursor(key);
- return node_cursor.found() ? node_cursor.node() : prev(node_cursor).node();
- }
-
- NodeType* closest_leq(const K& key) {
- Cursor node_cursor = cursor(key);
- return node_cursor.found() ? node_cursor.node() : prev(node_cursor).node();
- }
+ NodeType* closest_leq(const K& key);
+ NodeType* closest_leq(const K& key) const;
// Finds the node with the closest key > the given key.
// If no node is found, null is returned instead.
- NodeType* closest_gt(const K& key) const {
- Cursor node_cursor = cursor(key);
- return next(node_cursor).node();
- }
-
- NodeType* closest_gt(const K& key) {
- Cursor node_cursor = cursor(key);
- return next(node_cursor).node();
- }
+ NodeType* closest_gt(const K& key);
+ NodeType* closest_gt(const K& key) const;
// Finds the node with the closest key >= the given key.
// If no node is found, null is returned instead.
- NodeType* closest_ge(const K& key) const {
- Cursor node_cursor = cursor(key);
- return node_cursor.found() ? node_cursor.node() : next(node_cursor).node();
- }
-
- NodeType* closest_ge(const K& key) {
- Cursor node_cursor = cursor(key);
- return node_cursor.found() ? node_cursor.node() : next(node_cursor).node();
- }
+ NodeType* closest_ge(const K& key);
+ NodeType* closest_ge(const K& key) const;
// Returns leftmost node, nullptr if tree is empty.
// If COMPARATOR::cmp(a, b) behaves canonically (positive value for a > b), this will the smallest key value.
- const NodeType* leftmost() const {
- IntrusiveRBNode* n = _root, *n2 = nullptr;
- while (n != nullptr) {
- n2 = n;
- n = n->_left;
- }
- return (NodeType*)n2;
- }
+ NodeType* leftmost();
+ const NodeType* leftmost() const;
// Returns rightmost node, nullptr if tree is empty.
// If COMPARATOR::cmp(a, b) behaves canonically (positive value for a > b), this will the largest key value.
- const NodeType* rightmost() const {
- IntrusiveRBNode* n = _root, *n2 = nullptr;
- while (n != nullptr) {
- n2 = n;
- n = n->_right;
- }
- return (NodeType*)n2;
- }
-
- NodeType* leftmost() { return const_cast(static_cast(this)->leftmost()); }
- NodeType* rightmost() { return const_cast(static_cast(this)->rightmost()); }
+ NodeType* rightmost();
+ const NodeType* rightmost() const;
struct Range {
NodeType* start;
@@ -403,11 +322,7 @@ public:
// Return the range [start, end)
// where start->key() <= addr < end->key().
// Failure to find the range leads to start and/or end being null.
- Range find_enclosing_range(K key) const {
- NodeType* start = closest_leq(key);
- NodeType* end = closest_gt(key);
- return Range(start, end);
- }
+ Range find_enclosing_range(K key) const;
// Visit all RBNodes in ascending order, calling f on each node.
// If f returns `true` the iteration continues, otherwise it is stopped at the current node.
@@ -417,7 +332,6 @@ public:
template
void visit_in_order(F f);
-
// Visit all RBNodes in ascending order whose keys are in range [from, to], calling f on each node.
// If f returns `true` the iteration continues, otherwise it is stopped at the current node.
template
@@ -433,15 +347,7 @@ public:
// This should return true if the node is valid.
// If provided, each node is also verified through this callable.
template
- void verify_self(const USER_VERIFIER& extra_verifier = USER_VERIFIER()) const {
- if constexpr (HasNodeVerifier) {
- verify_self([](const NodeType* a, const NodeType* b){ return COMPARATOR::less_than(a, b);}, extra_verifier);
- } else if constexpr (HasKeyComparator) {
- verify_self([](const NodeType* a, const NodeType* b){ return COMPARATOR::cmp(a->key(), b->key()) == RBTreeOrdering::LT; }, extra_verifier);
- } else {
- verify_self([](const NodeType*, const NodeType*){ return true;}, extra_verifier);
- }
- }
+ void verify_self(const USER_VERIFIER& extra_verifier = USER_VERIFIER()) const;
// Accepts an optional printing callable `void node_printer(outputStream* st, const Node* n, int depth)`.
// If provided, each node is printed through this callable rather than the default `print_on`.
@@ -458,9 +364,10 @@ class RBTree : public AbstractRBTree, COMPARATOR> {
ALLOCATOR _allocator;
public:
- RBTree() : BaseType(), _allocator() {}
+ template
+ RBTree(AllocArgs... alloc_args);
+ ~RBTree();
NONCOPYABLE(RBTree);
- ~RBTree() { remove_all(); }
bool copy_into(RBTree& other) const;
@@ -471,118 +378,68 @@ public:
using BaseType::next;
using BaseType::prev;
- void replace_at_cursor(RBNode* new_node, const Cursor& node_cursor) {
- RBNode* old_node = node_cursor.node();
- BaseType::replace_at_cursor(new_node, node_cursor);
- free_node(old_node);
- }
+ void replace_at_cursor(RBNode* new_node, const Cursor& node_cursor);
- RBNode* allocate_node(const K& key) {
- void* node_place = _allocator.allocate(sizeof(RBNode));
- if (node_place == nullptr) {
- return nullptr;
- }
- return new (node_place) RBNode(key);
- }
+ RBNode* allocate_node(const K& key);
+ RBNode* allocate_node(const K& key, const V& val);
- RBNode* allocate_node(const K& key, const V& val) {
- void* node_place = _allocator.allocate(sizeof(RBNode));
- if (node_place == nullptr) {
- return nullptr;
- }
- return new (node_place) RBNode(key, val);
- }
-
- void free_node(RBNode* node) {
- node->_value.~V();
- _allocator.free(node);
- }
+ void free_node(RBNode* node);
// Inserts a node with the given key/value into the tree,
// if the key already exist, the value is updated instead.
// Returns false if and only if allocation of a new node failed.
- bool upsert(const K& key, const V& val, const RBNode* hint_node = nullptr) {
- Cursor node_cursor = cursor(key, hint_node);
- RBNode* node = node_cursor.node();
- if (node != nullptr) {
- node->set_val(val);
- return true;
- }
-
- node = allocate_node(key, val);
- if (node == nullptr) {
- return false;
- }
- insert_at_cursor(node, node_cursor);
- return true;
- }
+ bool upsert(const K& key, const V& val, const RBNode* hint_node = nullptr);
// Finds the value of the node associated with the given key.
- V* find(const K& key) {
- Cursor node_cursor = cursor(key);
- return node_cursor.found() ? &node_cursor.node()->_value : nullptr;
- }
+ V* find(const K& key);
+ V* find(const K& key) const;
- V* find(const K& key) const {
- const Cursor node_cursor = cursor(key);
- return node_cursor.found() ? &node_cursor.node()->_value : nullptr;
- }
-
- void remove(RBNode* node) {
- Cursor node_cursor = cursor(node);
- remove_at_cursor(node_cursor);
- free_node(node);
- }
+ void remove(RBNode* node);
// Removes the node with the given key from the tree if it exists.
// Returns true if the node was successfully removed, false otherwise.
- bool remove(const K& key) {
- Cursor node_cursor = cursor(key);
- if (!node_cursor.found()) {
- return false;
- }
- RBNode* node = node_cursor.node();
- remove_at_cursor(node_cursor);
- free_node((RBNode*)node);
- return true;
- }
+ bool remove(const K& key);
// Removes all existing nodes from the tree.
- void remove_all() {
- IntrusiveRBNode* to_delete[64];
- int stack_idx = 0;
- to_delete[stack_idx++] = BaseType::_root;
-
- while (stack_idx > 0) {
- IntrusiveRBNode* head = to_delete[--stack_idx];
- if (head == nullptr) continue;
- to_delete[stack_idx++] = head->_left;
- to_delete[stack_idx++] = head->_right;
- free_node((RBNode*)head);
- }
- BaseType::_num_nodes = 0;
- BaseType::_root = nullptr;
- }
+ void remove_all();
};
template
class RBTreeCHeapAllocator {
public:
- void* allocate(size_t sz) {
- void* allocation = os::malloc(sz, mem_tag);
- if (allocation == nullptr && strategy == AllocFailStrategy::EXIT_OOM) {
- vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR,
- "red-black tree failed allocation");
- }
- return allocation;
- }
-
- void free(void* ptr) { os::free(ptr); }
+ void* allocate(size_t sz);
+ void free(void* ptr);
};
+template
+class RBTreeArenaAllocator {
+ Arena* _arena;
+public:
+ RBTreeArenaAllocator(Arena* arena);
+ void* allocate(size_t sz);
+ void free(void* ptr);
+};
+
+template
+class RBTreeResourceAreaAllocator {
+ ResourceArea* _rarea;
+public:
+ RBTreeResourceAreaAllocator(ResourceArea* rarea);
+ void* allocate(size_t sz);
+ void free(void* ptr);
+};
+
+
+
template
using RBTreeCHeap = RBTree>;
+template
+using RBTreeArena = RBTree>;
+
+template
+using RBTreeResourceArea = RBTree>;
+
template
using IntrusiveRBTree = AbstractRBTree;
diff --git a/src/hotspot/share/utilities/rbTree.inline.hpp b/src/hotspot/share/utilities/rbTree.inline.hpp
index 6e01f92d12a..ed8884b2d27 100644
--- a/src/hotspot/share/utilities/rbTree.inline.hpp
+++ b/src/hotspot/share/utilities/rbTree.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,12 +27,49 @@
#include "utilities/rbTree.hpp"
+#include "memory/allocation.hpp"
+#include "memory/arena.hpp"
+#include "memory/resourceArea.hpp"
#include "metaprogramming/enableIf.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include "utilities/powerOfTwo.hpp"
+inline IntrusiveRBNode::IntrusiveRBNode()
+ : _parent(0), _left(nullptr), _right(nullptr) DEBUG_ONLY(COMMA _visited(false)) {}
+
+inline bool IntrusiveRBNode::is_black() const {
+ return (_parent & 0x1) != 0;
+}
+
+inline bool IntrusiveRBNode::is_red() const {
+ return (_parent & 0x1) == 0;
+}
+
+inline void IntrusiveRBNode::set_black() {
+ _parent |= 0x1;
+}
+inline void IntrusiveRBNode::set_red() {
+ _parent &= ~0x1;
+}
+
+inline IntrusiveRBNode* IntrusiveRBNode::parent() const {
+ return reinterpret_cast(_parent & ~0x1);
+}
+
+inline void IntrusiveRBNode::set_parent(IntrusiveRBNode* new_parent) {
+ _parent = (_parent & 0x1) | reinterpret_cast(new_parent);
+}
+
+inline bool IntrusiveRBNode::is_right_child() const {
+ return parent() != nullptr && parent()->_right == this;
+}
+
+inline bool IntrusiveRBNode::is_left_child() const {
+ return parent() != nullptr && parent()->_left == this;
+}
+
inline void IntrusiveRBNode::replace_child(IntrusiveRBNode* old_child, IntrusiveRBNode* new_child) {
if (_left == old_child) {
_left = new_child;
@@ -185,6 +222,144 @@ inline void IntrusiveRBNode::verify(
}
+template
+inline const K& RBNode::key() const {
+ return _key;
+}
+
+template
+inline V& RBNode::val() {
+ return _value;
+}
+
+template
+inline const V& RBNode::val() const {
+ return _value;
+}
+
+template
+inline void RBNode::set_val(const V& v) {
+ _value = v;
+}
+
+template
+inline RBNode::RBNode() {}
+
+template
+inline RBNode::RBNode(const K& key) : IntrusiveRBNode(), _key(key) {}
+
+template
+inline RBNode::RBNode(const K& key, const V& val) : IntrusiveRBNode(), _key(key), _value(val) {}
+
+template
+inline const RBNode* RBNode::prev() const {
+ return static_cast*>(IntrusiveRBNode::prev());
+}
+
+template
+inline const RBNode* RBNode::next() const {
+ return static_cast*>(IntrusiveRBNode::next());
+}
+
+template
+inline RBNode* RBNode::prev() {
+ return static_cast*>(IntrusiveRBNode::prev());
+}
+
+template
+inline RBNode* RBNode::next() {
+ return static_cast*>(IntrusiveRBNode::next());
+}
+
+template
+inline AbstractRBTree::Cursor::Cursor()
+ : _insert_location(nullptr), _parent(nullptr) {}
+
+template
+inline AbstractRBTree