Merge remote-tracking branch 'upstream/master' into JDK-8357404

This commit is contained in:
alexander_matveev 2026-02-19 15:22:27 -08:00
commit 1bca2865d8
381 changed files with 33246 additions and 30195 deletions

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Red Hat Inc. All rights reserved.
* Copyright 2025 Arm Limited and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -664,16 +664,52 @@ void VM_Version::initialize() {
void VM_Version::insert_features_names(uint64_t features, stringStream& ss) {
int i = 0;
ss.join([&]() {
while (i < MAX_CPU_FEATURES) {
if (supports_feature((VM_Version::Feature_Flag)i)) {
return _features_names[i++];
const char* str = nullptr;
while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
if (supports_feature(features, (VM_Version::Feature_Flag)i)) {
str = _features_names[i];
}
i += 1;
}
return (const char*)nullptr;
return str;
}, ", ");
}
// Writes a comma-separated list of the CPU feature names encoded in
// features_buffer into ss.  The buffer holds the arch-specific blob
// produced by store_cpu_features(); on aarch64 that is a single
// 64-bit feature mask.
void VM_Version::get_cpu_features_name(void* features_buffer, stringStream& ss) {
  uint64_t features = *(uint64_t*)features_buffer;
  insert_features_names(features, ss);
}
// Writes, comma separated, the names of features present in features_set1
// but absent from features_set2.  Both buffers hold the aarch64
// representation: a single 64-bit feature mask.
void VM_Version::get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss) {
  uint64_t vm_features_set1 = *(uint64_t*)features_set1;
  uint64_t vm_features_set2 = *(uint64_t*)features_set2;
  int i = 0;
  // The lambda scans forward from the shared index i to the next feature
  // that set1 has and set2 lacks, returning its name, or nullptr once all
  // flags have been examined (presumably join() stops on nullptr — same
  // pattern as insert_features_names above).
  ss.join([&]() {
    const char* str = nullptr;
    while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
      Feature_Flag flag = (Feature_Flag)i;
      if (supports_feature(vm_features_set1, flag) && !supports_feature(vm_features_set2, flag)) {
        str = _features_names[i];
      }
      i += 1;
    }
    return str;
  }, ", ");
}
// Number of bytes required to store this platform's CPU feature
// representation (on aarch64: the raw _features mask).
int VM_Version::cpu_features_size() {
  return sizeof(_features);
}
// Copies the current CPU feature mask into buf.  The caller must supply
// a buffer of at least cpu_features_size() bytes.
void VM_Version::store_cpu_features(void* buf) {
  *(uint64_t*)buf = _features;
}
// Returns true iff every feature recorded in features_buffer is also
// available on the current CPU, i.e. the stored mask is a subset of the
// VM's feature mask.
bool VM_Version::supports_features(void* features_buffer) {
  const uint64_t required = *(uint64_t*)features_buffer;
  // Any required bit missing from _features fails the subset test.
  return (required & ~_features) == 0;
}
#if defined(LINUX)
static bool check_info_file(const char* fpath,
const char* virt1, VirtualizationType vt1,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -184,6 +184,9 @@ public:
static bool supports_feature(Feature_Flag flag) {
return (_features & BIT_MASK(flag)) != 0;
}
// Variant of supports_feature(flag) that tests an arbitrary feature mask
// instead of the VM-global _features value.
static bool supports_feature(uint64_t features, Feature_Flag flag) {
  return (features & BIT_MASK(flag)) != 0;
}
static int cpu_family() { return _cpu; }
static int cpu_model() { return _model; }
@ -244,6 +247,20 @@ public:
static bool use_neon_for_vector(int vector_length_in_bytes) {
return vector_length_in_bytes <= 16;
}
static void get_cpu_features_name(void* features_buffer, stringStream& ss);
// Returns names of features present in features_set1 but not in features_set2
static void get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss);
// Returns number of bytes required to store cpu features representation
static int cpu_features_size();
// Stores cpu features representation in the provided buffer. This representation is arch dependent.
// Size of the buffer must be same as returned by cpu_features_size()
static void store_cpu_features(void* buf);
static bool supports_features(void* features_to_test);
};
#endif // CPU_AARCH64_VM_VERSION_AARCH64_HPP

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,11 +49,6 @@
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
inline static RegisterOrConstant constant(int value) {
return RegisterOrConstant(value);
}
void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg,
Register temp_reg, Register temp2_reg) {
if (VerifyMethodHandles) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -29,9 +29,6 @@
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#ifdef COMPILER2
#include "opto/matcher.hpp"
#endif
// ----------------------------------------------------------------------------
@ -39,7 +36,6 @@
#define __ masm->
address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark/* = nullptr*/) {
#ifdef COMPILER2
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
if (mark == nullptr) {
@ -55,7 +51,7 @@ address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address ma
__ relocate(static_stub_Relocation::spec(mark));
AddressLiteral meta = __ allocate_metadata_address(nullptr);
bool success = __ load_const_from_toc(as_Register(Matcher::inline_cache_reg_encode()), meta);
bool success = __ load_const_from_toc(Z_inline_cache, meta);
__ set_inst_mark();
AddressLiteral a((address)-1);
@ -67,10 +63,6 @@ address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address ma
__ z_br(Z_R1);
__ end_a_stub(); // Update current stubs pointer and restore insts_end.
return stub;
#else
ShouldNotReachHere();
return nullptr;
#endif
}
#undef __

View File

@ -48,7 +48,7 @@ int VM_Version::_stepping;
bool VM_Version::_has_intel_jcc_erratum;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };
#define DECLARE_CPU_FEATURE_NAME(id, name, bit) name,
#define DECLARE_CPU_FEATURE_NAME(id, name, bit) XSTR(name),
const char* VM_Version::_features_names[] = { CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_NAME)};
#undef DECLARE_CPU_FEATURE_NAME
@ -3297,12 +3297,50 @@ bool VM_Version::is_intrinsic_supported(vmIntrinsicID id) {
void VM_Version::insert_features_names(VM_Version::VM_Features features, stringStream& ss) {
int i = 0;
ss.join([&]() {
while (i < MAX_CPU_FEATURES) {
if (_features.supports_feature((VM_Version::Feature_Flag)i)) {
return _features_names[i++];
const char* str = nullptr;
while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
if (features.supports_feature((VM_Version::Feature_Flag)i)) {
str = _features_names[i];
}
i += 1;
}
return (const char*)nullptr;
return str;
}, ", ");
}
// Writes a comma-separated list of the CPU feature names encoded in
// features_buffer into ss.  On x86 the buffer holds a whole VM_Features
// bitmap, as produced by store_cpu_features().
void VM_Version::get_cpu_features_name(void* features_buffer, stringStream& ss) {
  VM_Features* features = (VM_Features*)features_buffer;
  insert_features_names(*features, ss);
}
// Writes, comma separated, the names of features present in features_set1
// but absent from features_set2.  Both buffers hold x86 VM_Features
// bitmaps.
void VM_Version::get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss) {
  VM_Features* vm_features_set1 = (VM_Features*)features_set1;
  VM_Features* vm_features_set2 = (VM_Features*)features_set2;
  int i = 0;
  // The lambda scans forward from the shared index i to the next feature
  // that set1 has and set2 lacks, returning its name, or nullptr once all
  // flags have been examined (same pattern as insert_features_names above).
  ss.join([&]() {
    const char* str = nullptr;
    while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
      Feature_Flag flag = (Feature_Flag)i;
      if (vm_features_set1->supports_feature(flag) && !vm_features_set2->supports_feature(flag)) {
        str = _features_names[i];
      }
      i += 1;
    }
    return str;
  }, ", ");
}
// Number of bytes required to store the x86 CPU feature representation
// (a complete VM_Features bitmap).
int VM_Version::cpu_features_size() {
  return sizeof(VM_Features);
}
// Copies the current VM_Features bitmap into buf (which must be at least
// cpu_features_size() bytes), first masking out flags that do not affect
// AOT code compatibility.
void VM_Version::store_cpu_features(void* buf) {
  VM_Features copy = _features;
  copy.clear_feature(CPU_HT); // HT does not result in incompatibility of aot code cache
  memcpy(buf, &copy, sizeof(VM_Features));
}
// Returns true iff every feature recorded in features_buffer (a stored
// VM_Features bitmap) is also present in the current CPU's feature set.
bool VM_Version::supports_features(void* features_buffer) {
  VM_Features* features_to_test = (VM_Features*)features_buffer;
  return _features.supports_features(features_to_test);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -377,84 +377,84 @@ protected:
*/
enum Feature_Flag {
#define CPU_FEATURE_FLAGS(decl) \
decl(CX8, "cx8", 0) /* next bits are from cpuid 1 (EDX) */ \
decl(CMOV, "cmov", 1) \
decl(FXSR, "fxsr", 2) \
decl(HT, "ht", 3) \
decl(CX8, cx8, 0) /* next bits are from cpuid 1 (EDX) */ \
decl(CMOV, cmov, 1) \
decl(FXSR, fxsr, 2) \
decl(HT, ht, 3) \
\
decl(MMX, "mmx", 4) \
decl(3DNOW_PREFETCH, "3dnowpref", 5) /* Processor supports 3dnow prefetch and prefetchw instructions */ \
decl(MMX, mmx, 4) \
decl(3DNOW_PREFETCH, 3dnowpref, 5) /* Processor supports 3dnow prefetch and prefetchw instructions */ \
/* may not necessarily support other 3dnow instructions */ \
decl(SSE, "sse", 6) \
decl(SSE2, "sse2", 7) \
decl(SSE, sse, 6) \
decl(SSE2, sse2, 7) \
\
decl(SSE3, "sse3", 8 ) /* SSE3 comes from cpuid 1 (ECX) */ \
decl(SSSE3, "ssse3", 9 ) \
decl(SSE4A, "sse4a", 10) \
decl(SSE4_1, "sse4.1", 11) \
decl(SSE3, sse3, 8 ) /* SSE3 comes from cpuid 1 (ECX) */ \
decl(SSSE3, ssse3, 9 ) \
decl(SSE4A, sse4a, 10) \
decl(SSE4_1, sse4.1, 11) \
\
decl(SSE4_2, "sse4.2", 12) \
decl(POPCNT, "popcnt", 13) \
decl(LZCNT, "lzcnt", 14) \
decl(TSC, "tsc", 15) \
decl(SSE4_2, sse4.2, 12) \
decl(POPCNT, popcnt, 13) \
decl(LZCNT, lzcnt, 14) \
decl(TSC, tsc, 15) \
\
decl(TSCINV_BIT, "tscinvbit", 16) \
decl(TSCINV, "tscinv", 17) \
decl(AVX, "avx", 18) \
decl(AVX2, "avx2", 19) \
decl(TSCINV_BIT, tscinvbit, 16) \
decl(TSCINV, tscinv, 17) \
decl(AVX, avx, 18) \
decl(AVX2, avx2, 19) \
\
decl(AES, "aes", 20) \
decl(ERMS, "erms", 21) /* enhanced 'rep movsb/stosb' instructions */ \
decl(CLMUL, "clmul", 22) /* carryless multiply for CRC */ \
decl(BMI1, "bmi1", 23) \
decl(AES, aes, 20) \
decl(ERMS, erms, 21) /* enhanced 'rep movsb/stosb' instructions */ \
decl(CLMUL, clmul, 22) /* carryless multiply for CRC */ \
decl(BMI1, bmi1, 23) \
\
decl(BMI2, "bmi2", 24) \
decl(RTM, "rtm", 25) /* Restricted Transactional Memory instructions */ \
decl(ADX, "adx", 26) \
decl(AVX512F, "avx512f", 27) /* AVX 512bit foundation instructions */ \
decl(BMI2, bmi2, 24) \
decl(RTM, rtm, 25) /* Restricted Transactional Memory instructions */ \
decl(ADX, adx, 26) \
decl(AVX512F, avx512f, 27) /* AVX 512bit foundation instructions */ \
\
decl(AVX512DQ, "avx512dq", 28) \
decl(AVX512PF, "avx512pf", 29) \
decl(AVX512ER, "avx512er", 30) \
decl(AVX512CD, "avx512cd", 31) \
decl(AVX512DQ, avx512dq, 28) \
decl(AVX512PF, avx512pf, 29) \
decl(AVX512ER, avx512er, 30) \
decl(AVX512CD, avx512cd, 31) \
\
decl(AVX512BW, "avx512bw", 32) /* Byte and word vector instructions */ \
decl(AVX512VL, "avx512vl", 33) /* EVEX instructions with smaller vector length */ \
decl(SHA, "sha", 34) /* SHA instructions */ \
decl(FMA, "fma", 35) /* FMA instructions */ \
decl(AVX512BW, avx512bw, 32) /* Byte and word vector instructions */ \
decl(AVX512VL, avx512vl, 33) /* EVEX instructions with smaller vector length */ \
decl(SHA, sha, 34) /* SHA instructions */ \
decl(FMA, fma, 35) /* FMA instructions */ \
\
decl(VZEROUPPER, "vzeroupper", 36) /* Vzeroupper instruction */ \
decl(AVX512_VPOPCNTDQ, "avx512_vpopcntdq", 37) /* Vector popcount */ \
decl(AVX512_VPCLMULQDQ, "avx512_vpclmulqdq", 38) /* Vector carryless multiplication */ \
decl(AVX512_VAES, "avx512_vaes", 39) /* Vector AES instruction */ \
decl(VZEROUPPER, vzeroupper, 36) /* Vzeroupper instruction */ \
decl(AVX512_VPOPCNTDQ, avx512_vpopcntdq, 37) /* Vector popcount */ \
decl(AVX512_VPCLMULQDQ, avx512_vpclmulqdq, 38) /* Vector carryless multiplication */ \
decl(AVX512_VAES, avx512_vaes, 39) /* Vector AES instruction */ \
\
decl(AVX512_VNNI, "avx512_vnni", 40) /* Vector Neural Network Instructions */ \
decl(FLUSH, "clflush", 41) /* flush instruction */ \
decl(FLUSHOPT, "clflushopt", 42) /* flusopth instruction */ \
decl(CLWB, "clwb", 43) /* clwb instruction */ \
decl(AVX512_VNNI, avx512_vnni, 40) /* Vector Neural Network Instructions */ \
decl(FLUSH, clflush, 41) /* flush instruction */ \
decl(FLUSHOPT, clflushopt, 42) /* flusopth instruction */ \
decl(CLWB, clwb, 43) /* clwb instruction */ \
\
decl(AVX512_VBMI2, "avx512_vbmi2", 44) /* VBMI2 shift left double instructions */ \
decl(AVX512_VBMI, "avx512_vbmi", 45) /* Vector BMI instructions */ \
decl(HV, "hv", 46) /* Hypervisor instructions */ \
decl(SERIALIZE, "serialize", 47) /* CPU SERIALIZE */ \
decl(RDTSCP, "rdtscp", 48) /* RDTSCP instruction */ \
decl(RDPID, "rdpid", 49) /* RDPID instruction */ \
decl(FSRM, "fsrm", 50) /* Fast Short REP MOV */ \
decl(GFNI, "gfni", 51) /* Vector GFNI instructions */ \
decl(AVX512_BITALG, "avx512_bitalg", 52) /* Vector sub-word popcount and bit gather instructions */\
decl(F16C, "f16c", 53) /* Half-precision and single precision FP conversion instructions*/ \
decl(PKU, "pku", 54) /* Protection keys for user-mode pages */ \
decl(OSPKE, "ospke", 55) /* OS enables protection keys */ \
decl(CET_IBT, "cet_ibt", 56) /* Control Flow Enforcement - Indirect Branch Tracking */ \
decl(CET_SS, "cet_ss", 57) /* Control Flow Enforcement - Shadow Stack */ \
decl(AVX512_IFMA, "avx512_ifma", 58) /* Integer Vector FMA instructions*/ \
decl(AVX_IFMA, "avx_ifma", 59) /* 256-bit VEX-coded variant of AVX512-IFMA*/ \
decl(APX_F, "apx_f", 60) /* Intel Advanced Performance Extensions*/ \
decl(SHA512, "sha512", 61) /* SHA512 instructions*/ \
decl(AVX512_FP16, "avx512_fp16", 62) /* AVX512 FP16 ISA support*/ \
decl(AVX10_1, "avx10_1", 63) /* AVX10 512 bit vector ISA Version 1 support*/ \
decl(AVX10_2, "avx10_2", 64) /* AVX10 512 bit vector ISA Version 2 support*/ \
decl(HYBRID, "hybrid", 65) /* Hybrid architecture */
decl(AVX512_VBMI2, avx512_vbmi2, 44) /* VBMI2 shift left double instructions */ \
decl(AVX512_VBMI, avx512_vbmi, 45) /* Vector BMI instructions */ \
decl(HV, hv, 46) /* Hypervisor instructions */ \
decl(SERIALIZE, serialize, 47) /* CPU SERIALIZE */ \
decl(RDTSCP, rdtscp, 48) /* RDTSCP instruction */ \
decl(RDPID, rdpid, 49) /* RDPID instruction */ \
decl(FSRM, fsrm, 50) /* Fast Short REP MOV */ \
decl(GFNI, gfni, 51) /* Vector GFNI instructions */ \
decl(AVX512_BITALG, avx512_bitalg, 52) /* Vector sub-word popcount and bit gather instructions */\
decl(F16C, f16c, 53) /* Half-precision and single precision FP conversion instructions*/ \
decl(PKU, pku, 54) /* Protection keys for user-mode pages */ \
decl(OSPKE, ospke, 55) /* OS enables protection keys */ \
decl(CET_IBT, cet_ibt, 56) /* Control Flow Enforcement - Indirect Branch Tracking */ \
decl(CET_SS, cet_ss, 57) /* Control Flow Enforcement - Shadow Stack */ \
decl(AVX512_IFMA, avx512_ifma, 58) /* Integer Vector FMA instructions*/ \
decl(AVX_IFMA, avx_ifma, 59) /* 256-bit VEX-coded variant of AVX512-IFMA*/ \
decl(APX_F, apx_f, 60) /* Intel Advanced Performance Extensions*/ \
decl(SHA512, sha512, 61) /* SHA512 instructions*/ \
decl(AVX512_FP16, avx512_fp16, 62) /* AVX512 FP16 ISA support*/ \
decl(AVX10_1, avx10_1, 63) /* AVX10 512 bit vector ISA Version 1 support*/ \
decl(AVX10_2, avx10_2, 64) /* AVX10 512 bit vector ISA Version 2 support*/ \
decl(HYBRID, hybrid, 65) /* Hybrid architecture */
#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = (bit),
CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_FLAG)
@ -516,6 +516,15 @@ protected:
int idx = index(feature);
return (_features_bitmap[idx] & bit_mask(feature)) != 0;
}
// Returns true iff features_to_test is a subset of this feature set:
// every bit set in any element of its bitmap must also be set here.
bool supports_features(VM_Features* features_to_test) {
  for (int i = 0; i < features_bitmap_element_count(); i++) {
    if ((_features_bitmap[i] & features_to_test->_features_bitmap[i]) != features_to_test->_features_bitmap[i]) {
      return false;
    }
  }
  return true;
}
};
// CPU feature flags vector, can be affected by VM settings.
@ -1103,6 +1112,20 @@ public:
static bool supports_tscinv_ext(void);
static void initialize_cpu_information(void);
static void get_cpu_features_name(void* features_buffer, stringStream& ss);
// Returns names of features present in features_set1 but not in features_set2
static void get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss);
// Returns number of bytes required to store cpu features representation
static int cpu_features_size();
// Stores cpu features representation in the provided buffer. This representation is arch dependent.
// Size of the buffer must be same as returned by cpu_features_size()
static void store_cpu_features(void* buf);
static bool supports_features(void* features_to_test);
};
#endif // CPU_X86_VM_VERSION_X86_HPP

View File

@ -1753,10 +1753,9 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return true;
}
bool os::remove_stack_guard_pages(char* addr, size_t size) {
void os::remove_stack_guard_pages(char* addr, size_t size) {
// Do not call this; no need to commit stack pages on AIX.
ShouldNotReachHere();
return true;
}
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {

View File

@ -143,12 +143,6 @@ static OSReturn get_jvm_load(double* jvm_uload, double* jvm_sload) {
return OS_OK;
}
static void update_prev_time(jvm_time_store_t* from, jvm_time_store_t* to) {
if (from && to) {
memcpy(to, from, sizeof(jvm_time_store_t));
}
}
static void update_prev_ticks(cpu_tick_store_t* from, cpu_tick_store_t* to) {
if (from && to) {
memcpy(to, from, sizeof(cpu_tick_store_t));

View File

@ -1782,10 +1782,8 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size, !ExecMem);
}
// If this is a growable mapping, remove the guard pages entirely by
// munmap()ping them. If not, just call uncommit_memory().
bool os::remove_stack_guard_pages(char* addr, size_t size) {
return os::uncommit_memory(addr, size);
void os::remove_stack_guard_pages(char* addr, size_t size) {
os::uncommit_memory(addr, size);
}
// 'requested_addr' is only treated as a hint, the return value may or

View File

@ -3523,6 +3523,9 @@ bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
log_trace(os, map)("mmap failed: " RANGEFMT " errno=(%s)",
RANGEFMTARGS(addr, size),
os::strerror(ep.saved_errno()));
if (ep.saved_errno() == ENOMEM) {
fatal("Failed to uncommit " RANGEFMT ". It is possible that the process's maximum number of mappings would have been exceeded. Try increasing the limit.", RANGEFMTARGS(addr, size));
}
return false;
}
return true;
@ -3633,14 +3636,16 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
// It's safe to always unmap guard pages for primordial thread because we
// always place it right after end of the mapped region.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
uintptr_t stack_extent, stack_base;
void os::remove_stack_guard_pages(char* addr, size_t size) {
if (os::is_primordial_thread()) {
return ::munmap(addr, size) == 0;
if (::munmap(addr, size) != 0) {
fatal("Failed to munmap " RANGEFMT, RANGEFMTARGS(addr, size));
}
return;
}
return os::uncommit_memory(addr, size);
os::uncommit_memory(addr, size);
}
// 'requested_addr' is only treated as a hint, the return value may or

View File

@ -3281,11 +3281,10 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
// Do manual alignment
aligned_base = align_up(extra_base, alignment);
bool rc = (file_desc != -1) ? os::unmap_memory(extra_base, extra_size) :
os::release_memory(extra_base, extra_size);
assert(rc, "release failed");
if (!rc) {
return nullptr;
if (file_desc != -1) {
os::unmap_memory(extra_base, extra_size);
} else {
os::release_memory(extra_base, extra_size);
}
// Attempt to map, into the just vacated space, the slightly smaller aligned area.
@ -3681,8 +3680,8 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size, !ExecMem);
}
bool os::remove_stack_guard_pages(char* addr, size_t size) {
return os::uncommit_memory(addr, size);
void os::remove_stack_guard_pages(char* addr, size_t size) {
os::uncommit_memory(addr, size);
}
static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -426,7 +426,8 @@ bool AOTClassLocation::check(const char* runtime_path, bool has_aot_linked_class
bool size_differs = _filesize != st.st_size;
bool time_differs = _check_time && (_timestamp != st.st_mtime);
if (size_differs || time_differs) {
aot_log_warning(aot)("This file is not the one used while building the shared archive file: '%s'%s%s",
aot_log_warning(aot)("This file is not the one used while building the %s: '%s'%s%s",
CDSConfig::type_of_archive_being_loaded(),
runtime_path,
time_differs ? ", timestamp has changed" : "",
size_differs ? ", size has changed" : "");
@ -448,6 +449,13 @@ void AOTClassLocationConfig::dumptime_init(JavaThread* current) {
java_lang_Throwable::print(current->pending_exception(), tty);
vm_exit_during_initialization("AOTClassLocationConfig::dumptime_init_helper() failed unexpectedly");
}
if (CDSConfig::is_dumping_final_static_archive()) {
// The _max_used_index is usually updated by ClassLoader::record_result(). However,
// when dumping the final archive, the classes are loaded from their images in
// the AOT config file, so we don't go through ClassLoader::record_result().
dumptime_update_max_used_index(runtime()->_max_used_index); // Same value as recorded in the training run.
}
}
void AOTClassLocationConfig::dumptime_init_helper(TRAPS) {

View File

@ -1148,7 +1148,7 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::ensure_module_entry_tables_exist();
ClassLoaderDataShared::build_tables(CHECK);
HeapShared::reset_archived_object_states(CHECK);
HeapShared::prepare_for_archiving(CHECK);
}
AOTReferenceObjSupport::initialize(CHECK);

View File

@ -1325,9 +1325,7 @@ char* FileMapInfo::map_auxiliary_region(int region_index, bool read_only) {
if (VerifySharedSpaces && !r->check_region_crc(mapped_base)) {
aot_log_error(aot)("region %d CRC error", region_index);
if (!os::unmap_memory(mapped_base, r->used_aligned())) {
fatal("os::unmap_memory of region %d failed", region_index);
}
os::unmap_memory(mapped_base, r->used_aligned());
return nullptr;
}
@ -1654,9 +1652,7 @@ void FileMapInfo::unmap_region(int i) {
// is released. Zero it so that we don't accidentally read its content.
aot_log_info(aot)("Region #%d (%s) is in a reserved space, it will be freed when the space is released", i, shared_region_name[i]);
} else {
if (!os::unmap_memory(mapped_base, size)) {
fatal("os::unmap_memory failed");
}
os::unmap_memory(mapped_base, size);
}
}
r->set_mapped_base(nullptr);

View File

@ -247,6 +247,28 @@ void HeapShared::reset_archived_object_states(TRAPS) {
reset_states(boot_loader(), CHECK);
}
// Calls jdk.internal.util.WeakReferenceKey::ensureDeterministicAOTCache()
// on the Java side so the archived heap contents are deterministic.
// NOTE(review): the log message mentions an Object.class argument but the
// call uses the ()Z signature with no arguments — presumably the Java
// method takes none; confirm against the Java-side implementation.
void HeapShared::ensure_determinism(TRAPS) {
  TempNewSymbol class_name = SymbolTable::new_symbol("jdk/internal/util/WeakReferenceKey");
  TempNewSymbol method_name = SymbolTable::new_symbol("ensureDeterministicAOTCache");
  Klass* weak_ref_key_class = SystemDictionary::resolve_or_fail(class_name, true, CHECK);
  precond(weak_ref_key_class != nullptr);
  log_debug(aot)("Calling WeakReferenceKey::ensureDeterministicAOTCache(Object.class)");
  JavaValue result(T_BOOLEAN);
  JavaCalls::call_static(&result,
                         weak_ref_key_class,
                         method_name,
                         vmSymbols::void_boolean_signature(),
                         CHECK);
  // The Java method is expected to report false here.
  assert(result.get_jboolean() == false, "sanity");
}
// One-stop preparation before heap archiving: resets archived-object
// states and forces Java-side determinism (see ensure_determinism()).
void HeapShared::prepare_for_archiving(TRAPS) {
  reset_archived_object_states(CHECK);
  ensure_determinism(CHECK);
}
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
bool HeapShared::is_archived_heap_in_use() {

View File

@ -382,8 +382,10 @@ private:
static bool walk_one_object(PendingOopStack* stack, int level, KlassSubGraphInfo* subgraph_info,
oop orig_obj, oop referrer);
public:
static void reset_archived_object_states(TRAPS);
static void ensure_determinism(TRAPS);
public:
static void prepare_for_archiving(TRAPS);
static void create_archived_object_cache() {
_archived_object_cache =
new (mtClass)ArchivedObjectCache(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);

View File

@ -399,7 +399,7 @@ AOTCodeCache::~AOTCodeCache() {
}
}
void AOTCodeCache::Config::record() {
void AOTCodeCache::Config::record(uint cpu_features_offset) {
_flags = 0;
#ifdef ASSERT
_flags |= debugVM;
@ -430,9 +430,50 @@ void AOTCodeCache::Config::record() {
_compressedKlassShift = CompressedKlassPointers::shift();
_contendedPaddingWidth = ContendedPaddingWidth;
_gc = (uint)Universe::heap()->kind();
_cpu_features_offset = cpu_features_offset;
}
bool AOTCodeCache::Config::verify() const {
// Verifies that the CPU features recorded in the AOT code cache are all
// supported by the current CPU.  Returns false (which disables the cache)
// when a recorded feature is missing at runtime.  When debug logging is
// enabled, also reports the recorded features and the delta between the
// recorded and the runtime feature sets.
// Cache layout at _cpu_features_offset: a uint holding the blob size,
// followed by the arch-specific feature blob (see store_cpu_features()).
bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
  LogStreamHandle(Debug, aot, codecache, init) log;
  uint offset = _cpu_features_offset;
  uint cpu_features_size = *(uint *)cache->addr(offset);
  // The blob size must match this VM's arch-specific representation size.
  assert(cpu_features_size == (uint)VM_Version::cpu_features_size(), "must be");
  offset += sizeof(uint);
  void* cached_cpu_features_buffer = (void *)cache->addr(offset);
  if (log.is_enabled()) {
    ResourceMark rm; // required for stringStream::as_string()
    stringStream ss;
    VM_Version::get_cpu_features_name(cached_cpu_features_buffer, ss);
    log.print_cr("CPU features recorded in AOTCodeCache: %s", ss.as_string());
  }
  if (VM_Version::supports_features(cached_cpu_features_buffer)) {
    // Compatible: optionally log features the runtime CPU has beyond
    // what was recorded at dump time.
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(runtime_cpu_features, cached_cpu_features_buffer, ss);
      if (!ss.is_empty()) {
        log.print_cr("Additional runtime CPU features: %s", ss.as_string());
      }
    }
  } else {
    // Incompatible: log which recorded features the runtime CPU lacks,
    // then reject the cache.
    if (log.is_enabled()) {
      ResourceMark rm; // required for stringStream::as_string()
      stringStream ss;
      char* runtime_cpu_features = NEW_RESOURCE_ARRAY(char, VM_Version::cpu_features_size());
      VM_Version::store_cpu_features(runtime_cpu_features);
      VM_Version::get_missing_features_name(cached_cpu_features_buffer, runtime_cpu_features, ss);
      log.print_cr("AOT Code Cache disabled: required cpu features are missing: %s", ss.as_string());
    }
    return false;
  }
  return true;
}
bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
// First checks affect all cached AOT code
#ifdef ASSERT
if ((_flags & debugVM) == 0) {
@ -478,6 +519,9 @@ bool AOTCodeCache::Config::verify() const {
AOTStubCaching = false;
}
if (!verify_cpu_features(cache)) {
return false;
}
return true;
}
@ -679,6 +723,17 @@ extern "C" {
}
}
// Writes the current CPU feature representation at 'buffer': first a uint
// holding buffer_size, then the arch-specific feature blob itself.
// Advances 'buffer' past the written data, aligned up to DATA_ALIGNMENT,
// so the caller can continue writing the next cache section.
void AOTCodeCache::store_cpu_features(char*& buffer, uint buffer_size) {
  uint* size_ptr = (uint *)buffer;
  *size_ptr = buffer_size;
  buffer += sizeof(uint);
  VM_Version::store_cpu_features(buffer);
  log_debug(aot, codecache, exit)("CPU features recorded in AOTCodeCache: %s", VM_Version::features_string());
  buffer += buffer_size;
  buffer = align_up(buffer, DATA_ALIGNMENT);
}
bool AOTCodeCache::finish_write() {
if (!align_write()) {
return false;
@ -698,23 +753,32 @@ bool AOTCodeCache::finish_write() {
uint store_count = _store_entries_cnt;
if (store_count > 0) {
uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
uint header_size = (uint)align_up(sizeof(AOTCodeCache::Header), DATA_ALIGNMENT);
uint code_count = store_count;
uint search_count = code_count * 2;
uint search_size = search_count * sizeof(uint);
uint entries_size = (uint)align_up(code_count * sizeof(AOTCodeEntry), DATA_ALIGNMENT); // In bytes
// _write_position includes size of code and strings
uint code_alignment = code_count * DATA_ALIGNMENT; // We align_up code size when storing it.
uint total_size = header_size + _write_position + code_alignment + search_size + entries_size;
uint cpu_features_size = VM_Version::cpu_features_size();
uint total_cpu_features_size = sizeof(uint) + cpu_features_size; // sizeof(uint) to store cpu_features_size
uint total_size = header_size + _write_position + code_alignment + search_size + entries_size +
align_up(total_cpu_features_size, DATA_ALIGNMENT);
assert(total_size < max_aot_code_size(), "AOT Code size (" UINT32_FORMAT " bytes) is greater than AOTCodeMaxSize(" UINT32_FORMAT " bytes).", total_size, max_aot_code_size());
// Create ordered search table for entries [id, index];
uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
// Allocate in AOT Cache buffer
char* buffer = (char *)AOTCacheAccess::allocate_aot_code_region(total_size + DATA_ALIGNMENT);
char* start = align_up(buffer, DATA_ALIGNMENT);
char* current = start + header_size; // Skip header
uint cpu_features_offset = current - start;
store_cpu_features(current, cpu_features_size);
assert(is_aligned(current, DATA_ALIGNMENT), "sanity check");
assert(current < start + total_size, "sanity check");
// Create ordered search table for entries [id, index];
uint* search = NEW_C_HEAP_ARRAY(uint, search_count, mtCode);
AOTCodeEntry* entries_address = _store_entries; // Pointer to latest entry
uint adapters_count = 0;
uint shared_blobs_count = 0;
@ -790,7 +854,7 @@ bool AOTCodeCache::finish_write() {
header->init(size, (uint)strings_count, strings_offset,
entries_count, new_entries_offset,
adapters_count, shared_blobs_count,
C1_blobs_count, C2_blobs_count);
C1_blobs_count, C2_blobs_count, cpu_features_offset);
log_info(aot, codecache, exit)("Wrote %d AOT code entries to AOT Code Cache", entries_count);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -185,10 +185,12 @@ protected:
restrictContendedPadding = 128
};
uint _flags;
uint _cpu_features_offset; // offset in the cache where cpu features are stored
public:
void record();
bool verify() const;
void record(uint cpu_features_offset);
bool verify_cpu_features(AOTCodeCache* cache) const;
bool verify(AOTCodeCache* cache) const;
};
class Header : public CHeapObj<mtCode> {
@ -206,14 +208,15 @@ protected:
uint _shared_blobs_count;
uint _C1_blobs_count;
uint _C2_blobs_count;
Config _config;
Config _config; // must be the last element as there is trailing data stored immediately after Config
public:
void init(uint cache_size,
uint strings_count, uint strings_offset,
uint entries_count, uint entries_offset,
uint adapters_count, uint shared_blobs_count,
uint C1_blobs_count, uint C2_blobs_count) {
uint C1_blobs_count, uint C2_blobs_count,
uint cpu_features_offset) {
_version = AOT_CODE_VERSION;
_cache_size = cache_size;
_strings_count = strings_count;
@ -224,7 +227,7 @@ protected:
_shared_blobs_count = shared_blobs_count;
_C1_blobs_count = C1_blobs_count;
_C2_blobs_count = C2_blobs_count;
_config.record();
_config.record(cpu_features_offset);
}
@ -239,8 +242,8 @@ protected:
uint C2_blobs_count() const { return _C2_blobs_count; }
bool verify(uint load_size) const;
bool verify_config() const { // Called after Universe initialized
return _config.verify();
bool verify_config(AOTCodeCache* cache) const { // Called after Universe initialized
return _config.verify(cache);
}
};
@ -320,6 +323,8 @@ public:
AOTCodeEntry* find_entry(AOTCodeEntry::Kind kind, uint id);
void store_cpu_features(char*& buffer, uint buffer_size);
bool finish_write();
bool write_relocations(CodeBlob& code_blob);
@ -361,7 +366,7 @@ private:
static bool open_cache(bool is_dumping, bool is_using);
bool verify_config() {
if (for_use()) {
return _load_header->verify_config();
return _load_header->verify_config(this);
}
return true;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -336,6 +336,7 @@ RuntimeBlob::RuntimeBlob(
void RuntimeBlob::free(RuntimeBlob* blob) {
assert(blob != nullptr, "caller must check for nullptr");
MACOS_AARCH64_ONLY(os::thread_wx_enable_write());
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
blob->purge();
{

View File

@ -24,10 +24,9 @@
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
size_t G1BlockOffsetTable::compute_size(size_t mem_region_words) {
@ -52,6 +51,12 @@ void G1BlockOffsetTable::set_offset_array(Atomic<uint8_t>* addr, uint8_t offset)
addr->store_relaxed(offset);
}
// Debug-only sanity check: a BOT entry encodes a backskip in words and must
// therefore be strictly smaller than the number of words covered by one card.
// The whole body is an assert, so this compiles to nothing in product builds.
static void check_offset(size_t offset, const char* msg) {
  assert(offset < CardTable::card_size_in_words(),
         "%s - offset: %zu, N_words: %u",
         msg, offset, CardTable::card_size_in_words());
}
void G1BlockOffsetTable::set_offset_array(Atomic<uint8_t>* addr, HeapWord* high, HeapWord* low) {
assert(high >= low, "addresses out of order");
size_t offset = pointer_delta(high, low);

View File

@ -37,19 +37,12 @@
// for each such subregion indicates how far back one must go to find the
// start of the chunk that includes the first word of the subregion.
class G1BlockOffsetTable : public CHeapObj<mtGC> {
private:
// The reserved region covered by the table.
MemRegion _reserved;
// Biased array-start of BOT array for fast BOT entry translation
Atomic<uint8_t>* _offset_base;
void check_offset(size_t offset, const char* msg) const {
assert(offset < CardTable::card_size_in_words(),
"%s - offset: %zu, N_words: %u",
msg, offset, CardTable::card_size_in_words());
}
// Bounds checking accessors:
// For performance these have to devolve to array accesses in product builds.
inline uint8_t offset_array(Atomic<uint8_t>* addr) const;
@ -85,7 +78,6 @@ private:
}
public:
// Return the number of slots needed for an offset array
// that covers mem_region_words words.
static size_t compute_size(size_t mem_region_words);
@ -99,22 +91,14 @@ public:
// in the heap parameter.
G1BlockOffsetTable(MemRegion heap, G1RegionToSpaceMapper* storage);
static bool is_crossing_card_boundary(HeapWord* const obj_start,
HeapWord* const obj_end) {
HeapWord* cur_card_boundary = align_up_by_card_size(obj_start);
// strictly greater-than
return obj_end > cur_card_boundary;
}
inline static bool is_crossing_card_boundary(HeapWord* const obj_start,
HeapWord* const obj_end);
// Returns the address of the start of the block reaching into the card containing
// "addr".
inline HeapWord* block_start_reaching_into_card(const void* addr) const;
void update_for_block(HeapWord* blk_start, HeapWord* blk_end) {
if (is_crossing_card_boundary(blk_start, blk_end)) {
update_for_block_work(blk_start, blk_end);
}
}
inline void update_for_block(HeapWord* blk_start, HeapWord* blk_end);
};
#endif // SHARE_GC_G1_G1BLOCKOFFSETTABLE_HPP

View File

@ -27,10 +27,7 @@
#include "gc/g1/g1BlockOffsetTable.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "oops/oop.inline.hpp"
inline HeapWord* G1BlockOffsetTable::block_start_reaching_into_card(const void* addr) const {
assert(_reserved.contains(addr), "invalid address");
@ -70,4 +67,17 @@ inline HeapWord* G1BlockOffsetTable::addr_for_entry(const Atomic<uint8_t>* const
return result;
}
// Returns true iff the block [obj_start, obj_end) extends past the first card
// boundary at or above obj_start, i.e. it spans more than one card.
inline bool G1BlockOffsetTable::is_crossing_card_boundary(HeapWord* const obj_start,
                                                          HeapWord* const obj_end) {
  HeapWord* const next_boundary = align_up_by_card_size(obj_start);
  // Strictly greater-than: a block ending exactly on the boundary stays
  // within its card.
  return obj_end > next_boundary;
}
// Record the block [blk_start, blk_end) in the BOT. Only blocks that cross a
// card boundary need an entry; single-card blocks are ignored.
inline void G1BlockOffsetTable::update_for_block(HeapWord* blk_start, HeapWord* blk_end) {
  if (!is_crossing_card_boundary(blk_start, blk_end)) {
    // Block fits entirely within one card; nothing to record.
    return;
  }
  update_for_block_work(blk_start, blk_end);
}
#endif // SHARE_GC_G1_G1BLOCKOFFSETTABLE_INLINE_HPP

View File

@ -24,6 +24,7 @@
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "cppstdlib/new.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BatchedTask.hpp"
#include "gc/g1/g1CardSetMemory.hpp"
@ -519,8 +520,8 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
_max_concurrent_workers(0),
_region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_num_regions(), mtGC)),
_top_at_mark_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_num_regions(), mtGC)),
_top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_num_regions(), mtGC)),
_top_at_mark_starts(NEW_C_HEAP_ARRAY(Atomic<HeapWord*>, _g1h->max_num_regions(), mtGC)),
_top_at_rebuild_starts(NEW_C_HEAP_ARRAY(Atomic<HeapWord*>, _g1h->max_num_regions(), mtGC)),
_needs_remembered_set_rebuild(false)
{
assert(G1CGC_lock != nullptr, "CGC_lock must be initialized");
@ -564,6 +565,12 @@ void G1ConcurrentMark::fully_initialize() {
_tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats);
}
for (uint i = 0; i < _g1h->max_num_regions(); i++) {
::new (&_region_mark_stats[i]) G1RegionMarkStats{};
::new (&_top_at_mark_starts[i]) Atomic<HeapWord*>{};
::new (&_top_at_rebuild_starts[i]) Atomic<HeapWord*>{};
}
reset_at_marking_complete();
}
@ -576,7 +583,7 @@ PartialArrayStateManager* G1ConcurrentMark::partial_array_state_manager() const
}
void G1ConcurrentMark::reset() {
_has_aborted = false;
_has_aborted.store_relaxed(false);
reset_marking_for_restart();
@ -588,7 +595,7 @@ void G1ConcurrentMark::reset() {
uint max_num_regions = _g1h->max_num_regions();
for (uint i = 0; i < max_num_regions; i++) {
_top_at_rebuild_starts[i] = nullptr;
_top_at_rebuild_starts[i].store_relaxed(nullptr);
_region_mark_stats[i].clear();
}
@ -600,7 +607,7 @@ void G1ConcurrentMark::clear_statistics(G1HeapRegion* r) {
for (uint j = 0; j < _max_num_tasks; ++j) {
_tasks[j]->clear_mark_stats_cache(region_idx);
}
_top_at_rebuild_starts[region_idx] = nullptr;
_top_at_rebuild_starts[region_idx].store_relaxed(nullptr);
_region_mark_stats[region_idx].clear();
}
@ -636,7 +643,7 @@ void G1ConcurrentMark::reset_marking_for_restart() {
}
clear_has_overflown();
_finger = _heap.start();
_finger.store_relaxed(_heap.start());
for (uint i = 0; i < _max_num_tasks; ++i) {
_tasks[i]->reset_for_restart();
@ -657,14 +664,14 @@ void G1ConcurrentMark::set_concurrency(uint active_tasks) {
void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
set_concurrency(active_tasks);
_concurrent = concurrent;
_concurrent.store_relaxed(concurrent);
if (!concurrent) {
// At this point we should be in a STW phase, and completed marking.
assert_at_safepoint_on_vm_thread();
assert(out_of_regions(),
"only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
p2i(_finger), p2i(_heap.end()));
p2i(finger()), p2i(_heap.end()));
}
}
@ -695,8 +702,8 @@ void G1ConcurrentMark::reset_at_marking_complete() {
}
G1ConcurrentMark::~G1ConcurrentMark() {
FREE_C_HEAP_ARRAY(HeapWord*, _top_at_mark_starts);
FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
FREE_C_HEAP_ARRAY(Atomic<HeapWord*>, _top_at_mark_starts);
FREE_C_HEAP_ARRAY(Atomic<HeapWord*>, _top_at_rebuild_starts);
FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
// The G1ConcurrentMark instance is never freed.
ShouldNotReachHere();
@ -921,6 +928,8 @@ public:
bool do_heap_region(G1HeapRegion* r) override {
if (r->is_old_or_humongous() && !r->is_collection_set_candidate() && !r->in_collection_set()) {
_cm->update_top_at_mark_start(r);
} else {
_cm->reset_top_at_mark_start(r);
}
return false;
}
@ -1163,7 +1172,7 @@ void G1ConcurrentMark::concurrent_cycle_start() {
}
uint G1ConcurrentMark::completed_mark_cycles() const {
return AtomicAccess::load(&_completed_mark_cycles);
return _completed_mark_cycles.load_relaxed();
}
void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
@ -1172,7 +1181,7 @@ void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
_g1h->trace_heap_after_gc(_gc_tracer_cm);
if (mark_cycle_completed) {
AtomicAccess::inc(&_completed_mark_cycles, memory_order_relaxed);
_completed_mark_cycles.add_then_fetch(1u, memory_order_relaxed);
}
if (has_aborted()) {
@ -1186,7 +1195,7 @@ void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
}
void G1ConcurrentMark::mark_from_roots() {
_restart_for_overflow = false;
_restart_for_overflow.store_relaxed(false);
uint active_workers = calc_active_marking_workers();
@ -1355,7 +1364,7 @@ void G1ConcurrentMark::remark() {
}
} else {
// We overflowed. Restart concurrent marking.
_restart_for_overflow = true;
_restart_for_overflow.store_relaxed(true);
verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyLocation::RemarkOverflow);
@ -1784,44 +1793,45 @@ void G1ConcurrentMark::clear_bitmap_for_region(G1HeapRegion* hr) {
}
G1HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
// "checkpoint" the finger
HeapWord* finger = _finger;
// "Checkpoint" the finger.
HeapWord* local_finger = finger();
while (finger < _heap.end()) {
assert(_g1h->is_in_reserved(finger), "invariant");
while (local_finger < _heap.end()) {
assert(_g1h->is_in_reserved(local_finger), "invariant");
G1HeapRegion* curr_region = _g1h->heap_region_containing_or_null(finger);
G1HeapRegion* curr_region = _g1h->heap_region_containing_or_null(local_finger);
// Make sure that the reads below do not float before loading curr_region.
OrderAccess::loadload();
// Above heap_region_containing may return null as we always scan claim
// until the end of the heap. In this case, just jump to the next region.
HeapWord* end = curr_region != nullptr ? curr_region->end() : finger + G1HeapRegion::GrainWords;
HeapWord* end = curr_region != nullptr ? curr_region->end() : local_finger + G1HeapRegion::GrainWords;
// Is the gap between reading the finger and doing the CAS too long?
HeapWord* res = AtomicAccess::cmpxchg(&_finger, finger, end);
if (res == finger && curr_region != nullptr) {
// we succeeded
HeapWord* res = _finger.compare_exchange(local_finger, end);
if (res == local_finger && curr_region != nullptr) {
// We succeeded.
HeapWord* bottom = curr_region->bottom();
HeapWord* limit = top_at_mark_start(curr_region);
log_trace(gc, marking)("Claim region %u bottom " PTR_FORMAT " tams " PTR_FORMAT, curr_region->hrm_index(), p2i(curr_region->bottom()), p2i(top_at_mark_start(curr_region)));
// notice that _finger == end cannot be guaranteed here since,
// someone else might have moved the finger even further
assert(_finger >= end, "the finger should have moved forward");
// Notice that _finger == end cannot be guaranteed here since,
// someone else might have moved the finger even further.
assert(finger() >= end, "The finger should have moved forward");
if (limit > bottom) {
return curr_region;
} else {
assert(limit == bottom,
"the region limit should be at bottom");
"The region limit should be at bottom");
// We return null and the caller should try calling
// claim_region() again.
return nullptr;
}
} else {
assert(_finger > finger, "the finger should have moved forward");
// read it again
finger = _finger;
// Read the finger again.
HeapWord* next_finger = finger();
assert(next_finger > local_finger, "The finger should have moved forward " PTR_FORMAT " " PTR_FORMAT, p2i(local_finger), p2i(next_finger));
local_finger = next_finger;
}
}
@ -1957,7 +1967,7 @@ bool G1ConcurrentMark::concurrent_cycle_abort() {
void G1ConcurrentMark::abort_marking_threads() {
assert(!_root_regions.scan_in_progress(), "still doing root region scan");
_has_aborted = true;
_has_aborted.store_relaxed(true);
_first_overflow_barrier_sync.abort();
_second_overflow_barrier_sync.abort();
}

View File

@ -368,7 +368,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
// For grey objects
G1CMMarkStack _global_mark_stack; // Grey objects behind global finger
HeapWord* volatile _finger; // The global finger, region aligned,
Atomic<HeapWord*> _finger; // The global finger, region aligned,
// always pointing to the end of the
// last claimed region
@ -395,19 +395,19 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
WorkerThreadsBarrierSync _second_overflow_barrier_sync;
// Number of completed mark cycles.
volatile uint _completed_mark_cycles;
Atomic<uint> _completed_mark_cycles;
// This is set by any task, when an overflow on the global data
// structures is detected
volatile bool _has_overflown;
Atomic<bool> _has_overflown;
// True: marking is concurrent, false: we're in remark
volatile bool _concurrent;
Atomic<bool> _concurrent;
// Set at the end of a Full GC so that marking aborts
volatile bool _has_aborted;
Atomic<bool> _has_aborted;
// Used when remark aborts due to an overflow to indicate that
// another concurrent marking phase should start
volatile bool _restart_for_overflow;
Atomic<bool> _restart_for_overflow;
ConcurrentGCTimer* _gc_timer_cm;
@ -461,8 +461,8 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
void print_and_reset_taskqueue_stats();
HeapWord* finger() { return _finger; }
bool concurrent() { return _concurrent; }
HeapWord* finger() { return _finger.load_relaxed(); }
bool concurrent() { return _concurrent.load_relaxed(); }
uint active_tasks() { return _num_active_tasks; }
TaskTerminator* terminator() { return &_terminator; }
@ -487,7 +487,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
// to satisfy an allocation without doing a GC. This is fine, because all
// objects in those regions will be considered live anyway because of
// SATB guarantees (i.e. their TAMS will be equal to bottom).
bool out_of_regions() { return _finger >= _heap.end(); }
bool out_of_regions() { return finger() >= _heap.end(); }
// Returns the task with the given id
G1CMTask* task(uint id) {
@ -499,10 +499,10 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
// Access / manipulation of the overflow flag which is set to
// indicate that the global stack has overflown
bool has_overflown() { return _has_overflown; }
void set_has_overflown() { _has_overflown = true; }
void clear_has_overflown() { _has_overflown = false; }
bool restart_for_overflow() { return _restart_for_overflow; }
bool has_overflown() { return _has_overflown.load_relaxed(); }
void set_has_overflown() { _has_overflown.store_relaxed(true); }
void clear_has_overflown() { _has_overflown.store_relaxed(false); }
bool restart_for_overflow() { return _restart_for_overflow.load_relaxed(); }
// Methods to enter the two overflow sync barriers
void enter_first_sync_barrier(uint worker_id);
@ -516,12 +516,12 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
G1RegionMarkStats* _region_mark_stats;
// Top pointer for each region at the start of marking. Must be valid for all committed
// regions.
HeapWord* volatile* _top_at_mark_starts;
Atomic<HeapWord*>* _top_at_mark_starts;
// Top pointer for each region at the start of the rebuild remembered set process
// for regions whose remembered sets need to be rebuilt. A null for a given region
// means that this region does not need to be scanned during the rebuilding remembered
// set phase at all.
HeapWord* volatile* _top_at_rebuild_starts;
Atomic<HeapWord*>* _top_at_rebuild_starts;
// True when Remark pause selected regions for rebuilding.
bool _needs_remembered_set_rebuild;
public:
@ -679,7 +679,7 @@ public:
uint completed_mark_cycles() const;
bool has_aborted() { return _has_aborted; }
bool has_aborted() { return _has_aborted.load_relaxed(); }
void print_summary_info();

View File

@ -194,11 +194,11 @@ inline void G1CMTask::process_array_chunk(objArrayOop obj, size_t start, size_t
inline void G1ConcurrentMark::update_top_at_mark_start(G1HeapRegion* r) {
uint const region = r->hrm_index();
assert(region < _g1h->max_num_regions(), "Tried to access TAMS for region %u out of bounds", region);
_top_at_mark_starts[region] = r->top();
_top_at_mark_starts[region].store_relaxed(r->top());
}
inline void G1ConcurrentMark::reset_top_at_mark_start(G1HeapRegion* r) {
_top_at_mark_starts[r->hrm_index()] = r->bottom();
_top_at_mark_starts[r->hrm_index()].store_relaxed(r->bottom());
}
inline HeapWord* G1ConcurrentMark::top_at_mark_start(const G1HeapRegion* r) const {
@ -207,7 +207,7 @@ inline HeapWord* G1ConcurrentMark::top_at_mark_start(const G1HeapRegion* r) cons
inline HeapWord* G1ConcurrentMark::top_at_mark_start(uint region) const {
assert(region < _g1h->max_num_regions(), "Tried to access TARS for region %u out of bounds", region);
return _top_at_mark_starts[region];
return _top_at_mark_starts[region].load_relaxed();
}
inline bool G1ConcurrentMark::obj_allocated_since_mark_start(oop obj) const {
@ -217,7 +217,7 @@ inline bool G1ConcurrentMark::obj_allocated_since_mark_start(oop obj) const {
}
inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(G1HeapRegion* r) const {
return _top_at_rebuild_starts[r->hrm_index()];
return _top_at_rebuild_starts[r->hrm_index()].load_relaxed();
}
inline void G1ConcurrentMark::update_top_at_rebuild_start(G1HeapRegion* r) {
@ -225,10 +225,10 @@ inline void G1ConcurrentMark::update_top_at_rebuild_start(G1HeapRegion* r) {
uint const region = r->hrm_index();
assert(region < _g1h->max_num_regions(), "Tried to access TARS for region %u out of bounds", region);
assert(_top_at_rebuild_starts[region] == nullptr,
assert(top_at_rebuild_start(r) == nullptr,
"TARS for region %u has already been set to " PTR_FORMAT " should be null",
region, p2i(_top_at_rebuild_starts[region]));
_top_at_rebuild_starts[region] = r->top();
region, p2i(top_at_rebuild_start(r)));
_top_at_rebuild_starts[region].store_relaxed(r->top());
}
inline void G1CMTask::update_liveness(oop const obj, const size_t obj_size) {

View File

@ -44,6 +44,8 @@ struct G1RegionMarkStats {
Atomic<size_t> _live_words;
Atomic<size_t> _incoming_refs;
// Default-construct with both counters zeroed.
G1RegionMarkStats() : _live_words(0), _incoming_refs(0) { }
// Clear all members.
void clear() {
_live_words.store_relaxed(0);

View File

@ -497,10 +497,6 @@ class G1PostEvacuateCollectionSetCleanupTask2::ProcessEvacuationFailedRegionsTas
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1ConcurrentMark* cm = g1h->concurrent_mark();
HeapWord* top_at_mark_start = cm->top_at_mark_start(r);
assert(top_at_mark_start == r->bottom(), "TAMS must not have been set for region %u", r->hrm_index());
assert(cm->live_bytes(r->hrm_index()) == 0, "Marking live bytes must not be set for region %u", r->hrm_index());
// Concurrent mark does not mark through regions that we retain (they are root
// regions wrt to marking), so we must clear their mark data (tams, bitmap, ...)
// set eagerly or during evacuation failure.

View File

@ -30,8 +30,11 @@
#include "gc/parallel/psScavenge.hpp"
inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
const size_t eden_size = young_gen()->eden_space()->capacity_in_words();
return size < eden_size / 2;
const size_t max_young_gen_bytes = young_gen()->max_gen_size();
const size_t survivor_size_bytes = young_gen()->from_space()->capacity_in_bytes();
const size_t max_eden_size_bytes = max_young_gen_bytes - survivor_size_bytes * 2;
const size_t max_eden_size_words = max_eden_size_bytes / HeapWordSize;
return size < max_eden_size_words / 2;
}
inline bool ParallelScavengeHeap::is_in_young(const void* p) const {

View File

@ -78,12 +78,13 @@ bool PSVirtualSpace::shrink_by(size_t bytes) {
}
char* const base_addr = committed_high_addr() - bytes;
bool result = special() || os::uncommit_memory(base_addr, bytes);
if (result) {
_committed_high_addr -= bytes;
if (!special()) {
os::uncommit_memory(base_addr, bytes);
}
return result;
_committed_high_addr -= bytes;
return true;
}
#ifndef PRODUCT

View File

@ -169,9 +169,7 @@ void CardTable::resize_covered_region(MemRegion new_region) {
// Shrink.
MemRegion delta = MemRegion(new_committed.end(),
old_committed.word_size() - new_committed.word_size());
bool res = os::uncommit_memory((char*)delta.start(),
delta.byte_size());
assert(res, "uncommit should succeed");
os::uncommit_memory((char*)delta.start(), delta.byte_size());
}
log_trace(gc, barrier)("CardTable::resize_covered_region: ");

View File

@ -157,7 +157,7 @@
declare_toplevel_type(CollectedHeap*) \
declare_toplevel_type(ContiguousSpace*) \
declare_toplevel_type(HeapWord*) \
declare_toplevel_type(HeapWord* volatile) \
declare_toplevel_type(Atomic<HeapWord*>) \
declare_toplevel_type(MemRegion*) \
declare_toplevel_type(ThreadLocalAllocBuffer*) \
\

View File

@ -37,22 +37,22 @@ ShenandoahEvacOOMCounter::ShenandoahEvacOOMCounter() :
void ShenandoahEvacOOMCounter::decrement() {
assert(unmasked_count() > 0, "sanity");
// NOTE: It's ok to simply decrement, even with mask set, because unmasked value is positive.
AtomicAccess::dec(&_bits);
_bits.fetch_then_sub(1);
}
void ShenandoahEvacOOMCounter::clear() {
assert(unmasked_count() == 0, "sanity");
AtomicAccess::release_store_fence(&_bits, (jint)0);
_bits.release_store_fence((jint)0);
}
void ShenandoahEvacOOMCounter::set_oom_bit(bool decrement) {
jint threads_in_evac = AtomicAccess::load_acquire(&_bits);
jint threads_in_evac = _bits.load_acquire();
while (true) {
jint newval = decrement
? (threads_in_evac - 1) | OOM_MARKER_MASK
: threads_in_evac | OOM_MARKER_MASK;
jint other = AtomicAccess::cmpxchg(&_bits, threads_in_evac, newval);
jint other = _bits.compare_exchange(threads_in_evac, newval);
if (other == threads_in_evac) {
// Success: wait for other threads to get out of the protocol and return.
break;
@ -65,7 +65,7 @@ void ShenandoahEvacOOMCounter::set_oom_bit(bool decrement) {
bool ShenandoahEvacOOMCounter::try_increment()
{
jint threads_in_evac = AtomicAccess::load_acquire(&_bits);
jint threads_in_evac = _bits.load_acquire();
while (true) {
// Cannot enter evacuation if OOM_MARKER_MASK is set.
@ -73,7 +73,7 @@ bool ShenandoahEvacOOMCounter::try_increment()
return false;
}
jint other = AtomicAccess::cmpxchg(&_bits, threads_in_evac, threads_in_evac + 1);
jint other = _bits.compare_exchange(threads_in_evac, threads_in_evac + 1);
if (other == threads_in_evac) {
// Success: caller may safely enter evacuation
return true;

View File

@ -27,6 +27,7 @@
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "utilities/globalDefinitions.hpp"
@ -36,7 +37,7 @@
class ShenandoahEvacOOMCounter {
private:
// Combination of a 31-bit counter and 1-bit OOM marker.
volatile jint _bits;
Atomic<jint> _bits;
// This class must be at least a cache line in size to prevent false sharing.
shenandoah_padding_minus_size(0, sizeof(jint));

View File

@ -29,14 +29,13 @@
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "runtime/atomicAccess.hpp"
jint ShenandoahEvacOOMCounter::load_acquire() {
return AtomicAccess::load_acquire(&_bits);
return _bits.load_acquire();
}
jint ShenandoahEvacOOMCounter::unmasked_count() {
return AtomicAccess::load_acquire(&_bits) & ~OOM_MARKER_MASK;
return _bits.load_acquire() & ~OOM_MARKER_MASK;
}
void ShenandoahEvacOOMHandler::enter_evacuation(Thread* thr) {

View File

@ -1108,6 +1108,10 @@ void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
coalesce_and_fill_old_regions(false);
}
log_info(gc, cset)("Degenerated cycle complete, promotions reserved: %zu, promotions expended: %zu, failed count: %zu, failed bytes: %zu",
old_generation()->get_promoted_reserve(), old_generation()->get_promoted_expended(),
old_generation()->get_promotion_failed_count(), old_generation()->get_promotion_failed_words() * HeapWordSize);
}
void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
@ -1121,6 +1125,10 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
// throw off the heuristics.
entry_global_coalesce_and_fill();
}
log_info(gc, cset)("Concurrent cycle complete, promotions reserved: %zu, promotions expended: %zu, failed count: %zu, failed bytes: %zu",
old_generation()->get_promoted_reserve(), old_generation()->get_promoted_expended(),
old_generation()->get_promotion_failed_count(), old_generation()->get_promotion_failed_words() * HeapWordSize);
}
void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {

View File

@ -86,6 +86,7 @@
#include "nmt/memTracker.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@ -201,9 +202,9 @@ jint ShenandoahHeap::initialize() {
assert(num_min_regions <= _num_regions, "sanity");
_minimum_size = num_min_regions * reg_size_bytes;
_soft_max_size = clamp(SoftMaxHeapSize, min_capacity(), max_capacity());
_soft_max_size.store_relaxed(clamp(SoftMaxHeapSize, min_capacity(), max_capacity()));
_committed = _initial_size;
_committed.store_relaxed(_initial_size);
size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
@ -725,17 +726,17 @@ size_t ShenandoahHeap::used() const {
}
size_t ShenandoahHeap::committed() const {
return AtomicAccess::load(&_committed);
return _committed.load_relaxed();
}
void ShenandoahHeap::increase_committed(size_t bytes) {
shenandoah_assert_heaplocked_or_safepoint();
_committed += bytes;
_committed.fetch_then_add(bytes, memory_order_relaxed);
}
void ShenandoahHeap::decrease_committed(size_t bytes) {
shenandoah_assert_heaplocked_or_safepoint();
_committed -= bytes;
_committed.fetch_then_sub(bytes, memory_order_relaxed);
}
size_t ShenandoahHeap::capacity() const {
@ -747,7 +748,7 @@ size_t ShenandoahHeap::max_capacity() const {
}
size_t ShenandoahHeap::soft_max_capacity() const {
size_t v = AtomicAccess::load(&_soft_max_size);
size_t v = _soft_max_size.load_relaxed();
assert(min_capacity() <= v && v <= max_capacity(),
"Should be in bounds: %zu <= %zu <= %zu",
min_capacity(), v, max_capacity());
@ -758,7 +759,7 @@ void ShenandoahHeap::set_soft_max_capacity(size_t v) {
assert(min_capacity() <= v && v <= max_capacity(),
"Should be in bounds: %zu <= %zu <= %zu",
min_capacity(), v, max_capacity());
AtomicAccess::store(&_soft_max_size, v);
_soft_max_size.store_relaxed(v);
}
size_t ShenandoahHeap::min_capacity() const {
@ -1775,12 +1776,7 @@ void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_sta
void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
if (!_aux_bitmap_region_special) {
bool success = os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size());
if (!success) {
log_warning(gc)("Auxiliary marking bitmap uncommit failed: " PTR_FORMAT " (%zu bytes)",
p2i(_aux_bitmap_region.start()), _aux_bitmap_region.byte_size());
assert(false, "Auxiliary marking bitmap uncommit should always succeed");
}
os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size());
}
}
@ -1946,7 +1942,7 @@ private:
size_t const _stride;
shenandoah_padding(0);
volatile size_t _index;
Atomic<size_t> _index;
shenandoah_padding(1);
public:
@ -1959,8 +1955,8 @@ public:
size_t stride = _stride;
size_t max = _heap->num_regions();
while (AtomicAccess::load(&_index) < max) {
size_t cur = AtomicAccess::fetch_then_add(&_index, stride, memory_order_relaxed);
while (_index.load_relaxed() < max) {
size_t cur = _index.fetch_then_add(stride, memory_order_relaxed);
size_t start = cur;
size_t end = MIN2(cur + stride, max);
if (start >= max) break;
@ -2626,11 +2622,7 @@ void ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
size_t len = _bitmap_bytes_per_slice;
char* addr = (char*) _bitmap_region.start() + off;
bool success = os::uncommit_memory(addr, len);
if (!success) {
log_warning(gc)("Bitmap slice uncommit failed: " PTR_FORMAT " (%zu bytes)", p2i(addr), len);
assert(false, "Bitmap slice uncommit should always succeed");
}
os::uncommit_memory(addr, len);
}
void ShenandoahHeap::forbid_uncommit() {
@ -2712,11 +2704,11 @@ ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
_index(0) {}
void ShenandoahRegionIterator::reset() {
_index = 0;
_index.store_relaxed(0);
}
bool ShenandoahRegionIterator::has_next() const {
return _index < _heap->num_regions();
return _index.load_relaxed() < _heap->num_regions();
}
ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {

View File

@ -88,7 +88,7 @@ private:
ShenandoahHeap* _heap;
shenandoah_padding(0);
volatile size_t _index;
Atomic<size_t> _index;
shenandoah_padding(1);
// No implicit copying: iterators should be passed by reference to capture the state
@ -208,9 +208,9 @@ private:
size_t _initial_size;
size_t _minimum_size;
volatile size_t _soft_max_size;
Atomic<size_t> _soft_max_size;
shenandoah_padding(0);
volatile size_t _committed;
Atomic<size_t> _committed;
shenandoah_padding(1);
public:
@ -340,7 +340,7 @@ private:
ShenandoahSharedFlag _full_gc_move_in_progress;
ShenandoahSharedFlag _concurrent_strong_root_in_progress;
size_t _gc_no_progress_count;
Atomic<size_t> _gc_no_progress_count;
// This updates the singular, global gc state. This call must happen on a safepoint.
void set_gc_state_at_safepoint(uint mask, bool value);

View File

@ -49,7 +49,7 @@
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/prefetch.inline.hpp"
@ -61,7 +61,7 @@ inline ShenandoahHeap* ShenandoahHeap::heap() {
}
inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
size_t new_index = AtomicAccess::add(&_index, (size_t) 1, memory_order_relaxed);
size_t new_index = _index.add_then_fetch((size_t) 1, memory_order_relaxed);
// get_region() provides the bounds-check and returns null on OOB.
return _heap->get_region(new_index - 1);
}
@ -75,15 +75,15 @@ inline WorkerThreads* ShenandoahHeap::safepoint_workers() {
}
inline void ShenandoahHeap::notify_gc_progress() {
AtomicAccess::store(&_gc_no_progress_count, (size_t) 0);
_gc_no_progress_count.store_relaxed((size_t) 0);
}
inline void ShenandoahHeap::notify_gc_no_progress() {
AtomicAccess::inc(&_gc_no_progress_count);
_gc_no_progress_count.add_then_fetch((size_t) 1);
}
inline size_t ShenandoahHeap::get_gc_no_progress_count() const {
return AtomicAccess::load(&_gc_no_progress_count);
return _gc_no_progress_count.load_relaxed();
}
inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {

View File

@ -816,11 +816,7 @@ void ShenandoahHeapRegion::do_commit() {
void ShenandoahHeapRegion::do_uncommit() {
ShenandoahHeap* heap = ShenandoahHeap::heap();
if (!heap->is_heap_region_special()) {
bool success = os::uncommit_memory((char *) bottom(), RegionSizeBytes);
if (!success) {
log_warning(gc)("Region uncommit failed: " PTR_FORMAT " (%zu bytes)", p2i(bottom()), RegionSizeBytes);
assert(false, "Region uncommit should always succeed");
}
os::uncommit_memory((char *) bottom(), RegionSizeBytes);
}
if (!heap->is_bitmap_region_special()) {
heap->uncommit_bitmap_slice(this);

View File

@ -433,8 +433,8 @@ void ShenandoahNMethodTableSnapshot::parallel_nmethods_do(NMethodClosure *f) {
ShenandoahNMethod** const list = _list->list();
size_t max = (size_t)_limit;
while (_claimed < max) {
size_t cur = AtomicAccess::fetch_then_add(&_claimed, stride, memory_order_relaxed);
while (_claimed.load_relaxed() < max) {
size_t cur = _claimed.fetch_then_add(stride, memory_order_relaxed);
size_t start = cur;
size_t end = MIN2(cur + stride, max);
if (start >= max) break;
@ -457,8 +457,8 @@ void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl)
ShenandoahNMethod** list = _list->list();
size_t max = (size_t)_limit;
while (_claimed < max) {
size_t cur = AtomicAccess::fetch_then_add(&_claimed, stride, memory_order_relaxed);
while (_claimed.load_relaxed() < max) {
size_t cur = _claimed.fetch_then_add(stride, memory_order_relaxed);
size_t start = cur;
size_t end = MIN2(cur + stride, max);
if (start >= max) break;

View File

@ -30,6 +30,7 @@
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "utilities/growableArray.hpp"
// ShenandoahNMethod tuple records the internal locations of oop slots within reclocation stream in
@ -115,7 +116,7 @@ private:
int _limit;
shenandoah_padding(0);
volatile size_t _claimed;
Atomic<size_t> _claimed;
shenandoah_padding(1);
public:

View File

@ -36,7 +36,7 @@
nonstatic_field(ShenandoahHeap, _regions, ShenandoahHeapRegion**) \
nonstatic_field(ShenandoahHeap, _log_min_obj_alignment_in_bytes, int) \
nonstatic_field(ShenandoahHeap, _free_set, ShenandoahFreeSet*) \
volatile_nonstatic_field(ShenandoahHeap, _committed, size_t) \
volatile_nonstatic_field(ShenandoahHeap, _committed, Atomic<size_t>) \
static_field(ShenandoahHeapRegion, RegionSizeBytes, size_t) \
static_field(ShenandoahHeapRegion, RegionSizeBytesShift, size_t) \
nonstatic_field(ShenandoahHeapRegion, _state, Atomic<ShenandoahHeapRegion::RegionState>) \

View File

@ -87,8 +87,7 @@ E* MmapArrayAllocator<E>::allocate(size_t length, MemTag mem_tag) {
template <class E>
void MmapArrayAllocator<E>::free(E* addr, size_t length) {
bool result = os::release_memory((char*)addr, size_for(length));
assert(result, "Failed to release memory");
os::release_memory((char*)addr, size_for(length));
}
template <class E>

View File

@ -99,9 +99,7 @@ static char* reserve_memory_inner(char* requested_address,
}
// Base not aligned, retry.
if (!os::release_memory(base, size)) {
fatal("os::release_memory failed");
}
os::release_memory(base, size);
// Map using the requested alignment.
return os::reserve_memory_aligned(size, alignment, mem_tag, exec);
@ -231,13 +229,13 @@ ReservedSpace MemoryReserver::reserve(size_t size,
mem_tag);
}
bool MemoryReserver::release(const ReservedSpace& reserved) {
void MemoryReserver::release(const ReservedSpace& reserved) {
assert(reserved.is_reserved(), "Precondition");
if (reserved.special()) {
return os::release_memory_special(reserved.base(), reserved.size());
os::release_memory_special(reserved.base(), reserved.size());
} else {
return os::release_memory(reserved.base(), reserved.size());
os::release_memory(reserved.base(), reserved.size());
}
}
@ -266,9 +264,7 @@ static char* map_memory_to_file(char* requested_address,
// Base not aligned, retry.
if (!os::unmap_memory(base, size)) {
fatal("os::unmap_memory failed");
}
os::unmap_memory(base, size);
// Map using the requested alignment.
return os::map_memory_to_file_aligned(size, alignment, fd, mem_tag);

View File

@ -70,7 +70,7 @@ public:
MemTag mem_tag);
// Release reserved memory
static bool release(const ReservedSpace& reserved);
static void release(const ReservedSpace& reserved);
};
class CodeMemoryReserver : AllStatic {

View File

@ -190,10 +190,7 @@ void VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {
}
// Uncommit...
if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) {
// Note: this can actually happen, since uncommit may increase the number of mappings.
fatal("Failed to uncommit metaspace.");
}
os::uncommit_memory((char*)p, word_size * BytesPerWord);
ASAN_POISON_MEMORY_REGION((char*)p, word_size * BytesPerWord);

View File

@ -370,34 +370,22 @@ void VirtualSpace::shrink_by(size_t size) {
assert(middle_high_boundary() <= aligned_upper_new_high &&
aligned_upper_new_high + upper_needs <= upper_high_boundary(),
"must not shrink beyond region");
if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) {
DEBUG_ONLY(warning("os::uncommit_memory failed"));
return;
} else {
_upper_high -= upper_needs;
}
os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable);
_upper_high -= upper_needs;
}
if (middle_needs > 0) {
assert(lower_high_boundary() <= aligned_middle_new_high &&
aligned_middle_new_high + middle_needs <= middle_high_boundary(),
"must not shrink beyond region");
if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) {
DEBUG_ONLY(warning("os::uncommit_memory failed"));
return;
} else {
_middle_high -= middle_needs;
}
os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable);
_middle_high -= middle_needs;
}
if (lower_needs > 0) {
assert(low_boundary() <= aligned_lower_new_high &&
aligned_lower_new_high + lower_needs <= lower_high_boundary(),
"must not shrink beyond region");
if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) {
DEBUG_ONLY(warning("os::uncommit_memory failed"));
return;
} else {
_lower_high -= lower_needs;
}
os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable);
_lower_high -= lower_needs;
}
_high -= size;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -120,7 +120,6 @@ public:
virtual int Opcode() const;
virtual bool is_CFG() const { return true; }
virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
virtual bool depends_only_on_test() const { return false; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type* Value(PhaseGVN* phase) const;
virtual uint ideal_reg() const { return NotAMachineReg; }
@ -141,7 +140,6 @@ class RethrowNode : public Node {
virtual int Opcode() const;
virtual bool is_CFG() const { return true; }
virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
virtual bool depends_only_on_test() const { return false; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type* Value(PhaseGVN* phase) const;
virtual uint match_edge(uint idx) const;

View File

@ -207,6 +207,11 @@ bool ConstraintCastNode::higher_equal_types(PhaseGVN* phase, const Node* other)
return true;
}
Node* ConstraintCastNode::pin_node_under_control_impl() const {
assert(_dependency.is_floating(), "already pinned");
return make_cast_for_type(in(0), in(1), bottom_type(), _dependency.with_pinned_dependency(), _extra_types);
}
#ifndef PRODUCT
void ConstraintCastNode::dump_spec(outputStream *st) const {
TypeNode::dump_spec(st);
@ -277,12 +282,9 @@ void CastIINode::dump_spec(outputStream* st) const {
}
#endif
CastIINode* CastIINode::pin_array_access_node() const {
CastIINode* CastIINode::pin_node_under_control_impl() const {
assert(_dependency.is_floating(), "already pinned");
if (has_range_check()) {
return new CastIINode(in(0), in(1), bottom_type(), _dependency.with_pinned_dependency(), has_range_check());
}
return nullptr;
return new CastIINode(in(0), in(1), bottom_type(), _dependency.with_pinned_dependency(), _range_check_dependency, _extra_types);
}
void CastIINode::remove_range_check_cast(Compile* C) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -166,8 +166,6 @@ protected:
virtual int Opcode() const;
virtual uint ideal_reg() const = 0;
bool carry_dependency() const { return !_dependency.cmp(DependencyType::FloatingNarrowing); }
// A cast node depends_only_on_test if and only if it is floating
virtual bool depends_only_on_test() const { return _dependency.is_floating(); }
const DependencyType& dependency() const { return _dependency; }
TypeNode* dominating_cast(PhaseGVN* gvn, PhaseTransform* pt) const;
static Node* make_cast_for_basic_type(Node* c, Node* n, const Type* t, const DependencyType& dependency, BasicType bt);
@ -191,6 +189,12 @@ protected:
const Type* extra_type_at(int i) const {
return _extra_types->field_at(i);
}
protected:
virtual bool depends_only_on_test_impl() const { return _dependency.is_floating(); }
private:
virtual Node* pin_node_under_control_impl() const;
};
//------------------------------CastIINode-------------------------------------
@ -222,13 +226,15 @@ class CastIINode: public ConstraintCastNode {
#endif
}
CastIINode* pin_array_access_node() const;
CastIINode* make_with(Node* parent, const TypeInteger* type, const DependencyType& dependency) const;
void remove_range_check_cast(Compile* C);
#ifndef PRODUCT
virtual void dump_spec(outputStream* st) const;
#endif
private:
virtual CastIINode* pin_node_under_control_impl() const;
};
class CastLLNode: public ConstraintCastNode {
@ -320,8 +326,10 @@ class CheckCastPPNode: public ConstraintCastNode {
virtual const Type* Value(PhaseGVN* phase) const;
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegP; }
bool depends_only_on_test() const { return !type()->isa_rawptr() && ConstraintCastNode::depends_only_on_test(); }
};
private:
virtual bool depends_only_on_test_impl() const { return !type()->isa_rawptr() && ConstraintCastNode::depends_only_on_test_impl(); }
};
//------------------------------CastX2PNode-------------------------------------
@ -349,8 +357,10 @@ class CastP2XNode : public Node {
virtual Node* Identity(PhaseGVN* phase);
virtual uint ideal_reg() const { return Op_RegX; }
virtual const Type *bottom_type() const { return TypeX_X; }
private:
// Return false to keep node from moving away from an associated card mark.
virtual bool depends_only_on_test() const { return false; }
virtual bool depends_only_on_test_impl() const { return false; }
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -124,7 +124,6 @@ public:
virtual bool pinned() const { return (const Node*)in(0) == this; }
virtual bool is_CFG() const { return true; }
virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
virtual bool depends_only_on_test() const { return false; }
virtual const Type* bottom_type() const { return Type::CONTROL; }
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node* Identity(PhaseGVN* phase);
@ -287,7 +286,6 @@ public:
virtual bool is_CFG() const { return true; }
virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
virtual const Node *is_block_proj() const { return this; }
virtual bool depends_only_on_test() const { return false; }
virtual const Type *bottom_type() const { return Type::CONTROL; }
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node* Identity(PhaseGVN* phase);
@ -462,7 +460,7 @@ public:
Node* fold_compares(PhaseIterGVN* phase);
static Node* up_one_dom(Node* curr, bool linear_only = false);
bool is_zero_trip_guard() const;
Node* dominated_by(Node* prev_dom, PhaseIterGVN* igvn, bool pin_array_access_nodes);
Node* dominated_by(Node* prev_dom, PhaseIterGVN* igvn, bool prev_dom_not_imply_this);
ProjNode* uncommon_trap_proj(CallStaticJavaNode*& call, Deoptimization::DeoptReason reason = Deoptimization::Reason_none) const;
// Takes the type of val and filters it through the test represented
@ -565,7 +563,7 @@ public:
return in(0)->as_If()->proj_out(1 - _con)->as_IfProj();
}
void pin_array_access_nodes(PhaseIterGVN* igvn);
void pin_dependent_nodes(PhaseIterGVN* igvn);
protected:
// Type of If input when this branch is always taken

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,15 +35,33 @@
// Optimization - Graph Style
class DivModIntegerNode : public Node {
private:
bool _pinned;
protected:
DivModIntegerNode(Node* c, Node* dividend, Node* divisor) : Node(c, dividend, divisor), _pinned(false) {}
private:
virtual uint size_of() const override { return sizeof(DivModIntegerNode); }
virtual uint hash() const override { return Node::hash() + _pinned; }
virtual bool cmp(const Node& o) const override { return Node::cmp(o) && _pinned == static_cast<const DivModIntegerNode&>(o)._pinned; }
virtual bool depends_only_on_test_impl() const override { return !_pinned; }
virtual DivModIntegerNode* pin_node_under_control_impl() const override {
DivModIntegerNode* res = static_cast<DivModIntegerNode*>(clone());
res->_pinned = true;
return res;
}
};
//------------------------------DivINode---------------------------------------
// Integer division
// Note: this is division as defined by JVMS, i.e., MinInt/-1 == MinInt.
// On processors which don't naturally support this special case (e.g., x86),
// the matcher or runtime system must take care of this.
class DivINode : public Node {
class DivINode : public DivModIntegerNode {
public:
DivINode( Node *c, Node *dividend, Node *divisor ) : Node(c, dividend, divisor ) {}
DivINode(Node* c, Node* dividend, Node* divisor) : DivModIntegerNode(c, dividend, divisor) {}
virtual int Opcode() const;
virtual Node* Identity(PhaseGVN* phase);
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@ -54,9 +72,9 @@ public:
//------------------------------DivLNode---------------------------------------
// Long division
class DivLNode : public Node {
class DivLNode : public DivModIntegerNode {
public:
DivLNode( Node *c, Node *dividend, Node *divisor ) : Node(c, dividend, divisor ) {}
DivLNode(Node* c, Node* dividend, Node* divisor) : DivModIntegerNode(c, dividend, divisor) {}
virtual int Opcode() const;
virtual Node* Identity(PhaseGVN* phase);
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@ -107,9 +125,9 @@ public:
//------------------------------UDivINode---------------------------------------
// Unsigned integer division
class UDivINode : public Node {
class UDivINode : public DivModIntegerNode {
public:
UDivINode( Node *c, Node *dividend, Node *divisor ) : Node(c, dividend, divisor ) {}
UDivINode(Node* c, Node* dividend, Node* divisor) : DivModIntegerNode(c, dividend, divisor) {}
virtual int Opcode() const;
virtual Node* Identity(PhaseGVN* phase);
virtual const Type* Value(PhaseGVN* phase) const;
@ -120,9 +138,9 @@ public:
//------------------------------UDivLNode---------------------------------------
// Unsigned long division
class UDivLNode : public Node {
class UDivLNode : public DivModIntegerNode {
public:
UDivLNode( Node *c, Node *dividend, Node *divisor ) : Node(c, dividend, divisor ) {}
UDivLNode(Node* c, Node* dividend, Node* divisor) : DivModIntegerNode(c, dividend, divisor) {}
virtual int Opcode() const;
virtual Node* Identity(PhaseGVN* phase);
virtual const Type* Value(PhaseGVN* phase) const;
@ -133,9 +151,9 @@ public:
//------------------------------ModINode---------------------------------------
// Integer modulus
class ModINode : public Node {
class ModINode : public DivModIntegerNode {
public:
ModINode( Node *c, Node *in1, Node *in2 ) : Node(c,in1, in2) {}
ModINode(Node* c, Node* in1, Node* in2) : DivModIntegerNode(c, in1, in2) {}
virtual int Opcode() const;
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@ -145,9 +163,9 @@ public:
//------------------------------ModLNode---------------------------------------
// Long modulus
class ModLNode : public Node {
class ModLNode : public DivModIntegerNode {
public:
ModLNode( Node *c, Node *in1, Node *in2 ) : Node(c,in1, in2) {}
ModLNode(Node* c, Node* in1, Node* in2) : DivModIntegerNode(c, in1, in2) {}
virtual int Opcode() const;
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@ -199,9 +217,9 @@ public:
//------------------------------UModINode---------------------------------------
// Unsigned integer modulus
class UModINode : public Node {
class UModINode : public DivModIntegerNode {
public:
UModINode( Node *c, Node *in1, Node *in2 ) : Node(c,in1, in2) {}
UModINode(Node* c, Node* in1, Node* in2) : DivModIntegerNode(c, in1, in2) {}
virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *bottom_type() const { return TypeInt::INT; }
@ -211,9 +229,9 @@ public:
//------------------------------UModLNode---------------------------------------
// Unsigned long modulus
class UModLNode : public Node {
class UModLNode : public DivModIntegerNode {
public:
UModLNode( Node *c, Node *in1, Node *in2 ) : Node(c,in1, in2) {}
UModLNode(Node* c, Node* in1, Node* in2) : DivModIntegerNode(c, in1, in2) {}
virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *bottom_type() const { return TypeLong::LONG; }
@ -243,6 +261,9 @@ public:
ProjNode* div_proj() { return proj_out_or_null(div_proj_num); }
ProjNode* mod_proj() { return proj_out_or_null(mod_proj_num); }
private:
virtual bool depends_only_on_test() const { return false; }
};
//------------------------------DivModINode---------------------------------------

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -604,7 +604,7 @@ static void adjust_check(IfProjNode* proj, Node* range, Node* index,
// at the lowest/nearest dominating check in the graph. To ensure that these Loads/Casts do not float above any of the
// dominating checks (even when the lowest dominating check is later replaced by yet another dominating check), we
// need to pin them at the lowest dominating check.
proj->pin_array_access_nodes(igvn);
proj->pin_dependent_nodes(igvn);
}
//------------------------------up_one_dom-------------------------------------
@ -1539,7 +1539,7 @@ Node* IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
//------------------------------dominated_by-----------------------------------
Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN* igvn, bool pin_array_access_nodes) {
Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN* igvn, bool prev_dom_not_imply_this) {
#ifndef PRODUCT
if (TraceIterativeGVN) {
tty->print(" Removing IfNode: "); this->dump();
@ -1570,20 +1570,16 @@ Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN* igvn, bool pin_array_ac
// Loop ends when projection has no more uses.
for (DUIterator_Last jmin, j = ifp->last_outs(jmin); j >= jmin; --j) {
Node* s = ifp->last_out(j); // Get child of IfTrue/IfFalse
if (s->depends_only_on_test() && igvn->no_dependent_zero_check(s)) {
// For control producers.
// Do not rewire Div and Mod nodes which could have a zero divisor to avoid skipping their zero check.
if (s->depends_only_on_test()) {
// For control producers
igvn->replace_input_of(s, 0, data_target); // Move child to data-target
if (pin_array_access_nodes && data_target != top) {
// As a result of range check smearing, Loads and range check Cast nodes that are control dependent on this
// range check (that is about to be removed) now depend on multiple dominating range checks. After the removal
// of this range check, these control dependent nodes end up at the lowest/nearest dominating check in the
// graph. To ensure that these Loads/Casts do not float above any of the dominating checks (even when the
// lowest dominating check is later replaced by yet another dominating check), we need to pin them at the
// lowest dominating check.
Node* clone = s->pin_array_access_node();
if (prev_dom_not_imply_this && data_target != top) {
// If prev_dom_not_imply_this, s now depends on multiple tests with prev_dom being the
// lowest dominating one. As a result, it must be pinned there. Otherwise, it can be
// incorrectly moved to a dominating test equivalent to the lowest one here.
Node* clone = s->pin_node_under_control();
if (clone != nullptr) {
clone = igvn->transform(clone);
igvn->register_new_node_with_optimizer(clone, s);
igvn->replace_node(s, clone);
}
}
@ -1831,16 +1827,15 @@ bool IfNode::is_zero_trip_guard() const {
return false;
}
void IfProjNode::pin_array_access_nodes(PhaseIterGVN* igvn) {
void IfProjNode::pin_dependent_nodes(PhaseIterGVN* igvn) {
for (DUIterator i = outs(); has_out(i); i++) {
Node* u = out(i);
if (!u->depends_only_on_test()) {
continue;
}
Node* clone = u->pin_array_access_node();
Node* clone = u->pin_node_under_control();
if (clone != nullptr) {
clone = igvn->transform(clone);
assert(clone != u, "shouldn't common");
igvn->register_new_node_with_optimizer(clone, u);
igvn->replace_node(u, clone);
--i;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,6 +41,9 @@ class PartialSubtypeCheckNode : public Node {
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeRawPtr::BOTTOM; }
virtual uint ideal_reg() const { return Op_RegP; }
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
//------------------------------StrIntrinsic-------------------------------
@ -74,13 +77,15 @@ class StrIntrinsicNode: public Node {
Node(control, char_array_mem, s1, s2), _encoding(encoding) {
}
virtual bool depends_only_on_test() const { return false; }
virtual const TypePtr* adr_type() const { return TypeAryPtr::BYTES; }
virtual uint match_edge(uint idx) const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
virtual const Type* Value(PhaseGVN* phase) const;
ArgEncoding encoding() const { return _encoding; }
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
//------------------------------StrComp-------------------------------------
@ -172,13 +177,15 @@ class VectorizedHashCodeNode: public Node {
VectorizedHashCodeNode(Node* control, Node* ary_mem, Node* arg1, Node* cnt1, Node* result, Node* basic_type)
: Node(control, ary_mem, arg1, cnt1, result, basic_type) {};
virtual int Opcode() const;
virtual bool depends_only_on_test() const { return false; }
virtual const Type* bottom_type() const { return TypeInt::INT; }
virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
virtual uint match_edge(uint idx) const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
virtual const Type* Value(PhaseGVN* phase) const;
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
//------------------------------EncodeISOArray--------------------------------
@ -191,7 +198,6 @@ class EncodeISOArrayNode: public Node {
bool is_ascii() { return _ascii; }
virtual int Opcode() const;
virtual bool depends_only_on_test() const { return false; }
virtual const Type* bottom_type() const { return TypeInt::INT; }
virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
virtual uint match_edge(uint idx) const;
@ -203,6 +209,9 @@ class EncodeISOArrayNode: public Node {
virtual bool cmp(const Node& n) const {
return Node::cmp(n) && _ascii == ((EncodeISOArrayNode&)n).is_ascii();
}
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
//-------------------------------DigitNode----------------------------------------

View File

@ -147,6 +147,9 @@ public:
virtual int Opcode() const;
virtual const Type* Value(PhaseGVN* phase) const { return TypeInt::CC; }
const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
@ -169,6 +172,8 @@ public:
virtual const Type* Value(PhaseGVN* phase) const { return TypeInt::CC; }
const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
#endif // SHARE_OPTO_LOCKNODE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -334,7 +334,7 @@ class Invariance : public StackObj {
// loop, it was marked invariant but n is only invariant if
// it depends only on that test. Otherwise, unless that test
// is out of the loop, it's not invariant.
if (n->is_CFG() || (n->depends_only_on_test() && _phase->igvn().no_dependent_zero_check(n)) || n->in(0) == nullptr || !_phase->is_member(_lpt, n->in(0))) {
if (n->is_CFG() || n->in(0) == nullptr || n->depends_only_on_test() || !_phase->is_member(_lpt, n->in(0))) {
_invariant.set(n->_idx); // I am a invariant too
}
}

View File

@ -1676,8 +1676,8 @@ public:
Node *has_local_phi_input( Node *n );
// Mark an IfNode as being dominated by a prior test,
// without actually altering the CFG (and hence IDOM info).
void dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip = false, bool pin_array_access_nodes = false);
void rewire_safe_outputs_to_dominator(Node* source, Node* dominator, bool pin_array_access_nodes);
void dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip = false, bool prev_dom_not_imply_this = false);
void rewire_safe_outputs_to_dominator(Node* source, Node* dominator, bool dominator_not_imply_source);
// Split Node 'n' through merge point
RegionNode* split_thru_region(Node* n, RegionNode* region);
@ -1960,7 +1960,7 @@ public:
bool can_move_to_inner_loop(Node* n, LoopNode* n_loop, Node* x);
void pin_array_access_nodes_dependent_on(Node* ctrl);
void pin_nodes_dependent_on(Node* ctrl, bool old_iff_is_rangecheck);
Node* ensure_node_and_inputs_are_above_pre_end(CountedLoopEndNode* pre_end, Node* node);

View File

@ -345,7 +345,7 @@ bool PhaseIdealLoop::loop_phi_backedge_type_contains_zero(const Node* phi_diviso
// Replace the dominated test with an obvious true or false. Place it on the
// IGVN worklist for later cleanup. Move control-dependent data Nodes on the
// live path up to the dominating control.
void PhaseIdealLoop::dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip, bool pin_array_access_nodes) {
void PhaseIdealLoop::dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip, bool prevdom_not_imply_this) {
if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }
// prevdom is the dominating projection of the dominating test.
@ -386,26 +386,25 @@ void PhaseIdealLoop::dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip, b
return;
}
rewire_safe_outputs_to_dominator(dp, prevdom, pin_array_access_nodes);
rewire_safe_outputs_to_dominator(dp, prevdom, prevdom_not_imply_this);
}
void PhaseIdealLoop::rewire_safe_outputs_to_dominator(Node* source, Node* dominator, const bool pin_array_access_nodes) {
void PhaseIdealLoop::rewire_safe_outputs_to_dominator(Node* source, Node* dominator, const bool dominator_not_imply_source) {
IdealLoopTree* old_loop = get_loop(source);
for (DUIterator_Fast imax, i = source->fast_outs(imax); i < imax; i++) {
Node* out = source->fast_out(i); // Control-dependent node
// Do not rewire Div and Mod nodes which could have a zero divisor to avoid skipping their zero check.
if (out->depends_only_on_test() && _igvn.no_dependent_zero_check(out)) {
if (out->depends_only_on_test()) {
assert(out->in(0) == source, "must be control dependent on source");
_igvn.replace_input_of(out, 0, dominator);
if (pin_array_access_nodes) {
if (dominator_not_imply_source) {
// Because of Loop Predication, Loads and range check Cast nodes that are control dependent on this range
// check (that is about to be removed) now depend on multiple dominating Hoisted Check Predicates. After the
// removal of this range check, these control dependent nodes end up at the lowest/nearest dominating predicate
// in the graph. To ensure that these Loads/Casts do not float above any of the dominating checks (even when the
// lowest dominating check is later replaced by yet another dominating check), we need to pin them at the lowest
// dominating check.
Node* clone = out->pin_array_access_node();
Node* clone = out->pin_node_under_control();
if (clone != nullptr) {
clone = _igvn.register_new_node_with_optimizer(clone, out);
_igvn.replace_node(out, clone);
@ -1644,7 +1643,7 @@ bool PhaseIdealLoop::try_merge_identical_ifs(Node* n) {
void PhaseIdealLoop::push_pinned_nodes_thru_region(IfNode* dom_if, Node* region) {
for (DUIterator i = region->outs(); region->has_out(i); i++) {
Node* u = region->out(i);
if (!has_ctrl(u) || u->is_Phi() || !u->depends_only_on_test() || !_igvn.no_dependent_zero_check(u)) {
if (!has_ctrl(u) || u->is_Phi() || !u->depends_only_on_test()) {
continue;
}
assert(u->in(0) == region, "not a control dependent node?");
@ -1724,11 +1723,11 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
Node* outside_ctrl = place_outside_loop(n_ctrl, loop_ctrl);
if (!would_sink_below_pre_loop_exit(loop_ctrl, outside_ctrl)) {
if (n->depends_only_on_test()) {
Node* pinned_clone = n->pin_array_access_node();
// If this node depends_only_on_test, it will be rewired to a control input that is not
// the correct test. As a result, it must be pinned otherwise it can be incorrectly
// rewired to a dominating test equivalent to the new control.
Node* pinned_clone = n->pin_node_under_control();
if (pinned_clone != nullptr) {
// Pin array access nodes: if this is an array load, it's going to be dependent on a condition that's not a
// range check for that access. If that condition is replaced by an identical dominating one, then an
// unpinned load would risk floating above its range check.
register_new_node(pinned_clone, n_ctrl);
maybe_pinned_n = pinned_clone;
_igvn.replace_node(n, pinned_clone);
@ -1754,11 +1753,11 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
Node* u = n->last_out(j); // Clone private computation per use
_igvn.rehash_node_delayed(u);
Node* x = nullptr;
if (n->depends_only_on_test()) {
// Pin array access nodes: if this is an array load, it's going to be dependent on a condition that's not a
// range check for that access. If that condition is replaced by an identical dominating one, then an
// unpinned load would risk floating above its range check.
x = n->pin_array_access_node();
if (n->in(0) != nullptr && n->depends_only_on_test()) {
// If this node depends_only_on_test, it will be rewired to a control input that is not
// the correct test. As a result, it must be pinned otherwise it can be incorrectly
// rewired to a dominating test equivalent to the new control.
x = n->pin_node_under_control();
}
if (x == nullptr) {
x = n->clone();
@ -2328,14 +2327,12 @@ void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
// We notify all uses of old, including use, and the indirect uses,
// that may now be optimized because we have replaced old with phi.
_igvn.add_users_to_worklist(old);
if (idx == 0 &&
use->depends_only_on_test()) {
Node* pinned_clone = use->pin_array_access_node();
if (idx == 0 && use->depends_only_on_test()) {
// If this node depends_only_on_test, it will be rewired to a control input that is not the
// correct test. As a result, it must be pinned otherwise it can be incorrectly rewired to
// a dominating test equivalent to the new control.
Node* pinned_clone = use->pin_node_under_control();
if (pinned_clone != nullptr) {
// Pin array access nodes: control is updated here to a region. If, after some transformations, only one path
// into the region is left, an array load could become dependent on a condition that's not a range check for
// that access. If that condition is replaced by an identical dominating one, then an unpinned load would risk
// floating above its range check.
pinned_clone->set_req(0, phi);
register_new_node_with_ctrl_of(pinned_clone, use);
_igvn.replace_node(use, pinned_clone);
@ -4102,11 +4099,9 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
Node* n_clone = old_new[n->_idx];
if (n_clone->depends_only_on_test()) {
// Pin array access nodes: control is updated here to the loop head. If, after some transformations, the
// backedge is removed, an array load could become dependent on a condition that's not a range check for that
// access. If that condition is replaced by an identical dominating one, then an unpinned load would risk
// floating above its range check.
Node* pinned_clone = n_clone->pin_array_access_node();
// If this node depends_only_on_test, it will be rewired to the loop head, which is not the
// correct test.
Node* pinned_clone = n_clone->pin_node_under_control();
if (pinned_clone != nullptr) {
register_new_node_with_ctrl_of(pinned_clone, n_clone);
old_new.map(n->_idx, pinned_clone);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, Alibaba Group Holding Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -900,7 +900,7 @@ void LoadNode::dump_spec(outputStream *st) const {
// standard dump does this in Verbose and WizardMode
st->print(" #"); _type->dump_on(st);
}
if (!depends_only_on_test()) {
if (in(0) != nullptr && !depends_only_on_test()) {
st->print(" (does not depend only on test, ");
if (control_dependency() == UnknownControl) {
st->print("unknown control");
@ -1025,14 +1025,6 @@ static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp,
return false;
}
// Produce a pinned clone of this load when it is an array access (the only
// loads whose control input encodes a real range-check dependency); for every
// other address type report nullptr, meaning no pin is required.
LoadNode* LoadNode::pin_array_access_node() const {
  const TypePtr* address_type = adr_type();
  const bool is_array_access = (address_type != nullptr) && (address_type->isa_aryptr() != nullptr);
  return is_array_access ? clone_pinned() : nullptr;
}
// Is the value loaded previously stored by an arraycopy? If so return
// a load node that reads from the source array so we may be able to
// optimize out the ArrayCopy node later.
@ -2585,6 +2577,21 @@ LoadNode* LoadNode::clone_pinned() const {
return ld;
}
// Pin a LoadNode if it carries a dependency on its control input. A LoadNode
// does not always depend on its control input: for example, a load used only
// outside a loop but scheduled inside it may be cloned per use so that all the
// clones can be scheduled outside the loop; a control input is then added at
// the loop exit purely to keep the clones from being GVN-ed back together. In
// such cases there is no real dependency on the control input and no pin is
// needed, which is reported by returning nullptr.
LoadNode* LoadNode::pin_node_under_control_impl() const {
  const TypePtr* address_type = this->adr_type();
  if (address_type == nullptr || address_type->isa_aryptr() == nullptr) {
    // Only array accesses have dependencies on their control input
    return nullptr;
  }
  return clone_pinned();
}
//------------------------------Value------------------------------------------
const Type* LoadNKlassNode::Value(PhaseGVN* phase) const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, Alibaba Group Holding Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -299,8 +299,6 @@ public:
bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; }
bool has_pinned_control_dependency() const { return _control_dependency == Pinned; }
LoadNode* pin_array_access_node() const;
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
@ -314,6 +312,7 @@ protected:
Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const;
private:
// depends_only_on_test is almost always true, and needs to be almost always
// true to enable key hoisting & commoning optimizations. However, for the
// special case of RawPtr loads from TLS top & end, and other loads performed by
@ -323,11 +322,12 @@ protected:
// which produce results (new raw memory state) inside of loops preventing all
// manner of other optimizations). Basically, it's ugly but so is the alternative.
// See comment in macro.cpp, around line 125 expand_allocate_common().
virtual bool depends_only_on_test() const {
virtual bool depends_only_on_test_impl() const {
return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest;
}
LoadNode* clone_pinned() const;
virtual LoadNode* pin_node_under_control_impl() const;
};
//------------------------------LoadBNode--------------------------------------
@ -534,7 +534,6 @@ public:
virtual int Opcode() const;
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node* Identity(PhaseGVN* phase);
virtual bool depends_only_on_test() const { return true; }
// Polymorphic factory method:
static Node* make(PhaseGVN& gvn, Node* mem, Node* adr, const TypePtr* at,
@ -563,7 +562,6 @@ public:
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node* Identity(PhaseGVN* phase);
virtual bool depends_only_on_test() const { return true; }
};
@ -580,7 +578,6 @@ private:
virtual uint size_of() const { return sizeof(*this); }
protected:
virtual bool cmp( const Node &n ) const;
virtual bool depends_only_on_test() const { return false; }
Node *Ideal_masked_input (PhaseGVN *phase, uint mask);
Node* Ideal_sign_extended_input(PhaseGVN* phase, int num_rejected_bits);
@ -660,6 +657,9 @@ public:
Node* convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Type* vt);
MemBarNode* trailing_membar() const;
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
//------------------------------StoreBNode-------------------------------------
@ -816,7 +816,6 @@ private:
#endif // ASSERT
public:
LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
virtual bool depends_only_on_test() const { return false; }
virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
virtual const Type *bottom_type() const { return _type; }
@ -829,6 +828,9 @@ public:
uint8_t barrier_data() { return _barrier_data; }
void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; }
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
class LoadStoreConditionalNode : public LoadStoreNode {
@ -1115,6 +1117,9 @@ public:
// Return allocation input memory edge if it is different instance
// or itself if it is the one we are looking for.
static bool step_through(Node** np, uint instance_id, PhaseValues* phase);
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
//------------------------------MemBar-----------------------------------------
@ -1677,6 +1682,9 @@ public:
virtual uint match_edge(uint idx) const { return (idx == 2); }
virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
virtual const Type *bottom_type() const { return Type::MEMORY; }
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
// cachewb pre sync node for ensuring that writebacks are serialised
@ -1689,6 +1697,9 @@ public:
virtual uint match_edge(uint idx) const { return false; }
virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
virtual const Type *bottom_type() const { return Type::MEMORY; }
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
// cachewb pre sync node for ensuring that writebacks are serialised
@ -1701,6 +1712,9 @@ public:
virtual uint match_edge(uint idx) const { return false; }
virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
virtual const Type *bottom_type() const { return Type::MEMORY; }
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
//------------------------------Prefetch---------------------------------------
@ -1713,6 +1727,9 @@ public:
virtual uint ideal_reg() const { return NotAMachineReg; }
virtual uint match_edge(uint idx) const { return idx==2; }
virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
#endif // SHARE_OPTO_MEMNODE_HPP

View File

@ -1238,20 +1238,26 @@ Node* RShiftNode::IdentityIL(PhaseGVN* phase, BasicType bt) {
return in(1);
}
// Check for useless sign-masking
int lshift_count = 0;
if (in(1)->Opcode() == Op_LShift(bt) &&
in(1)->req() == 3 &&
in(1)->in(2) == in(2)) {
// Compare shift counts by value, not by node pointer, to also match a not-yet-normalized
// negative constant (e.g. -1 vs 31)
const_shift_count(phase, in(1), &lshift_count)) {
count &= bits_per_java_integer(bt) - 1; // semantics of Java shifts
// Compute masks for which this shifting doesn't change
jlong lo = (CONST64(-1) << (bits_per_java_integer(bt) - ((uint)count)-1)); // FFFF8000
jlong hi = ~lo; // 00007FFF
const TypeInteger* t11 = phase->type(in(1)->in(1))->isa_integer(bt);
if (t11 == nullptr) {
return this;
}
// Does actual value fit inside of mask?
if (lo <= t11->lo_as_long() && t11->hi_as_long() <= hi) {
return in(1)->in(1); // Then shifting is a nop
lshift_count &= bits_per_java_integer(bt) - 1;
if (count == lshift_count) {
// Compute masks for which this shifting doesn't change
jlong lo = (CONST64(-1) << (bits_per_java_integer(bt) - ((uint)count)-1)); // FFFF8000
jlong hi = ~lo; // 00007FFF
const TypeInteger* t11 = phase->type(in(1)->in(1))->isa_integer(bt);
if (t11 == nullptr) {
return this;
}
// Does actual value fit inside of mask?
if (lo <= t11->lo_as_long() && t11->hi_as_long() <= hi) {
return in(1)->in(1); // Then shifting is a nop
}
}
}
}
@ -1524,11 +1530,14 @@ Node* URShiftINode::Ideal(PhaseGVN* phase, bool can_reshape) {
// If Q is "X << z" the rounding is useless. Look for patterns like
// ((X<<Z) + Y) >>> Z and replace with (X + Y>>>Z) & Z-mask.
Node *add = in(1);
const TypeInt *t2 = phase->type(in(2))->isa_int();
if (in1_op == Op_AddI) {
Node *lshl = add->in(1);
if( lshl->Opcode() == Op_LShiftI &&
phase->type(lshl->in(2)) == t2 ) {
// Compare shift counts by value, not by node pointer, to also match a not-yet-normalized
// negative constant (e.g. -1 vs 31)
int lshl_con = 0;
if (lshl->Opcode() == Op_LShiftI &&
const_shift_count(phase, lshl, &lshl_con) &&
(lshl_con & (BitsPerJavaInteger - 1)) == con) {
Node *y_z = phase->transform( new URShiftINode(add->in(2),in(2)) );
Node *sum = phase->transform( new AddINode( lshl->in(1), y_z ) );
return new AndINode( sum, phase->intcon(mask) );
@ -1555,11 +1564,16 @@ Node* URShiftINode::Ideal(PhaseGVN* phase, bool can_reshape) {
// Check for "(X << z ) >>> z" which simply zero-extends
Node *shl = in(1);
if( in1_op == Op_LShiftI &&
phase->type(shl->in(2)) == t2 )
return new AndINode( shl->in(1), phase->intcon(mask) );
// Compare shift counts by value, not by node pointer, to also match a not-yet-normalized
// negative constant (e.g. -1 vs 31)
int shl_con = 0;
if (in1_op == Op_LShiftI &&
const_shift_count(phase, shl, &shl_con) &&
(shl_con & (BitsPerJavaInteger - 1)) == con)
return new AndINode(shl->in(1), phase->intcon(mask));
// Check for (x >> n) >>> 31. Replace with (x >>> 31)
const TypeInt* t2 = phase->type(in(2))->isa_int();
Node *shr = in(1);
if ( in1_op == Op_RShiftI ) {
Node *in11 = shr->in(1);
@ -1677,11 +1691,15 @@ Node* URShiftLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
const TypeInt *t2 = phase->type(in(2))->isa_int();
if (add->Opcode() == Op_AddL) {
Node *lshl = add->in(1);
if( lshl->Opcode() == Op_LShiftL &&
phase->type(lshl->in(2)) == t2 ) {
Node *y_z = phase->transform( new URShiftLNode(add->in(2),in(2)) );
Node *sum = phase->transform( new AddLNode( lshl->in(1), y_z ) );
return new AndLNode( sum, phase->longcon(mask) );
// Compare shift counts by value, not by node pointer, to also match a not-yet-normalized
// negative constant (e.g. -1 vs 63)
int lshl_con = 0;
if (lshl->Opcode() == Op_LShiftL &&
const_shift_count(phase, lshl, &lshl_con) &&
(lshl_con & (BitsPerJavaLong - 1)) == con) {
Node* y_z = phase->transform(new URShiftLNode(add->in(2), in(2)));
Node* sum = phase->transform(new AddLNode(lshl->in(1), y_z));
return new AndLNode(sum, phase->longcon(mask));
}
}
@ -1701,9 +1719,14 @@ Node* URShiftLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
// Check for "(X << z ) >>> z" which simply zero-extends
Node *shl = in(1);
if( shl->Opcode() == Op_LShiftL &&
phase->type(shl->in(2)) == t2 )
return new AndLNode( shl->in(1), phase->longcon(mask) );
// Compare shift counts by value, not by node pointer, to also match a not-yet-normalized
// negative constant (e.g. -1 vs 63)
int shl_con = 0;
if (shl->Opcode() == Op_LShiftL &&
const_shift_count(phase, shl, &shl_con) &&
(shl_con & (BitsPerJavaLong - 1)) == con) {
return new AndLNode(shl->in(1), phase->longcon(mask));
}
// Check for (x >> n) >>> 63. Replace with (x >>> 63)
Node *shr = in(1);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,6 @@ public:
virtual const Type *bottom_type() const = 0;
virtual bool is_CFG() const { return true; }
virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
virtual bool depends_only_on_test() const { return false; }
virtual const RegMask &out_RegMask() const;
virtual Node *match( const ProjNode *proj, const Matcher *m );
virtual uint ideal_reg() const { return NotAMachineReg; }
@ -176,8 +175,7 @@ public:
const bool _is_io_use; // Used to distinguish between the projections
// used on the control and io paths from a macro node
virtual int Opcode() const;
virtual bool is_CFG() const;
virtual bool depends_only_on_test() const { return false; }
virtual bool is_CFG() const;
virtual const Type *bottom_type() const;
virtual const TypePtr *adr_type() const;
virtual bool pinned() const;

View File

@ -1059,14 +1059,135 @@ public:
virtual bool is_CFG() const { return false; }
// If this node is control-dependent on a test, can it be
// rerouted to a dominating equivalent test? This is usually
// true of non-CFG nodes, but can be false for operations which
// depend for their correct sequencing on more than one test.
// (In that case, hoisting to a dominating test may silently
// skip some other important test.)
virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };
// If this node is control-dependent on a test, can it be rerouted to a dominating equivalent
// test? This means that the node can be executed safely as long as it happens after the test
// that is its control input without worrying about the whole control flow. On the contrary, if
// the node depends on a test that is not its control input, or if it depends on more than one
// test, then this method must return false.
//
// Pseudocode examples:
// 1. if (y != 0) {
// x / y;
// }
// The division depends only on the test y != 0 and can be executed anywhere y != 0 holds true.
// As a result, depends_only_on_test returns true.
// 2. if (y != 0) {
// if (x > 1) {
// x / y;
// }
// }
// If the division x / y has its control input being the IfTrueNode of the test y != 0, then
// depends_only_on_test returns true. Otherwise, if the division has its control input being the
// IfTrueNode of the test x > 1, then depends_only_on_test returns false.
// 3. if (y > z) {
// if (z > 0) {
// x / y
// }
// }
// The division depends on both tests y > z and z > 0. As a result, depends_only_on_test returns
// false.
//
// This method allows more freedom in certain nodes with regards to scheduling, for example it
// allows nodes to float out of loops together with its test.
//
// This method is pessimistic: it may return false even if the node satisfies the
// requirements. However, it must return false if the node does not satisfy the requirements.
// When a test is decomposed into multiple tests, all nodes that depend on the decomposed test
// must be pinned at the lowest dominating test of those. For example, when a zero check of a
// division is split through a region but the division itself is not, it must be pinned at the
// merge point by returning false when calling this method.
bool depends_only_on_test() const {
  // CFG nodes and explicitly pinned nodes can never be rerouted to a
  // dominating test, regardless of what the subclass would report.
  const bool can_be_rerouted = !is_CFG() && !pinned();
  if (!can_be_rerouted) {
    return false;
  }
  assert(in(0) != nullptr, "must have a control input");
  // Defer the node-kind-specific answer to the virtual hook.
  return depends_only_on_test_impl();
}
// Return a clone of the current node that's pinned. The current node must return true for
// depends_only_on_test, and the returned node must return false. This method is called when the
// node is disconnected from its test.
//
// Examples:
// 1. for (int i = start; i <= limit; i++) {
// if (!rangecheck(i, a)) {
// trap;
// }
// a[i];
// }
// Loop predication can then hoist the range check out of the loop:
// if (!rangecheck(start, a)) {
// trap;
// }
// if (!rangecheck(limit, a)) {
// trap;
// }
// for (int i = start; i <= limit; i++) {
// a[i];
// }
// As the load a[i] now depends on both tests rangecheck(start, a) and rangecheck(limit, a), it
// must be pinned at the lowest dominating test of those.
//
// 2. if (y > x) {
// if (x >= 0) {
// if (y != 0) {
// x / y;
// }
// }
// }
// The test (y != 0) == true can be deduced from (y > x) == true and (x >= 0) == true, so we may
// choose to elide it. In such cases, the division x / y now depends on both tests
// (y > x) == true and (x >= 0) == true, so it must be pinned at the lowest dominating test of
// those.
//
// 3. if (b) {
// ...
// } else {
// ...
// }
// if (y == 0) {
// trap;
// }
// x / y;
// The division x / y depends only on the test (y == 0) == false, but if we split the test
// through the merge point but not the division:
// if (b) {
// ...
// if (y == 0) {
// trap;
// }
// } else {
// ...
// if (y == 0) {
// trap;
// }
// }
// x / y;
// The division now has the control input being the RegionNode merge the branches of if(b)
// instead of a test that proves y != 0. As a result, it must be pinned at that node.
//
// There are cases where the node does not actually have a dependency on its control input. For
// example, when we try to sink a LoadNode out of a loop in PhaseIdealLoop::try_sink_out_of_loop,
// we clone the node so that all of the clones can be scheduled out of the loop. To prevent the
// clones from being GVN-ed again, we add a control input for the node at the loop exit. For the
// cases when the node provably does not depend on its control input, this method can return
// nullptr.
Node* pin_node_under_control() const {
  assert(depends_only_on_test(), "must be a depends_only_on_test node");
  Node* pinned = pin_node_under_control_impl();
  if (pinned != nullptr) {
    // A successful pin must yield a node that can no longer be rerouted.
    assert(!pinned->depends_only_on_test(), "the result must not depends_only_on_test");
    return pinned;
  }
  // Only loads are expected to decline pinning (no real control dependency).
  assert(is_Load(), "unexpected failure to pin for %s", Name());
  return nullptr;
}
private:
// Fallback implementations: any node kind that can legitimately be asked
// depends_only_on_test()/pin_node_under_control() must override these hooks;
// reaching the defaults asserts, flagging a query on an unexpected node kind.
virtual bool depends_only_on_test_impl() const { assert(false, "%s", Name()); return false; }
virtual Node* pin_node_under_control_impl() const { assert(false, "%s", Name()); return nullptr; }
public:
// When building basic blocks, I need to have a notion of block beginning
// Nodes, next block selector Nodes (block enders), and next block
// projections. These calls need to work on their machine equivalents. The
@ -1201,13 +1322,6 @@ public:
template <typename Callback, typename Check>
void visit_uses(Callback callback, Check is_boundary) const;
// Returns a clone of the current node that's pinned (if the current node is not) for nodes found in array accesses
// (Load and range check CastII nodes).
// This is used when an array access is made dependent on 2 or more range checks (range check smearing or Loop Predication).
virtual Node* pin_array_access_node() const {
return nullptr;
}
//----------------- Code Generation
// Ideal register class for Matching. Zero means unmatched instruction

View File

@ -1722,11 +1722,6 @@ void PhaseIterGVN::verify_Ideal_for(Node* n, bool can_reshape) {
case Op_MergeMem:
return;
// URShiftINode::Ideal
// Found in tier1-3. Did not investigate further yet.
case Op_URShiftI:
return;
// CMoveINode::Ideal
// Found in tier1-3. Did not investigate further yet.
case Op_CMoveI:
@ -2594,12 +2589,15 @@ void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_
auto is_boundary = [](Node* n){ return !n->is_ConstraintCast(); };
use->visit_uses(push_the_uses_to_worklist, is_boundary);
}
// If changed LShift inputs, check RShift users for useless sign-ext
// If changed LShift inputs, check RShift/URShift users for
// "(X << C) >> C" sign-ext and "(X << C) >>> C" zero-ext optimizations.
if (use_op == Op_LShiftI || use_op == Op_LShiftL) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
Node* u = use->fast_out(i2);
if (u->Opcode() == Op_RShiftI || u->Opcode() == Op_RShiftL)
if (u->Opcode() == Op_RShiftI || u->Opcode() == Op_RShiftL ||
u->Opcode() == Op_URShiftI || u->Opcode() == Op_URShiftL) {
worklist.push(u);
}
}
}
// If changed LShift inputs, check And users for shift and mask (And) operation
@ -2796,37 +2794,6 @@ void PhaseIterGVN::remove_speculative_types() {
_table.check_no_speculative_types();
}
// Check if the type of a divisor of a Div or Mod node includes zero.
// Returns true when the divisor's type provably excludes zero (or the node is
// not a division/modulus at all); returns false when a zero check must be
// assumed to still guard the node.
bool PhaseIterGVN::no_dependent_zero_check(Node* n) const {
  const int opc = n->Opcode();
  const bool int_division  = (opc == Op_DivI || opc == Op_ModI ||
                              opc == Op_UDivI || opc == Op_UModI);
  const bool long_division = (opc == Op_DivL || opc == Op_ModL ||
                              opc == Op_UDivL || opc == Op_UModL);
  if (!int_division && !long_division) {
    // Not a division/modulus node: nothing to guard.
    return true;
  }
  const Type* divisor = type(n->in(2));
  if (divisor == Type::TOP) {
    // 'n' is dead. Treat as if zero check is still there to avoid any further optimizations.
    return false;
  }
  if (int_division) {
    const TypeInt* t = divisor->is_int();
    // The range excludes zero iff it is entirely negative or entirely positive.
    return t->_hi < 0 || t->_lo > 0;
  }
  const TypeLong* t = divisor->is_long();
  return t->_hi < 0 || t->_lo > 0;
}
//=============================================================================
#ifndef PRODUCT
uint PhaseCCP::_total_invokes = 0;

View File

@ -604,7 +604,6 @@ public:
}
bool is_dominator(Node *d, Node *n) { return is_dominator_helper(d, n, false); }
bool no_dependent_zero_check(Node* n) const;
#ifndef PRODUCT
static bool is_verify_def_use() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,7 +63,6 @@ public:
virtual const Type *bottom_type() const;
virtual bool is_CFG() const { return true; }
virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
virtual bool depends_only_on_test() const { return false; }
virtual const Node *is_block_proj() const { return this; }
virtual const RegMask &out_RegMask() const;
virtual uint ideal_reg() const { return NotAMachineReg; }

View File

@ -29,6 +29,7 @@
#include "opto/movenode.hpp"
#include "opto/node.hpp"
#include "opto/opaquenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/predicates.hpp"
//------------------------------split_thru_region------------------------------
@ -716,14 +717,11 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio
} // End of while merge point has phis
_igvn.remove_dead_node(region);
if (iff->Opcode() == Op_RangeCheck) {
// Pin array access nodes: control is updated here to a region. If, after some transformations, only one path
// into the region is left, an array load could become dependent on a condition that's not a range check for
// that access. If that condition is replaced by an identical dominating one, then an unpinned load would risk
// floating above its range check.
pin_array_access_nodes_dependent_on(new_true);
pin_array_access_nodes_dependent_on(new_false);
}
// Control is updated here to a region, which is not a test, so any node that
// depends_only_on_test must be pinned
pin_nodes_dependent_on(new_true, iff->Opcode() == Op_RangeCheck);
pin_nodes_dependent_on(new_false, iff->Opcode() == Op_RangeCheck);
if (new_false_region != nullptr) {
*new_false_region = new_false;
@ -735,13 +733,22 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio
DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
}
void PhaseIdealLoop::pin_array_access_nodes_dependent_on(Node* ctrl) {
void PhaseIdealLoop::pin_nodes_dependent_on(Node* ctrl, bool old_iff_is_rangecheck) {
for (DUIterator i = ctrl->outs(); ctrl->has_out(i); i++) {
Node* use = ctrl->out(i);
if (!use->depends_only_on_test()) {
continue;
}
Node* pinned_clone = use->pin_array_access_node();
// When a RangeCheckNode is folded because its condition is a constant, IfProjNode::Identity
// returns the control input of the RangeCheckNode. As a result, when the old IfNode is not a
// RangeCheckNode, and a Load output of it depends_only_on_test, we don't need to pin the Load.
if (use->is_Load() && !old_iff_is_rangecheck) {
continue;
}
Node* pinned_clone = use->pin_node_under_control();
if (pinned_clone != nullptr) {
register_new_node_with_ctrl_of(pinned_clone, use);
_igvn.replace_node(use, pinned_clone);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -507,6 +507,9 @@ public:
virtual int Opcode() const;
const Type *bottom_type() const { return Type::DOUBLE; }
virtual uint ideal_reg() const { return Op_RegD; }
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
@ -522,6 +525,9 @@ public:
const Type *bottom_type() const { return Type::DOUBLE; }
virtual uint ideal_reg() const { return Op_RegD; }
virtual const Type* Value(PhaseGVN* phase) const;
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
//------------------------------SqrtFNode--------------------------------------
@ -541,6 +547,9 @@ public:
const Type *bottom_type() const { return Type::FLOAT; }
virtual uint ideal_reg() const { return Op_RegF; }
virtual const Type* Value(PhaseGVN* phase) const;
private:
virtual bool depends_only_on_test_impl() const { return false; }
};
//------------------------------SqrtHFNode-------------------------------------

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,7 +48,6 @@ public:
virtual int Opcode() const;
const Type* bottom_type() const { return TypeInt::CC; }
bool depends_only_on_test() const { return false; }
ciMethod* method() const { return _method; }
int bci() const { return _bci; }
@ -71,6 +70,8 @@ private:
static bool is_oop(PhaseGVN* phase, Node* n);
Node* load_klass(PhaseGVN* phase) const;
virtual bool depends_only_on_test_impl() const { return false; }
#endif // ASSERT
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,6 +42,7 @@ typedef enum {
} VirtualizationType;
class outputStream;
class stringStream;
enum class vmIntrinsicID;
// Abstract_VM_Version provides information about the VM.
@ -226,6 +227,21 @@ class Abstract_VM_Version: AllStatic {
static const char* cpu_name(void);
static const char* cpu_description(void);
static void get_cpu_features_name(void* features_buffer, stringStream& ss) { return; }
// Returns names of features present in features_set1 but not in features_set2
static void get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss) { return; }
// Returns number of bytes required to store cpu features representation
static int cpu_features_size() { return 0; }
// Stores arch dependent cpu features representation in the provided buffer.
// Size of the buffer must be same as returned by cpu_features_size()
static void store_cpu_features(void* buf) { return; }
// features_to_test is an opaque object that stores arch specific representation of cpu features
static bool supports_features(void* features_to_test) { return false; };
};
#endif // SHARE_RUNTIME_ABSTRACT_VM_VERSION_HPP

View File

@ -2293,7 +2293,7 @@ void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
// We do not have the same lock protection for pd_commit_memory and record_virtual_memory_commit.
// We assume that there is some external synchronization that prevents a region from being uncommitted
// before it is finished being committed.
bool os::uncommit_memory(char* addr, size_t bytes, bool executable) {
void os::uncommit_memory(char* addr, size_t bytes, bool executable) {
assert_nonempty_range(addr, bytes);
bool res;
if (MemTracker::enabled()) {
@ -2306,13 +2306,10 @@ bool os::uncommit_memory(char* addr, size_t bytes, bool executable) {
res = pd_uncommit_memory(addr, bytes, executable);
}
if (res) {
log_debug(os, map)("Uncommitted " RANGEFMT, RANGEFMTARGS(addr, bytes));
} else {
log_info(os, map)("Failed to uncommit " RANGEFMT, RANGEFMTARGS(addr, bytes));
if (!res) {
fatal("Failed to uncommit " RANGEFMT, RANGEFMTARGS(addr, bytes));
}
return res;
log_debug(os, map)("Uncommitted " RANGEFMT, RANGEFMTARGS(addr, bytes));
}
// The scope of NmtVirtualMemoryLocker covers both pd_release_memory and record_virtual_memory_release because
@ -2320,7 +2317,7 @@ bool os::uncommit_memory(char* addr, size_t bytes, bool executable) {
// We do not have the same lock protection for pd_reserve_memory and record_virtual_memory_reserve.
// We assume that there is some external synchronization that prevents a region from being released
// before it is finished being reserved.
bool os::release_memory(char* addr, size_t bytes) {
void os::release_memory(char* addr, size_t bytes) {
assert_nonempty_range(addr, bytes);
bool res;
if (MemTracker::enabled()) {
@ -2333,11 +2330,9 @@ bool os::release_memory(char* addr, size_t bytes) {
res = pd_release_memory(addr, bytes);
}
if (!res) {
log_info(os, map)("Failed to release " RANGEFMT, RANGEFMTARGS(addr, bytes));
} else {
log_debug(os, map)("Released " RANGEFMT, RANGEFMTARGS(addr, bytes));
fatal("Failed to release " RANGEFMT, RANGEFMTARGS(addr, bytes));
}
return res;
log_debug(os, map)("Released " RANGEFMT, RANGEFMTARGS(addr, bytes));
}
// Prints all mappings
@ -2406,7 +2401,7 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset,
return result;
}
bool os::unmap_memory(char *addr, size_t bytes) {
void os::unmap_memory(char *addr, size_t bytes) {
bool result;
if (MemTracker::enabled()) {
MemTracker::NmtVirtualMemoryLocker nvml;
@ -2417,7 +2412,9 @@ bool os::unmap_memory(char *addr, size_t bytes) {
} else {
result = pd_unmap_memory(addr, bytes);
}
return result;
if (!result) {
fatal("Failed to unmap memory " RANGEFMT, RANGEFMTARGS(addr, bytes));
}
}
void os::disclaim_memory(char *addr, size_t bytes) {
@ -2445,7 +2442,7 @@ char* os::reserve_memory_special(size_t size, size_t alignment, size_t page_size
return result;
}
bool os::release_memory_special(char* addr, size_t bytes) {
void os::release_memory_special(char* addr, size_t bytes) {
bool res;
if (MemTracker::enabled()) {
MemTracker::NmtVirtualMemoryLocker nvml;
@ -2456,7 +2453,9 @@ bool os::release_memory_special(char* addr, size_t bytes) {
} else {
res = pd_release_memory_special(addr, bytes);
}
return res;
if (!res) {
fatal("Failed to release memory special " RANGEFMT, RANGEFMTARGS(addr, bytes));
}
}
// Convenience wrapper around naked_short_sleep to allow for longer sleep

View File

@ -536,8 +536,8 @@ class os: AllStatic {
static void commit_memory_or_exit(char* addr, size_t size,
size_t alignment_hint,
bool executable, const char* mesg);
static bool uncommit_memory(char* addr, size_t bytes, bool executable = false);
static bool release_memory(char* addr, size_t bytes);
static void uncommit_memory(char* addr, size_t bytes, bool executable = false);
static void release_memory(char* addr, size_t bytes);
// Does the platform support trimming the native heap?
static bool can_trim_native_heap();
@ -566,7 +566,7 @@ class os: AllStatic {
static bool unguard_memory(char* addr, size_t bytes);
static bool create_stack_guard_pages(char* addr, size_t bytes);
static bool pd_create_stack_guard_pages(char* addr, size_t bytes);
static bool remove_stack_guard_pages(char* addr, size_t bytes);
static void remove_stack_guard_pages(char* addr, size_t bytes);
// Helper function to create a new file with template jvmheap.XXXXXX.
// Returns a valid fd on success or else returns -1
static int create_file_for_heap(const char* dir);
@ -582,7 +582,7 @@ class os: AllStatic {
static char* map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, MemTag mem_tag, bool read_only = false,
bool allow_exec = false);
static bool unmap_memory(char *addr, size_t bytes);
static void unmap_memory(char *addr, size_t bytes);
static void disclaim_memory(char *addr, size_t bytes);
static void realign_memory(char *addr, size_t bytes, size_t alignment_hint);
@ -605,7 +605,7 @@ class os: AllStatic {
// reserve, commit and pin the entire memory region
static char* reserve_memory_special(size_t size, size_t alignment, size_t page_size,
char* addr, bool executable);
static bool release_memory_special(char* addr, size_t bytes);
static void release_memory_special(char* addr, size_t bytes);
static void large_page_init();
static size_t large_page_size();
static bool can_commit_large_page_memory();

View File

@ -116,13 +116,8 @@ void StackOverflow::remove_stack_guard_pages() {
size_t len = stack_guard_zone_size();
if (os::must_commit_stack_guard_pages()) {
if (os::remove_stack_guard_pages((char *) low_addr, len)) {
_stack_guard_state = stack_guard_unused;
} else {
log_warning(os, thread)("Attempt to deallocate stack guard pages failed ("
PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
return;
}
os::remove_stack_guard_pages((char *) low_addr, len);
_stack_guard_state = stack_guard_unused;
} else {
if (_stack_guard_state == stack_guard_unused) return;
if (os::unguard_memory((char *) low_addr, len)) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,6 +51,7 @@
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/ticks.hpp"
#define VM_OP_NAME_INITIALIZE(name) #name,
@ -286,42 +287,27 @@ class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
}
private:
class ObjectMonitorLinkedList :
public LinkedListImpl<ObjectMonitor*,
AnyObj::C_HEAP, mtThread,
AllocFailStrategy::RETURN_NULL> {};
using ObjectMonitorList = GrowableArrayCHeap<ObjectMonitor*, mtThread>;
// HashTable SIZE is specified at compile time so we
// use 1031 which is the first prime after 1024.
typedef HashTable<int64_t, ObjectMonitorLinkedList*, 1031, AnyObj::C_HEAP, mtThread,
typedef HashTable<int64_t, ObjectMonitorList, 1031, AnyObj::C_HEAP, mtThread,
&ObjectMonitorsDump::ptr_hash> PtrTable;
PtrTable* _ptrs;
size_t _key_count;
size_t _om_count;
void add_list(int64_t key, ObjectMonitorLinkedList* list) {
_ptrs->put(key, list);
_key_count++;
}
ObjectMonitorLinkedList* get_list(int64_t key) {
ObjectMonitorLinkedList** listpp = _ptrs->get(key);
return (listpp == nullptr) ? nullptr : *listpp;
}
void add(ObjectMonitor* monitor) {
int64_t key = monitor->owner();
ObjectMonitorLinkedList* list = get_list(key);
if (list == nullptr) {
// Create new list and add it to the hash table:
list = new (mtThread) ObjectMonitorLinkedList;
_ptrs->put(key, list);
bool created = false;
ObjectMonitorList* list = _ptrs->put_if_absent(key, &created);
if (created) {
_key_count++;
}
assert(list->find(monitor) == nullptr, "Should not contain duplicates");
list->add(monitor); // Add the ObjectMonitor to the list.
assert(list->find(monitor) == -1, "Should not contain duplicates");
list->push(monitor); // Add the ObjectMonitor to the list.
_om_count++;
}
@ -332,17 +318,7 @@ class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
ObjectMonitorsDump() : _ptrs(new (mtThread) PtrTable), _key_count(0), _om_count(0) {}
~ObjectMonitorsDump() {
class CleanupObjectMonitorsDump: StackObj {
public:
bool do_entry(int64_t& key, ObjectMonitorLinkedList*& list) {
list->clear(); // clear the LinkListNodes
delete list; // then delete the LinkedList
return true;
}
} cleanup;
_ptrs->unlink(&cleanup); // cleanup the LinkedLists
delete _ptrs; // then delete the hash table
delete _ptrs;
}
// Implements MonitorClosure used to collect all owned monitors in the system
@ -368,11 +344,12 @@ class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
// Implements the ObjectMonitorsView interface
void visit(MonitorClosure* closure, JavaThread* thread) override {
int64_t key = ObjectMonitor::owner_id_from(thread);
ObjectMonitorLinkedList* list = get_list(key);
LinkedListIterator<ObjectMonitor*> iter(list != nullptr ? list->head() : nullptr);
while (!iter.is_empty()) {
ObjectMonitor* monitor = *iter.next();
closure->do_monitor(monitor);
ObjectMonitorList* list = _ptrs->get(key);
if (list == nullptr) {
return;
}
for (int i = 0; i < list->length(); i++) {
closure->do_monitor(list->at(i));
}
}

View File

@ -159,7 +159,7 @@
unchecked_nonstatic_field) \
\
/******************************************************************/ \
/* OopDesc and Klass hierarchies (NOTE: MethodData* incomplete) */ \
/* OopDesc and Klass hierarchies */ \
/******************************************************************/ \
\
volatile_nonstatic_field(oopDesc, _mark, markWord) \
@ -233,27 +233,7 @@
nonstatic_field(Klass, _vtable_len, int) \
nonstatic_field(Klass, _class_loader_data, ClassLoaderData*) \
nonstatic_field(vtableEntry, _method, Method*) \
nonstatic_field(MethodData, _size, int) \
nonstatic_field(MethodData, _method, Method*) \
nonstatic_field(MethodData, _data_size, int) \
nonstatic_field(MethodData, _data[0], intptr_t) \
nonstatic_field(MethodData, _parameters_type_data_di, int) \
nonstatic_field(MethodData, _compiler_counters._nof_decompiles, uint) \
nonstatic_field(MethodData, _compiler_counters._nof_overflow_recompiles, uint) \
nonstatic_field(MethodData, _compiler_counters._nof_overflow_traps, uint) \
nonstatic_field(MethodData, _compiler_counters._trap_hist._array[0], u1) \
nonstatic_field(MethodData, _eflags, intx) \
nonstatic_field(MethodData, _arg_local, intx) \
nonstatic_field(MethodData, _arg_stack, intx) \
nonstatic_field(MethodData, _arg_returned, intx) \
nonstatic_field(MethodData, _tenure_traps, uint) \
nonstatic_field(MethodData, _invoke_mask, int) \
nonstatic_field(MethodData, _backedge_mask, int) \
nonstatic_field(DataLayout, _header._struct._tag, u1) \
nonstatic_field(DataLayout, _header._struct._flags, u1) \
nonstatic_field(DataLayout, _header._struct._bci, u2) \
nonstatic_field(DataLayout, _header._struct._traps, u4) \
nonstatic_field(DataLayout, _cells[0], intptr_t) \
nonstatic_field(MethodCounters, _invoke_mask, int) \
nonstatic_field(MethodCounters, _backedge_mask, int) \
COMPILER2_OR_JVMCI_PRESENT(nonstatic_field(MethodCounters, _interpreter_throwout_count, u2)) \
@ -904,7 +884,6 @@
/*****************************/ \
\
declare_toplevel_type(void*) \
declare_toplevel_type(Atomic<HeapWord*>) \
declare_toplevel_type(int*) \
declare_toplevel_type(char*) \
declare_toplevel_type(char**) \
@ -962,8 +941,6 @@
declare_type(ConstMethod, MetaspaceObj) \
declare_type(Annotations, MetaspaceObj) \
\
declare_toplevel_type(MethodData::CompilerCounters) \
\
declare_toplevel_type(narrowKlass) \
\
declare_toplevel_type(vtableEntry) \
@ -972,7 +949,6 @@
declare_toplevel_type(Symbol*) \
declare_toplevel_type(volatile Metadata*) \
\
declare_toplevel_type(DataLayout) \
declare_toplevel_type(BSMAttributeEntries) \
\
/********/ \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,9 +26,8 @@
#define SHARE_UTILITIES_RBTREE_HPP
#include "cppstdlib/type_traits.hpp"
#include "metaprogramming/enableIf.hpp"
#include "memory/allocation.hpp"
#include "nmt/memTag.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
// An intrusive red-black tree is constructed with two template parameters:
@ -64,8 +63,9 @@ enum class RBTreeOrdering : int { LT, EQ, GT };
template <typename K, typename NodeType, typename COMPARATOR>
class AbstractRBTree;
class Arena;
class outputStream;
class ResourceArea;
class IntrusiveRBNode {
template <typename K, typename NodeType, typename COMPARATOR>
@ -81,7 +81,7 @@ class IntrusiveRBNode {
DEBUG_ONLY(mutable bool _visited);
public:
IntrusiveRBNode() : _parent(0), _left(nullptr), _right(nullptr) DEBUG_ONLY(COMMA _visited(false)) {}
IntrusiveRBNode();
// Gets the previous in-order node in the tree.
// nullptr is returned if there is no previous node.
@ -96,22 +96,18 @@ public:
void print_on(outputStream* st, int depth = 0) const;
private:
bool is_black() const { return (_parent & 0x1) != 0; }
bool is_red() const { return (_parent & 0x1) == 0; }
bool is_black() const;
bool is_red() const;
void set_black() { _parent |= 0x1; }
void set_red() { _parent &= ~0x1; }
void set_black();
void set_red();
IntrusiveRBNode* parent() const { return (IntrusiveRBNode*)(_parent & ~0x1); }
void set_parent(IntrusiveRBNode* new_parent) { _parent = (_parent & 0x1) | (uintptr_t)new_parent; }
IntrusiveRBNode* parent() const;
void set_parent(IntrusiveRBNode* new_parent);
bool is_right_child() const {
return parent() != nullptr && parent()->_right == this;
}
bool is_right_child() const;
bool is_left_child() const {
return parent() != nullptr && parent()->_left == this;
}
bool is_left_child() const;
void replace_child(IntrusiveRBNode* old_child, IntrusiveRBNode* new_child);
@ -142,20 +138,20 @@ private:
V _value;
public:
const K& key() const { return _key; }
const K& key() const;
V& val() { return _value; }
const V& val() const { return _value; }
void set_val(const V& v) { _value = v; }
V& val();
const V& val() const;
void set_val(const V& v);
RBNode() {}
RBNode(const K& key) : IntrusiveRBNode(), _key(key) {}
RBNode(const K& key, const V& val) : IntrusiveRBNode(), _key(key), _value(val) {}
RBNode();
RBNode(const K& key);
RBNode(const K& key, const V& val);
const RBNode<K, V>* prev() const { return (RBNode<K, V>*)IntrusiveRBNode::prev(); }
const RBNode<K, V>* next() const { return (RBNode<K, V>*)IntrusiveRBNode::next(); }
RBNode<K, V>* prev() { return (RBNode<K, V>*)IntrusiveRBNode::prev(); }
RBNode<K, V>* next() { return (RBNode<K, V>*)IntrusiveRBNode::next(); }
const RBNode<K, V>* prev() const;
const RBNode<K, V>* next() const;
RBNode<K, V>* prev();
RBNode<K, V>* next();
void print_on(outputStream* st, int depth = 0) const;
@ -176,17 +172,15 @@ public:
friend AbstractRBTree<K, NodeType, COMPARATOR>;
NodeType** _insert_location;
NodeType* _parent;
Cursor() : _insert_location(nullptr), _parent(nullptr) {}
Cursor(NodeType** insert_location, NodeType* parent)
: _insert_location(insert_location), _parent(parent) {}
Cursor(NodeType* const* insert_location, NodeType* parent)
: _insert_location((NodeType**)insert_location), _parent(parent) {}
Cursor();
Cursor(NodeType** insert_location, NodeType* parent);
Cursor(NodeType* const* insert_location, NodeType* parent);
public:
bool valid() const { return _insert_location != nullptr; }
bool found() const { return *_insert_location != nullptr; }
NodeType* node() { return _insert_location == nullptr ? nullptr : *_insert_location; }
NodeType* node() const { return _insert_location == nullptr ? nullptr : *_insert_location; }
bool valid() const;
bool found() const;
NodeType* node();
NodeType* node() const;
};
protected:
@ -212,36 +206,16 @@ private:
static constexpr bool HasNodeVerifier = HasNodeVerifierImpl<COMPARATOR>::value;
RBTreeOrdering cmp(const K& a, const NodeType* b) const {
if constexpr (HasNodeComparator) {
return COMPARATOR::cmp(a, b);
} else if constexpr (HasKeyComparator) {
return COMPARATOR::cmp(a, b->key());
}
}
RBTreeOrdering cmp(const K& a, const NodeType* b) const;
bool less_than(const NodeType* a, const NodeType* b) const {
if constexpr (HasNodeVerifier) {
return COMPARATOR::less_than(a, b);
} else {
return true;
}
}
bool less_than(const NodeType* a, const NodeType* b) const;
void assert_key_leq(K a, K b) const {
if constexpr (HasKeyComparator) { // Cannot assert if no key comparator exist.
assert(COMPARATOR::cmp(a, b) != RBTreeOrdering::GT, "key a must be less or equal to key b");
}
}
void assert_key_leq(K a, K b) const;
// True if node is black (nil nodes count as black)
static inline bool is_black(const IntrusiveRBNode* node) {
return node == nullptr || node->is_black();
}
static inline bool is_black(const IntrusiveRBNode* node);
static inline bool is_red(const IntrusiveRBNode* node) {
return node != nullptr && node->is_red();
}
static inline bool is_red(const IntrusiveRBNode* node);
void fix_insert_violations(IntrusiveRBNode* node);
@ -251,18 +225,14 @@ private:
void remove_from_tree(IntrusiveRBNode* node);
struct empty_verifier {
bool operator()(const NodeType* n) const {
return true;
}
bool operator()(const NodeType* n) const;
};
template <typename NODE_VERIFIER, typename USER_VERIFIER>
void verify_self(NODE_VERIFIER verifier, const USER_VERIFIER& extra_verifier) const;
struct default_printer {
void operator()(outputStream* st, const NodeType* n, int depth) const {
n->print_on(st, depth);
}
void operator()(outputStream* st, const NodeType* n, int depth) const;
};
template <typename PRINTER>
@ -271,12 +241,9 @@ private:
public:
NONCOPYABLE(AbstractRBTree);
AbstractRBTree() : _num_nodes(0), _root(nullptr) DEBUG_ONLY(COMMA _expected_visited(false)) {
static_assert(std::is_trivially_destructible<K>::value, "key type must be trivially destructable");
static_assert(HasKeyComparator || HasNodeComparator, "comparator must be of correct type");
}
AbstractRBTree();
size_t size() const { return _num_nodes; }
size_t size() const;
// Gets the cursor associated with the given node or key.
Cursor cursor(const K& key, const NodeType* hint_node = nullptr);
@ -311,87 +278,39 @@ public:
void replace_at_cursor(NodeType* new_node, const Cursor& node_cursor);
// Finds the node associated with the given key.
NodeType* find_node(const K& key, const NodeType* hint_node = nullptr) const {
Cursor node_cursor = cursor(key, hint_node);
return node_cursor.node();
}
NodeType* find_node(const K& key, const NodeType* hint_node = nullptr) {
Cursor node_cursor = cursor(key, hint_node);
return node_cursor.node();
}
NodeType* find_node(const K& key, const NodeType* hint_node = nullptr);
NodeType* find_node(const K& key, const NodeType* hint_node = nullptr) const;
// Inserts the given node into the tree.
void insert(const K& key, NodeType* node, const NodeType* hint_node = nullptr) {
Cursor node_cursor = cursor(key, hint_node);
insert_at_cursor(node, node_cursor);
}
void insert(const K& key, NodeType* node, const NodeType* hint_node = nullptr);
void remove(NodeType* node) {
Cursor node_cursor = cursor(node);
remove_at_cursor(node_cursor);
}
// Removes the given node from the tree.
void remove(NodeType* node);
// Finds the node with the closest key <= the given key.
// If no node is found, null is returned instead.
NodeType* closest_leq(const K& key) const {
Cursor node_cursor = cursor(key);
return node_cursor.found() ? node_cursor.node() : prev(node_cursor).node();
}
NodeType* closest_leq(const K& key) {
Cursor node_cursor = cursor(key);
return node_cursor.found() ? node_cursor.node() : prev(node_cursor).node();
}
NodeType* closest_leq(const K& key);
NodeType* closest_leq(const K& key) const;
// Finds the node with the closest key > the given key.
// If no node is found, null is returned instead.
NodeType* closest_gt(const K& key) const {
Cursor node_cursor = cursor(key);
return next(node_cursor).node();
}
NodeType* closest_gt(const K& key) {
Cursor node_cursor = cursor(key);
return next(node_cursor).node();
}
NodeType* closest_gt(const K& key);
NodeType* closest_gt(const K& key) const;
// Finds the node with the closest key >= the given key.
// If no node is found, null is returned instead.
NodeType* closest_ge(const K& key) const {
Cursor node_cursor = cursor(key);
return node_cursor.found() ? node_cursor.node() : next(node_cursor).node();
}
NodeType* closest_ge(const K& key) {
Cursor node_cursor = cursor(key);
return node_cursor.found() ? node_cursor.node() : next(node_cursor).node();
}
NodeType* closest_ge(const K& key);
NodeType* closest_ge(const K& key) const;
// Returns leftmost node, nullptr if tree is empty.
// If COMPARATOR::cmp(a, b) behaves canonically (positive value for a > b), this will the smallest key value.
const NodeType* leftmost() const {
IntrusiveRBNode* n = _root, *n2 = nullptr;
while (n != nullptr) {
n2 = n;
n = n->_left;
}
return (NodeType*)n2;
}
NodeType* leftmost();
const NodeType* leftmost() const;
// Returns rightmost node, nullptr if tree is empty.
// If COMPARATOR::cmp(a, b) behaves canonically (positive value for a > b), this will the largest key value.
const NodeType* rightmost() const {
IntrusiveRBNode* n = _root, *n2 = nullptr;
while (n != nullptr) {
n2 = n;
n = n->_right;
}
return (NodeType*)n2;
}
NodeType* leftmost() { return const_cast<NodeType*>(static_cast<const TreeType*>(this)->leftmost()); }
NodeType* rightmost() { return const_cast<NodeType*>(static_cast<const TreeType*>(this)->rightmost()); }
NodeType* rightmost();
const NodeType* rightmost() const;
struct Range {
NodeType* start;
@ -403,11 +322,7 @@ public:
// Return the range [start, end)
// where start->key() <= addr < end->key().
// Failure to find the range leads to start and/or end being null.
Range find_enclosing_range(K key) const {
NodeType* start = closest_leq(key);
NodeType* end = closest_gt(key);
return Range(start, end);
}
Range find_enclosing_range(K key) const;
// Visit all RBNodes in ascending order, calling f on each node.
// If f returns `true` the iteration continues, otherwise it is stopped at the current node.
@ -417,7 +332,6 @@ public:
template <typename F>
void visit_in_order(F f);
// Visit all RBNodes in ascending order whose keys are in range [from, to], calling f on each node.
// If f returns `true` the iteration continues, otherwise it is stopped at the current node.
template <typename F>
@ -433,15 +347,7 @@ public:
// This should return true if the node is valid.
// If provided, each node is also verified through this callable.
template <typename USER_VERIFIER = empty_verifier>
void verify_self(const USER_VERIFIER& extra_verifier = USER_VERIFIER()) const {
if constexpr (HasNodeVerifier) {
verify_self([](const NodeType* a, const NodeType* b){ return COMPARATOR::less_than(a, b);}, extra_verifier);
} else if constexpr (HasKeyComparator) {
verify_self([](const NodeType* a, const NodeType* b){ return COMPARATOR::cmp(a->key(), b->key()) == RBTreeOrdering::LT; }, extra_verifier);
} else {
verify_self([](const NodeType*, const NodeType*){ return true;}, extra_verifier);
}
}
void verify_self(const USER_VERIFIER& extra_verifier = USER_VERIFIER()) const;
// Accepts an optional printing callable `void node_printer(outputStream* st, const Node* n, int depth)`.
// If provided, each node is printed through this callable rather than the default `print_on`.
@ -458,9 +364,10 @@ class RBTree : public AbstractRBTree<K, RBNode<K, V>, COMPARATOR> {
ALLOCATOR _allocator;
public:
RBTree() : BaseType(), _allocator() {}
template<typename... AllocArgs>
RBTree(AllocArgs... alloc_args);
~RBTree();
NONCOPYABLE(RBTree);
~RBTree() { remove_all(); }
bool copy_into(RBTree& other) const;
@ -471,118 +378,68 @@ public:
using BaseType::next;
using BaseType::prev;
void replace_at_cursor(RBNode<K, V>* new_node, const Cursor& node_cursor) {
RBNode<K, V>* old_node = node_cursor.node();
BaseType::replace_at_cursor(new_node, node_cursor);
free_node(old_node);
}
void replace_at_cursor(RBNode<K, V>* new_node, const Cursor& node_cursor);
RBNode<K, V>* allocate_node(const K& key) {
void* node_place = _allocator.allocate(sizeof(RBNode<K, V>));
if (node_place == nullptr) {
return nullptr;
}
return new (node_place) RBNode<K, V>(key);
}
RBNode<K, V>* allocate_node(const K& key);
RBNode<K, V>* allocate_node(const K& key, const V& val);
RBNode<K, V>* allocate_node(const K& key, const V& val) {
void* node_place = _allocator.allocate(sizeof(RBNode<K, V>));
if (node_place == nullptr) {
return nullptr;
}
return new (node_place) RBNode<K, V>(key, val);
}
void free_node(RBNode<K, V>* node) {
node->_value.~V();
_allocator.free(node);
}
void free_node(RBNode<K, V>* node);
// Inserts a node with the given key/value into the tree,
// if the key already exist, the value is updated instead.
// Returns false if and only if allocation of a new node failed.
bool upsert(const K& key, const V& val, const RBNode<K, V>* hint_node = nullptr) {
Cursor node_cursor = cursor(key, hint_node);
RBNode<K, V>* node = node_cursor.node();
if (node != nullptr) {
node->set_val(val);
return true;
}
node = allocate_node(key, val);
if (node == nullptr) {
return false;
}
insert_at_cursor(node, node_cursor);
return true;
}
bool upsert(const K& key, const V& val, const RBNode<K, V>* hint_node = nullptr);
// Finds the value of the node associated with the given key.
V* find(const K& key) {
Cursor node_cursor = cursor(key);
return node_cursor.found() ? &node_cursor.node()->_value : nullptr;
}
V* find(const K& key);
V* find(const K& key) const;
V* find(const K& key) const {
const Cursor node_cursor = cursor(key);
return node_cursor.found() ? &node_cursor.node()->_value : nullptr;
}
void remove(RBNode<K, V>* node) {
Cursor node_cursor = cursor(node);
remove_at_cursor(node_cursor);
free_node(node);
}
void remove(RBNode<K, V>* node);
// Removes the node with the given key from the tree if it exists.
// Returns true if the node was successfully removed, false otherwise.
bool remove(const K& key) {
Cursor node_cursor = cursor(key);
if (!node_cursor.found()) {
return false;
}
RBNode<K, V>* node = node_cursor.node();
remove_at_cursor(node_cursor);
free_node((RBNode<K, V>*)node);
return true;
}
bool remove(const K& key);
// Removes all existing nodes from the tree.
void remove_all() {
IntrusiveRBNode* to_delete[64];
int stack_idx = 0;
to_delete[stack_idx++] = BaseType::_root;
while (stack_idx > 0) {
IntrusiveRBNode* head = to_delete[--stack_idx];
if (head == nullptr) continue;
to_delete[stack_idx++] = head->_left;
to_delete[stack_idx++] = head->_right;
free_node((RBNode<K, V>*)head);
}
BaseType::_num_nodes = 0;
BaseType::_root = nullptr;
}
void remove_all();
};
template <MemTag mem_tag, AllocFailType strategy>
class RBTreeCHeapAllocator {
public:
void* allocate(size_t sz) {
void* allocation = os::malloc(sz, mem_tag);
if (allocation == nullptr && strategy == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR,
"red-black tree failed allocation");
}
return allocation;
}
void free(void* ptr) { os::free(ptr); }
void* allocate(size_t sz);
void free(void* ptr);
};
template <AllocFailType strategy>
class RBTreeArenaAllocator {
Arena* _arena;
public:
RBTreeArenaAllocator(Arena* arena);
void* allocate(size_t sz);
void free(void* ptr);
};
template <AllocFailType strategy>
class RBTreeResourceAreaAllocator {
ResourceArea* _rarea;
public:
RBTreeResourceAreaAllocator(ResourceArea* rarea);
void* allocate(size_t sz);
void free(void* ptr);
};
template <typename K, typename V, typename COMPARATOR, MemTag mem_tag, AllocFailType strategy = AllocFailStrategy::EXIT_OOM>
using RBTreeCHeap = RBTree<K, V, COMPARATOR, RBTreeCHeapAllocator<mem_tag, strategy>>;
template <typename K, typename V, typename COMPARATOR, AllocFailType strategy = AllocFailStrategy::EXIT_OOM>
using RBTreeArena = RBTree<K, V, COMPARATOR, RBTreeArenaAllocator<strategy>>;
template <typename K, typename V, typename COMPARATOR, AllocFailType strategy = AllocFailStrategy::EXIT_OOM>
using RBTreeResourceArea = RBTree<K, V, COMPARATOR, RBTreeResourceAreaAllocator<strategy>>;
template <typename K, typename COMPARATOR>
using IntrusiveRBTree = AbstractRBTree<K, IntrusiveRBNode, COMPARATOR>;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,12 +27,49 @@
#include "utilities/rbTree.hpp"
#include "memory/allocation.hpp"
#include "memory/arena.hpp"
#include "memory/resourceArea.hpp"
#include "metaprogramming/enableIf.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include "utilities/powerOfTwo.hpp"
inline IntrusiveRBNode::IntrusiveRBNode()
: _parent(0), _left(nullptr), _right(nullptr) DEBUG_ONLY(COMMA _visited(false)) {}
inline bool IntrusiveRBNode::is_black() const {
return (_parent & 0x1) != 0;
}
inline bool IntrusiveRBNode::is_red() const {
return (_parent & 0x1) == 0;
}
inline void IntrusiveRBNode::set_black() {
_parent |= 0x1;
}
inline void IntrusiveRBNode::set_red() {
_parent &= ~0x1;
}
inline IntrusiveRBNode* IntrusiveRBNode::parent() const {
return reinterpret_cast<IntrusiveRBNode*>(_parent & ~0x1);
}
inline void IntrusiveRBNode::set_parent(IntrusiveRBNode* new_parent) {
_parent = (_parent & 0x1) | reinterpret_cast<uintptr_t>(new_parent);
}
inline bool IntrusiveRBNode::is_right_child() const {
return parent() != nullptr && parent()->_right == this;
}
inline bool IntrusiveRBNode::is_left_child() const {
return parent() != nullptr && parent()->_left == this;
}
inline void IntrusiveRBNode::replace_child(IntrusiveRBNode* old_child, IntrusiveRBNode* new_child) {
if (_left == old_child) {
_left = new_child;
@ -185,6 +222,144 @@ inline void IntrusiveRBNode::verify(
}
template <typename K, typename V>
inline const K& RBNode<K, V>::key() const {
return _key;
}
template <typename K, typename V>
inline V& RBNode<K, V>::val() {
return _value;
}
template <typename K, typename V>
inline const V& RBNode<K, V>::val() const {
return _value;
}
template <typename K, typename V>
inline void RBNode<K, V>::set_val(const V& v) {
_value = v;
}
template <typename K, typename V>
inline RBNode<K, V>::RBNode() {}
template <typename K, typename V>
inline RBNode<K, V>::RBNode(const K& key) : IntrusiveRBNode(), _key(key) {}
template <typename K, typename V>
inline RBNode<K, V>::RBNode(const K& key, const V& val) : IntrusiveRBNode(), _key(key), _value(val) {}
template <typename K, typename V>
inline const RBNode<K, V>* RBNode<K, V>::prev() const {
return static_cast<const RBNode<K, V>*>(IntrusiveRBNode::prev());
}
template <typename K, typename V>
inline const RBNode<K, V>* RBNode<K, V>::next() const {
return static_cast<const RBNode<K, V>*>(IntrusiveRBNode::next());
}
template <typename K, typename V>
inline RBNode<K, V>* RBNode<K, V>::prev() {
return static_cast<RBNode<K, V>*>(IntrusiveRBNode::prev());
}
template <typename K, typename V>
inline RBNode<K, V>* RBNode<K, V>::next() {
return static_cast<RBNode<K, V>*>(IntrusiveRBNode::next());
}
template <typename K, typename NodeType, typename COMPARATOR>
inline AbstractRBTree<K, NodeType, COMPARATOR>::Cursor::Cursor()
: _insert_location(nullptr), _parent(nullptr) {}
template <typename K, typename NodeType, typename COMPARATOR>
inline AbstractRBTree<K, NodeType, COMPARATOR>::Cursor::Cursor(NodeType** insert_location, NodeType* parent)
: _insert_location(insert_location), _parent(parent) {}
template <typename K, typename NodeType, typename COMPARATOR>
inline AbstractRBTree<K, NodeType, COMPARATOR>::Cursor::Cursor(NodeType* const* insert_location, NodeType* parent)
: _insert_location(const_cast<NodeType**>(insert_location)), _parent(parent) {}
template <typename K, typename NodeType, typename COMPARATOR>
inline bool AbstractRBTree<K, NodeType, COMPARATOR>::Cursor::valid() const {
return _insert_location != nullptr;
}
template <typename K, typename NodeType, typename COMPARATOR>
inline bool AbstractRBTree<K, NodeType, COMPARATOR>::Cursor::found() const {
return *_insert_location != nullptr;
}
template <typename K, typename NodeType, typename COMPARATOR>
inline NodeType* AbstractRBTree<K, NodeType, COMPARATOR>::Cursor::node() {
return _insert_location == nullptr ? nullptr : *_insert_location;
}
template <typename K, typename NodeType, typename COMPARATOR>
inline NodeType* AbstractRBTree<K, NodeType, COMPARATOR>::Cursor::node() const {
return _insert_location == nullptr ? nullptr : *_insert_location;
}
template <typename K, typename NodeType, typename COMPARATOR>
inline RBTreeOrdering AbstractRBTree<K, NodeType, COMPARATOR>::cmp(const K& a, const NodeType* b) const {
if constexpr (HasNodeComparator) {
return COMPARATOR::cmp(a, b);
} else if constexpr (HasKeyComparator) {
return COMPARATOR::cmp(a, b->key());
}
}
template <typename K, typename NodeType, typename COMPARATOR>
inline bool AbstractRBTree<K, NodeType, COMPARATOR>::less_than(const NodeType* a, const NodeType* b) const {
if constexpr (HasNodeVerifier) {
return COMPARATOR::less_than(a, b);
} else {
return true;
}
}
template <typename K, typename NodeType, typename COMPARATOR>
inline void AbstractRBTree<K, NodeType, COMPARATOR>::assert_key_leq(K a, K b) const {
if constexpr (HasKeyComparator) { // Cannot assert if no key comparator exist.
assert(COMPARATOR::cmp(a, b) != RBTreeOrdering::GT, "key a must be less or equal to key b");
}
}
template <typename K, typename NodeType, typename COMPARATOR>
inline bool AbstractRBTree<K, NodeType, COMPARATOR>::is_black(const IntrusiveRBNode* node) {
return node == nullptr || node->is_black();
}
template <typename K, typename NodeType, typename COMPARATOR>
inline bool AbstractRBTree<K, NodeType, COMPARATOR>::is_red(const IntrusiveRBNode* node) {
return node != nullptr && node->is_red();
}
// Default per-node verifier: accepts every node (used when the caller
// supplies no extra invariant to verify_self).
template <typename K, typename NodeType, typename COMPARATOR>
inline bool AbstractRBTree<K, NodeType, COMPARATOR>::empty_verifier::operator()(const NodeType* n) const {
return true;
}
// Default printer: delegates to the node's own print_on, passing the node's
// depth so the node can indent itself.
template <typename K, typename NodeType, typename COMPARATOR>
inline void AbstractRBTree<K, NodeType, COMPARATOR>::default_printer::operator()(outputStream* st, const NodeType* n, int depth) const {
n->print_on(st, depth);
}
// Constructs an empty tree. The static_asserts pin the type requirements:
// keys are never explicitly destroyed, so K must be trivially destructible,
// and COMPARATOR must expose a usable cmp() over keys or over nodes.
template <typename K, typename NodeType, typename COMPARATOR>
inline AbstractRBTree<K, NodeType, COMPARATOR>::AbstractRBTree()
: _num_nodes(0), _root(nullptr) DEBUG_ONLY(COMMA _expected_visited(false)) {
static_assert(std::is_trivially_destructible<K>::value, "key type must be trivially destructable");
static_assert(HasKeyComparator || HasNodeComparator, "comparator must be of correct type");
}
// Returns the number of nodes currently in the tree.
template <typename K, typename NodeType, typename COMPARATOR>
inline size_t AbstractRBTree<K, NodeType, COMPARATOR>::size() const {
return _num_nodes;
}
template <typename K, typename NodeType, typename COMPARATOR>
inline const typename AbstractRBTree<K, NodeType, COMPARATOR>::Cursor
AbstractRBTree<K, NodeType, COMPARATOR>::cursor(const K& key, const NodeType* hint_node) const {
@ -596,6 +771,104 @@ AbstractRBTree<K, NodeType, COMPARATOR>::prev(const Cursor& node_cursor) {
return static_cast<const AbstractRBTree<K, NodeType, COMPARATOR>*>(this)->prev(node_cursor);
}
// Looks up the node for `key`; returns nullptr when absent. `hint_node` may
// point the search at a nearby node to shorten the descent.
template <typename K, typename NodeType, typename COMPARATOR>
inline NodeType* AbstractRBTree<K, NodeType, COMPARATOR>::find_node(const K& key, const NodeType* hint_node) const {
  return cursor(key, hint_node).node();
}
// Non-const overload, identical lookup.
template <typename K, typename NodeType, typename COMPARATOR>
inline NodeType* AbstractRBTree<K, NodeType, COMPARATOR>::find_node(const K& key, const NodeType* hint_node) {
  return cursor(key, hint_node).node();
}
// Inserts `node` at the position determined by `key`. `hint_node` may shorten
// the cursor search. The caller owns the node's storage; the tree only links it.
template <typename K, typename NodeType, typename COMPARATOR>
inline void AbstractRBTree<K, NodeType, COMPARATOR>::insert(const K& key, NodeType* node, const NodeType* hint_node) {
Cursor node_cursor = cursor(key, hint_node);
insert_at_cursor(node, node_cursor);
}
// Unlinks `node` from the tree. The node's storage is not freed here; the
// intrusive tree never owns node memory.
template <typename K, typename NodeType, typename COMPARATOR>
inline void AbstractRBTree<K, NodeType, COMPARATOR>::remove(NodeType* node) {
Cursor node_cursor = cursor(node);
remove_at_cursor(node_cursor);
}
// closest_leq: greatest node with key <= `key`, or nullptr if none. An exact
// hit is returned directly; otherwise the cursor's predecessor is the answer.
template <typename K, typename NodeType, typename COMPARATOR>
inline NodeType* AbstractRBTree<K, NodeType, COMPARATOR>::closest_leq(const K& key) const {
Cursor node_cursor = cursor(key);
return node_cursor.found() ? node_cursor.node() : prev(node_cursor).node();
}
// Non-const overload of closest_leq.
template <typename K, typename NodeType, typename COMPARATOR>
inline NodeType* AbstractRBTree<K, NodeType, COMPARATOR>::closest_leq(const K& key) {
Cursor node_cursor = cursor(key);
return node_cursor.found() ? node_cursor.node() : prev(node_cursor).node();
}
// closest_gt: smallest node with key strictly greater than `key`, or nullptr.
// Always steps to the cursor's successor, even on an exact hit.
template <typename K, typename NodeType, typename COMPARATOR>
inline NodeType* AbstractRBTree<K, NodeType, COMPARATOR>::closest_gt(const K& key) const {
Cursor node_cursor = cursor(key);
return next(node_cursor).node();
}
// Non-const overload of closest_gt.
template <typename K, typename NodeType, typename COMPARATOR>
inline NodeType* AbstractRBTree<K, NodeType, COMPARATOR>::closest_gt(const K& key) {
Cursor node_cursor = cursor(key);
return next(node_cursor).node();
}
// closest_ge: smallest node with key >= `key`, or nullptr. An exact hit is
// returned directly; otherwise the cursor's successor is the answer.
template <typename K, typename NodeType, typename COMPARATOR>
inline NodeType* AbstractRBTree<K, NodeType, COMPARATOR>::closest_ge(const K& key) const {
Cursor node_cursor = cursor(key);
return node_cursor.found() ? node_cursor.node() : next(node_cursor).node();
}
// Non-const overload of closest_ge.
template <typename K, typename NodeType, typename COMPARATOR>
inline NodeType* AbstractRBTree<K, NodeType, COMPARATOR>::closest_ge(const K& key) {
Cursor node_cursor = cursor(key);
return node_cursor.found() ? node_cursor.node() : next(node_cursor).node();
}
// Returns the node with the smallest key (the leftmost node), or nullptr for
// an empty tree. Walks left links from the root, remembering the last node seen.
template <typename K, typename NodeType, typename COMPARATOR>
inline const NodeType* AbstractRBTree<K, NodeType, COMPARATOR>::leftmost() const {
  IntrusiveRBNode* walker = _root;
  IntrusiveRBNode* last_seen = nullptr;
  while (walker != nullptr) {
    last_seen = walker;
    walker = walker->_left;
  }
  return static_cast<const NodeType*>(last_seen);
}
// Returns the node with the largest key (the rightmost node), or nullptr for
// an empty tree. Walks right links from the root, remembering the last node seen.
template <typename K, typename NodeType, typename COMPARATOR>
inline const NodeType* AbstractRBTree<K, NodeType, COMPARATOR>::rightmost() const {
  IntrusiveRBNode* walker = _root;
  IntrusiveRBNode* last_seen = nullptr;
  while (walker != nullptr) {
    last_seen = walker;
    walker = walker->_right;
  }
  return static_cast<const NodeType*>(last_seen);
}
// Non-const overloads: delegate to the const implementations and cast the
// constness away, avoiding duplication of the traversal logic.
template <typename K, typename NodeType, typename COMPARATOR>
inline NodeType* AbstractRBTree<K, NodeType, COMPARATOR>::leftmost() {
return const_cast<NodeType*>(static_cast<const AbstractRBTree<K, NodeType, COMPARATOR>*>(this)->leftmost());
}
template <typename K, typename NodeType, typename COMPARATOR>
inline NodeType* AbstractRBTree<K, NodeType, COMPARATOR>::rightmost() {
return const_cast<NodeType*>(static_cast<const AbstractRBTree<K, NodeType, COMPARATOR>*>(this)->rightmost());
}
// Returns the range [closest_leq(key), closest_gt(key)] bracketing `key`.
// Either endpoint may be nullptr when no node lies on that side of the key.
template <typename K, typename NodeType, typename COMPARATOR>
inline typename AbstractRBTree<K, NodeType, COMPARATOR>::Range
AbstractRBTree<K, NodeType, COMPARATOR>::find_enclosing_range(K key) const {
NodeType* start = closest_leq(key);
NodeType* end = closest_gt(key);
return Range(start, end);
}
template <typename K, typename NodeType, typename COMPARATOR>
template <typename F>
inline void AbstractRBTree<K, NodeType, COMPARATOR>::visit_in_order(F f) const {
@ -662,6 +935,18 @@ inline void AbstractRBTree<K, NodeType, COMPARATOR>::visit_range_in_order(const
}
}
// Verifies tree invariants, choosing an ordering predicate at compile time:
// a comparator-supplied node-level less_than when available, otherwise a
// key-level comparison, otherwise (node comparator without verifier) a
// vacuous predicate so only the structural red-black invariants are checked.
// `extra_verifier` is applied to every node in addition to the ordering check.
template <typename K, typename NodeType, typename COMPARATOR>
template <typename USER_VERIFIER>
inline void AbstractRBTree<K, NodeType, COMPARATOR>::verify_self(const USER_VERIFIER& extra_verifier) const {
if constexpr (HasNodeVerifier) {
verify_self([](const NodeType* a, const NodeType* b){ return COMPARATOR::less_than(a, b);}, extra_verifier);
} else if constexpr (HasKeyComparator) {
verify_self([](const NodeType* a, const NodeType* b){ return COMPARATOR::cmp(a->key(), b->key()) == RBTreeOrdering::LT; }, extra_verifier);
} else {
verify_self([](const NodeType*, const NodeType*){ return true;}, extra_verifier);
}
}
template <typename K, typename NodeType, typename COMPARATOR>
template <typename NODE_VERIFIER, typename USER_VERIFIER>
inline void AbstractRBTree<K, NodeType, COMPARATOR>::verify_self(NODE_VERIFIER verifier, const USER_VERIFIER& extra_verifier) const {
@ -753,6 +1038,15 @@ void AbstractRBTree<K, NodeType, COMPARATOR>::print_on(outputStream* st, const P
}
}
// Constructs an empty owning tree, forwarding `alloc_args` to the allocator.
template <typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
template<typename... AllocArgs>
inline RBTree<K, V, COMPARATOR, ALLOCATOR>::RBTree(AllocArgs... alloc_args) : BaseType(), _allocator(alloc_args...) {}
// Destructor frees every remaining node; unlike the intrusive base tree,
// RBTree owns its nodes' storage.
template <typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
inline RBTree<K, V, COMPARATOR, ALLOCATOR>::~RBTree() {
remove_all();
}
template<typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
bool RBTree<K, V, COMPARATOR, ALLOCATOR>::copy_into(RBTree& other) const {
assert(other.size() == 0, "You can only copy into an empty RBTree");
@ -802,4 +1096,132 @@ bool RBTree<K, V, COMPARATOR, ALLOCATOR>::copy_into(RBTree& other) const {
return true;
}
// Replaces the node at `node_cursor` with `new_node` in place, then frees the
// displaced node. The old node must be captured before the base replace
// rewires the cursor's slot to the new node.
template <typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
inline void RBTree<K, V, COMPARATOR, ALLOCATOR>::replace_at_cursor(RBNode<K, V>* new_node, const Cursor& node_cursor) {
RBNode<K, V>* old_node = node_cursor.node();
BaseType::replace_at_cursor(new_node, node_cursor);
free_node(old_node);
}
// Allocates and placement-constructs a node holding `key` with a
// default-constructed value. Returns nullptr if the allocator fails
// (possible under a non-exiting allocation strategy).
template <typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
inline RBNode<K, V>* RBTree<K, V, COMPARATOR, ALLOCATOR>::allocate_node(const K& key) {
void* node_place = _allocator.allocate(sizeof(RBNode<K, V>));
if (node_place == nullptr) {
return nullptr;
}
return new (node_place) RBNode<K, V>(key);
}
// Overload: allocates a node holding `key` with a copy of `val`.
template <typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
inline RBNode<K, V>* RBTree<K, V, COMPARATOR, ALLOCATOR>::allocate_node(const K& key, const V& val) {
void* node_place = _allocator.allocate(sizeof(RBNode<K, V>));
if (node_place == nullptr) {
return nullptr;
}
return new (node_place) RBNode<K, V>(key, val);
}
// Destroys the node's value and releases its storage. Only _value needs an
// explicit destructor call: the key type is statically required to be
// trivially destructible (see the base class constructor's static_assert).
template <typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
inline void RBTree<K, V, COMPARATOR, ALLOCATOR>::free_node(RBNode<K, V>* node) {
node->_value.~V();
_allocator.free(node);
}
// Inserts `val` under `key`, or overwrites the value if `key` already exists.
// Returns false only when a needed allocation fails; an overwrite never
// allocates. `hint_node` may shorten the cursor search.
template <typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
inline bool RBTree<K, V, COMPARATOR, ALLOCATOR>::upsert(const K& key, const V& val, const RBNode<K, V>* hint_node) {
Cursor node_cursor = cursor(key, hint_node);
RBNode<K, V>* node = node_cursor.node();
if (node != nullptr) {
node->set_val(val);
return true;
}
node = allocate_node(key, val);
if (node == nullptr) {
return false;
}
insert_at_cursor(node, node_cursor);
return true;
}
// Returns a pointer to the value stored under `key`, or nullptr if absent.
template <typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
inline V* RBTree<K, V, COMPARATOR, ALLOCATOR>::find(const K& key) {
Cursor node_cursor = cursor(key);
return node_cursor.found() ? &node_cursor.node()->_value : nullptr;
}
// Const overload.
// NOTE(review): returning a mutable V* from a const member weakens
// const-correctness; a `const V*` return would be stricter — confirm callers.
template <typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
inline V* RBTree<K, V, COMPARATOR, ALLOCATOR>::find(const K& key) const {
const Cursor node_cursor = cursor(key);
return node_cursor.found() ? &node_cursor.node()->_value : nullptr;
}
// Unlinks `node` from the tree and frees it. Unlike the intrusive base
// remove, this owning variant also releases the node's storage.
template <typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
inline void RBTree<K, V, COMPARATOR, ALLOCATOR>::remove(RBNode<K, V>* node) {
Cursor node_cursor = cursor(node);
remove_at_cursor(node_cursor);
free_node(node);
}
// Removes and frees the node stored under `key`, if present.
// Returns true when a node was removed, false when the key was absent.
template <typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
inline bool RBTree<K, V, COMPARATOR, ALLOCATOR>::remove(const K& key) {
  Cursor node_cursor = cursor(key);
  if (!node_cursor.found()) {
    return false;
  }
  // Capture the node before removal rewires the cursor's slot.
  RBNode<K, V>* node = node_cursor.node();
  remove_at_cursor(node_cursor);
  // `node` is already an RBNode<K, V>*; the former C-style cast was redundant.
  free_node(node);
  return true;
}
// Frees every node using an explicit pre-order traversal stack (no recursion),
// then resets the tree to empty. Each node's value destructor runs via
// free_node before its children are visited is not required — children are
// pushed before the parent is freed, so no freed memory is dereferenced.
// NOTE(review): the stack is a fixed 64 entries. Since both children are
// pushed per popped node, the stack depth is bounded by tree height + 1;
// confirm that 64 covers the maximum red-black height this tree can reach.
template <typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
inline void RBTree<K, V, COMPARATOR, ALLOCATOR>::remove_all() {
IntrusiveRBNode* to_delete[64];
int stack_idx = 0;
to_delete[stack_idx++] = BaseType::_root;
while (stack_idx > 0) {
IntrusiveRBNode* head = to_delete[--stack_idx];
if (head == nullptr) continue;
to_delete[stack_idx++] = head->_left;
to_delete[stack_idx++] = head->_right;
free_node((RBNode<K, V>*)head);
}
BaseType::_num_nodes = 0;
BaseType::_root = nullptr;
}
// C-heap-backed allocator: allocation failure behavior (null return vs VM
// exit) is governed by the `strategy` template parameter.
template <MemTag mem_tag, AllocFailType strategy>
inline void* RBTreeCHeapAllocator<mem_tag, strategy>::allocate(size_t sz) {
return AllocateHeap(sz, mem_tag, strategy);
}
template <MemTag mem_tag, AllocFailType strategy>
inline void RBTreeCHeapAllocator<mem_tag, strategy>::free(void* ptr) {
FreeHeap(ptr);
}
// Arena-backed allocator: individual frees are no-ops; storage is reclaimed
// when the arena itself is destroyed.
template <AllocFailType strategy>
inline RBTreeArenaAllocator<strategy>::RBTreeArenaAllocator(Arena* arena) : _arena(arena) {}
template <AllocFailType strategy>
inline void* RBTreeArenaAllocator<strategy>::allocate(size_t sz) {
return _arena->Amalloc(sz, strategy);
}
template <AllocFailType strategy>
inline void RBTreeArenaAllocator<strategy>::free(void* ptr) { /* NOP */ }
// Resource-area-backed allocator: same no-op free discipline as the arena
// allocator; storage lives until the resource mark is released.
template <AllocFailType strategy>
inline RBTreeResourceAreaAllocator<strategy>::RBTreeResourceAreaAllocator(ResourceArea* rarea) : _rarea(rarea) {}
template <AllocFailType strategy>
inline void* RBTreeResourceAreaAllocator<strategy>::allocate(size_t sz) {
return _rarea->Amalloc(sz, strategy);
}
template <AllocFailType strategy>
inline void RBTreeResourceAreaAllocator<strategy>::free(void* ptr) { /* NOP */ }
#endif // SHARE_UTILITIES_RBTREE_INLINE_HPP

View File

@ -3561,4 +3561,50 @@ final class FdLibm {
return hx > 0 ? w : -w;
}
}
/**
* Return the Inverse Hyperbolic Cosine of x
*
* Method :
*
*
* acosh(x) is defined so that acosh(cosh(alpha)) = alpha, -&infin; &lt; alpha &lt; &infin;
* and cosh(acosh(x)) = x, 1 <= x &lt; &infin;.
* It can be written as acosh(x) = log(x + sqrt(x^2 - 1)), 1 <= x &lt; &infin;.
* acosh(x) := log(x)+ln2, if x is large; else
* := log(2x-1/(sqrt(x*x-1)+x)) if x&gt;2; else
* := log1p(t+sqrt(2.0*t+t*t)); where t=x-1.
*
*
*
* Special cases:
* acosh(x) is NaN with signal if x < 1.
* acosh(NaN) is NaN without signal.
*/
// Transliteration of fdlibm's e_acosh.c; see the method comment above for
// the algorithm and special cases. Must stay bit-exact with the C original.
static final class Acosh {
private static final double ln2 = 6.93147180559945286227e-01;
static double compute(double x) {
double t;
int hx;
hx = __HI(x);
if (hx < 0x3ff0_0000) { // x < 1
return (x - x) / (x - x); // NaN, signaling invalid
} else if (hx >= 0x41b0_0000) { // x > 2**28
if (hx >= 0x7ff0_0000) { // x is inf or NaN
return x + x;
} else {
return Log.compute(x) + ln2; // acosh(huge) = log(2x)
}
} else if (((hx - 0x3ff0_0000) | __LO(x)) == 0) {
return 0.0; // acosh(1) = 0
} else if (hx > 0x4000_0000) { // 2**28 > x > 2
t = x * x;
return Log.compute(2.0 * x - 1.0 / (x + Sqrt.compute(t - 1.0)));
} else { // 1 < x < 2
t = x - 1.0;
return Log1p.compute(t + Sqrt.compute(2.0 * t + t * t));
}
}
}
}

View File

@ -109,10 +109,10 @@ import static java.lang.Double.*;
* acos acos}, {@link atan atan}, {@link exp exp}, {@link expm1
* expm1}, {@link log log}, {@link log10 log10}, {@link log1p log1p},
* {@link sinh sinh}, {@link cosh cosh}, {@link tanh tanh}, {@link asinh asinh},
* {@link hypot hypot}, and {@link pow pow}. (The {@link sqrt sqrt}
* operation is a required part of IEEE 754 from a different section
* of the standard.) The special case behavior of the recommended
* operations generally follows the guidance of the IEEE 754
* {@link acosh acosh}, {@link hypot hypot}, and {@link pow pow}.
* (The {@link sqrt sqrt} operation is a required part of IEEE 754
* from a different section of the standard.) The special case behavior
* of the recommended operations generally follows the guidance of the IEEE 754
* standard. However, the {@code pow} method defines different
* behavior for some arguments, as noted in its {@linkplain pow
* specification}. The IEEE 754 standard defines its operations to be
@ -2785,6 +2785,35 @@ public final class Math {
return StrictMath.asinh(x);
}
/**
* Returns the inverse hyperbolic cosine of a {@code double} value.
* The inverse hyperbolic cosine of <i>x</i> is defined to be the function such that
* acosh({@linkplain Math#cosh cosh(<i>x</i>)}) = <i>x</i> for any <i>x</i> >= 0.
* Note that the range of the exact acosh(x) is >= 0.
* <p>Special cases:
* <ul>
*
* <li>If the argument is positive infinity, then the result is
* positive infinity.
*
* <li>If the argument is less than 1, then the result is NaN.
*
* <li>If the argument is NaN, then the result is NaN.
*
* <li>If the argument is {@code 1.0}, then the result is positive zero.
*
* </ul>
* <p>The computed result must be within 2.5 ulps of the exact result.
* @param x The number whose inverse hyperbolic cosine is to be returned.
* @return The inverse hyperbolic cosine of {@code x}.
* @since 27
*/
public static double acosh(double x) {
return StrictMath.acosh(x);
}
/**
* Returns sqrt(<i>x</i><sup>2</sup>&nbsp;+<i>y</i><sup>2</sup>)
* without intermediate overflow or underflow.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -930,6 +930,12 @@ public final class ProcessBuilder
* command interpreters, or the standard C library function
* {@code system()}.
*
* @implNote
* When the process is {@link #start started},
* if {@code System.out} and/or {@code System.err} have been
* closed in the current process, the corresponding output
* in the subprocess will be discarded.
*
* @return this process builder
* @since 1.7
*/

View File

@ -76,8 +76,8 @@ import jdk.internal.vm.annotation.IntrinsicCandidate;
* {@code exp}, {@code log}, {@code log10},
* {@code cbrt}, {@code atan2}, {@code pow},
* {@code sinh}, {@code cosh}, {@code tanh},
* {@code asinh}, {@code hypot}, {@code expm1},
* and {@code log1p}.
* {@code asinh}, {@code acosh}, {@code hypot},
* {@code expm1}, and {@code log1p}.
*
* <p>
* The platform uses signed two's complement integer arithmetic with
@ -2196,6 +2196,32 @@ public final class StrictMath {
return FdLibm.Asinh.compute(x);
}
/**
* Returns the inverse hyperbolic cosine of a {@code double} value.
* The inverse hyperbolic cosine of <i>x</i> is defined to be the function such that
* acosh({@linkplain Math#cosh cosh(<i>x</i>)}) = <i>x</i> for any <i>x</i> >= 0.
* Note that the range of the exact acosh(x) is >= 0.
* <p>Special cases:
* <ul>
*
* <li>If the argument is positive infinity, then the result is
* positive infinity.
*
* <li>If the argument is less than {@code 1.0}, then the result is NaN.
*
* <li>If the argument is NaN, then the result is NaN.
*
* <li>If the argument is {@code 1.0}, then the result is positive zero.
*
* </ul>
* @param x The number whose inverse hyperbolic cosine is to be returned.
* @return The inverse hyperbolic cosine of {@code x}.
* @since 27
*/
public static double acosh(double x) {
return FdLibm.Acosh.compute(x);
}
/**
* Returns sqrt(<i>x</i><sup>2</sup>&nbsp;+<i>y</i><sup>2</sup>)
* without intermediate overflow or underflow.

View File

@ -195,10 +195,9 @@ public class BasicImageReader implements AutoCloseable {
}
if (result.getMajorVersion() != ImageHeader.MAJOR_VERSION ||
result.getMinorVersion() != ImageHeader.MINOR_VERSION) {
throw new IOException("The image file \"" + name + "\" is not " +
"the correct version. Major: " + result.getMajorVersion() +
". Minor: " + result.getMinorVersion());
result.getMinorVersion() != ImageHeader.MINOR_VERSION) {
throw new ImageVersionMismatchException(
name, result.getMajorVersion(), result.getMinorVersion());
}
return result;
@ -447,4 +446,14 @@ public class BasicImageReader implements AutoCloseable {
return new ByteArrayInputStream(bytes);
}
/**
 * Thrown when a jimage file's header reports a major/minor version other
 * than the one this reader understands.
 */
public static final class ImageVersionMismatchException extends IOException {
    // @java.io.Serial is the standard marker for serialization-mechanism
    // fields; @Deprecated was misapplied here.
    @java.io.Serial
    private static final long serialVersionUID = 1L;

    // If needed we could capture major/minor version for use by JImageTask.
    ImageVersionMismatchException(String name, int majorVersion, int minorVersion) {
        super("The image file \"" + name + "\" is not the correct version. " +
              "Major: " + majorVersion + ". Minor: " + minorVersion);
    }
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -88,4 +88,18 @@ final class WeakReferenceKey<T> extends WeakReference<T> implements ReferenceKey
public String toString() {
return this.getClass().getCanonicalName() + "#" + System.identityHashCode(this);
}
// WeakReferenceKey.equals() is usually executed in the AOT assembly phase. However,
// on some rare occasions it's not executed (due to peculiarity of hash code and
// memory addressing??). As a result, the constant pool entries used by
// equals() are not resolved.
//
// The JVM calls ensureDeterministicAOTCache() during the AOT assembly phase to ensure
// that the constant pool entries used by equals() are resolved, so that
// the JDK's default CDS archives have deterministic contents.
private static boolean ensureDeterministicAOTCache() {
WeakReferenceKey<String> k1 = new WeakReferenceKey<>("1", null);
WeakReferenceKey<String> k2 = new WeakReferenceKey<>("2", null);
return k1.equals(k2);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1995, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1995, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -119,6 +119,12 @@ final class ProcessImpl extends Process {
stdHandles[1] = -1L;
} else if (redirects[1] == Redirect.INHERIT) {
stdHandles[1] = fdAccess.getHandle(FileDescriptor.out);
if (stdHandles[1] == -1L) {
// FileDescriptor.out has been closed.
f1 = newFileOutputStream(Redirect.DISCARD.file(),
Redirect.DISCARD.append());
stdHandles[1] = fdAccess.getHandle(f1.getFD());
}
} else if (redirects[1] instanceof ProcessBuilder.RedirectPipeImpl) {
stdHandles[1] = fdAccess.getHandle(((ProcessBuilder.RedirectPipeImpl) redirects[1]).getFd());
// Force getInputStream to return a null stream,
@ -134,6 +140,12 @@ final class ProcessImpl extends Process {
stdHandles[2] = -1L;
} else if (redirects[2] == Redirect.INHERIT) {
stdHandles[2] = fdAccess.getHandle(FileDescriptor.err);
if (stdHandles[2] == -1L) {
// FileDescriptor.err has been closed.
f2 = newFileOutputStream(Redirect.DISCARD.file(),
Redirect.DISCARD.append());
stdHandles[2] = fdAccess.getHandle(f2.getFD());
}
} else if (redirects[2] instanceof ProcessBuilder.RedirectPipeImpl) {
stdHandles[2] = fdAccess.getHandle(((ProcessBuilder.RedirectPipeImpl) redirects[2]).getFd());
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,7 +64,6 @@ import javax.swing.JComponent;
import sun.awt.AWTAccessor;
import sun.awt.AWTAccessor.ComponentAccessor;
import sun.awt.AppContext;
import sun.awt.CGraphicsDevice;
import sun.awt.DisplayChangedListener;
import sun.awt.ExtendedKeyCodes;
@ -1236,9 +1235,7 @@ public class LWWindowPeer
return false;
}
AppContext targetAppContext = AWTAccessor.getComponentAccessor().getAppContext(getTarget());
KeyboardFocusManager kfm = AWTAccessor.getKeyboardFocusManagerAccessor()
.getCurrentKeyboardFocusManager(targetAppContext);
KeyboardFocusManager kfm = KeyboardFocusManager.getCurrentKeyboardFocusManager();
Window currentActive = kfm.getActiveWindow();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1030,13 +1030,19 @@ final class CAccessibility implements PropertyChangeListener {
}
if (!allowIgnored) {
final AccessibleRole role = context.getAccessibleRole();
if (role != null && ignoredRoles != null && ignoredRoles.contains(roleKey(role))) {
// Get the child's unignored children.
_addChildren(child, whichChildren, false, childrenAndRoles, ChildrenOperations.COMMON);
} else {
childrenAndRoles.add(child);
childrenAndRoles.add(getAccessibleRole(child));
// If a Component isn't showing then it should be classified as
// "ignored", and we should skip it and its descendants
if (isShowing(context)) {
final AccessibleRole role = context.getAccessibleRole();
if (role != null && ignoredRoles != null &&
ignoredRoles.contains(roleKey(role))) {
// Get the child's unignored children.
_addChildren(child, whichChildren, false,
childrenAndRoles, ChildrenOperations.COMMON);
} else {
childrenAndRoles.add(child);
childrenAndRoles.add(getAccessibleRole(child));
}
}
} else {
childrenAndRoles.add(child);
@ -1050,6 +1056,46 @@ final class CAccessibility implements PropertyChangeListener {
}
}
/**
 * Determines whether an AccessibleContext is showing.
 * <p>
 * {@link AccessibleComponent#isShowing()} is consulted first when an
 * AccessibleComponent is available. Failing that, the AccessibleStateSet is
 * checked for {@link AccessibleState#SHOWING}. When neither source settles
 * the question, the context is assumed showing and the decision is deferred
 * to its accessible parent, recursively; a missing parent (or parent
 * context) yields {@code true}.
 *
 * @return {@code false} only when the context is determined not to be showing
 */
private static boolean isShowing(final AccessibleContext context) {
    final AccessibleComponent component = context.getAccessibleComponent();
    if (component != null) {
        return component.isShowing();
    }
    final AccessibleStateSet states = context.getAccessibleStateSet();
    if (states != null && states.contains(AccessibleState.SHOWING)) {
        return true;
    }
    // No AccessibleComponent, and the state set (if any) carried no useful
    // visibility info. When in doubt: assume showing and ask the parent.
    final Accessible parent = context.getAccessibleParent();
    if (parent == null) {
        return true;
    }
    final AccessibleContext parentContext = parent.getAccessibleContext();
    return parentContext == null || isShowing(parentContext);
}
private static native String roleKey(AccessibleRole aRole);
public static Object[] getChildren(final Accessible a, final Component c) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -136,7 +136,7 @@ final class CClipboard extends SunClipboard {
void checkPasteboardAndNotify() {
if (checkPasteboardWithoutNotification()) {
notifyChanged();
lostOwnershipNow(null);
lostOwnershipNow();
}
}

View File

@ -36,8 +36,6 @@ import javax.swing.UIManager;
import javax.swing.plaf.ComponentUI;
import javax.swing.plaf.basic.BasicRadioButtonUI;
import sun.awt.AppContext;
/**
* RadioButtonUI implementation for MotifRadioButtonUI
*
@ -45,7 +43,7 @@ import sun.awt.AppContext;
*/
public class MotifRadioButtonUI extends BasicRadioButtonUI {
private static final Object MOTIF_RADIO_BUTTON_UI_KEY = new Object();
private static final ComponentUI UI = new MotifRadioButtonUI();
protected Color focusColor;
@ -55,14 +53,7 @@ public class MotifRadioButtonUI extends BasicRadioButtonUI {
// Create PLAF
// ********************************
public static ComponentUI createUI(JComponent c) {
AppContext appContext = AppContext.getAppContext();
MotifRadioButtonUI motifRadioButtonUI =
(MotifRadioButtonUI) appContext.get(MOTIF_RADIO_BUTTON_UI_KEY);
if (motifRadioButtonUI == null) {
motifRadioButtonUI = new MotifRadioButtonUI();
appContext.put(MOTIF_RADIO_BUTTON_UI_KEY, motifRadioButtonUI);
}
return motifRadioButtonUI;
return UI;
}
// ********************************

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1995, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1995, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -7938,7 +7938,7 @@ public abstract class Component implements ImageObserver, MenuContainer,
(this, temporary, focusedWindowChangeAllowed, time, cause);
if (!success) {
KeyboardFocusManager.getCurrentKeyboardFocusManager
(appContext).dequeueKeyEvents(time, this);
().dequeueKeyEvents(time, this);
if (focusLog.isLoggable(PlatformLogger.Level.FINEST)) {
focusLog.finest("Peer request failed");
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,7 +38,6 @@ import java.util.ListIterator;
import java.util.Set;
import sun.awt.AWTAccessor;
import sun.awt.AppContext;
import sun.awt.SunToolkit;
import sun.awt.TimedWindowEvent;
import sun.util.logging.PlatformLogger;
@ -231,9 +230,8 @@ public class DefaultKeyboardFocusManager extends KeyboardFocusManager {
@Serial
private static final long serialVersionUID = -2924743257508701758L;
public DefaultKeyboardFocusManagerSentEvent(AWTEvent nested,
AppContext toNotify) {
super(nested, toNotify);
public DefaultKeyboardFocusManagerSentEvent(AWTEvent nested) {
super(nested);
}
public final void dispatch() {
KeyboardFocusManager manager =
@ -260,76 +258,12 @@ public class DefaultKeyboardFocusManager extends KeyboardFocusManager {
}
/**
* Sends a synthetic AWTEvent to a Component. If the Component is in
* the current AppContext, then the event is immediately dispatched.
* If the Component is in a different AppContext, then the event is
* posted to the other AppContext's EventQueue, and this method blocks
* until the event is handled or target AppContext is disposed.
* Returns true if successfully dispatched event, false if failed
* to dispatch.
* Sends a synthetic AWTEvent to a Component.
*/
static boolean sendMessage(Component target, AWTEvent e) {
e.isPosted = true;
AppContext myAppContext = AppContext.getAppContext();
final AppContext targetAppContext = target.appContext;
final SentEvent se =
new DefaultKeyboardFocusManagerSentEvent(e, myAppContext);
if (myAppContext == targetAppContext) {
se.dispatch();
} else {
if (targetAppContext.isDisposed()) {
return false;
}
SunToolkit.postEvent(targetAppContext, se);
if (EventQueue.isDispatchThread()) {
if (Thread.currentThread() instanceof EventDispatchThread) {
EventDispatchThread edt = (EventDispatchThread)
Thread.currentThread();
edt.pumpEvents(SentEvent.ID, new Conditional() {
public boolean evaluate() {
return !se.dispatched && !targetAppContext.isDisposed();
}
});
} else {
if (fxAppThreadIsDispatchThread) {
Thread fxCheckDispatchThread = new Thread() {
@Override
public void run() {
while (!se.dispatched && !targetAppContext.isDisposed()) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
break;
}
}
}
};
fxCheckDispatchThread.start();
try {
// check if event is dispatched or disposed
// but since this user app thread is same as
// dispatch thread in fx when run with
// javafx.embed.singleThread=true
// we do not wait infinitely to avoid deadlock
// as dispatch will ultimately be done by this thread
fxCheckDispatchThread.join(500);
} catch (InterruptedException ex) {
}
}
}
} else {
synchronized (se) {
while (!se.dispatched && !targetAppContext.isDisposed()) {
try {
se.wait(1000);
} catch (InterruptedException ie) {
break;
}
}
}
}
}
final SentEvent se = new DefaultKeyboardFocusManagerSentEvent(e);
se.dispatch();
return se.dispatched;
}
@ -356,7 +290,7 @@ public class DefaultKeyboardFocusManager extends KeyboardFocusManager {
// Check that the component awaiting focus belongs to
// the current focused window. See 8015454.
if (toplevel != null && toplevel.isFocused()) {
SunToolkit.postEvent(AppContext.getAppContext(), new SequencedEvent(e));
SunToolkit.postEvent(new SequencedEvent(e));
return true;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,7 +50,6 @@ import java.util.WeakHashMap;
import sun.util.logging.PlatformLogger;
import sun.awt.AppContext;
import sun.awt.SunToolkit;
import sun.awt.KeyboardFocusManagerPeerProvider;
import sun.awt.AWTAccessor;
@ -127,9 +126,6 @@ public abstract class KeyboardFocusManager
public void setMostRecentFocusOwner(Window window, Component component) {
KeyboardFocusManager.setMostRecentFocusOwner(window, component);
}
public KeyboardFocusManager getCurrentKeyboardFocusManager(AppContext ctx) {
return KeyboardFocusManager.getCurrentKeyboardFocusManager(ctx);
}
public Container getCurrentFocusCycleRoot() {
return KeyboardFocusManager.currentFocusCycleRoot;
}
@ -183,53 +179,40 @@ public abstract class KeyboardFocusManager
static final int TRAVERSAL_KEY_LENGTH = DOWN_CYCLE_TRAVERSAL_KEYS + 1;
private static KeyboardFocusManager manager;
/**
* Returns the current KeyboardFocusManager instance for the calling
* thread's context.
* Returns the current KeyboardFocusManager instance
*
* @return this thread's context's KeyboardFocusManager
* @return the current KeyboardFocusManager
* @see #setCurrentKeyboardFocusManager
*/
public static KeyboardFocusManager getCurrentKeyboardFocusManager() {
return getCurrentKeyboardFocusManager(AppContext.getAppContext());
}
static synchronized KeyboardFocusManager
getCurrentKeyboardFocusManager(AppContext appcontext)
{
KeyboardFocusManager manager = (KeyboardFocusManager)
appcontext.get(KeyboardFocusManager.class);
public static synchronized KeyboardFocusManager getCurrentKeyboardFocusManager() {
if (manager == null) {
manager = new DefaultKeyboardFocusManager();
appcontext.put(KeyboardFocusManager.class, manager);
}
return manager;
}
/**
* Sets the current KeyboardFocusManager instance for the calling thread's
* context. If null is specified, then the current KeyboardFocusManager
* Sets the current KeyboardFocusManager instance.
* If null is specified, then the current KeyboardFocusManager
* is replaced with a new instance of DefaultKeyboardFocusManager.
*
* @param newManager the new KeyboardFocusManager for this thread's context
* @param newManager the new KeyboardFocusManager
* @see #getCurrentKeyboardFocusManager
* @see DefaultKeyboardFocusManager
*/
public static void setCurrentKeyboardFocusManager(KeyboardFocusManager newManager) {
KeyboardFocusManager oldManager = null;
KeyboardFocusManager oldManager = manager;
if (newManager == null) {
newManager = new DefaultKeyboardFocusManager();
}
synchronized (KeyboardFocusManager.class) {
AppContext appcontext = AppContext.getAppContext();
if (newManager != null) {
oldManager = getCurrentKeyboardFocusManager(appcontext);
appcontext.put(KeyboardFocusManager.class, newManager);
} else {
oldManager = getCurrentKeyboardFocusManager(appcontext);
appcontext.remove(KeyboardFocusManager.class);
}
manager = newManager;
}
if (oldManager != null) {
@ -344,7 +327,7 @@ public abstract class KeyboardFocusManager
private static java.util.Map<Window, WeakReference<Component>> mostRecentFocusOwners = new WeakHashMap<>();
/*
* SequencedEvent which is currently dispatched in AppContext.
* SequencedEvent which is currently dispatched.
*/
transient SequencedEvent currentSequencedEvent = null;
@ -431,13 +414,7 @@ public abstract class KeyboardFocusManager
*/
public Component getFocusOwner() {
synchronized (KeyboardFocusManager.class) {
if (focusOwner == null) {
return null;
}
return (focusOwner.appContext == AppContext.getAppContext())
? focusOwner
: null;
return focusOwner;
}
}
@ -599,42 +576,32 @@ public abstract class KeyboardFocusManager
}
/**
* Returns the permanent focus owner, if the permanent focus owner is in
* the same context as the calling thread. The permanent focus owner is
* Returns the permanent focus owner. The permanent focus owner is
* defined as the last Component in an application to receive a permanent
* FOCUS_GAINED event. The focus owner and permanent focus owner are
* equivalent unless a temporary focus change is currently in effect. In
* such a situation, the permanent focus owner will again be the focus
* owner when the temporary focus change ends.
*
* @return the permanent focus owner, or null if the permanent focus owner
* is not a member of the calling thread's context
* @return the permanent focus owner, or null if there is none
* @see #getGlobalPermanentFocusOwner
* @see #setGlobalPermanentFocusOwner
*/
public Component getPermanentFocusOwner() {
synchronized (KeyboardFocusManager.class) {
if (permanentFocusOwner == null) {
return null;
}
return (permanentFocusOwner.appContext ==
AppContext.getAppContext())
? permanentFocusOwner
: null;
return permanentFocusOwner;
}
}
/**
* Returns the permanent focus owner, even if the calling thread is in a
* different context than the permanent focus owner. The permanent focus
* Returns the permanent focus owner. The permanent focus
* owner is defined as the last Component in an application to receive a
* permanent FOCUS_GAINED event. The focus owner and permanent focus owner
* are equivalent unless a temporary focus change is currently in effect.
* In such a situation, the permanent focus owner will again be the focus
* owner when the temporary focus change ends.
*
* @return the permanent focus owner
* @return the permanent focus owner, or null if there is none
* @see #getPermanentFocusOwner
* @see #setGlobalPermanentFocusOwner
*/
@ -701,24 +668,16 @@ public abstract class KeyboardFocusManager
}
/**
* Returns the focused Window, if the focused Window is in the same context
* as the calling thread. The focused Window is the Window that is or
* contains the focus owner.
* Returns the focused Window.
* The focused Window is the Window that is or contains the focus owner.
*
* @return the focused Window, or null if the focused Window is not a
* member of the calling thread's context
* @return the focused Window, or null if there is none
* @see #getGlobalFocusedWindow
* @see #setGlobalFocusedWindow
*/
public Window getFocusedWindow() {
synchronized (KeyboardFocusManager.class) {
if (focusedWindow == null) {
return null;
}
return (focusedWindow.appContext == AppContext.getAppContext())
? focusedWindow
: null;
return focusedWindow;
}
}
@ -785,27 +744,19 @@ public abstract class KeyboardFocusManager
}
/**
* Returns the active Window, if the active Window is in the same context
* as the calling thread. Only a Frame or a Dialog can be the active
* Returns the active Window. Only a Frame or a Dialog can be the active
* Window. The native windowing system may denote the active Window or its
* children with special decorations, such as a highlighted title bar.
* The active Window is always either the focused Window, or the first
* Frame or Dialog that is an owner of the focused Window.
*
* @return the active Window, or null if the active Window is not a member
* of the calling thread's context
* @return the active Window, or null if there is none
* @see #getGlobalActiveWindow
* @see #setGlobalActiveWindow
*/
public Window getActiveWindow() {
synchronized (KeyboardFocusManager.class) {
if (activeWindow == null) {
return null;
}
return (activeWindow.appContext == AppContext.getAppContext())
? activeWindow
: null;
return activeWindow;
}
}
@ -1100,14 +1051,7 @@ public abstract class KeyboardFocusManager
*/
public Container getCurrentFocusCycleRoot() {
synchronized (KeyboardFocusManager.class) {
if (currentFocusCycleRoot == null) {
return null;
}
return (currentFocusCycleRoot.appContext ==
AppContext.getAppContext())
? currentFocusCycleRoot
: null;
return currentFocusCycleRoot;
}
}
@ -2159,7 +2103,7 @@ public abstract class KeyboardFocusManager
descendant = heavyweight;
}
KeyboardFocusManager manager = getCurrentKeyboardFocusManager(SunToolkit.targetToAppContext(descendant));
KeyboardFocusManager manager = getCurrentKeyboardFocusManager();
FocusEvent currentFocusOwnerEvent = null;
FocusEvent newFocusOwnerEvent = null;
@ -2268,8 +2212,7 @@ public abstract class KeyboardFocusManager
descendant = heavyweight;
}
KeyboardFocusManager manager =
getCurrentKeyboardFocusManager(SunToolkit.targetToAppContext(descendant));
KeyboardFocusManager manager = getCurrentKeyboardFocusManager();
KeyboardFocusManager thisManager = getCurrentKeyboardFocusManager();
Component currentFocusOwner = thisManager.getGlobalFocusOwner();
Component nativeFocusOwner = thisManager.getNativeFocusOwner();
@ -2484,16 +2427,6 @@ public abstract class KeyboardFocusManager
KeyboardFocusManager manager = getCurrentKeyboardFocusManager();
LinkedList<LightweightFocusRequest> localLightweightRequests = null;
Component globalFocusOwner = manager.getGlobalFocusOwner();
if ((globalFocusOwner != null) &&
(globalFocusOwner.appContext != AppContext.getAppContext()))
{
// The current app context differs from the app context of a focus
// owner (and all pending lightweight requests), so we do nothing
// now and wait for a next event.
return;
}
synchronized(heavyweightRequests) {
if (currentLightweightRequests != null) {
clearingCurrentLightweightRequests = true;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +32,6 @@ import java.beans.PropertyChangeSupport;
import java.util.Vector;
import sun.awt.AWTAccessor;
import sun.awt.AppContext;
import sun.awt.HeadlessToolkit;
import sun.awt.SunToolkit;
@ -213,6 +212,8 @@ public class SystemTray {
}
}
private Vector<TrayIcon> icons;
/**
* Adds a {@code TrayIcon} to the {@code SystemTray}.
* The tray icon becomes visible in the system tray once it is
@ -240,15 +241,10 @@ public class SystemTray {
}
TrayIcon[] oldArray;
TrayIcon[] newArray;
Vector<TrayIcon> icons;
synchronized (this) {
oldArray = systemTray.getTrayIcons();
@SuppressWarnings("unchecked")
Vector<TrayIcon> tmp = (Vector<TrayIcon>)AppContext.getAppContext().get(TrayIcon.class);
icons = tmp;
if (icons == null) {
icons = new Vector<>(3);
AppContext.getAppContext().put(TrayIcon.class, icons);
} else if (icons.contains(trayIcon)) {
throw new IllegalArgumentException("adding TrayIcon that is already added");
@ -291,8 +287,6 @@ public class SystemTray {
TrayIcon[] newArray;
synchronized (this) {
oldArray = systemTray.getTrayIcons();
@SuppressWarnings("unchecked")
Vector<TrayIcon> icons = (Vector<TrayIcon>)AppContext.getAppContext().get(TrayIcon.class);
// TrayIcon with no peer is not contained in the array.
if (icons == null || !icons.remove(trayIcon)) {
return;
@ -320,12 +314,12 @@ public class SystemTray {
* @see TrayIcon
*/
public TrayIcon[] getTrayIcons() {
@SuppressWarnings("unchecked")
Vector<TrayIcon> icons = (Vector<TrayIcon>)AppContext.getAppContext().get(TrayIcon.class);
if (icons != null) {
return icons.toArray(EMPTY_TRAY_ARRAY);
synchronized (this) {
if (icons != null) {
return icons.toArray(EMPTY_TRAY_ARRAY);
}
return EMPTY_TRAY_ARRAY;
}
return EMPTY_TRAY_ARRAY;
}
/**
@ -374,8 +368,6 @@ public class SystemTray {
* </tbody>
* </table>
* <p>
* The {@code listener} listens to property changes only in this context.
* <p>
* If {@code listener} is {@code null}, no exception is thrown
* and no action is performed.
*
@ -398,8 +390,6 @@ public class SystemTray {
* Removes a {@code PropertyChangeListener} from the listener list
* for a specific property.
* <p>
* The {@code PropertyChangeListener} must be from this context.
* <p>
* If {@code propertyName} or {@code listener} is {@code null} or invalid,
* no exception is thrown and no action is taken.
*
@ -421,8 +411,6 @@ public class SystemTray {
/**
* Returns an array of all the listeners that have been associated
* with the named property.
* <p>
* Only the listeners in this context are returned.
*
* @param propertyName the specified property
* @return all of the {@code PropertyChangeListener}s associated with
@ -461,19 +449,16 @@ public class SystemTray {
getCurrentChangeSupport().firePropertyChange(propertyName, oldValue, newValue);
}
private PropertyChangeSupport changeSupport;
/**
* Returns the current PropertyChangeSupport instance for the
* calling thread's context.
* Returns the current PropertyChangeSupport instance
*
* @return this thread's context's PropertyChangeSupport
* @return the current PropertyChangeSupport for this {@code SystemTray}
*/
private synchronized PropertyChangeSupport getCurrentChangeSupport() {
PropertyChangeSupport changeSupport =
(PropertyChangeSupport)AppContext.getAppContext().get(SystemTray.class);
if (changeSupport == null) {
changeSupport = new PropertyChangeSupport(this);
AppContext.getAppContext().put(SystemTray.class, changeSupport);
}
return changeSupport;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,6 @@ package java.awt;
import java.awt.event.*;
import java.awt.peer.TrayIconPeer;
import sun.awt.AppContext;
import sun.awt.SunToolkit;
import sun.awt.AWTAccessor;
import sun.awt.HeadlessToolkit;
@ -126,7 +125,7 @@ public class TrayIcon {
if (!SystemTray.isSupported()) {
throw new UnsupportedOperationException();
}
SunToolkit.insertTargetMapping(this, AppContext.getAppContext());
SunToolkit.insertTargetMapping(this);
}
/**

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
package java.awt.geom;
import java.awt.Rectangle;
import java.io.IOException;
import java.io.Serial;
import java.io.Serializable;
@ -1052,19 +1053,7 @@ public abstract class Arc2D extends RectangularShape {
}
/**
* Returns the high-precision framing rectangle of the arc. The framing
* rectangle contains only the part of this {@code Arc2D} that is
* in between the starting and ending angles and contains the pie
* wedge, if this {@code Arc2D} has a {@code PIE} closure type.
* <p>
* This method differs from the
* {@link RectangularShape#getBounds() getBounds} in that the
* {@code getBounds} method only returns the bounds of the
* enclosing ellipse of this {@code Arc2D} without considering
* the starting and ending angles of this {@code Arc2D}.
*
* @return the {@code Rectangle2D} that represents the arc's
* framing rectangle.
* {@inheritDoc java.awt.Shape}
* @since 1.2
*/
public Rectangle2D getBounds2D() {
@ -1110,6 +1099,15 @@ public abstract class Arc2D extends RectangularShape {
return makeBounds(x1, y1, x2, y2);
}
/**
* {@inheritDoc java.awt.Shape}
* @since 1.2
*/
@Override
public Rectangle getBounds() {
return getBounds2D().getBounds();
}
/**
* Constructs a {@code Rectangle2D} of the appropriate precision
* to hold the parameters calculated to be the framing rectangle

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1996, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -118,6 +118,7 @@ public class BeanDescriptor extends FeatureDescriptor {
customizerClassRef = old.customizerClassRef;
}
@Override
void appendTo(StringBuilder sb) {
appendTo(sb, "beanClass", this.beanClassRef);
appendTo(sb, "customizerClass", this.customizerClassRef);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -340,6 +340,7 @@ class ObjectInputStreamWithLoader extends ObjectInputStream
/**
* Use the given ClassLoader rather than using the system class
*/
@Override
@SuppressWarnings("rawtypes")
protected Class resolveClass(ObjectStreamClass classDesc)
throws IOException, ClassNotFoundException {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -130,6 +130,7 @@ public class DefaultPersistenceDelegate extends PersistenceDelegate {
*
* @see #DefaultPersistenceDelegate(String[])
*/
@Override
protected boolean mutatesTo(Object oldInstance, Object newInstance) {
// Assume the instance is either mutable or a singleton
// if it has a nullary constructor.
@ -153,6 +154,7 @@ public class DefaultPersistenceDelegate extends PersistenceDelegate {
*
* @see #DefaultPersistenceDelegate(String[])
*/
@Override
protected Expression instantiate(Object oldInstance, Encoder out) {
int nArgs = constructor.length;
Class<?> type = oldInstance.getClass();
@ -393,6 +395,7 @@ public class DefaultPersistenceDelegate extends PersistenceDelegate {
* @see java.beans.Introspector#getBeanInfo
* @see java.beans.PropertyDescriptor
*/
@Override
protected void initialize(Class<?> type,
Object oldInstance, Object newInstance,
Encoder out)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -415,6 +415,7 @@ public class EventHandler implements InvocationHandler {
*
* @see EventHandler
*/
@Override
public Object invoke(final Object proxy, final Method method, final Object[] arguments) {
String methodName = method.getName();
if (method.getDeclaringClass() == Object.class) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1996, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -536,6 +536,7 @@ public class EventSetDescriptor extends FeatureDescriptor {
inDefaultEventSet = old.inDefaultEventSet;
}
@Override
void appendTo(StringBuilder sb) {
appendTo(sb, "unicast", this.unicast);
appendTo(sb, "inDefaultEventSet", this.inDefaultEventSet);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -169,6 +169,7 @@ public class Expression extends Statement {
this.value = value;
}
@Override
/*pp*/ String instanceName(Object instance) {
return instance == unbound ? "<unbound>" : super.instanceName(instance);
}
@ -176,6 +177,7 @@ public class Expression extends Statement {
/**
* Prints the value of this expression using a Java-style syntax.
*/
@Override
public String toString() {
return instanceName(value) + "=" + super.toString();
}

Some files were not shown because too many files have changed in this diff Show More