Mirror of https://github.com/openjdk/jdk.git (synced 2026-01-28 12:09:14 +00:00)

Commit cf75fad676: Merge branch 'master' into 8366736-closed-system-out-causes-child-process-to-hang-on-windows
@@ -1109,11 +1109,11 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
   lhz(R11_scratch1, in_bytes(DataLayout::bci_offset()), R28_mdx);
   ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
   addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
-  add(R11_scratch1, R12_scratch2, R12_scratch2);
+  add(R11_scratch1, R11_scratch1, R12_scratch2);
   cmpd(CR0, R11_scratch1, R14_bcp);
   beq(CR0, verify_continue);

-  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);
+  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), R19_method, R14_bcp, R28_mdx);

   bind(verify_continue);
 #endif
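For readers who do not read PPC assembly, here is the invariant the hunk above re-checks, spelled out in plain C++ using only the offsets that appear in the diff; the helper name and raw-pointer plumbing are mine, not HotSpot's.

// Sketch only (not HotSpot code): the check verify_method_data_pointer() performs.
// bcp must equal: *(Method + const_offset) + codes_offset + bci-taken-from-the-MDO-slot.
bool mdp_matches_bcp(address method, address mdx, address bcp) {
  uint16_t bci    = *reinterpret_cast<uint16_t*>(mdx + in_bytes(DataLayout::bci_offset()));   // lhz
  address  consts = *reinterpret_cast<address*>(method + in_bytes(Method::const_offset()));   // ld
  return consts + in_bytes(ConstMethod::codes_offset()) + bci == bcp;                         // addi + add + cmpd
}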
@@ -4535,7 +4535,7 @@ void MacroAssembler::push_cont_fastpath() {
   Label done;
   ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
   cmpld(CR0, R1_SP, R0);
-  ble(CR0, done);
+  ble(CR0, done); // if (SP <= _cont_fastpath) goto done;
   st_ptr(R1_SP, JavaThread::cont_fastpath_offset(), R16_thread);
   bind(done);
 }
@@ -4546,7 +4546,7 @@ void MacroAssembler::pop_cont_fastpath() {
   Label done;
   ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
   cmpld(CR0, R1_SP, R0);
-  ble(CR0, done);
+  blt(CR0, done); // if (SP < _cont_fastpath) goto done;
   li(R0, 0);
   st_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
   bind(done);
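Read as C++, the two stubs amount to the following; this is an illustrative sketch based only on the assembly and the comments added in the diff, with a made-up struct standing in for JavaThread.

// Sketch only, not HotSpot code: the logic the two PPC stubs encode.
struct ThreadSketch { uintptr_t cont_fastpath; };   // stand-in for JavaThread::_cont_fastpath

void push_cont_fastpath(ThreadSketch* t, uintptr_t sp) {
  if (sp <= t->cont_fastpath) return;   // ble(CR0, done): already armed at or above SP
  t->cont_fastpath = sp;                // st_ptr(R1_SP, cont_fastpath_offset, R16_thread)
}

void pop_cont_fastpath(ThreadSketch* t, uintptr_t sp) {
  if (sp < t->cont_fastpath) return;    // blt(CR0, done): the tightened comparison
  t->cont_fastpath = 0;                 // li(R0, 0); st_ptr(R0, ...)
}

Note that the pop change only affects the boundary case SP == _cont_fastpath, which now falls through and clears the field instead of leaving it set.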
@@ -167,11 +167,6 @@ void VM_Version::common_initialize() {
                      (unaligned_scalar.value() == MISALIGNED_SCALAR_FAST));
   }

-  if (FLAG_IS_DEFAULT(AlignVector)) {
-    FLAG_SET_DEFAULT(AlignVector,
-                     unaligned_vector.value() != MISALIGNED_VECTOR_FAST);
-  }
-
 #ifdef __riscv_ztso
   // Hotspot is compiled with TSO support, it will only run on hardware which
   // supports Ztso
@@ -242,6 +237,11 @@ void VM_Version::c2_initialize() {
     }
   }

+  if (FLAG_IS_DEFAULT(AlignVector)) {
+    FLAG_SET_DEFAULT(AlignVector,
+                     unaligned_vector.value() != MISALIGNED_VECTOR_FAST);
+  }
+
   // NOTE: Make sure codes dependent on UseRVV are put after MaxVectorSize initialize,
   // as there are extra checks inside it which could disable UseRVV
   // in some situations.
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2024, 2025, Intel Corporation. All rights reserved.
+ * Copyright (c) 2024, 2026, Intel Corporation. All rights reserved.
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1330,10 +1330,12 @@ static void big_case_loop_helper(bool sizeKnown, int size, Label &noMatch, Label
   // Clarification: The BYTE_K compare above compares haystack[(n-32):(n-1)]. We need to
   // compare haystack[(k-1):(k-1+31)]. Subtracting either index gives shift value of
   // (k + 31 - n): x = (k-1+31)-(n-1) = k-1+31-n+1 = k+31-n.
+  // When isU is set, similarly, shift is from haystack[(n-32):(n-1)] to [(k-2):(k-2+31)]
+
   if (sizeKnown) {
-    __ movl(temp2, 31 + size);
+    __ movl(temp2, (isU ? 30 : 31) + size);
   } else {
-    __ movl(temp2, 31);
+    __ movl(temp2, isU ? 30 : 31);
     __ addl(temp2, needleLen);
   }
   __ subl(temp2, hsLength);
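The comment's algebra, checked with concrete numbers; the values of k and n below are chosen purely for illustration and are not taken from the commit.

// Sketch only: the shift the movl/subl lines compute, as a constexpr.
// Here k stands for the size/needleLen operand and n for hsLength,
// matching the comment above.
constexpr int big_case_shift(bool isU, int k, int n) {
  return (isU ? 30 : 31) + k - n;   // movl(temp2, (isU ? 30 : 31) + ...); subl(temp2, hsLength)
}
static_assert(big_case_shift(false, 80, 100) == 11, "k + 31 - n");
static_assert(big_case_shift(true,  80, 100) == 10, "k + 30 - n for the isU case");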
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -6086,7 +6086,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
     vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);

     subptr(count, 16 << shift);
-    jccb(Assembler::less, L_check_fill_32_bytes);
+    jcc(Assembler::less, L_check_fill_32_bytes);
     align(16);

     BIND(L_fill_64_bytes_loop_avx3);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,14 +32,10 @@ const KRegister::KRegisterImpl all_KRegisterImpls [KRegister::number_

 const char * Register::RegisterImpl::name() const {
   static const char *const names[number_of_registers] = {
-#ifdef _LP64
     "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
     "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
     "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
     "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
-#else
-    "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
-#endif // _LP64
   };
   return is_valid() ? names[encoding()] : "noreg";
 }
@@ -54,11 +50,9 @@ const char* FloatRegister::FloatRegisterImpl::name() const {
 const char* XMMRegister::XMMRegisterImpl::name() const {
   static const char *const names[number_of_registers] = {
     "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
-#ifdef _LP64
     ,"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
     ,"xmm16", "xmm17", "xmm18", "xmm19", "xmm20", "xmm21", "xmm22", "xmm23"
     ,"xmm24", "xmm25", "xmm26", "xmm27", "xmm28", "xmm29", "xmm30", "xmm31"
-#endif // _LP64
   };
   return is_valid() ? names[encoding()] : "xnoreg";
 }
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -34,7 +34,7 @@
|
||||
class VMRegImpl;
|
||||
typedef VMRegImpl* VMReg;
|
||||
|
||||
// The implementation of integer registers for the x86/x64 architectures.
|
||||
// The implementation of integer registers for the x64 architectures.
|
||||
class Register {
|
||||
private:
|
||||
int _encoding;
|
||||
@ -44,11 +44,9 @@ private:
|
||||
public:
|
||||
inline friend constexpr Register as_Register(int encoding);
|
||||
|
||||
enum {
|
||||
number_of_registers = LP64_ONLY( 32 ) NOT_LP64( 8 ),
|
||||
number_of_byte_registers = LP64_ONLY( 32 ) NOT_LP64( 4 ),
|
||||
max_slots_per_register = LP64_ONLY( 2 ) NOT_LP64( 1 )
|
||||
};
|
||||
static const int number_of_registers = 32;
|
||||
static const int number_of_byte_registers = 32;
|
||||
static const int max_slots_per_register = 2;
|
||||
|
||||
class RegisterImpl: public AbstractRegisterImpl {
|
||||
friend class Register;
|
||||
@ -79,11 +77,9 @@ public:
|
||||
|
||||
// Actually available GP registers for use, depending on actual CPU capabilities and flags.
|
||||
static int available_gp_registers() {
|
||||
#ifdef _LP64
|
||||
if (!UseAPX) {
|
||||
return number_of_registers / 2;
|
||||
}
|
||||
#endif // _LP64
|
||||
return number_of_registers;
|
||||
}
|
||||
};
|
||||
@ -116,9 +112,8 @@ constexpr Register rsp = as_Register(4);
|
||||
constexpr Register rbp = as_Register(5);
|
||||
constexpr Register rsi = as_Register(6);
|
||||
constexpr Register rdi = as_Register(7);
|
||||
#ifdef _LP64
|
||||
constexpr Register r8 = as_Register( 8);
|
||||
constexpr Register r9 = as_Register( 9);
|
||||
constexpr Register r8 = as_Register(8);
|
||||
constexpr Register r9 = as_Register(9);
|
||||
constexpr Register r10 = as_Register(10);
|
||||
constexpr Register r11 = as_Register(11);
|
||||
constexpr Register r12 = as_Register(12);
|
||||
@ -141,7 +136,6 @@ constexpr Register r28 = as_Register(28);
|
||||
constexpr Register r29 = as_Register(29);
|
||||
constexpr Register r30 = as_Register(30);
|
||||
constexpr Register r31 = as_Register(31);
|
||||
#endif // _LP64
|
||||
|
||||
|
||||
// The implementation of x87 floating point registers for the ia32 architecture.
|
||||
@ -154,10 +148,8 @@ private:
|
||||
public:
|
||||
inline friend constexpr FloatRegister as_FloatRegister(int encoding);
|
||||
|
||||
enum {
|
||||
number_of_registers = 8,
|
||||
max_slots_per_register = 2
|
||||
};
|
||||
static const int number_of_registers = 8;
|
||||
static const int max_slots_per_register = 2;
|
||||
|
||||
class FloatRegisterImpl: public AbstractRegisterImpl {
|
||||
friend class FloatRegister;
|
||||
@ -217,10 +209,8 @@ private:
|
||||
public:
|
||||
inline friend constexpr XMMRegister as_XMMRegister(int encoding);
|
||||
|
||||
enum {
|
||||
number_of_registers = LP64_ONLY( 32 ) NOT_LP64( 8 ),
|
||||
max_slots_per_register = LP64_ONLY( 16 ) NOT_LP64( 16 ) // 512-bit
|
||||
};
|
||||
static const int number_of_registers = 32;
|
||||
static const int max_slots_per_register = 16; // 512-bit
|
||||
|
||||
class XMMRegisterImpl: public AbstractRegisterImpl {
|
||||
friend class XMMRegister;
|
||||
@ -250,11 +240,9 @@ public:
|
||||
|
||||
// Actually available XMM registers for use, depending on actual CPU capabilities and flags.
|
||||
static int available_xmm_registers() {
|
||||
#ifdef _LP64
|
||||
if (UseAVX < 3) {
|
||||
return number_of_registers / 2;
|
||||
}
|
||||
#endif // _LP64
|
||||
return number_of_registers;
|
||||
}
|
||||
};
|
||||
@ -287,7 +275,6 @@ constexpr XMMRegister xmm4 = as_XMMRegister( 4);
|
||||
constexpr XMMRegister xmm5 = as_XMMRegister( 5);
|
||||
constexpr XMMRegister xmm6 = as_XMMRegister( 6);
|
||||
constexpr XMMRegister xmm7 = as_XMMRegister( 7);
|
||||
#ifdef _LP64
|
||||
constexpr XMMRegister xmm8 = as_XMMRegister( 8);
|
||||
constexpr XMMRegister xmm9 = as_XMMRegister( 9);
|
||||
constexpr XMMRegister xmm10 = as_XMMRegister(10);
|
||||
@ -312,7 +299,6 @@ constexpr XMMRegister xmm28 = as_XMMRegister(28);
|
||||
constexpr XMMRegister xmm29 = as_XMMRegister(29);
|
||||
constexpr XMMRegister xmm30 = as_XMMRegister(30);
|
||||
constexpr XMMRegister xmm31 = as_XMMRegister(31);
|
||||
#endif // _LP64
|
||||
|
||||
|
||||
// The implementation of AVX-512 opmask registers.
|
||||
@ -394,25 +380,17 @@ constexpr KRegister k7 = as_KRegister(7);
|
||||
// Define a class that exports it.
|
||||
class ConcreteRegisterImpl : public AbstractRegisterImpl {
|
||||
public:
|
||||
enum {
|
||||
max_gpr = Register::number_of_registers * Register::max_slots_per_register,
|
||||
max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register,
|
||||
max_xmm = max_fpr + XMMRegister::number_of_registers * XMMRegister::max_slots_per_register,
|
||||
max_kpr = max_xmm + KRegister::number_of_registers * KRegister::max_slots_per_register,
|
||||
static const int max_gpr = Register::number_of_registers * Register::max_slots_per_register;
|
||||
static const int max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
|
||||
static const int max_xmm = max_fpr + XMMRegister::number_of_registers * XMMRegister::max_slots_per_register;
|
||||
static const int max_kpr = max_xmm + KRegister::number_of_registers * KRegister::max_slots_per_register;
|
||||
|
||||
// A big enough number for C2: all the registers plus flags
|
||||
// This number must be large enough to cover REG_COUNT (defined by c2) registers.
|
||||
// There is no requirement that any ordering here matches any ordering c2 gives
|
||||
// it's optoregs.
|
||||
|
||||
// x86_32.ad defines additional dummy FILL0-FILL7 registers, in order to tally
|
||||
// REG_COUNT (computed by ADLC based on the number of reg_defs seen in .ad files)
|
||||
// with ConcreteRegisterImpl::number_of_registers additional count of 8 is being
|
||||
// added for 32 bit jvm.
|
||||
number_of_registers = max_kpr + // gpr/fpr/xmm/kpr
|
||||
NOT_LP64( 8 + ) // FILL0-FILL7 in x86_32.ad
|
||||
1 // eflags
|
||||
};
|
||||
static const int number_of_registers = max_kpr + // gpr/fpr/xmm/kpr
|
||||
1; // eflags
|
||||
};
|
||||
|
||||
template <>
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -64,6 +64,39 @@ static address kyberAvx512ConstsAddr(int offset) {
|
||||
|
||||
const Register scratch = r10;
|
||||
|
||||
ATTRIBUTE_ALIGNED(64) static const uint8_t kyberAvx512_12To16Dup[] = {
|
||||
// 0 - 63
|
||||
0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15, 16,
|
||||
16, 17, 18, 19, 19, 20, 21, 22, 22, 23, 24, 25, 25, 26, 27, 28, 28, 29, 30,
|
||||
31, 31, 32, 33, 34, 34, 35, 36, 37, 37, 38, 39, 40, 40, 41, 42, 43, 43, 44,
|
||||
45, 46, 46, 47
|
||||
};
|
||||
|
||||
static address kyberAvx512_12To16DupAddr() {
|
||||
return (address) kyberAvx512_12To16Dup;
|
||||
}
|
||||
|
||||
ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512_12To16Shift[] = {
|
||||
// 0 - 31
|
||||
0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0,
|
||||
4, 0, 4, 0, 4, 0, 4
|
||||
};
|
||||
|
||||
static address kyberAvx512_12To16ShiftAddr() {
|
||||
return (address) kyberAvx512_12To16Shift;
|
||||
}
|
||||
|
||||
ATTRIBUTE_ALIGNED(64) static const uint64_t kyberAvx512_12To16And[] = {
|
||||
// 0 - 7
|
||||
0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
|
||||
0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
|
||||
0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF
|
||||
};
|
||||
|
||||
static address kyberAvx512_12To16AndAddr() {
|
||||
return (address) kyberAvx512_12To16And;
|
||||
}
|
||||
|
||||
ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512NttPerms[] = {
|
||||
// 0
|
||||
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
|
||||
@ -822,10 +855,65 @@ address generate_kyber12To16_avx512(StubGenerator *stubgen,
|
||||
|
||||
const Register perms = r11;
|
||||
|
||||
Label Loop;
|
||||
Label Loop, VBMILoop;
|
||||
|
||||
__ addptr(condensed, condensedOffs);
|
||||
|
||||
if (VM_Version::supports_avx512_vbmi()) {
|
||||
// mask load for the first 48 bytes of each vector
|
||||
__ mov64(rax, 0x0000FFFFFFFFFFFF);
|
||||
__ kmovql(k1, rax);
|
||||
|
||||
__ lea(perms, ExternalAddress(kyberAvx512_12To16DupAddr()));
|
||||
__ evmovdqub(xmm20, Address(perms), Assembler::AVX_512bit);
|
||||
|
||||
__ lea(perms, ExternalAddress(kyberAvx512_12To16ShiftAddr()));
|
||||
__ evmovdquw(xmm21, Address(perms), Assembler::AVX_512bit);
|
||||
|
||||
__ lea(perms, ExternalAddress(kyberAvx512_12To16AndAddr()));
|
||||
__ evmovdquq(xmm22, Address(perms), Assembler::AVX_512bit);
|
||||
|
||||
__ align(OptoLoopAlignment);
|
||||
__ BIND(VBMILoop);
|
||||
|
||||
__ evmovdqub(xmm0, k1, Address(condensed, 0), false,
|
||||
Assembler::AVX_512bit);
|
||||
__ evmovdqub(xmm1, k1, Address(condensed, 48), false,
|
||||
Assembler::AVX_512bit);
|
||||
__ evmovdqub(xmm2, k1, Address(condensed, 96), false,
|
||||
Assembler::AVX_512bit);
|
||||
__ evmovdqub(xmm3, k1, Address(condensed, 144), false,
|
||||
Assembler::AVX_512bit);
|
||||
|
||||
__ evpermb(xmm4, k0, xmm20, xmm0, false, Assembler::AVX_512bit);
|
||||
__ evpermb(xmm5, k0, xmm20, xmm1, false, Assembler::AVX_512bit);
|
||||
__ evpermb(xmm6, k0, xmm20, xmm2, false, Assembler::AVX_512bit);
|
||||
__ evpermb(xmm7, k0, xmm20, xmm3, false, Assembler::AVX_512bit);
|
||||
|
||||
__ evpsrlvw(xmm4, xmm4, xmm21, Assembler::AVX_512bit);
|
||||
__ evpsrlvw(xmm5, xmm5, xmm21, Assembler::AVX_512bit);
|
||||
__ evpsrlvw(xmm6, xmm6, xmm21, Assembler::AVX_512bit);
|
||||
__ evpsrlvw(xmm7, xmm7, xmm21, Assembler::AVX_512bit);
|
||||
|
||||
__ evpandq(xmm0, xmm22, xmm4, Assembler::AVX_512bit);
|
||||
__ evpandq(xmm1, xmm22, xmm5, Assembler::AVX_512bit);
|
||||
__ evpandq(xmm2, xmm22, xmm6, Assembler::AVX_512bit);
|
||||
__ evpandq(xmm3, xmm22, xmm7, Assembler::AVX_512bit);
|
||||
|
||||
store4regs(parsed, 0, xmm0_3, _masm);
|
||||
|
||||
__ addptr(condensed, 192);
|
||||
__ addptr(parsed, 256);
|
||||
__ subl(parsedLength, 128);
|
||||
__ jcc(Assembler::greater, VBMILoop);
|
||||
|
||||
__ leave(); // required for proper stackwalking of RuntimeStub frame
|
||||
__ mov64(rax, 0); // return 0
|
||||
__ ret(0);
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
__ lea(perms, ExternalAddress(kyberAvx512_12To16PermsAddr()));
|
||||
|
||||
load4regs(xmm24_27, perms, 0, _masm);
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -32,9 +32,7 @@ void VMRegImpl::set_regName() {
|
||||
int i;
|
||||
for (i = 0; i < ConcreteRegisterImpl::max_gpr ; ) {
|
||||
regName[i++] = reg->name();
|
||||
#ifdef AMD64
|
||||
regName[i++] = reg->name();
|
||||
#endif // AMD64
|
||||
reg = reg->successor();
|
||||
}
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -52,14 +52,8 @@ inline bool is_KRegister() {
|
||||
}
|
||||
|
||||
inline Register as_Register() {
|
||||
|
||||
assert( is_Register(), "must be");
|
||||
// Yuk
|
||||
#ifdef AMD64
|
||||
assert(is_Register(), "must be");
|
||||
return ::as_Register(value() >> 1);
|
||||
#else
|
||||
return ::as_Register(value());
|
||||
#endif // AMD64
|
||||
}
|
||||
|
||||
inline FloatRegister as_FloatRegister() {
|
||||
@ -82,9 +76,6 @@ inline KRegister as_KRegister() {
|
||||
|
||||
inline bool is_concrete() {
|
||||
assert(is_reg(), "must be");
|
||||
#ifndef AMD64
|
||||
if (is_Register()) return true;
|
||||
#endif // AMD64
|
||||
// Do not use is_XMMRegister() here as it depends on the UseAVX setting.
|
||||
if (value() >= ConcreteRegisterImpl::max_fpr && value() < ConcreteRegisterImpl::max_xmm) {
|
||||
int base = value() - ConcreteRegisterImpl::max_fpr;
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -26,7 +26,7 @@
|
||||
#define CPU_X86_VMREG_X86_INLINE_HPP
|
||||
|
||||
inline VMReg Register::RegisterImpl::as_VMReg() const {
|
||||
return VMRegImpl::as_VMReg(encoding() LP64_ONLY( << 1 ));
|
||||
return VMRegImpl::as_VMReg(encoding() << 1);
|
||||
}
|
||||
|
||||
inline VMReg FloatRegister::FloatRegisterImpl::as_VMReg() const {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -258,10 +258,18 @@ bool os::free_memory(physical_memory_size_type& value) {
|
||||
return Aix::available_memory(value);
|
||||
}
|
||||
|
||||
bool os::Machine::free_memory(physical_memory_size_type& value) {
|
||||
return Aix::available_memory(value);
|
||||
}
|
||||
|
||||
bool os::available_memory(physical_memory_size_type& value) {
|
||||
return Aix::available_memory(value);
|
||||
}
|
||||
|
||||
bool os::Machine::available_memory(physical_memory_size_type& value) {
|
||||
return Aix::available_memory(value);
|
||||
}
|
||||
|
||||
bool os::Aix::available_memory(physical_memory_size_type& value) {
|
||||
os::Aix::meminfo_t mi;
|
||||
if (os::Aix::get_meminfo(&mi)) {
|
||||
@ -273,6 +281,10 @@ bool os::Aix::available_memory(physical_memory_size_type& value) {
|
||||
}
|
||||
|
||||
bool os::total_swap_space(physical_memory_size_type& value) {
|
||||
return Machine::total_swap_space(value);
|
||||
}
|
||||
|
||||
bool os::Machine::total_swap_space(physical_memory_size_type& value) {
|
||||
perfstat_memory_total_t memory_info;
|
||||
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
|
||||
return false;
|
||||
@ -282,6 +294,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
|
||||
}
|
||||
|
||||
bool os::free_swap_space(physical_memory_size_type& value) {
|
||||
return Machine::free_swap_space(value);
|
||||
}
|
||||
|
||||
bool os::Machine::free_swap_space(physical_memory_size_type& value) {
|
||||
perfstat_memory_total_t memory_info;
|
||||
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
|
||||
return false;
|
||||
@ -294,6 +310,10 @@ physical_memory_size_type os::physical_memory() {
|
||||
return Aix::physical_memory();
|
||||
}
|
||||
|
||||
physical_memory_size_type os::Machine::physical_memory() {
|
||||
return Aix::physical_memory();
|
||||
}
|
||||
|
||||
size_t os::rss() { return (size_t)0; }
|
||||
|
||||
// Cpu architecture string
|
||||
@ -2264,6 +2284,10 @@ int os::active_processor_count() {
|
||||
return ActiveProcessorCount;
|
||||
}
|
||||
|
||||
return Machine::active_processor_count();
|
||||
}
|
||||
|
||||
int os::Machine::active_processor_count() {
|
||||
int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
|
||||
assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
|
||||
return online_cpus;
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -137,10 +137,18 @@ bool os::available_memory(physical_memory_size_type& value) {
|
||||
return Bsd::available_memory(value);
|
||||
}
|
||||
|
||||
bool os::Machine::available_memory(physical_memory_size_type& value) {
|
||||
return Bsd::available_memory(value);
|
||||
}
|
||||
|
||||
bool os::free_memory(physical_memory_size_type& value) {
|
||||
return Bsd::available_memory(value);
|
||||
}
|
||||
|
||||
bool os::Machine::free_memory(physical_memory_size_type& value) {
|
||||
return Bsd::available_memory(value);
|
||||
}
|
||||
|
||||
// Available here means free. Note that this number is of no much use. As an estimate
|
||||
// for future memory pressure it is far too conservative, since MacOS will use a lot
|
||||
// of unused memory for caches, and return it willingly in case of needs.
|
||||
@ -181,6 +189,10 @@ void os::Bsd::print_uptime_info(outputStream* st) {
|
||||
}
|
||||
|
||||
bool os::total_swap_space(physical_memory_size_type& value) {
|
||||
return Machine::total_swap_space(value);
|
||||
}
|
||||
|
||||
bool os::Machine::total_swap_space(physical_memory_size_type& value) {
|
||||
#if defined(__APPLE__)
|
||||
struct xsw_usage vmusage;
|
||||
size_t size = sizeof(vmusage);
|
||||
@ -195,6 +207,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
|
||||
}
|
||||
|
||||
bool os::free_swap_space(physical_memory_size_type& value) {
|
||||
return Machine::free_swap_space(value);
|
||||
}
|
||||
|
||||
bool os::Machine::free_swap_space(physical_memory_size_type& value) {
|
||||
#if defined(__APPLE__)
|
||||
struct xsw_usage vmusage;
|
||||
size_t size = sizeof(vmusage);
|
||||
@ -212,6 +228,10 @@ physical_memory_size_type os::physical_memory() {
|
||||
return Bsd::physical_memory();
|
||||
}
|
||||
|
||||
physical_memory_size_type os::Machine::physical_memory() {
|
||||
return Bsd::physical_memory();
|
||||
}
|
||||
|
||||
size_t os::rss() {
|
||||
size_t rss = 0;
|
||||
#ifdef __APPLE__
|
||||
@ -2189,6 +2209,10 @@ int os::active_processor_count() {
|
||||
return ActiveProcessorCount;
|
||||
}
|
||||
|
||||
return Machine::active_processor_count();
|
||||
}
|
||||
|
||||
int os::Machine::active_processor_count() {
|
||||
return _processor_count;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -631,22 +631,20 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
  * return:
  *    true if there were no errors. false otherwise.
  */
-bool CgroupSubsystem::active_processor_count(int& value) {
-  int cpu_count;
-  int result = -1;
-
+bool CgroupSubsystem::active_processor_count(double& value) {
   // We use a cache with a timeout to avoid performing expensive
   // computations in the event this function is called frequently.
   // [See 8227006].
-  CachingCgroupController<CgroupCpuController>* contrl = cpu_controller();
-  CachedMetric* cpu_limit = contrl->metrics_cache();
+  CachingCgroupController<CgroupCpuController, double>* contrl = cpu_controller();
+  CachedMetric<double>* cpu_limit = contrl->metrics_cache();
   if (!cpu_limit->should_check_metric()) {
-    value = (int)cpu_limit->value();
-    log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", value);
+    value = cpu_limit->value();
+    log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %.2f", value);
     return true;
   }

-  cpu_count = os::Linux::active_processor_count();
+  int cpu_count = os::Linux::active_processor_count();
+  double result = -1;
   if (!CgroupUtil::processor_count(contrl->controller(), cpu_count, result)) {
     return false;
   }
@@ -671,8 +669,8 @@ bool CgroupSubsystem::active_processor_count(int& value) {
  */
 bool CgroupSubsystem::memory_limit_in_bytes(physical_memory_size_type upper_bound,
                                             physical_memory_size_type& value) {
-  CachingCgroupController<CgroupMemoryController>* contrl = memory_controller();
-  CachedMetric* memory_limit = contrl->metrics_cache();
+  CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* contrl = memory_controller();
+  CachedMetric<physical_memory_size_type>* memory_limit = contrl->metrics_cache();
   if (!memory_limit->should_check_metric()) {
     value = memory_limit->value();
     return true;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -181,9 +181,10 @@ class CgroupController: public CHeapObj<mtInternal> {
     static bool limit_from_str(char* limit_str, physical_memory_size_type& value);
 };

+template <typename MetricType>
 class CachedMetric : public CHeapObj<mtInternal>{
   private:
-    volatile physical_memory_size_type _metric;
+    volatile MetricType _metric;
     volatile jlong _next_check_counter;
   public:
     CachedMetric() {
@@ -193,8 +194,8 @@ class CachedMetric : public CHeapObj<mtInternal>{
     bool should_check_metric() {
       return os::elapsed_counter() > _next_check_counter;
     }
-    physical_memory_size_type value() { return _metric; }
-    void set_value(physical_memory_size_type value, jlong timeout) {
+    MetricType value() { return _metric; }
+    void set_value(MetricType value, jlong timeout) {
       _metric = value;
       // Metric is unlikely to change, but we want to remain
       // responsive to configuration changes. A very short grace time
@@ -205,19 +206,19 @@ class CachedMetric : public CHeapObj<mtInternal>{
   }
 };

-template <class T>
+template <class T, typename MetricType>
 class CachingCgroupController : public CHeapObj<mtInternal> {
   private:
     T* _controller;
-    CachedMetric* _metrics_cache;
+    CachedMetric<MetricType>* _metrics_cache;

   public:
     CachingCgroupController(T* cont) {
       _controller = cont;
-      _metrics_cache = new CachedMetric();
+      _metrics_cache = new CachedMetric<MetricType>();
     }

-    CachedMetric* metrics_cache() { return _metrics_cache; }
+    CachedMetric<MetricType>* metrics_cache() { return _metrics_cache; }
     T* controller() { return _controller; }
 };

@@ -277,7 +278,7 @@ class CgroupMemoryController: public CHeapObj<mtInternal> {
 class CgroupSubsystem: public CHeapObj<mtInternal> {
   public:
     bool memory_limit_in_bytes(physical_memory_size_type upper_bound, physical_memory_size_type& value);
-    bool active_processor_count(int& value);
+    bool active_processor_count(double& value);

     virtual bool pids_max(uint64_t& value) = 0;
     virtual bool pids_current(uint64_t& value) = 0;
@@ -286,8 +287,8 @@ class CgroupSubsystem: public CHeapObj<mtInternal> {
     virtual char * cpu_cpuset_cpus() = 0;
     virtual char * cpu_cpuset_memory_nodes() = 0;
     virtual const char * container_type() = 0;
-    virtual CachingCgroupController<CgroupMemoryController>* memory_controller() = 0;
-    virtual CachingCgroupController<CgroupCpuController>* cpu_controller() = 0;
+    virtual CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() = 0;
+    virtual CachingCgroupController<CgroupCpuController, double>* cpu_controller() = 0;
     virtual CgroupCpuacctController* cpuacct_controller() = 0;

     bool cpu_quota(int& value);
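A minimal sketch of how the now-templated cache is used; only the CachedMetric<...> interface comes from the hunks above, while the helper, its arguments and the tick count are illustrative stand-ins.

// Sketch only: exercising CachedMetric<double> as declared above.
// CachedMetric<double> carries the fractional CPU limit directly, where the
// old, untemplated CachedMetric forced every metric through
// physical_memory_size_type.
template <typename RecomputeFn>
double cached_cpu_limit(CachedMetric<double>* cache, RecomputeFn recompute, jlong timeout_ticks) {
  if (!cache->should_check_metric()) {
    return cache->value();                  // cache still fresh: reuse the stored double
  }
  double result = recompute();              // e.g. via CgroupUtil::processor_count(...)
  cache->set_value(result, timeout_ticks);  // timeout presumably in os::elapsed_counter() ticks, per should_check_metric()
  return result;
}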
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2024, 2025, Red Hat, Inc.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +26,8 @@
 #include "cgroupUtil_linux.hpp"
 #include "os_linux.hpp"

-bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, int& value) {
+bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, double& value) {
   assert(upper_bound > 0, "upper bound of cpus must be positive");
-  int limit_count = upper_bound;
   int quota = -1;
   int period = -1;
   if (!cpu_ctrl->cpu_quota(quota)) {
@@ -37,20 +37,15 @@ bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound,
     return false;
   }
-  int quota_count = 0;
-  int result = upper_bound;
+  double result = upper_bound;

-  if (quota > -1 && period > 0) {
-    quota_count = ceilf((float)quota / (float)period);
-    log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count);
+  if (quota > 0 && period > 0) { // Use quotas
+    double cpu_quota = static_cast<double>(quota) / period;
+    log_trace(os, container)("CPU Quota based on quota/period: %.2f", cpu_quota);
+    result = MIN2(result, cpu_quota);
   }

-  // Use quotas
-  if (quota_count != 0) {
-    limit_count = quota_count;
-  }
-
-  result = MIN2(upper_bound, limit_count);
-  log_trace(os, container)("OSContainer::active_processor_count: %d", result);
+  log_trace(os, container)("OSContainer::active_processor_count: %.2f", result);
   value = result;
   return true;
 }
@@ -73,11 +68,11 @@ physical_memory_size_type CgroupUtil::get_updated_mem_limit(CgroupMemoryControll

 // Get an updated cpu limit. The return value is strictly less than or equal to the
 // passed in 'lowest' value.
-int CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
+double CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
                                       int lowest,
                                       int upper_bound) {
   assert(lowest > 0 && lowest <= upper_bound, "invariant");
-  int cpu_limit_val = -1;
+  double cpu_limit_val = -1;
   if (CgroupUtil::processor_count(cpu, upper_bound, cpu_limit_val) && cpu_limit_val != upper_bound) {
     assert(cpu_limit_val <= upper_bound, "invariant");
     if (lowest > cpu_limit_val) {
@@ -172,7 +167,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
   assert(cg_path[0] == '/', "cgroup path must start with '/'");
   int host_cpus = os::Linux::active_processor_count();
   int lowest_limit = host_cpus;
-  int cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
+  double cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
   int orig_limit = lowest_limit != host_cpus ? lowest_limit : host_cpus;
   char* limit_cg_path = nullptr;
   while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
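A worked example of the fractional-quota math introduced above; the quota/period values are illustrative (roughly the effect of --cpus=1.5 on an 8-CPU host), not values taken from the commit.

// Sketch only, in standard C++: what CgroupUtil::processor_count() now
// reports, and how os::active_processor_count() (changed later in this
// commit) rounds it up to a whole CPU count.
#include <algorithm>
#include <cassert>
#include <cmath>

int example_fractional_quota() {
  int quota = 150000, period = 100000, host_cpus = 8;                    // illustrative values
  double cpu_quota = static_cast<double>(quota) / period;                // 1.5
  double result = std::min(static_cast<double>(host_cpus), cpu_quota);   // MIN2(result, cpu_quota)
  assert(result == 1.5);
  return static_cast<int>(std::ceil(result));                            // ceilf(cpu_quota) -> 2 active processors
}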
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2024, Red Hat, Inc.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +32,7 @@
 class CgroupUtil: AllStatic {

  public:
-  static bool processor_count(CgroupCpuController* cpu, int upper_bound, int& value);
+  static bool processor_count(CgroupCpuController* cpu, int upper_bound, double& value);
   // Given a memory controller, adjust its path to a point in the hierarchy
   // that represents the closest memory limit.
   static void adjust_controller(CgroupMemoryController* m);
@@ -42,9 +43,7 @@ class CgroupUtil: AllStatic {
   static physical_memory_size_type get_updated_mem_limit(CgroupMemoryController* m,
                                                          physical_memory_size_type lowest,
                                                          physical_memory_size_type upper_bound);
-  static int get_updated_cpu_limit(CgroupCpuController* c,
-                                   int lowest,
-                                   int upper_bound);
+  static double get_updated_cpu_limit(CgroupCpuController* c, int lowest, int upper_bound);
 };

 #endif // CGROUP_UTIL_LINUX_HPP
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -328,8 +328,8 @@ CgroupV1Subsystem::CgroupV1Subsystem(CgroupV1Controller* cpuset,
|
||||
_pids(pids) {
|
||||
CgroupUtil::adjust_controller(memory);
|
||||
CgroupUtil::adjust_controller(cpu);
|
||||
_memory = new CachingCgroupController<CgroupMemoryController>(memory);
|
||||
_cpu = new CachingCgroupController<CgroupCpuController>(cpu);
|
||||
_memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
|
||||
_cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
|
||||
}
|
||||
|
||||
bool CgroupV1Subsystem::is_containerized() {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -214,15 +214,15 @@ class CgroupV1Subsystem: public CgroupSubsystem {
|
||||
const char * container_type() override {
|
||||
return "cgroupv1";
|
||||
}
|
||||
CachingCgroupController<CgroupMemoryController>* memory_controller() override { return _memory; }
|
||||
CachingCgroupController<CgroupCpuController>* cpu_controller() override { return _cpu; }
|
||||
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
|
||||
CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
|
||||
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; }
|
||||
|
||||
private:
|
||||
/* controllers */
|
||||
CachingCgroupController<CgroupMemoryController>* _memory = nullptr;
|
||||
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
|
||||
CgroupV1Controller* _cpuset = nullptr;
|
||||
CachingCgroupController<CgroupCpuController>* _cpu = nullptr;
|
||||
CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
|
||||
CgroupV1CpuacctController* _cpuacct = nullptr;
|
||||
CgroupV1Controller* _pids = nullptr;
|
||||
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2020, 2025, Red Hat Inc.
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -156,8 +156,8 @@ CgroupV2Subsystem::CgroupV2Subsystem(CgroupV2MemoryController * memory,
|
||||
_unified(unified) {
|
||||
CgroupUtil::adjust_controller(memory);
|
||||
CgroupUtil::adjust_controller(cpu);
|
||||
_memory = new CachingCgroupController<CgroupMemoryController>(memory);
|
||||
_cpu = new CachingCgroupController<CgroupCpuController>(cpu);
|
||||
_memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
|
||||
_cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
|
||||
_cpuacct = cpuacct;
|
||||
}
|
||||
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2020, 2024, Red Hat Inc.
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -152,8 +152,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
|
||||
/* One unified controller */
|
||||
CgroupV2Controller _unified;
|
||||
/* Caching wrappers for cpu/memory metrics */
|
||||
CachingCgroupController<CgroupMemoryController>* _memory = nullptr;
|
||||
CachingCgroupController<CgroupCpuController>* _cpu = nullptr;
|
||||
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
|
||||
CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
|
||||
|
||||
CgroupCpuacctController* _cpuacct = nullptr;
|
||||
|
||||
@ -175,8 +175,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
|
||||
const char * container_type() override {
|
||||
return "cgroupv2";
|
||||
}
|
||||
CachingCgroupController<CgroupMemoryController>* memory_controller() override { return _memory; }
|
||||
CachingCgroupController<CgroupCpuController>* cpu_controller() override { return _cpu; }
|
||||
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
|
||||
CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
|
||||
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; };
|
||||
};
|
||||
|
||||
|
||||
@ -66,9 +66,6 @@
|
||||
#endif
|
||||
|
||||
// open(2) flags
|
||||
#ifndef O_CLOEXEC
|
||||
#define O_CLOEXEC 02000000
|
||||
#endif
|
||||
#ifndef O_TMPFILE
|
||||
#define O_TMPFILE (020000000 | O_DIRECTORY)
|
||||
#endif
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -86,8 +86,8 @@ void OSContainer::init() {
|
||||
// 2.) On a physical Linux system with a limit enforced by other means (like systemd slice)
|
||||
physical_memory_size_type mem_limit_val = value_unlimited;
|
||||
(void)memory_limit_in_bytes(mem_limit_val); // discard error and use default
|
||||
int host_cpus = os::Linux::active_processor_count();
|
||||
int cpus = host_cpus;
|
||||
double host_cpus = os::Linux::active_processor_count();
|
||||
double cpus = host_cpus;
|
||||
(void)active_processor_count(cpus); // discard error and use default
|
||||
any_mem_cpu_limit_present = mem_limit_val != value_unlimited || host_cpus != cpus;
|
||||
if (any_mem_cpu_limit_present) {
|
||||
@ -127,8 +127,7 @@ bool OSContainer::available_memory_in_bytes(physical_memory_size_type& value) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_swap,
|
||||
physical_memory_size_type& value) {
|
||||
bool OSContainer::available_swap_in_bytes(physical_memory_size_type& value) {
|
||||
physical_memory_size_type mem_limit = 0;
|
||||
physical_memory_size_type mem_swap_limit = 0;
|
||||
if (memory_limit_in_bytes(mem_limit) &&
|
||||
@ -179,8 +178,7 @@ bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_sw
|
||||
assert(num < 25, "buffer too small");
|
||||
mem_limit_buf[num] = '\0';
|
||||
log_trace(os,container)("OSContainer::available_swap_in_bytes: container_swap_limit=%s"
|
||||
" container_mem_limit=%s, host_free_swap: " PHYS_MEM_TYPE_FORMAT,
|
||||
mem_swap_buf, mem_limit_buf, host_free_swap);
|
||||
" container_mem_limit=%s", mem_swap_buf, mem_limit_buf);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
@ -252,7 +250,7 @@ char * OSContainer::cpu_cpuset_memory_nodes() {
|
||||
return cgroup_subsystem->cpu_cpuset_memory_nodes();
|
||||
}
|
||||
|
||||
bool OSContainer::active_processor_count(int& value) {
|
||||
bool OSContainer::active_processor_count(double& value) {
|
||||
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
|
||||
return cgroup_subsystem->active_processor_count(value);
|
||||
}
|
||||
@ -291,11 +289,13 @@ template<typename T> struct metric_fmt;
|
||||
template<> struct metric_fmt<unsigned long long int> { static constexpr const char* fmt = "%llu"; };
|
||||
template<> struct metric_fmt<unsigned long int> { static constexpr const char* fmt = "%lu"; };
|
||||
template<> struct metric_fmt<int> { static constexpr const char* fmt = "%d"; };
|
||||
template<> struct metric_fmt<double> { static constexpr const char* fmt = "%.2f"; };
|
||||
template<> struct metric_fmt<const char*> { static constexpr const char* fmt = "%s"; };
|
||||
|
||||
template void OSContainer::print_container_metric<unsigned long long int>(outputStream*, const char*, unsigned long long int, const char*);
|
||||
template void OSContainer::print_container_metric<unsigned long int>(outputStream*, const char*, unsigned long int, const char*);
|
||||
template void OSContainer::print_container_metric<int>(outputStream*, const char*, int, const char*);
|
||||
template void OSContainer::print_container_metric<double>(outputStream*, const char*, double, const char*);
|
||||
template void OSContainer::print_container_metric<const char*>(outputStream*, const char*, const char*, const char*);
|
||||
|
||||
template <typename T>
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -72,8 +72,7 @@ class OSContainer: AllStatic {
|
||||
static const char * container_type();
|
||||
|
||||
static bool available_memory_in_bytes(physical_memory_size_type& value);
|
||||
static bool available_swap_in_bytes(physical_memory_size_type host_free_swap,
|
||||
physical_memory_size_type& value);
|
||||
static bool available_swap_in_bytes(physical_memory_size_type& value);
|
||||
static bool memory_limit_in_bytes(physical_memory_size_type& value);
|
||||
static bool memory_and_swap_limit_in_bytes(physical_memory_size_type& value);
|
||||
static bool memory_and_swap_usage_in_bytes(physical_memory_size_type& value);
|
||||
@ -84,7 +83,7 @@ class OSContainer: AllStatic {
|
||||
static bool rss_usage_in_bytes(physical_memory_size_type& value);
|
||||
static bool cache_usage_in_bytes(physical_memory_size_type& value);
|
||||
|
||||
static bool active_processor_count(int& value);
|
||||
static bool active_processor_count(double& value);
|
||||
|
||||
static char * cpu_cpuset_cpus();
|
||||
static char * cpu_cpuset_memory_nodes();
|
||||
|
||||
@ -211,15 +211,58 @@ static bool suppress_primordial_thread_resolution = false;
|
||||
|
||||
// utility functions
|
||||
|
||||
bool os::is_containerized() {
|
||||
return OSContainer::is_containerized();
|
||||
}
|
||||
|
||||
bool os::Container::memory_limit(physical_memory_size_type& value) {
|
||||
physical_memory_size_type result = 0;
|
||||
if (OSContainer::memory_limit_in_bytes(result) && result != value_unlimited) {
|
||||
value = result;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool os::Container::memory_soft_limit(physical_memory_size_type& value) {
|
||||
physical_memory_size_type result = 0;
|
||||
if (OSContainer::memory_soft_limit_in_bytes(result) && result != 0 && result != value_unlimited) {
|
||||
value = result;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool os::Container::memory_throttle_limit(physical_memory_size_type& value) {
|
||||
physical_memory_size_type result = 0;
|
||||
if (OSContainer::memory_throttle_limit_in_bytes(result) && result != value_unlimited) {
|
||||
value = result;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool os::Container::used_memory(physical_memory_size_type& value) {
|
||||
return OSContainer::memory_usage_in_bytes(value);
|
||||
}
|
||||
|
||||
bool os::available_memory(physical_memory_size_type& value) {
|
||||
if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
|
||||
if (is_containerized() && Container::available_memory(value)) {
|
||||
log_trace(os)("available container memory: " PHYS_MEM_TYPE_FORMAT, value);
|
||||
return true;
|
||||
}
|
||||
|
||||
return Machine::available_memory(value);
|
||||
}
|
||||
|
||||
bool os::Machine::available_memory(physical_memory_size_type& value) {
|
||||
return Linux::available_memory(value);
|
||||
}
|
||||
|
||||
bool os::Container::available_memory(physical_memory_size_type& value) {
|
||||
return OSContainer::available_memory_in_bytes(value);
|
||||
}
|
||||
|
||||
bool os::Linux::available_memory(physical_memory_size_type& value) {
|
||||
physical_memory_size_type avail_mem = 0;
|
||||
|
||||
@ -251,11 +294,15 @@ bool os::Linux::available_memory(physical_memory_size_type& value) {
|
||||
}
|
||||
|
||||
bool os::free_memory(physical_memory_size_type& value) {
|
||||
if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
|
||||
if (is_containerized() && Container::available_memory(value)) {
|
||||
log_trace(os)("free container memory: " PHYS_MEM_TYPE_FORMAT, value);
|
||||
return true;
|
||||
}
|
||||
|
||||
return Machine::free_memory(value);
|
||||
}
|
||||
|
||||
bool os::Machine::free_memory(physical_memory_size_type& value) {
|
||||
return Linux::free_memory(value);
|
||||
}
|
||||
|
||||
@ -274,21 +321,30 @@ bool os::Linux::free_memory(physical_memory_size_type& value) {
|
||||
}
|
||||
|
||||
bool os::total_swap_space(physical_memory_size_type& value) {
|
||||
if (OSContainer::is_containerized()) {
|
||||
physical_memory_size_type mem_swap_limit = value_unlimited;
|
||||
physical_memory_size_type memory_limit = value_unlimited;
|
||||
if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
|
||||
OSContainer::memory_limit_in_bytes(memory_limit)) {
|
||||
if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
|
||||
mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
|
||||
value = mem_swap_limit - memory_limit;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
} // fallback to the host swap space if the container returned unlimited
|
||||
if (is_containerized() && Container::total_swap_space(value)) {
|
||||
return true;
|
||||
} // fallback to the host swap space if the container value fails
|
||||
return Machine::total_swap_space(value);
|
||||
}
|
||||
|
||||
bool os::Machine::total_swap_space(physical_memory_size_type& value) {
|
||||
return Linux::host_swap(value);
|
||||
}
|
||||
|
||||
bool os::Container::total_swap_space(physical_memory_size_type& value) {
|
||||
physical_memory_size_type mem_swap_limit = value_unlimited;
|
||||
physical_memory_size_type memory_limit = value_unlimited;
|
||||
if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
|
||||
OSContainer::memory_limit_in_bytes(memory_limit)) {
|
||||
if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
|
||||
mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
|
||||
value = mem_swap_limit - memory_limit;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool host_free_swap_f(physical_memory_size_type& value) {
|
||||
struct sysinfo si;
|
||||
int ret = sysinfo(&si);
|
||||
@ -309,32 +365,45 @@ bool os::free_swap_space(physical_memory_size_type& value) {
|
||||
return false;
|
||||
}
|
||||
physical_memory_size_type host_free_swap_val = MIN2(total_swap_space, host_free_swap);
|
||||
if (OSContainer::is_containerized()) {
|
||||
if (OSContainer::available_swap_in_bytes(host_free_swap_val, value)) {
|
||||
if (is_containerized()) {
|
||||
if (Container::free_swap_space(value)) {
|
||||
return true;
|
||||
}
|
||||
// Fall through to use host value
|
||||
log_trace(os,container)("os::free_swap_space: containerized value unavailable"
|
||||
" returning host value: " PHYS_MEM_TYPE_FORMAT, host_free_swap_val);
|
||||
}
|
||||
|
||||
value = host_free_swap_val;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool os::Machine::free_swap_space(physical_memory_size_type& value) {
|
||||
return host_free_swap_f(value);
|
||||
}
|
||||
|
||||
bool os::Container::free_swap_space(physical_memory_size_type& value) {
|
||||
return OSContainer::available_swap_in_bytes(value);
|
||||
}
|
||||
|
||||
physical_memory_size_type os::physical_memory() {
|
||||
if (OSContainer::is_containerized()) {
|
||||
if (is_containerized()) {
|
||||
physical_memory_size_type mem_limit = value_unlimited;
|
||||
if (OSContainer::memory_limit_in_bytes(mem_limit) && mem_limit != value_unlimited) {
|
||||
if (Container::memory_limit(mem_limit) && mem_limit != value_unlimited) {
|
||||
log_trace(os)("total container memory: " PHYS_MEM_TYPE_FORMAT, mem_limit);
|
||||
return mem_limit;
|
||||
}
|
||||
}
|
||||
|
||||
physical_memory_size_type phys_mem = Linux::physical_memory();
|
||||
physical_memory_size_type phys_mem = Machine::physical_memory();
|
||||
log_trace(os)("total system memory: " PHYS_MEM_TYPE_FORMAT, phys_mem);
|
||||
return phys_mem;
|
||||
}
|
||||
|
||||
physical_memory_size_type os::Machine::physical_memory() {
|
||||
return Linux::physical_memory();
|
||||
}
|
||||
|
||||
// Returns the resident set size (RSS) of the process.
|
||||
// Falls back to using VmRSS from /proc/self/status if /proc/self/smaps_rollup is unavailable.
|
||||
// Note: On kernels with memory cgroups or shared memory, VmRSS may underreport RSS.
|
||||
@ -2439,20 +2508,21 @@ bool os::Linux::print_container_info(outputStream* st) {
|
||||
OSContainer::print_container_metric(st, "cpu_memory_nodes", p != nullptr ? p : "not supported");
|
||||
free(p);
|
||||
|
||||
int i = -1;
|
||||
bool supported = OSContainer::active_processor_count(i);
|
||||
double cpus = -1;
|
||||
bool supported = OSContainer::active_processor_count(cpus);
|
||||
if (supported) {
|
||||
assert(i > 0, "must be");
|
||||
assert(cpus > 0, "must be");
|
||||
if (ActiveProcessorCount > 0) {
|
||||
OSContainer::print_container_metric(st, "active_processor_count", ActiveProcessorCount, "(from -XX:ActiveProcessorCount)");
|
||||
} else {
|
||||
OSContainer::print_container_metric(st, "active_processor_count", i);
|
||||
OSContainer::print_container_metric(st, "active_processor_count", cpus);
|
||||
}
|
||||
} else {
|
||||
OSContainer::print_container_metric(st, "active_processor_count", "not supported");
|
||||
}
|
||||
|
||||
|
||||
int i = -1;
|
||||
supported = OSContainer::cpu_quota(i);
|
||||
if (supported && i > 0) {
|
||||
OSContainer::print_container_metric(st, "cpu_quota", i);
|
||||
@ -4737,15 +4807,26 @@ int os::active_processor_count() {
|
||||
return ActiveProcessorCount;
|
||||
}
|
||||
|
||||
int active_cpus = -1;
|
||||
if (OSContainer::is_containerized() && OSContainer::active_processor_count(active_cpus)) {
|
||||
log_trace(os)("active_processor_count: determined by OSContainer: %d",
|
||||
active_cpus);
|
||||
} else {
|
||||
active_cpus = os::Linux::active_processor_count();
|
||||
if (is_containerized()) {
|
||||
double cpu_quota;
|
||||
if (Container::processor_count(cpu_quota)) {
|
||||
int active_cpus = ceilf(cpu_quota); // Round fractional CPU quota up.
|
||||
assert(active_cpus <= Machine::active_processor_count(), "must be");
|
||||
log_trace(os)("active_processor_count: determined by OSContainer: %d",
|
||||
active_cpus);
|
||||
return active_cpus;
|
||||
}
|
||||
}
|
||||
|
||||
return active_cpus;
|
||||
return Machine::active_processor_count();
|
||||
}
|
||||
|
||||
int os::Machine::active_processor_count() {
|
||||
return os::Linux::active_processor_count();
|
||||
}
|
||||
|
||||
bool os::Container::processor_count(double& value) {
|
||||
return OSContainer::active_processor_count(value);
|
||||
}
|
||||
|
||||
static bool should_warn_invalid_processor_id() {
|
||||
@ -4878,31 +4959,8 @@ int os::open(const char *path, int oflag, int mode) {
|
||||
// All file descriptors that are opened in the Java process and not
|
||||
// specifically destined for a subprocess should have the close-on-exec
|
||||
// flag set. If we don't set it, then careless 3rd party native code
|
||||
// might fork and exec without closing all appropriate file descriptors,
|
||||
// and this in turn might:
|
||||
//
|
||||
// - cause end-of-file to fail to be detected on some file
|
||||
// descriptors, resulting in mysterious hangs, or
|
||||
//
|
||||
// - might cause an fopen in the subprocess to fail on a system
|
||||
// suffering from bug 1085341.
|
||||
//
|
||||
// (Yes, the default setting of the close-on-exec flag is a Unix
|
||||
// design flaw)
|
||||
//
|
||||
// See:
|
||||
// 1085341: 32-bit stdio routines should support file descriptors >255
|
||||
// 4843136: (process) pipe file descriptor from Runtime.exec not being closed
|
||||
// 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
|
||||
//
|
||||
// Modern Linux kernels (after 2.6.23 2007) support O_CLOEXEC with open().
|
||||
// O_CLOEXEC is preferable to using FD_CLOEXEC on an open file descriptor
|
||||
// because it saves a system call and removes a small window where the flag
|
||||
// is unset. On ancient Linux kernels the O_CLOEXEC flag will be ignored
|
||||
// and we fall back to using FD_CLOEXEC (see below).
|
||||
#ifdef O_CLOEXEC
|
||||
// might fork and exec without closing all appropriate file descriptors.
|
||||
oflag |= O_CLOEXEC;
|
||||
#endif
|
||||
|
||||
int fd = ::open(path, oflag, mode);
|
||||
if (fd == -1) return -1;
|
||||
@ -4925,21 +4983,6 @@ int os::open(const char *path, int oflag, int mode) {
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef FD_CLOEXEC
|
||||
// Validate that the use of the O_CLOEXEC flag on open above worked.
|
||||
// With recent kernels, we will perform this check exactly once.
|
||||
static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
|
||||
if (!O_CLOEXEC_is_known_to_work) {
|
||||
int flags = ::fcntl(fd, F_GETFD);
|
||||
if (flags != -1) {
|
||||
if ((flags & FD_CLOEXEC) != 0)
|
||||
O_CLOEXEC_is_known_to_work = 1;
|
||||
else
|
||||
::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return fd;
|
||||
}
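The comment block above describes the close-on-exec strategy: request O_CLOEXEC at open() time and, on kernels that silently ignore it, fall back to setting FD_CLOEXEC with fcntl(). A self-contained POSIX sketch of that pattern (simplified: no once-only caching of the probe result, helper name invented):

    // Sketch only: open a file with close-on-exec set, falling back to
    // fcntl(FD_CLOEXEC) when O_CLOEXEC is not honored.
    #include <fcntl.h>
    #include <unistd.h>

    static int open_cloexec(const char* path, int oflag, mode_t mode) {
    #ifdef O_CLOEXEC
      oflag |= O_CLOEXEC;  // atomic: no window where the fd can leak into a child
    #endif
      int fd = ::open(path, oflag, mode);
      if (fd == -1) return -1;
    #ifdef FD_CLOEXEC
      int flags = ::fcntl(fd, F_GETFD);
      if (flags != -1 && (flags & FD_CLOEXEC) == 0) {
        // O_CLOEXEC was ignored (ancient kernel); set the flag after the fact.
        ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
      }
    #endif
      return fd;
    }

    int main() {
      int fd = open_cloexec("/etc/hostname", O_RDONLY, 0);
      if (fd != -1) ::close(fd);
    }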
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -951,6 +951,32 @@ struct enum_sigcode_desc_t {
|
||||
const char* s_desc;
|
||||
};
|
||||
|
||||
#if defined(LINUX)
|
||||
// Additional kernel si_code definitions that are only exported by
|
||||
// more recent glibc distributions, so we have to hard-code the values.
|
||||
#ifndef BUS_MCEERR_AR // glibc 2.17
|
||||
#define BUS_MCEERR_AR 4
|
||||
#define BUS_MCEERR_AO 5
|
||||
#endif
|
||||
|
||||
#ifndef SEGV_PKUERR // glibc 2.27
|
||||
#define SEGV_PKUERR 4
|
||||
#endif
|
||||
|
||||
#ifndef SYS_SECCOMP // glibc 2.28
|
||||
#define SYS_SECCOMP 1
|
||||
#endif
|
||||
|
||||
#ifndef TRAP_BRANCH // glibc 2.30
|
||||
#define TRAP_BRANCH 3
|
||||
#endif
|
||||
|
||||
#ifndef TRAP_HWBKPT // not glibc version specific - gdb related
|
||||
#define TRAP_HWBKPT 4
|
||||
#endif
|
||||
|
||||
#endif // LINUX
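The guarded #define fallbacks above let the description table compile against older glibc headers while still decoding newer si_code values at run time. A tiny standalone sketch of the same lookup idea (the table here is abbreviated to standard POSIX codes and is not the full HotSpot table):

    // Sketch only: map a (signal, si_code) pair to a short description.
    #include <csignal>
    #include <cstdio>

    struct SigCodeDesc { int sig; int code; const char* name; const char* desc; };

    static const SigCodeDesc kTable[] = {
      { SIGSEGV, SEGV_MAPERR, "SEGV_MAPERR", "Address not mapped to object." },
      { SIGSEGV, SEGV_ACCERR, "SEGV_ACCERR", "Invalid permissions for mapped object." },
      { SIGBUS,  BUS_ADRALN,  "BUS_ADRALN",  "Invalid address alignment." },
    };

    static const SigCodeDesc* describe(int sig, int code) {
      for (const SigCodeDesc& e : kTable) {
        if (e.sig == sig && e.code == code) return &e;
      }
      return nullptr;  // caller falls back to a generic "unknown si_code" message
    }

    int main() {
      const SigCodeDesc* d = describe(SIGSEGV, SEGV_ACCERR);
      if (d != nullptr) std::printf("%s: %s\n", d->name, d->desc);
    }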
|
||||
|
||||
static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t* out) {
|
||||
|
||||
const struct {
|
||||
@ -976,6 +1002,7 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
|
||||
{ SIGSEGV, SEGV_ACCERR, "SEGV_ACCERR", "Invalid permissions for mapped object." },
|
||||
#if defined(LINUX)
|
||||
{ SIGSEGV, SEGV_BNDERR, "SEGV_BNDERR", "Failed address bound checks." },
|
||||
{ SIGSEGV, SEGV_PKUERR, "SEGV_PKUERR", "Protection key checking failure." },
|
||||
#endif
|
||||
#if defined(AIX)
|
||||
// no explanation found what keyerr would be
|
||||
@ -984,8 +1011,18 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
|
||||
{ SIGBUS, BUS_ADRALN, "BUS_ADRALN", "Invalid address alignment." },
|
||||
{ SIGBUS, BUS_ADRERR, "BUS_ADRERR", "Nonexistent physical address." },
|
||||
{ SIGBUS, BUS_OBJERR, "BUS_OBJERR", "Object-specific hardware error." },
|
||||
#if defined(LINUX)
|
||||
{ SIGBUS, BUS_MCEERR_AR,"BUS_MCEERR_AR","Hardware memory error consumed on a machine check: action required." },
|
||||
{ SIGBUS, BUS_MCEERR_AO,"BUS_MCEERR_AO","Hardware memory error detected in process but not consumed: action optional." },
|
||||
|
||||
{ SIGSYS, SYS_SECCOMP, "SYS_SECCOMP", "Secure computing (seccomp) filter failure." },
|
||||
#endif
|
||||
{ SIGTRAP, TRAP_BRKPT, "TRAP_BRKPT", "Process breakpoint." },
|
||||
{ SIGTRAP, TRAP_TRACE, "TRAP_TRACE", "Process trace trap." },
|
||||
#if defined(LINUX)
|
||||
{ SIGTRAP, TRAP_BRANCH, "TRAP_BRANCH", "Process taken branch trap." },
|
||||
{ SIGTRAP, TRAP_HWBKPT, "TRAP_HWBKPT", "Hardware breakpoint/watchpoint." },
|
||||
#endif
|
||||
{ SIGCHLD, CLD_EXITED, "CLD_EXITED", "Child has exited." },
|
||||
{ SIGCHLD, CLD_KILLED, "CLD_KILLED", "Child has terminated abnormally and did not create a core file." },
|
||||
{ SIGCHLD, CLD_DUMPED, "CLD_DUMPED", "Child has terminated abnormally and created a core file." },
|
||||
@ -993,6 +1030,7 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
|
||||
{ SIGCHLD, CLD_STOPPED, "CLD_STOPPED", "Child has stopped." },
|
||||
{ SIGCHLD, CLD_CONTINUED,"CLD_CONTINUED","Stopped child has continued." },
|
||||
#ifdef SIGPOLL
|
||||
{ SIGPOLL, POLL_IN, "POLL_IN", "Data input available." },
|
||||
{ SIGPOLL, POLL_OUT, "POLL_OUT", "Output buffers available." },
|
||||
{ SIGPOLL, POLL_MSG, "POLL_MSG", "Input message available." },
|
||||
{ SIGPOLL, POLL_ERR, "POLL_ERR", "I/O error." },
|
||||
|
||||
@ -839,10 +839,18 @@ bool os::available_memory(physical_memory_size_type& value) {
|
||||
return win32::available_memory(value);
|
||||
}
|
||||
|
||||
bool os::Machine::available_memory(physical_memory_size_type& value) {
|
||||
return win32::available_memory(value);
|
||||
}
|
||||
|
||||
bool os::free_memory(physical_memory_size_type& value) {
|
||||
return win32::available_memory(value);
|
||||
}
|
||||
|
||||
bool os::Machine::free_memory(physical_memory_size_type& value) {
|
||||
return win32::available_memory(value);
|
||||
}
|
||||
|
||||
bool os::win32::available_memory(physical_memory_size_type& value) {
|
||||
// Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
|
||||
// value if total memory is larger than 4GB
|
||||
@ -858,7 +866,11 @@ bool os::win32::available_memory(physical_memory_size_type& value) {
|
||||
}
|
||||
}
|
||||
|
||||
bool os::total_swap_space(physical_memory_size_type& value) {
|
||||
return Machine::total_swap_space(value);
|
||||
}
|
||||
|
||||
bool os::Machine::total_swap_space(physical_memory_size_type& value) {
|
||||
MEMORYSTATUSEX ms;
|
||||
ms.dwLength = sizeof(ms);
|
||||
BOOL res = GlobalMemoryStatusEx(&ms);
|
||||
@ -872,6 +884,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
|
||||
}
|
||||
|
||||
bool os::free_swap_space(physical_memory_size_type& value) {
|
||||
return Machine::free_swap_space(value);
|
||||
}
|
||||
|
||||
bool os::Machine::free_swap_space(physical_memory_size_type& value) {
|
||||
MEMORYSTATUSEX ms;
|
||||
ms.dwLength = sizeof(ms);
|
||||
BOOL res = GlobalMemoryStatusEx(&ms);
|
||||
@ -888,6 +904,10 @@ physical_memory_size_type os::physical_memory() {
|
||||
return win32::physical_memory();
|
||||
}
|
||||
|
||||
physical_memory_size_type os::Machine::physical_memory() {
|
||||
return win32::physical_memory();
|
||||
}
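The comment in this file notes that GlobalMemoryStatusEx() is used because the older GlobalMemoryStatus() can report wrong values on machines with more than 4GB of RAM. A minimal Win32 sketch of querying available physical memory that way (error handling reduced to a boolean, helper name invented):

    // Sketch only (Windows): query available physical memory in bytes.
    #include <windows.h>
    #include <cstdio>

    static bool available_physical_memory(unsigned long long& value) {
      MEMORYSTATUSEX ms;
      ms.dwLength = sizeof(ms);      // must be set before the call
      if (!GlobalMemoryStatusEx(&ms)) {
        return false;                // GetLastError() would give the reason
      }
      value = ms.ullAvailPhys;       // 64-bit field, so >4GB machines report correctly
      return true;
    }

    int main() {
      unsigned long long avail = 0;
      if (available_physical_memory(avail)) {
        std::printf("available: %llu bytes\n", avail);
      }
    }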
|
||||
|
||||
size_t os::rss() {
|
||||
size_t rss = 0;
|
||||
PROCESS_MEMORY_COUNTERS_EX pmex;
|
||||
@ -911,6 +931,10 @@ int os::active_processor_count() {
|
||||
return ActiveProcessorCount;
|
||||
}
|
||||
|
||||
return Machine::active_processor_count();
|
||||
}
|
||||
|
||||
int os::Machine::active_processor_count() {
|
||||
bool schedules_all_processor_groups = win32::is_windows_11_or_greater() || win32::is_windows_server_2022_or_greater();
|
||||
if (UseAllWindowsProcessorGroups && !schedules_all_processor_groups && !win32::processor_group_warning_displayed()) {
|
||||
win32::set_processor_group_warning_displayed(true);
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -614,6 +614,10 @@ struct StringTableDeleteCheck : StackObj {
|
||||
};
|
||||
|
||||
void StringTable::clean_dead_entries(JavaThread* jt) {
|
||||
// BulkDeleteTask::prepare() may take ConcurrentHashTableResize_lock (nosafepoint-2).
|
||||
// When NativeHeapTrimmer is enabled, SuspendMark may take NativeHeapTrimmer::_lock (nosafepoint).
|
||||
// Take SuspendMark first to keep lock order and avoid deadlock.
|
||||
NativeHeapTrimmer::SuspendMark sm("stringtable");
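The comment above is about lock ranking: the trimmer SuspendMark is taken before BulkDeleteTask::prepare() so the two locks involved are always acquired in the same order. A small standalone illustration of that rule using std::mutex (the lock and function names are invented stand-ins; HotSpot's own Mutex ranking machinery is not shown):

    // Sketch only: two locks, always acquired in the same global order
    // (higher-ranked before lower-ranked) so deadlock is impossible.
    #include <mutex>

    std::mutex trimmer_lock;   // stands in for NativeHeapTrimmer's lock
    std::mutex resize_lock;    // stands in for the hash-table resize lock

    void clean_dead_entries() {
      std::lock_guard<std::mutex> outer(trimmer_lock);  // take the trimmer lock first...
      std::lock_guard<std::mutex> inner(resize_lock);   // ...then the lower-ranked resize lock
      // ... bulk-delete work would happen here ...
    }

    void trim_native_heap() {
      std::lock_guard<std::mutex> guard(trimmer_lock);  // never takes resize_lock first
      // ... trimming work ...
    }

    int main() {
      clean_dead_entries();
      trim_native_heap();
    }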
|
||||
StringTableHash::BulkDeleteTask bdt(_local_table);
|
||||
if (!bdt.prepare(jt)) {
|
||||
return;
|
||||
@ -621,7 +625,6 @@ void StringTable::clean_dead_entries(JavaThread* jt) {
|
||||
|
||||
StringTableDeleteCheck stdc;
|
||||
StringTableDoDelete stdd;
|
||||
NativeHeapTrimmer::SuspendMark sm("stringtable");
|
||||
{
|
||||
TraceTime timer("Clean", TRACETIME_LOG(Debug, stringtable, perf));
|
||||
while(bdt.do_task(jt, stdc, stdd)) {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -763,6 +763,10 @@ struct SymbolTableDeleteCheck : StackObj {
|
||||
};
|
||||
|
||||
void SymbolTable::clean_dead_entries(JavaThread* jt) {
|
||||
// BulkDeleteTask::prepare() may take ConcurrentHashTableResize_lock (nosafepoint-2).
|
||||
// When NativeHeapTrimmer is enabled, SuspendMark may take NativeHeapTrimmer::_lock (nosafepoint).
|
||||
// Take SuspendMark first to keep lock order and avoid deadlock.
|
||||
NativeHeapTrimmer::SuspendMark sm("symboltable");
|
||||
SymbolTableHash::BulkDeleteTask bdt(_local_table);
|
||||
if (!bdt.prepare(jt)) {
|
||||
return;
|
||||
@ -770,7 +774,6 @@ void SymbolTable::clean_dead_entries(JavaThread* jt) {
|
||||
|
||||
SymbolTableDeleteCheck stdc;
|
||||
SymbolTableDoDelete stdd;
|
||||
NativeHeapTrimmer::SuspendMark sm("symboltable");
|
||||
{
|
||||
TraceTime timer("Clean", TRACETIME_LOG(Debug, symboltable, perf));
|
||||
while (bdt.do_task(jt, stdc, stdd)) {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1371,6 +1371,7 @@ void AOTCodeAddressTable::init_extrs() {
|
||||
SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
|
||||
#endif
|
||||
#if INCLUDE_ZGC
|
||||
SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
|
||||
SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
|
||||
#if defined(AMD64)
|
||||
SET_ADDRESS(_extrs, &ZPointerLoadShift);
|
||||
|
||||
@ -62,8 +62,6 @@ jint EpsilonHeap::initialize() {
|
||||
|
||||
// Enable monitoring
|
||||
_monitoring_support = new EpsilonMonitoringSupport(this);
|
||||
_last_counter_update = 0;
|
||||
_last_heap_print = 0;
|
||||
|
||||
// Install barrier set
|
||||
BarrierSet::set_barrier_set(new EpsilonBarrierSet());
|
||||
@ -156,17 +154,17 @@ HeapWord* EpsilonHeap::allocate_work(size_t size) {
|
||||
// At this point, some diagnostic subsystems might not yet be initialized.
|
||||
// We pretend the printout happened either way. This keeps the allocation path
// from obsessively checking the subsystems' status on every allocation.
|
||||
size_t last_counter = AtomicAccess::load(&_last_counter_update);
|
||||
size_t last_counter = _last_counter_update.load_relaxed();
|
||||
if ((used - last_counter >= _step_counter_update) &&
|
||||
AtomicAccess::cmpxchg(&_last_counter_update, last_counter, used) == last_counter) {
|
||||
_last_counter_update.compare_set(last_counter, used)) {
|
||||
if (_monitoring_support->is_ready()) {
|
||||
_monitoring_support->update_counters();
|
||||
}
|
||||
}
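The counter-update branch above is a lock-free rate limiter: only the thread that wins the compare-and-set publishes the update, so the expensive work runs at most once per step. A standalone sketch of the same idiom with std::atomic (the step size and names are illustrative, not Epsilon's):

    // Sketch only: run an expensive report at most once per 'step' bytes of growth.
    #include <atomic>
    #include <cstdio>

    std::atomic<size_t> last_report{0};
    constexpr size_t step = 1024 * 1024;  // report roughly every 1 MB

    void maybe_report(size_t used_bytes) {
      size_t last = last_report.load(std::memory_order_relaxed);
      // Only the thread that successfully advances last_report does the printout;
      // losers simply skip it, which is fine for best-effort telemetry.
      if (used_bytes - last >= step &&
          last_report.compare_exchange_strong(last, used_bytes, std::memory_order_relaxed)) {
        std::printf("heap used: %zu bytes\n", used_bytes);
      }
    }

    int main() {
      maybe_report(512 * 1024);       // below the step: no output
      maybe_report(2 * 1024 * 1024);  // advances the marker and prints
    }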
|
||||
|
||||
size_t last_heap = AtomicAccess::load(&_last_heap_print);
|
||||
size_t last_heap = _last_heap_print.load_relaxed();
|
||||
if ((used - last_heap >= _step_heap_print) &&
|
||||
AtomicAccess::cmpxchg(&_last_heap_print, last_heap, used) == last_heap) {
|
||||
_last_heap_print.compare_set(last_heap, used)) {
|
||||
print_heap_info(used);
|
||||
if (Metaspace::initialized()) {
|
||||
print_metaspace_info();
|
||||
|
||||
@ -31,6 +31,7 @@
|
||||
#include "gc/shared/collectedHeap.hpp"
|
||||
#include "gc/shared/space.hpp"
|
||||
#include "memory/virtualspace.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "services/memoryManager.hpp"
|
||||
|
||||
class EpsilonHeap : public CollectedHeap {
|
||||
@ -45,8 +46,8 @@ private:
|
||||
size_t _step_counter_update;
|
||||
size_t _step_heap_print;
|
||||
int64_t _decay_time_ns;
|
||||
volatile size_t _last_counter_update;
|
||||
volatile size_t _last_heap_print;
|
||||
Atomic<size_t> _last_counter_update;
|
||||
Atomic<size_t> _last_heap_print;
|
||||
|
||||
void print_tracing_info() const override;
|
||||
void stop() override {};
|
||||
|
||||
@ -96,7 +96,6 @@ public:
|
||||
EpsilonMonitoringSupport::EpsilonMonitoringSupport(EpsilonHeap* heap) {
|
||||
_heap_counters = new EpsilonGenerationCounters(heap);
|
||||
_space_counters = new EpsilonSpaceCounters("Heap", 0, heap->max_capacity(), 0, _heap_counters);
|
||||
_ready = false;
|
||||
}
|
||||
|
||||
void EpsilonMonitoringSupport::update_counters() {
|
||||
@ -114,9 +113,9 @@ void EpsilonMonitoringSupport::update_counters() {
|
||||
}
|
||||
|
||||
bool EpsilonMonitoringSupport::is_ready() {
|
||||
return AtomicAccess::load_acquire(&_ready);
|
||||
return _ready.load_acquire();
|
||||
}
|
||||
|
||||
void EpsilonMonitoringSupport::mark_ready() {
|
||||
return AtomicAccess::release_store(&_ready, true);
|
||||
_ready.release_store(true);
|
||||
}
|
||||
|
||||
@ -26,6 +26,7 @@
|
||||
#define SHARE_GC_EPSILON_EPSILONMONITORINGSUPPORT_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
|
||||
class EpsilonGenerationCounters;
|
||||
class EpsilonSpaceCounters;
|
||||
@ -35,7 +36,7 @@ class EpsilonMonitoringSupport : public CHeapObj<mtGC> {
|
||||
private:
|
||||
EpsilonGenerationCounters* _heap_counters;
|
||||
EpsilonSpaceCounters* _space_counters;
|
||||
volatile bool _ready;
|
||||
Atomic<bool> _ready;
|
||||
|
||||
public:
|
||||
EpsilonMonitoringSupport(EpsilonHeap* heap);
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1024,10 +1024,6 @@ size_t G1CardSet::num_containers() {
|
||||
return cl._count;
|
||||
}
|
||||
|
||||
G1CardSetCoarsenStats G1CardSet::coarsen_stats() {
|
||||
return _coarsen_stats;
|
||||
}
|
||||
|
||||
void G1CardSet::print_coarsen_stats(outputStream* out) {
|
||||
_last_coarsen_stats.subtract_from(_coarsen_stats);
|
||||
|
||||
|
||||
@ -103,7 +103,6 @@
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/compressedOops.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/cpuTimeCounters.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/init.hpp"
|
||||
@ -687,8 +686,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
|
||||
// before the allocation is that we avoid having to keep track of the newly
|
||||
// allocated memory while we do a GC.
|
||||
// Only try that if we can actually perform a GC.
|
||||
if (is_init_completed() && policy()->need_to_start_conc_mark("concurrent humongous allocation",
|
||||
word_size)) {
|
||||
if (is_init_completed() &&
|
||||
policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
|
||||
try_collect(word_size, GCCause::_g1_humongous_allocation, collection_counters(this));
|
||||
}
|
||||
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,7 +29,6 @@
|
||||
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
|
||||
#include "gc/g1/g1HeapRegion.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "utilities/bitMap.inline.hpp"
|
||||
|
||||
G1EvacFailureRegions::G1EvacFailureRegions() :
|
||||
@ -43,7 +43,7 @@ G1EvacFailureRegions::~G1EvacFailureRegions() {
|
||||
}
|
||||
|
||||
void G1EvacFailureRegions::pre_collection(uint max_regions) {
|
||||
AtomicAccess::store(&_num_regions_evac_failed, 0u);
|
||||
_num_regions_evac_failed.store_relaxed(0u);
|
||||
_regions_evac_failed.resize(max_regions);
|
||||
_regions_pinned.resize(max_regions);
|
||||
_regions_alloc_failed.resize(max_regions);
|
||||
@ -69,6 +69,6 @@ void G1EvacFailureRegions::par_iterate(G1HeapRegionClosure* closure,
|
||||
G1CollectedHeap::heap()->par_iterate_regions_array(closure,
|
||||
hrclaimer,
|
||||
_evac_failed_regions,
|
||||
AtomicAccess::load(&_num_regions_evac_failed),
|
||||
num_regions_evac_failed(),
|
||||
worker_id);
|
||||
}
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,6 +26,7 @@
|
||||
#ifndef SHARE_GC_G1_G1EVACFAILUREREGIONS_HPP
|
||||
#define SHARE_GC_G1_G1EVACFAILUREREGIONS_HPP
|
||||
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "utilities/bitMap.hpp"
|
||||
|
||||
class G1AbstractSubTask;
|
||||
@ -53,14 +55,14 @@ class G1EvacFailureRegions {
|
||||
// Evacuation failed regions (indexes) in the current collection.
|
||||
uint* _evac_failed_regions;
|
||||
// Number of regions evacuation failed in the current collection.
|
||||
volatile uint _num_regions_evac_failed;
|
||||
Atomic<uint> _num_regions_evac_failed;
|
||||
|
||||
public:
|
||||
G1EvacFailureRegions();
|
||||
~G1EvacFailureRegions();
|
||||
|
||||
uint get_region_idx(uint idx) const {
|
||||
assert(idx < _num_regions_evac_failed, "precondition");
|
||||
assert(idx < _num_regions_evac_failed.load_relaxed(), "precondition");
|
||||
return _evac_failed_regions[idx];
|
||||
}
|
||||
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -29,10 +30,9 @@
|
||||
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1GCPhaseTimes.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
|
||||
uint G1EvacFailureRegions::num_regions_evac_failed() const {
|
||||
return AtomicAccess::load(&_num_regions_evac_failed);
|
||||
return _num_regions_evac_failed.load_relaxed();
|
||||
}
|
||||
|
||||
bool G1EvacFailureRegions::has_regions_evac_failed() const {
|
||||
@ -57,7 +57,7 @@ bool G1EvacFailureRegions::record(uint worker_id, uint region_idx, bool cause_pi
|
||||
bool success = _regions_evac_failed.par_set_bit(region_idx,
|
||||
memory_order_relaxed);
|
||||
if (success) {
|
||||
size_t offset = AtomicAccess::fetch_then_add(&_num_regions_evac_failed, 1u);
|
||||
size_t offset = _num_regions_evac_failed.fetch_then_add(1u);
|
||||
_evac_failed_regions[offset] = region_idx;
|
||||
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -37,7 +37,6 @@
|
||||
#include "gc/shared/weakProcessor.inline.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
|
||||
class G1AdjustLiveClosure : public StackObj {
|
||||
G1AdjustClosure* _adjust_closure;
|
||||
|
||||
@ -108,7 +108,7 @@ void G1FullGCMarker::follow_array_chunk(objArrayOop array, int index) {
|
||||
push_objarray(array, end_index);
|
||||
}
|
||||
|
||||
array->oop_iterate_range(mark_closure(), beg_index, end_index);
|
||||
array->oop_iterate_elements_range(mark_closure(), beg_index, end_index);
|
||||
}
|
||||
|
||||
inline void G1FullGCMarker::follow_object(oop obj) {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -31,7 +31,6 @@
|
||||
#include "memory/allocation.hpp"
|
||||
#include "memory/padded.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -30,7 +30,6 @@
|
||||
#include "gc/g1/g1CodeRootSet.hpp"
|
||||
#include "gc/g1/g1CollectionSetCandidates.hpp"
|
||||
#include "gc/g1/g1FromCardCache.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
#include "utilities/bitMap.hpp"
|
||||
@ -123,9 +122,6 @@ public:
|
||||
|
||||
static void initialize(MemRegion reserved);
|
||||
|
||||
// Coarsening statistics since VM start.
|
||||
static G1CardSetCoarsenStats coarsen_stats() { return G1CardSet::coarsen_stats(); }
|
||||
|
||||
inline uintptr_t to_card(OopOrNarrowOopStar from) const;
|
||||
|
||||
private:
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -30,7 +30,6 @@
|
||||
#include "gc/g1/g1CardSet.inline.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1HeapRegion.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "utilities/bitMap.inline.hpp"
|
||||
|
||||
void G1HeapRegionRemSet::set_state_untracked() {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -24,7 +24,6 @@
|
||||
|
||||
#include "gc/g1/g1MonotonicArena.inline.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/vmOperations.hpp"
|
||||
#include "utilities/globalCounter.inline.hpp"
|
||||
|
||||
@ -61,13 +60,13 @@ void G1MonotonicArena::SegmentFreeList::bulk_add(Segment& first,
|
||||
size_t num,
|
||||
size_t mem_size) {
|
||||
_list.prepend(first, last);
|
||||
AtomicAccess::add(&_num_segments, num, memory_order_relaxed);
|
||||
AtomicAccess::add(&_mem_size, mem_size, memory_order_relaxed);
|
||||
_num_segments.add_then_fetch(num, memory_order_relaxed);
|
||||
_mem_size.add_then_fetch(mem_size, memory_order_relaxed);
|
||||
}
|
||||
|
||||
void G1MonotonicArena::SegmentFreeList::print_on(outputStream* out, const char* prefix) {
|
||||
out->print_cr("%s: segments %zu size %zu",
|
||||
prefix, AtomicAccess::load(&_num_segments), AtomicAccess::load(&_mem_size));
|
||||
prefix, _num_segments.load_relaxed(), _mem_size.load_relaxed());
|
||||
}
|
||||
|
||||
G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get_all(size_t& num_segments,
|
||||
@ -75,12 +74,12 @@ G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get_all(size_t& nu
|
||||
GlobalCounter::CriticalSection cs(Thread::current());
|
||||
|
||||
Segment* result = _list.pop_all();
|
||||
num_segments = AtomicAccess::load(&_num_segments);
|
||||
mem_size = AtomicAccess::load(&_mem_size);
|
||||
num_segments = _num_segments.load_relaxed();
|
||||
mem_size = _mem_size.load_relaxed();
|
||||
|
||||
if (result != nullptr) {
|
||||
AtomicAccess::sub(&_num_segments, num_segments, memory_order_relaxed);
|
||||
AtomicAccess::sub(&_mem_size, mem_size, memory_order_relaxed);
|
||||
_num_segments.sub_then_fetch(num_segments, memory_order_relaxed);
|
||||
_mem_size.sub_then_fetch(mem_size, memory_order_relaxed);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
@ -96,8 +95,8 @@ void G1MonotonicArena::SegmentFreeList::free_all() {
|
||||
Segment::delete_segment(cur);
|
||||
}
|
||||
|
||||
AtomicAccess::sub(&_num_segments, num_freed, memory_order_relaxed);
|
||||
AtomicAccess::sub(&_mem_size, mem_size_freed, memory_order_relaxed);
|
||||
_num_segments.sub_then_fetch(num_freed, memory_order_relaxed);
|
||||
_mem_size.sub_then_fetch(mem_size_freed, memory_order_relaxed);
|
||||
}
|
||||
|
||||
G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
|
||||
@ -115,7 +114,7 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
|
||||
}
|
||||
|
||||
// Install it as current allocation segment.
|
||||
Segment* old = AtomicAccess::cmpxchg(&_first, prev, next);
|
||||
Segment* old = _first.compare_exchange(prev, next);
|
||||
if (old != prev) {
|
||||
// Somebody else installed the segment, use that one.
|
||||
Segment::delete_segment(next);
|
||||
@ -126,9 +125,9 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
|
||||
_last = next;
|
||||
}
|
||||
// Successfully installed the segment into the list.
|
||||
AtomicAccess::inc(&_num_segments, memory_order_relaxed);
|
||||
AtomicAccess::add(&_mem_size, next->mem_size(), memory_order_relaxed);
|
||||
AtomicAccess::add(&_num_total_slots, next->num_slots(), memory_order_relaxed);
|
||||
_num_segments.add_then_fetch(1u, memory_order_relaxed);
|
||||
_mem_size.add_then_fetch(next->mem_size(), memory_order_relaxed);
|
||||
_num_total_slots.add_then_fetch(next->num_slots(), memory_order_relaxed);
|
||||
return next;
|
||||
}
|
||||
}
|
||||
@ -155,7 +154,7 @@ uint G1MonotonicArena::slot_size() const {
|
||||
}
|
||||
|
||||
void G1MonotonicArena::drop_all() {
|
||||
Segment* cur = AtomicAccess::load_acquire(&_first);
|
||||
Segment* cur = _first.load_acquire();
|
||||
|
||||
if (cur != nullptr) {
|
||||
assert(_last != nullptr, "If there is at least one segment, there must be a last one.");
|
||||
@ -175,25 +174,25 @@ void G1MonotonicArena::drop_all() {
|
||||
cur = next;
|
||||
}
|
||||
#endif
|
||||
assert(num_segments == _num_segments, "Segment count inconsistent %u %u", num_segments, _num_segments);
|
||||
assert(mem_size == _mem_size, "Memory size inconsistent");
|
||||
assert(num_segments == _num_segments.load_relaxed(), "Segment count inconsistent %u %u", num_segments, _num_segments.load_relaxed());
|
||||
assert(mem_size == _mem_size.load_relaxed(), "Memory size inconsistent");
|
||||
assert(last == _last, "Inconsistent last segment");
|
||||
|
||||
_segment_free_list->bulk_add(*first, *_last, _num_segments, _mem_size);
|
||||
_segment_free_list->bulk_add(*first, *_last, _num_segments.load_relaxed(), _mem_size.load_relaxed());
|
||||
}
|
||||
|
||||
_first = nullptr;
|
||||
_first.store_relaxed(nullptr);
|
||||
_last = nullptr;
|
||||
_num_segments = 0;
|
||||
_mem_size = 0;
|
||||
_num_total_slots = 0;
|
||||
_num_allocated_slots = 0;
|
||||
_num_segments.store_relaxed(0);
|
||||
_mem_size.store_relaxed(0);
|
||||
_num_total_slots.store_relaxed(0);
|
||||
_num_allocated_slots.store_relaxed(0);
|
||||
}
|
||||
|
||||
void* G1MonotonicArena::allocate() {
|
||||
assert(slot_size() > 0, "instance size not set.");
|
||||
|
||||
Segment* cur = AtomicAccess::load_acquire(&_first);
|
||||
Segment* cur = _first.load_acquire();
|
||||
if (cur == nullptr) {
|
||||
cur = new_segment(cur);
|
||||
}
|
||||
@ -201,7 +200,7 @@ void* G1MonotonicArena::allocate() {
|
||||
while (true) {
|
||||
void* slot = cur->allocate_slot();
|
||||
if (slot != nullptr) {
|
||||
AtomicAccess::inc(&_num_allocated_slots, memory_order_relaxed);
|
||||
_num_allocated_slots.add_then_fetch(1u, memory_order_relaxed);
|
||||
guarantee(is_aligned(slot, _alloc_options->slot_alignment()),
|
||||
"result " PTR_FORMAT " not aligned at %u", p2i(slot), _alloc_options->slot_alignment());
|
||||
return slot;
|
||||
@ -213,7 +212,7 @@ void* G1MonotonicArena::allocate() {
|
||||
}
|
||||
|
||||
uint G1MonotonicArena::num_segments() const {
|
||||
return AtomicAccess::load(&_num_segments);
|
||||
return _num_segments.load_relaxed();
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
@ -238,7 +237,7 @@ uint G1MonotonicArena::calculate_length() const {
|
||||
|
||||
template <typename SegmentClosure>
|
||||
void G1MonotonicArena::iterate_segments(SegmentClosure& closure) const {
|
||||
Segment* cur = AtomicAccess::load_acquire(&_first);
|
||||
Segment* cur = _first.load_acquire();
|
||||
|
||||
assert((cur != nullptr) == (_last != nullptr),
|
||||
"If there is at least one segment, there must be a last one");
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -28,6 +28,7 @@
|
||||
|
||||
#include "gc/shared/freeListAllocator.hpp"
|
||||
#include "nmt/memTag.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/lockFreeStack.hpp"
|
||||
|
||||
@ -65,27 +66,27 @@ private:
|
||||
// AllocOptions provides parameters for Segment sizing and expansion.
|
||||
const AllocOptions* _alloc_options;
|
||||
|
||||
Segment* volatile _first; // The (start of the) list of all segments.
|
||||
Segment* _last; // The last segment of the list of all segments.
|
||||
volatile uint _num_segments; // Number of assigned segments to this allocator.
|
||||
volatile size_t _mem_size; // Memory used by all segments.
|
||||
Atomic<Segment*> _first; // The (start of the) list of all segments.
|
||||
Segment* _last; // The last segment of the list of all segments.
|
||||
Atomic<uint> _num_segments; // Number of assigned segments to this allocator.
|
||||
Atomic<size_t> _mem_size; // Memory used by all segments.
|
||||
|
||||
SegmentFreeList* _segment_free_list; // The global free segment list to preferentially
|
||||
// get new segments from.
|
||||
|
||||
volatile uint _num_total_slots; // Number of slots available in all segments (allocated + not yet used).
|
||||
volatile uint _num_allocated_slots; // Number of total slots allocated ever (including free and pending).
|
||||
Atomic<uint> _num_total_slots; // Number of slots available in all segments (allocated + not yet used).
|
||||
Atomic<uint> _num_allocated_slots; // Number of total slots allocated ever (including free and pending).
|
||||
|
||||
inline Segment* new_segment(Segment* const prev);
|
||||
|
||||
DEBUG_ONLY(uint calculate_length() const;)
|
||||
|
||||
public:
|
||||
const Segment* first_segment() const { return AtomicAccess::load(&_first); }
|
||||
const Segment* first_segment() const { return _first.load_relaxed(); }
|
||||
|
||||
uint num_total_slots() const { return AtomicAccess::load(&_num_total_slots); }
|
||||
uint num_total_slots() const { return _num_total_slots.load_relaxed(); }
|
||||
uint num_allocated_slots() const {
|
||||
uint allocated = AtomicAccess::load(&_num_allocated_slots);
|
||||
uint allocated = _num_allocated_slots.load_relaxed();
|
||||
assert(calculate_length() == allocated, "Must be");
|
||||
return allocated;
|
||||
}
|
||||
@ -116,11 +117,11 @@ static constexpr uint SegmentPayloadMaxAlignment = 8;
|
||||
class alignas(SegmentPayloadMaxAlignment) G1MonotonicArena::Segment {
|
||||
const uint _slot_size;
|
||||
const uint _num_slots;
|
||||
Segment* volatile _next;
|
||||
Atomic<Segment*> _next;
|
||||
// Index into the next free slot to allocate into. Full if equal (or larger)
|
||||
// to _num_slots (can be larger because we atomically increment this value and
|
||||
// check only afterwards if the allocation has been successful).
|
||||
uint volatile _next_allocate;
|
||||
Atomic<uint> _next_allocate;
|
||||
const MemTag _mem_tag;
|
||||
|
||||
static size_t header_size() { return align_up(sizeof(Segment), SegmentPayloadMaxAlignment); }
|
||||
@ -139,21 +140,21 @@ class alignas(SegmentPayloadMaxAlignment) G1MonotonicArena::Segment {
|
||||
Segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag);
|
||||
~Segment() = default;
|
||||
public:
|
||||
Segment* volatile* next_addr() { return &_next; }
|
||||
Atomic<Segment*>* next_addr() { return &_next; }
|
||||
|
||||
void* allocate_slot();
|
||||
|
||||
uint num_slots() const { return _num_slots; }
|
||||
|
||||
Segment* next() const { return _next; }
|
||||
Segment* next() const { return _next.load_relaxed(); }
|
||||
|
||||
void set_next(Segment* next) {
|
||||
assert(next != this, " loop condition");
|
||||
_next = next;
|
||||
_next.store_relaxed(next);
|
||||
}
|
||||
|
||||
void reset(Segment* next) {
|
||||
_next_allocate = 0;
|
||||
_next_allocate.store_relaxed(0);
|
||||
assert(next != this, " loop condition");
|
||||
set_next(next);
|
||||
memset(payload(0), 0, payload_size());
|
||||
@ -166,7 +167,7 @@ public:
|
||||
uint length() const {
|
||||
// _next_allocate might grow larger than _num_slots in multi-thread environments
|
||||
// due to races.
|
||||
return MIN2(_next_allocate, _num_slots);
|
||||
return MIN2(_next_allocate.load_relaxed(), _num_slots);
|
||||
}
|
||||
|
||||
static size_t size_in_bytes(uint slot_size, uint num_slots) {
|
||||
@ -176,7 +177,7 @@ public:
|
||||
static Segment* create_segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag);
|
||||
static void delete_segment(Segment* segment);
|
||||
|
||||
bool is_full() const { return _next_allocate >= _num_slots; }
|
||||
bool is_full() const { return _next_allocate.load_relaxed() >= _num_slots; }
|
||||
};
|
||||
|
||||
static_assert(alignof(G1MonotonicArena::Segment) >= SegmentPayloadMaxAlignment, "assert alignment of Segment (and indirectly its payload)");
|
||||
@ -186,15 +187,15 @@ static_assert(alignof(G1MonotonicArena::Segment) >= SegmentPayloadMaxAlignment,
|
||||
// performed by multiple threads concurrently.
|
||||
// Counts and memory usage are current on a best-effort basis if accessed concurrently.
|
||||
class G1MonotonicArena::SegmentFreeList {
|
||||
static Segment* volatile* next_ptr(Segment& segment) {
|
||||
static Atomic<Segment*>* next_ptr(Segment& segment) {
|
||||
return segment.next_addr();
|
||||
}
|
||||
using SegmentStack = LockFreeStack<Segment, &SegmentFreeList::next_ptr>;
|
||||
|
||||
SegmentStack _list;
|
||||
|
||||
volatile size_t _num_segments;
|
||||
volatile size_t _mem_size;
|
||||
Atomic<size_t> _num_segments;
|
||||
Atomic<size_t> _mem_size;
|
||||
|
||||
public:
|
||||
SegmentFreeList() : _list(), _num_segments(0), _mem_size(0) { }
|
||||
@ -210,8 +211,8 @@ public:
|
||||
|
||||
void print_on(outputStream* out, const char* prefix = "");
|
||||
|
||||
size_t num_segments() const { return AtomicAccess::load(&_num_segments); }
|
||||
size_t mem_size() const { return AtomicAccess::load(&_mem_size); }
|
||||
size_t num_segments() const { return _num_segments.load_relaxed(); }
|
||||
size_t mem_size() const { return _mem_size.load_relaxed(); }
|
||||
};
|
||||
|
||||
// Configuration for G1MonotonicArena, e.g slot size, slot number of next Segment.
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -28,14 +28,13 @@
|
||||
|
||||
#include "gc/g1/g1MonotonicArena.hpp"
|
||||
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "utilities/globalCounter.inline.hpp"
|
||||
|
||||
inline void* G1MonotonicArena::Segment::allocate_slot() {
|
||||
if (_next_allocate >= _num_slots) {
|
||||
if (_next_allocate.load_relaxed() >= _num_slots) {
|
||||
return nullptr;
|
||||
}
|
||||
uint result = AtomicAccess::fetch_then_add(&_next_allocate, 1u, memory_order_relaxed);
|
||||
uint result = _next_allocate.fetch_then_add(1u, memory_order_relaxed);
|
||||
if (result >= _num_slots) {
|
||||
return nullptr;
|
||||
}
|
||||
@ -48,8 +47,8 @@ inline G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get() {
|
||||
|
||||
Segment* result = _list.pop();
|
||||
if (result != nullptr) {
|
||||
AtomicAccess::dec(&_num_segments, memory_order_relaxed);
|
||||
AtomicAccess::sub(&_mem_size, result->mem_size(), memory_order_relaxed);
|
||||
_num_segments.sub_then_fetch(1u, memory_order_relaxed);
|
||||
_mem_size.sub_then_fetch(result->mem_size(), memory_order_relaxed);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,7 +28,6 @@
|
||||
#include "nmt/memTracker.hpp"
|
||||
#include "oops/markWord.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/bitMap.inline.hpp"
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -42,7 +42,6 @@
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/prefetch.inline.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
@ -238,9 +237,9 @@ void G1ParScanThreadState::do_partial_array(PartialArrayState* state, bool stole
|
||||
G1HeapRegionAttr dest_attr = _g1h->region_attr(to_array);
|
||||
G1SkipCardMarkSetter x(&_scanner, dest_attr.is_new_survivor());
|
||||
// Process claimed task.
|
||||
to_array->oop_iterate_range(&_scanner,
|
||||
checked_cast<int>(claim._start),
|
||||
checked_cast<int>(claim._end));
|
||||
to_array->oop_iterate_elements_range(&_scanner,
|
||||
checked_cast<int>(claim._start),
|
||||
checked_cast<int>(claim._end));
|
||||
}
|
||||
|
||||
MAYBE_INLINE_EVACUATION
|
||||
@ -260,7 +259,7 @@ void G1ParScanThreadState::start_partial_objarray(oop from_obj,
|
||||
// Process the initial chunk. No need to process the type in the
|
||||
// klass, as it will already be handled by processing the built-in
|
||||
// module.
|
||||
to_array->oop_iterate_range(&_scanner, 0, checked_cast<int>(initial_chunk_size));
|
||||
to_array->oop_iterate_elements_range(&_scanner, 0, checked_cast<int>(initial_chunk_size));
|
||||
}
|
||||
|
||||
MAYBE_INLINE_EVACUATION
|
||||
|
||||
@ -412,7 +412,7 @@ void SerialFullGC::follow_array_chunk(objArrayOop array, int index) {
|
||||
const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
|
||||
const int end_index = beg_index + stride;
|
||||
|
||||
array->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);
|
||||
array->oop_iterate_elements_range(&mark_and_push_closure, beg_index, end_index);
|
||||
|
||||
if (end_index < len) {
|
||||
SerialFullGC::push_objarray(array, end_index); // Push the continuation.
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -22,8 +22,9 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "cppstdlib/new.hpp"
|
||||
#include "gc/shared/workerUtils.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
|
||||
// *** WorkerThreadsBarrierSync
|
||||
@ -80,21 +81,21 @@ void WorkerThreadsBarrierSync::abort() {
|
||||
|
||||
SubTasksDone::SubTasksDone(uint n) :
|
||||
_tasks(nullptr), _n_tasks(n) {
|
||||
_tasks = NEW_C_HEAP_ARRAY(bool, n, mtInternal);
|
||||
_tasks = NEW_C_HEAP_ARRAY(Atomic<bool>, n, mtInternal);
|
||||
for (uint i = 0; i < _n_tasks; i++) {
|
||||
_tasks[i] = false;
|
||||
::new (&_tasks[i]) Atomic<bool>(false);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
void SubTasksDone::all_tasks_claimed_impl(uint skipped[], size_t skipped_size) {
|
||||
if (AtomicAccess::cmpxchg(&_verification_done, false, true)) {
|
||||
if (!_verification_done.compare_set(false, true)) {
|
||||
// another thread has done the verification
|
||||
return;
|
||||
}
|
||||
// all non-skipped tasks are claimed
|
||||
for (uint i = 0; i < _n_tasks; ++i) {
|
||||
if (!_tasks[i]) {
|
||||
if (!_tasks[i].load_relaxed()) {
|
||||
auto is_skipped = false;
|
||||
for (size_t j = 0; j < skipped_size; ++j) {
|
||||
if (i == skipped[j]) {
|
||||
@ -109,27 +110,27 @@ void SubTasksDone::all_tasks_claimed_impl(uint skipped[], size_t skipped_size) {
|
||||
for (size_t i = 0; i < skipped_size; ++i) {
|
||||
auto task_index = skipped[i];
|
||||
assert(task_index < _n_tasks, "Array in range.");
|
||||
assert(!_tasks[task_index], "%d is both claimed and skipped.", task_index);
|
||||
assert(!_tasks[task_index].load_relaxed(), "%d is both claimed and skipped.", task_index);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
bool SubTasksDone::try_claim_task(uint t) {
|
||||
assert(t < _n_tasks, "bad task id.");
|
||||
return !_tasks[t] && !AtomicAccess::cmpxchg(&_tasks[t], false, true);
|
||||
return !_tasks[t].load_relaxed() && _tasks[t].compare_set(false, true);
|
||||
}
|
||||
|
||||
SubTasksDone::~SubTasksDone() {
|
||||
assert(_verification_done, "all_tasks_claimed must have been called.");
|
||||
FREE_C_HEAP_ARRAY(bool, _tasks);
|
||||
assert(_verification_done.load_relaxed(), "all_tasks_claimed must have been called.");
|
||||
FREE_C_HEAP_ARRAY(Atomic<bool>, _tasks);
|
||||
}
|
||||
|
||||
// *** SequentialSubTasksDone
|
||||
|
||||
bool SequentialSubTasksDone::try_claim_task(uint& t) {
|
||||
t = _num_claimed;
|
||||
t = _num_claimed.load_relaxed();
|
||||
if (t < _num_tasks) {
|
||||
t = AtomicAccess::add(&_num_claimed, 1u) - 1;
|
||||
t = _num_claimed.fetch_then_add(1u);
|
||||
}
|
||||
return t < _num_tasks;
|
||||
}
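The two claim paths above are common work-distribution idioms: a per-task flag claimed with a relaxed pre-check plus compare-and-set, and a shared counter claimed with fetch-add. A compact standalone sketch of both (task count and names invented for the example):

    // Sketch only: two ways for worker threads to claim tasks exactly once.
    #include <atomic>
    #include <cstdio>

    constexpr unsigned kTasks = 4;
    std::atomic<bool> claimed[kTasks];   // one flag per task, starts out false
    std::atomic<unsigned> next_task{0};  // cursor for sequential claiming

    // Flag-based: cheap relaxed read first, then a CAS that only one thread wins.
    bool try_claim(unsigned t) {
      if (claimed[t].load(std::memory_order_relaxed)) return false;  // early out
      bool expected = false;
      return claimed[t].compare_exchange_strong(expected, true);
    }

    // Counter-based: fetch_add hands out 0,1,2,...; indices past the end mean "done".
    bool try_claim_next(unsigned& t) {
      t = next_task.fetch_add(1);
      return t < kTasks;
    }

    int main() {
      std::printf("flag claim of task 2: %s\n", try_claim(2) ? "won" : "lost");
      std::printf("flag claim of task 2 again: %s\n", try_claim(2) ? "won" : "lost");
      unsigned t;
      while (try_claim_next(t)) std::printf("sequentially claimed %u\n", t);
    }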
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,6 +28,7 @@
|
||||
#include "cppstdlib/type_traits.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "metaprogramming/enableIf.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/mutex.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
@ -79,11 +80,11 @@ public:
|
||||
// enumeration type.
|
||||
|
||||
class SubTasksDone: public CHeapObj<mtInternal> {
|
||||
volatile bool* _tasks;
|
||||
Atomic<bool>* _tasks;
|
||||
uint _n_tasks;
|
||||
|
||||
// make sure verification logic is run exactly once to avoid duplicate assertion failures
|
||||
DEBUG_ONLY(volatile bool _verification_done = false;)
|
||||
DEBUG_ONLY(Atomic<bool> _verification_done;)
|
||||
void all_tasks_claimed_impl(uint skipped[], size_t skipped_size) NOT_DEBUG_RETURN;
|
||||
|
||||
NONCOPYABLE(SubTasksDone);
|
||||
@ -127,7 +128,7 @@ public:
|
||||
class SequentialSubTasksDone : public CHeapObj<mtInternal> {
|
||||
|
||||
uint _num_tasks; // Total number of tasks available.
|
||||
volatile uint _num_claimed; // Number of tasks claimed.
|
||||
Atomic<uint> _num_claimed; // Number of tasks claimed.
|
||||
|
||||
NONCOPYABLE(SequentialSubTasksDone);
|
||||
|
||||
@ -135,7 +136,8 @@ public:
|
||||
SequentialSubTasksDone(uint num_tasks) : _num_tasks(num_tasks), _num_claimed(0) { }
|
||||
~SequentialSubTasksDone() {
|
||||
// Claiming may try to claim more tasks than there are.
|
||||
assert(_num_claimed >= _num_tasks, "Claimed %u tasks of %u", _num_claimed, _num_tasks);
|
||||
assert(_num_claimed.load_relaxed() >= _num_tasks,
|
||||
"Claimed %u tasks of %u", _num_claimed.load_relaxed(), _num_tasks);
|
||||
}
|
||||
|
||||
// Attempt to claim the next unclaimed task in the sequence,
|
||||
|
||||
@ -338,7 +338,7 @@ void ShenandoahRegionPartitions::make_all_regions_unavailable() {
|
||||
_empty_region_counts[partition_id] = 0;
|
||||
_used[partition_id] = 0;
|
||||
_humongous_waste[partition_id] = 0;
|
||||
_available[partition_id] = FreeSetUnderConstruction;
|
||||
_available[partition_id] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@ -2495,6 +2495,10 @@ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_r
|
||||
void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_trashed_regions, size_t &old_trashed_regions,
|
||||
size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
|
||||
shenandoah_assert_heaplocked();
|
||||
assert(rebuild_lock() != nullptr, "sanity");
|
||||
rebuild_lock()->lock(false);
|
||||
// This resets all state information, removing all regions from all sets.
|
||||
clear();
|
||||
log_debug(gc, free)("Rebuilding FreeSet");
|
||||
|
||||
// This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the
|
||||
@ -2524,6 +2528,9 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_
|
||||
_total_young_regions = _heap->num_regions() - old_region_count;
|
||||
_total_global_regions = _heap->num_regions();
|
||||
establish_old_collector_alloc_bias();
|
||||
|
||||
// Release the rebuild lock now. What remains in this function is read-only
|
||||
rebuild_lock()->unlock();
|
||||
_partitions.assert_bounds(true);
|
||||
log_status();
|
||||
}
|
||||
@ -3058,7 +3065,7 @@ void ShenandoahFreeSet::log_status() {
|
||||
size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
|
||||
// capacity() is capacity of mutator
|
||||
// used() is used of mutator
|
||||
size_t free = capacity() - used();
|
||||
size_t free = capacity_holding_lock() - used_holding_lock();
|
||||
// Since certain regions that belonged to the Mutator free partition at the time of most recent rebuild may have been
|
||||
// retired, the sum of used and capacities within regions that are still in the Mutator free partition may not match
|
||||
// my internally tracked values of used() and free().
|
||||
|
||||
@ -28,9 +28,13 @@
|
||||
|
||||
#include "gc/shenandoah/shenandoahHeap.hpp"
|
||||
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
|
||||
#include "gc/shenandoah/shenandoahLock.hpp"
|
||||
#include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
|
||||
typedef ShenandoahLock ShenandoahRebuildLock;
|
||||
typedef ShenandoahLocker ShenandoahRebuildLocker;
|
||||
|
||||
// Each ShenandoahHeapRegion is associated with a ShenandoahFreeSetPartitionId.
|
||||
enum class ShenandoahFreeSetPartitionId : uint8_t {
|
||||
Mutator, // Region is in the Mutator free set: available memory is available to mutators.
|
||||
@ -139,8 +143,6 @@ public:
|
||||
ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set);
|
||||
~ShenandoahRegionPartitions() {}
|
||||
|
||||
static const size_t FreeSetUnderConstruction = SIZE_MAX;
|
||||
|
||||
inline idx_t max() const { return _max; }
|
||||
|
||||
// At initialization, reset OldCollector tallies
|
||||
@ -352,6 +354,16 @@ public:
|
||||
return _available[int(which_partition)];
|
||||
}
|
||||
|
||||
// Return available_in assuming caller does not hold the heap lock but does hold the rebuild_lock.
|
||||
// The returned value may be "slightly stale" because we do not assure that every fetch of this value
|
||||
// sees the most recent update of this value. Requiring the caller to hold the rebuild_lock assures
|
||||
// that we don't see "bogus" values that are "worse than stale". During rebuild of the freeset, the
|
||||
// value of _available is not reliable.
|
||||
inline size_t available_in_locked_for_rebuild(ShenandoahFreeSetPartitionId which_partition) const {
|
||||
assert (which_partition < NumPartitions, "selected free set must be valid");
|
||||
return _available[int(which_partition)];
|
||||
}
|
||||
|
||||
// Returns bytes of humongous waste
|
||||
inline size_t humongous_waste(ShenandoahFreeSetPartitionId which_partition) const {
|
||||
assert (which_partition < NumPartitions, "selected free set must be valid");
|
||||
@ -359,23 +371,6 @@ public:
|
||||
return _humongous_waste[int(which_partition)];
|
||||
}
|
||||
|
||||
// Return available_in assuming caller does not hold the heap lock. In production builds, available is
|
||||
// returned without acquiring the lock. In debug builds, the global heap lock is acquired in order to
|
||||
// enforce a consistency assert.
|
||||
inline size_t available_in_not_locked(ShenandoahFreeSetPartitionId which_partition) const {
|
||||
assert (which_partition < NumPartitions, "selected free set must be valid");
|
||||
shenandoah_assert_not_heaplocked();
|
||||
#ifdef ASSERT
|
||||
ShenandoahHeapLocker locker(ShenandoahHeap::heap()->lock());
|
||||
assert((_available[int(which_partition)] == FreeSetUnderConstruction) ||
|
||||
(_available[int(which_partition)] == _capacity[int(which_partition)] - _used[int(which_partition)]),
|
||||
"Expect available (%zu) equals capacity (%zu) - used (%zu) for partition %s",
|
||||
_available[int(which_partition)], _capacity[int(which_partition)], _used[int(which_partition)],
|
||||
partition_membership_name(idx_t(which_partition)));
|
||||
#endif
|
||||
return _available[int(which_partition)];
|
||||
}
|
||||
|
||||
inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value);
|
||||
|
||||
inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
|
||||
@ -440,6 +435,15 @@ private:
|
||||
ShenandoahHeap* const _heap;
|
||||
ShenandoahRegionPartitions _partitions;
|
||||
|
||||
// This locks the rebuild process (in combination with the global heap lock). Whenever we rebuild the free set,
|
||||
// we first acquire the global heap lock and then we acquire this _rebuild_lock in a nested context. Threads that
|
||||
// need to check available, acquire only the _rebuild_lock to make sure that they are not obtaining the value of
|
||||
// available for a partially reconstructed free-set.
|
||||
//
|
||||
// Note that there is rank ordering of nested locks to prevent deadlock. All threads that need to acquire both
|
||||
// locks will acquire them in the same order: first the global heap lock and then the rebuild lock.
|
||||
ShenandoahRebuildLock _rebuild_lock;
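The comment above fixes a nesting order (global heap lock first, then the rebuild lock) and lets readers that only need a consistent "available" value take just the inner lock. A small standalone illustration of that shape with std::mutex (the names and the stand-in value are invented; Shenandoah's own lock types are not used here):

    // Sketch only: writers take both locks in a fixed order; readers that only
    // need a stable snapshot take just the inner rebuild lock.
    #include <cstddef>
    #include <mutex>

    std::mutex heap_lock;      // outer, higher-ranked lock
    std::mutex rebuild_lock;   // inner lock held for the whole rebuild
    size_t available_bytes = 0;

    void rebuild_free_set() {
      std::lock_guard<std::mutex> outer(heap_lock);     // always acquired first
      std::lock_guard<std::mutex> inner(rebuild_lock);  // then the rebuild lock
      available_bytes = 0;                              // unreliable mid-rebuild...
      // ... recompute partitions ...
      available_bytes = 42 * 1024 * 1024;               // ...republished before unlock
    }

    size_t read_available() {
      std::lock_guard<std::mutex> guard(rebuild_lock);  // never sees a half-built value
      return available_bytes;
    }

    int main() {
      rebuild_free_set();
      return read_available() > 0 ? 0 : 1;
    }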
|
||||
|
||||
size_t _total_humongous_waste;
|
||||
|
||||
HeapWord* allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r);
|
||||
@ -635,10 +639,12 @@ private:
|
||||
void log_status();
|
||||
|
||||
public:
|
||||
static const size_t FreeSetUnderConstruction = ShenandoahRegionPartitions::FreeSetUnderConstruction;
|
||||
|
||||
ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions);
|
||||
|
||||
ShenandoahRebuildLock* rebuild_lock() {
|
||||
return &_rebuild_lock;
|
||||
}
|
||||
|
||||
inline size_t max_regions() const { return _partitions.max(); }
|
||||
ShenandoahFreeSetPartitionId membership(size_t index) const { return _partitions.membership(index); }
|
||||
inline void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition,
|
||||
@ -776,9 +782,29 @@ public:
|
||||
// adjusts available with respect to lock holders. However, sequential calls to these three functions may produce
|
||||
// inconsistent data: available may not equal capacity - used because the intermediate states of any "atomic"
|
||||
// locked action can be seen by these unlocked functions.
|
||||
inline size_t capacity() const { return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator); }
|
||||
inline size_t used() const { return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator); }
|
||||
inline size_t available() const { return _partitions.available_in_not_locked(ShenandoahFreeSetPartitionId::Mutator); }
|
||||
inline size_t capacity_holding_lock() const {
|
||||
shenandoah_assert_heaplocked();
|
||||
return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator);
|
||||
}
|
||||
inline size_t capacity_not_holding_lock() {
|
||||
shenandoah_assert_not_heaplocked();
|
||||
ShenandoahRebuildLocker locker(rebuild_lock());
|
||||
return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator);
|
||||
}
|
||||
inline size_t used_holding_lock() const {
|
||||
shenandoah_assert_heaplocked();
|
||||
return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator);
|
||||
}
|
||||
inline size_t used_not_holding_lock() {
|
||||
shenandoah_assert_not_heaplocked();
|
||||
ShenandoahRebuildLocker locker(rebuild_lock());
|
||||
return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator);
|
||||
}
|
||||
inline size_t available() {
|
||||
shenandoah_assert_not_heaplocked();
|
||||
ShenandoahRebuildLocker locker(rebuild_lock());
|
||||
return _partitions.available_in_locked_for_rebuild(ShenandoahFreeSetPartitionId::Mutator);
|
||||
}
|
||||
|
||||
inline size_t total_humongous_waste() const { return _total_humongous_waste; }
|
||||
inline size_t humongous_waste_in_mutator() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator); }
|
||||
|
||||
@ -1113,18 +1113,17 @@ void ShenandoahFullGC::phase5_epilog() {
|
||||
ShenandoahPostCompactClosure post_compact;
|
||||
heap->heap_region_iterate(&post_compact);
|
||||
heap->collection_set()->clear();
|
||||
size_t young_cset_regions, old_cset_regions;
|
||||
size_t first_old, last_old, num_old;
|
||||
heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
|
||||
|
||||
// We also do not expand old generation size following Full GC because we have scrambled age populations and
|
||||
// no longer have objects separated by age into distinct regions.
|
||||
if (heap->mode()->is_generational()) {
|
||||
ShenandoahGenerationalFullGC::compute_balances();
|
||||
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
|
||||
ShenandoahFreeSet* free_set = heap->free_set();
|
||||
{
|
||||
free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
|
||||
// We also do not expand old generation size following Full GC because we have scrambled age populations and
|
||||
// no longer have objects separated by age into distinct regions.
|
||||
if (heap->mode()->is_generational()) {
|
||||
ShenandoahGenerationalFullGC::compute_balances();
|
||||
}
|
||||
free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
|
||||
}
|
||||
|
||||
heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
|
||||
|
||||
// Set mark incomplete because the marking bitmaps have been reset except pinned regions.
|
||||
_generation->set_mark_incomplete();
|
||||
|
||||
|
||||
@ -815,10 +815,9 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
ShenandoahHeapLocker locker(heap->lock());
size_t young_cset_regions, old_cset_regions;

// We are preparing for evacuation. At this time, we ignore cset region tallies.
size_t first_old, last_old, num_old;
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
_free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);

if (heap->mode()->is_generational()) {

@ -104,17 +104,6 @@ void ShenandoahGenerationalHeap::initialize_heuristics() {
// Initialize global generation and heuristics even in generational mode.
ShenandoahHeap::initialize_heuristics();

// Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
// for old would be total heap - minimum capacity of young. This means the sum of the maximum
// allowed for old and young could exceed the total heap size. It remains the case that the
// _actual_ capacity of young + old = total.
size_t region_count = num_regions();
size_t max_young_regions = MAX2((region_count * ShenandoahMaxYoungPercentage) / 100, (size_t) 1U);
size_t initial_capacity_young = max_young_regions * ShenandoahHeapRegion::region_size_bytes();
size_t max_capacity_young = initial_capacity_young;
size_t initial_capacity_old = max_capacity() - max_capacity_young;
size_t max_capacity_old = max_capacity() - initial_capacity_young;

_young_generation = new ShenandoahYoungGeneration(max_workers());
_old_generation = new ShenandoahOldGeneration(max_workers());
_young_generation->initialize_heuristics(mode());

@ -426,8 +426,6 @@ jint ShenandoahHeap::initialize() {
|
||||
_affiliations[i] = ShenandoahAffiliation::FREE;
|
||||
}
|
||||
_free_set = new ShenandoahFreeSet(this, _num_regions);
|
||||
|
||||
|
||||
post_initialize_heuristics();
|
||||
// We are initializing free set. We ignore cset region tallies.
|
||||
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
|
||||
@ -1658,7 +1656,7 @@ void ShenandoahHeap::verify(VerifyOption vo) {
|
||||
}
|
||||
}
|
||||
size_t ShenandoahHeap::tlab_capacity() const {
|
||||
return _free_set->capacity();
|
||||
return _free_set->capacity_not_holding_lock();
|
||||
}
|
||||
|
||||
class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
|
||||
@ -2138,7 +2136,7 @@ GCTracer* ShenandoahHeap::tracer() {
|
||||
}
|
||||
|
||||
size_t ShenandoahHeap::tlab_used() const {
|
||||
return _free_set->used();
|
||||
return _free_set->used_not_holding_lock();
|
||||
}
|
||||
|
||||
bool ShenandoahHeap::try_cancel_gc(GCCause::Cause cause) {
|
||||
@ -2528,8 +2526,7 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
ShenandoahHeapLocker locker(lock());
size_t young_cset_regions, old_cset_regions;
size_t first_old_region, last_old_region, old_region_count;
size_t young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count;
_free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
// If there are no old regions, first_old_region will be greater than last_old_region
assert((first_old_region > last_old_region) ||
@ -2548,13 +2545,14 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
// The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this
// available for transfer to old. Note that transfer of humongous regions does not impact available.
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
size_t allocation_runway =
gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);

// Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available
// memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular
// regions in place when many of these regular regions have an abundant amount of available memory within them. Fragmentation
// will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
// regions in place when many of these regular regions have an abundant amount of available memory within them.
// Fragmentation will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
//
// We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
// within partially consumed regions of memory.

@ -167,7 +167,7 @@ inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q,
|
||||
|
||||
if (len <= (int) ObjArrayMarkingStride*2) {
|
||||
// A few slices only, process directly
|
||||
array->oop_iterate_range(cl, 0, len);
|
||||
array->oop_iterate_elements_range(cl, 0, len);
|
||||
} else {
|
||||
int bits = log2i_graceful(len);
|
||||
// Compensate for non-power-of-two arrays, cover the array in excess:
|
||||
@ -216,7 +216,7 @@ inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q,
|
||||
// Process the irregular tail, if present
|
||||
int from = last_idx;
|
||||
if (from < len) {
|
||||
array->oop_iterate_range(cl, from, len);
|
||||
array->oop_iterate_elements_range(cl, from, len);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -248,7 +248,7 @@ inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl,
|
||||
assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
|
||||
#endif
|
||||
|
||||
array->oop_iterate_range(cl, from, to);
|
||||
array->oop_iterate_elements_range(cl, from, to);
|
||||
}
|
||||
|
||||
template <ShenandoahGenerationType GENERATION>
|
||||
|
||||
@ -30,7 +30,7 @@
|
||||
|
||||
ShenandoahMetricsSnapshot::ShenandoahMetricsSnapshot(ShenandoahFreeSet* free_set)
|
||||
: _free_set(free_set)
|
||||
, _used_before(free_set->used())
|
||||
, _used_before(free_set->used_not_holding_lock())
|
||||
, _if_before(free_set->internal_fragmentation())
|
||||
, _ef_before(free_set->external_fragmentation()) {
|
||||
}
|
||||
@ -38,7 +38,6 @@ ShenandoahMetricsSnapshot::ShenandoahMetricsSnapshot(ShenandoahFreeSet* free_set
|
||||
bool ShenandoahMetricsSnapshot::is_good_progress() const {
|
||||
// Under the critical threshold?
|
||||
const size_t free_actual = _free_set->available();
|
||||
assert(free_actual != ShenandoahFreeSet::FreeSetUnderConstruction, "Avoid this race");
|
||||
|
||||
// ShenandoahCriticalFreeThreshold is expressed as a percentage. We multiply this percentage by 1/100th
|
||||
// of the soft max capacity to determine whether the available memory within the mutator partition of the
|
||||
@ -52,7 +51,7 @@ bool ShenandoahMetricsSnapshot::is_good_progress() const {
|
||||
}
|
||||
|
||||
// Freed up enough?
|
||||
const size_t used_after = _free_set->used();
|
||||
const size_t used_after = _free_set->used_not_holding_lock();
|
||||
const size_t progress_actual = (_used_before > used_after) ? _used_before - used_after : 0;
|
||||
const size_t progress_expected = ShenandoahHeapRegion::region_size_bytes();
|
||||
const bool prog_used = progress_actual >= progress_expected;
|
||||
|
||||
@ -412,9 +412,12 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent
ShenandoahGCPhase phase(concurrent ?
ShenandoahPhaseTimings::final_rebuild_freeset :
ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
ShenandoahFreeSet* free_set = heap->free_set();
ShenandoahHeapLocker locker(heap->lock());
size_t young_trash_regions, old_trash_regions;
size_t first_old, last_old, num_old;

// This is completion of old-gen marking. We rebuild in order to reclaim immediate garbage and to
// prepare for subsequent mixed evacuations.
size_t young_trash_regions, old_trash_regions, first_old, last_old, num_old;
heap->free_set()->prepare_to_rebuild(young_trash_regions, old_trash_regions, first_old, last_old, num_old);
// At the end of old-gen, we may find that we have reclaimed immediate garbage, allowing a longer allocation runway.
// We may also find that we have accumulated candidate regions for mixed evacuation. If so, we will want to expand

@ -430,18 +430,6 @@
"by thread type (worker or mutator) and evacuation type (young, " \
"old, or promotion.") \
\
product(uintx, ShenandoahMinYoungPercentage, 20, EXPERIMENTAL, \
"The minimum percentage of the heap to use for the young " \
"generation. Heuristics will not adjust the young generation " \
"to be less than this.") \
range(0, 100) \
\
product(uintx, ShenandoahMaxYoungPercentage, 100, EXPERIMENTAL, \
"The maximum percentage of the heap to use for the young " \
"generation. Heuristics will not adjust the young generation " \
"to be more than this.") \
range(0, 100) \
\
product(uintx, ShenandoahCriticalFreeThreshold, 1, EXPERIMENTAL, \
"How much of the heap needs to be free after recovery cycles, " \
"either Degenerated or Full GC to be claimed successful. If this "\

@ -456,7 +456,7 @@ void ZHeapIterator::follow_array_chunk(const ZHeapIteratorContext& context, cons
|
||||
|
||||
// Follow array chunk
|
||||
ZHeapIteratorOopClosure<false /* VisitReferents */> cl(this, context, obj);
|
||||
ZIterator::oop_iterate_range(obj, &cl, start, end);
|
||||
ZIterator::oop_iterate_elements_range(obj, &cl, start, end);
|
||||
}
|
||||
|
||||
template <bool VisitWeaks>
|
||||
|
||||
@ -41,7 +41,7 @@ public:
|
||||
static void oop_iterate(oop obj, OopClosureT* cl);
|
||||
|
||||
template <typename OopClosureT>
|
||||
static void oop_iterate_range(objArrayOop obj, OopClosureT* cl, int start, int end);
|
||||
static void oop_iterate_elements_range(objArrayOop obj, OopClosureT* cl, int start, int end);
|
||||
|
||||
// This function skips invisible roots
|
||||
template <typename Function>
|
||||
|
||||
@ -66,9 +66,9 @@ void ZIterator::oop_iterate(oop obj, OopClosureT* cl) {
|
||||
}
|
||||
|
||||
template <typename OopClosureT>
|
||||
void ZIterator::oop_iterate_range(objArrayOop obj, OopClosureT* cl, int start, int end) {
|
||||
void ZIterator::oop_iterate_elements_range(objArrayOop obj, OopClosureT* cl, int start, int end) {
|
||||
assert(!is_invisible_object_array(obj), "not safe");
|
||||
obj->oop_iterate_range(cl, start, end);
|
||||
obj->oop_iterate_elements_range(cl, start, end);
|
||||
}
|
||||
|
||||
template <typename Function>
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -32,6 +32,7 @@
|
||||
#include "jfr/recorder/repository/jfrEmergencyDump.hpp"
|
||||
#include "jfr/recorder/repository/jfrRepository.hpp"
|
||||
#include "jfr/recorder/service/jfrOptionSet.hpp"
|
||||
#include "jfr/recorder/service/jfrRecorderService.hpp"
|
||||
#include "jfr/support/jfrClassDefineEvent.hpp"
|
||||
#include "jfr/support/jfrKlassExtension.hpp"
|
||||
#include "jfr/support/jfrResolution.hpp"
|
||||
@ -43,6 +44,7 @@
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/javaThread.hpp"
|
||||
|
||||
|
||||
bool Jfr::is_enabled() {
|
||||
return JfrRecorder::is_enabled();
|
||||
}
|
||||
@ -153,9 +155,9 @@ void Jfr::on_resolution(const Method* caller, const Method* target, TRAPS) {
|
||||
}
|
||||
#endif
|
||||
|
||||
void Jfr::on_vm_shutdown(bool emit_old_object_samples, bool emit_event_shutdown, bool halt) {
|
||||
void Jfr::on_vm_shutdown(bool exception_handler /* false */, bool halt /* false */, bool oom /* false */) {
|
||||
if (!halt && JfrRecorder::is_recording()) {
|
||||
JfrEmergencyDump::on_vm_shutdown(emit_old_object_samples, emit_event_shutdown);
|
||||
JfrEmergencyDump::on_vm_shutdown(exception_handler, oom);
|
||||
}
|
||||
}
|
||||
|
||||
@ -173,6 +175,12 @@ bool Jfr::on_start_flight_recording_option(const JavaVMOption** option, char* de
|
||||
return JfrOptionSet::parse_start_flight_recording_option(option, delimiter);
|
||||
}
|
||||
|
||||
void Jfr::on_report_java_out_of_memory() {
|
||||
if (CrashOnOutOfMemoryError && JfrRecorder::is_recording()) {
|
||||
JfrRecorderService::emit_leakprofiler_events_on_oom();
|
||||
}
|
||||
}
|
||||
|
||||
#if INCLUDE_CDS
|
||||
void Jfr::on_restoration(const Klass* k, JavaThread* jt) {
|
||||
assert(k != nullptr, "invariant");
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -71,7 +71,7 @@ class Jfr : AllStatic {
|
||||
static void on_resolution(const Method* caller, const Method* target, TRAPS);
|
||||
static void on_java_thread_start(JavaThread* starter, JavaThread* startee);
|
||||
static void on_set_current_thread(JavaThread* jt, oop thread);
|
||||
static void on_vm_shutdown(bool emit_old_object_samples, bool emit_event_shutdown, bool halt = false);
|
||||
static void on_vm_shutdown(bool exception_handler = false, bool halt = false, bool oom = false);
|
||||
static void on_vm_error_report(outputStream* st);
|
||||
static bool on_flight_recorder_option(const JavaVMOption** option, char* delimiter);
|
||||
static bool on_start_flight_recording_option(const JavaVMOption** option, char* delimiter);
|
||||
@ -79,6 +79,7 @@ class Jfr : AllStatic {
|
||||
static void initialize_main_thread(JavaThread* jt);
|
||||
static bool has_sample_request(JavaThread* jt);
|
||||
static void check_and_process_sample_request(JavaThread* jt);
|
||||
static void on_report_java_out_of_memory();
|
||||
CDS_ONLY(static void on_restoration(const Klass* k, JavaThread* jt);)
|
||||
};
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -66,10 +66,6 @@
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#ifdef LINUX
|
||||
#include "os_linux.hpp"
|
||||
#include "osContainer_linux.hpp"
|
||||
#endif
|
||||
|
||||
#define NO_TRANSITION(result_type, header) extern "C" { result_type JNICALL header {
|
||||
#define NO_TRANSITION_END } }
|
||||
@ -364,8 +360,7 @@ JVM_ENTRY_NO_ENV(void, jfr_set_force_instrumentation(JNIEnv* env, jclass jvm, jb
|
||||
JVM_END
|
||||
|
||||
NO_TRANSITION(void, jfr_emit_old_object_samples(JNIEnv* env, jclass jvm, jlong cutoff_ticks, jboolean emit_all, jboolean skip_bfs))
|
||||
JfrRecorderService service;
|
||||
service.emit_leakprofiler_events(cutoff_ticks, emit_all == JNI_TRUE, skip_bfs == JNI_TRUE);
|
||||
JfrRecorderService::emit_leakprofiler_events(cutoff_ticks, emit_all == JNI_TRUE, skip_bfs == JNI_TRUE);
|
||||
NO_TRANSITION_END
|
||||
|
||||
JVM_ENTRY_NO_ENV(void, jfr_exclude_thread(JNIEnv* env, jclass jvm, jobject t))
|
||||
@ -401,35 +396,18 @@ JVM_ENTRY_NO_ENV(jboolean, jfr_is_class_instrumented(JNIEnv* env, jclass jvm, jc
|
||||
JVM_END
|
||||
|
||||
JVM_ENTRY_NO_ENV(jboolean, jfr_is_containerized(JNIEnv* env, jclass jvm))
|
||||
#ifdef LINUX
|
||||
return OSContainer::is_containerized();
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
return os::is_containerized();
|
||||
JVM_END
|
||||
|
||||
JVM_ENTRY_NO_ENV(jlong, jfr_host_total_memory(JNIEnv* env, jclass jvm))
|
||||
#ifdef LINUX
|
||||
// We want the host memory, not the container limit.
|
||||
// os::physical_memory() would return the container limit.
|
||||
return static_cast<jlong>(os::Linux::physical_memory());
|
||||
#else
|
||||
return static_cast<jlong>(os::physical_memory());
|
||||
#endif
|
||||
return static_cast<jlong>(os::Machine::physical_memory());
|
||||
JVM_END
|
||||
|
||||
JVM_ENTRY_NO_ENV(jlong, jfr_host_total_swap_memory(JNIEnv* env, jclass jvm))
|
||||
#ifdef LINUX
|
||||
// We want the host swap memory, not the container value.
|
||||
physical_memory_size_type host_swap = 0;
|
||||
(void)os::Linux::host_swap(host_swap); // Discard return value and treat as no swap
|
||||
return static_cast<jlong>(host_swap);
|
||||
#else
|
||||
physical_memory_size_type total_swap_space = 0;
|
||||
// Return value ignored - defaulting to 0 on failure.
|
||||
(void)os::total_swap_space(total_swap_space);
|
||||
(void)os::Machine::total_swap_space(total_swap_space);
|
||||
return static_cast<jlong>(total_swap_space);
|
||||
#endif
|
||||
JVM_END
|
||||
|
||||
JVM_ENTRY_NO_ENV(void, jfr_emit_data_loss(JNIEnv* env, jclass jvm, jlong bytes))
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -232,41 +232,50 @@ void JfrSamplerThread::task_stacktrace(JfrSampleRequestType type, JavaThread** l
JavaThread* start = nullptr;
elapsedTimer sample_time;
sample_time.start();
ThreadsListHandle tlh;
// Resolve a sample session relative start position index into the thread list array.
// In cases where the last sampled thread is null or not-null but stale, find_index() returns -1.
_cur_index = tlh.list()->find_index_of_JavaThread(*last_thread);
JavaThread* current = _cur_index != -1 ? *last_thread : nullptr;
{
/*
* Take the Threads_lock for three purposes:
*
* 1) Avoid sampling right through a safepoint,
* which could result in touching oops in case of virtual threads.
* 2) Prevent JFR from issuing an epoch rotation while the sampler thread
* is actively processing a thread in state native, as both threads are outside the safepoint protocol.
* 3) Some operating systems (BSD / Mac) require a process lock when sending a signal with pthread_kill.
* Holding the Threads_lock prevents a JavaThread from calling os::create_thread(), which also takes the process lock.
* In a sense, we provide a coarse signal mask, so we can always send the resume signal.
*/
MutexLocker tlock(Threads_lock);
ThreadsListHandle tlh;
// Resolve a sample session relative start position index into the thread list array.
// In cases where the last sampled thread is null or not-null but stale, find_index() returns -1.
_cur_index = tlh.list()->find_index_of_JavaThread(*last_thread);
JavaThread* current = _cur_index != -1 ? *last_thread : nullptr;

while (num_samples < sample_limit) {
|
||||
current = next_thread(tlh.list(), start, current);
|
||||
if (current == nullptr) {
|
||||
break;
|
||||
}
|
||||
if (is_excluded(current)) {
|
||||
continue;
|
||||
}
|
||||
if (start == nullptr) {
|
||||
start = current; // remember the thread where we started to attempt sampling
|
||||
}
|
||||
bool success;
|
||||
if (JAVA_SAMPLE == type) {
|
||||
success = sample_java_thread(current);
|
||||
} else {
|
||||
assert(type == NATIVE_SAMPLE, "invariant");
|
||||
success = sample_native_thread(current);
|
||||
}
|
||||
if (success) {
|
||||
num_samples++;
|
||||
}
|
||||
if (SafepointSynchronize::is_at_safepoint()) {
|
||||
// For _thread_in_native, we cannot get the Threads_lock.
|
||||
// For _thread_in_Java, well, there are none.
|
||||
break;
|
||||
while (num_samples < sample_limit) {
|
||||
current = next_thread(tlh.list(), start, current);
|
||||
if (current == nullptr) {
|
||||
break;
|
||||
}
|
||||
if (is_excluded(current)) {
|
||||
continue;
|
||||
}
|
||||
if (start == nullptr) {
|
||||
start = current; // remember the thread where we started to attempt sampling
|
||||
}
|
||||
bool success;
|
||||
if (JAVA_SAMPLE == type) {
|
||||
success = sample_java_thread(current);
|
||||
} else {
|
||||
assert(type == NATIVE_SAMPLE, "invariant");
|
||||
success = sample_native_thread(current);
|
||||
}
|
||||
if (success) {
|
||||
num_samples++;
|
||||
}
|
||||
}
|
||||
|
||||
*last_thread = current; // remember the thread we last attempted to sample
|
||||
}
|
||||
|
||||
*last_thread = current; // remember the thread we last attempted to sample
|
||||
sample_time.stop();
|
||||
log_trace(jfr)("JFR thread sampling done in %3.7f secs with %d java %d native samples",
|
||||
sample_time.seconds(), type == JAVA_SAMPLE ? num_samples : 0, type == NATIVE_SAMPLE ? num_samples : 0);
|
||||
@ -297,6 +306,7 @@ class OSThreadSampler : public SuspendedThreadTask {
|
||||
// Sampling a thread in state _thread_in_Java
|
||||
// involves a platform-specific thread suspend and CPU context retrieval.
|
||||
bool JfrSamplerThread::sample_java_thread(JavaThread* jt) {
|
||||
assert_lock_strong(Threads_lock);
|
||||
if (jt->thread_state() != _thread_in_Java) {
|
||||
return false;
|
||||
}
|
||||
@ -328,6 +338,7 @@ static JfrSamplerThread* _sampler_thread = nullptr;
|
||||
// without thread suspension and CPU context retrieval,
|
||||
// if we carefully order the loads of the thread state.
|
||||
bool JfrSamplerThread::sample_native_thread(JavaThread* jt) {
|
||||
assert_lock_strong(Threads_lock);
|
||||
if (jt->thread_state() != _thread_in_native) {
|
||||
return false;
|
||||
}
|
||||
@ -343,24 +354,14 @@ bool JfrSamplerThread::sample_native_thread(JavaThread* jt) {
|
||||
|
||||
SafepointMechanism::arm_local_poll_release(jt);
|
||||
|
||||
// Take the Threads_lock for two purposes:
|
||||
// 1) Avoid sampling through a safepoint which could result
|
||||
// in touching oops in case of virtual threads.
|
||||
// 2) Prevent JFR from issuing an epoch rotation while the sampler thread
|
||||
// is actively processing a thread in native, as both threads are now
|
||||
// outside the safepoint protocol.
|
||||
|
||||
// OrderAccess::fence() as part of acquiring the lock prevents loads from floating up.
|
||||
JfrMutexTryLock threads_lock(Threads_lock);
|
||||
|
||||
if (!threads_lock.acquired() || !jt->has_last_Java_frame()) {
|
||||
// Remove the native sample request and release the potentially waiting thread.
|
||||
JfrSampleMonitor jsm(tl);
|
||||
return false;
|
||||
// Separate the arming of the poll (above) from the reading of JavaThread state (below).
|
||||
if (UseSystemMemoryBarrier) {
|
||||
SystemMemoryBarrier::emit();
|
||||
} else {
|
||||
OrderAccess::fence();
|
||||
}
|
||||
|
||||
if (jt->thread_state() != _thread_in_native) {
|
||||
assert_lock_strong(Threads_lock);
|
||||
if (jt->thread_state() != _thread_in_native || !jt->has_last_Java_frame()) {
|
||||
JfrSampleMonitor jsm(tl);
|
||||
if (jsm.is_waiting()) {
|
||||
// The thread has already returned from native,
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -38,6 +38,8 @@
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/thread.inline.hpp"
|
||||
#include "runtime/vmOperations.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
@ -460,15 +462,6 @@ static void release_locks(Thread* thread) {
|
||||
assert(thread != nullptr, "invariant");
|
||||
assert(!thread->is_Java_thread() || JavaThread::cast(thread)->thread_state() == _thread_in_vm, "invariant");
|
||||
|
||||
#ifdef ASSERT
|
||||
Mutex* owned_lock = thread->owned_locks();
|
||||
while (owned_lock != nullptr) {
|
||||
Mutex* next = owned_lock->next();
|
||||
owned_lock->unlock();
|
||||
owned_lock = next;
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
if (Threads_lock->owned_by_self()) {
|
||||
Threads_lock->unlock();
|
||||
}
|
||||
@ -550,17 +543,14 @@ class JavaThreadInVMAndNative : public StackObj {
|
||||
}
|
||||
};
|
||||
|
||||
static void post_events(bool emit_old_object_samples, bool emit_event_shutdown, Thread* thread) {
if (emit_old_object_samples) {
LeakProfiler::emit_events(max_jlong, false, false);
}
if (emit_event_shutdown) {
static void post_events(bool exception_handler, bool oom, Thread* thread) {
if (exception_handler) {
EventShutdown e;
e.set_reason("VM Error");
e.set_reason(oom ? "CrashOnOutOfMemoryError" : "VM Error");
e.commit();
}
EventDumpReason event;
event.set_reason(emit_old_object_samples ? "Out of Memory" : "Crash");
event.set_reason(exception_handler && oom ? "CrashOnOutOfMemoryError" : exception_handler ? "Crash" : "Out of Memory");
event.set_recordingId(-1);
event.commit();
}
@ -594,20 +584,40 @@ static bool guard_reentrancy() {
return false;
}

void JfrEmergencyDump::on_vm_shutdown(bool emit_old_object_samples, bool emit_event_shutdown) {
void JfrEmergencyDump::on_vm_shutdown(bool exception_handler, bool oom) {
if (!guard_reentrancy()) {
return;
}

Thread* const thread = Thread::current_or_null_safe();
assert(thread != nullptr, "invariant");
if (thread->is_Watcher_thread()) {
log_info(jfr, system)("The Watcher thread crashed so no jfr emergency dump will be generated.");
return;
}

// Ensure a JavaThread is _thread_in_vm when we make this call
JavaThreadInVMAndNative jtivm(thread);
post_events(exception_handler, oom, thread);

if (thread->is_Watcher_thread()) {
// We cannot attempt an emergency dump using the Watcher thread
// because we rely on the WatcherThread task "is_error_reported()",
// to exit the VM after a hardcoded timeout, should the relatively
// risky operation of an emergency dump fail (deadlock, livelock).
log_warning(jfr, system)
("The Watcher thread crashed so no jfr emergency dump will be generated.");
return;
}

if (thread->is_VM_thread()) {
const VM_Operation* const operation = VMThread::vm_operation();
if (operation != nullptr && operation->type() == VM_Operation::VMOp_JFROldObject) {
// We will not be able to issue a rotation because the rotation lock
// is held by the JFR Recorder Thread that issued the VM_Operation.
log_warning(jfr, system)
("The VM Thread crashed as part of emitting leak profiler events so no jfr emergency dump will be generated.");
return;
}
}

release_locks(thread);
post_events(emit_old_object_samples, emit_event_shutdown, thread);

// if JavaThread, transition to _thread_in_native to issue a final flushpoint
NoHandleMark nhm;

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -39,7 +39,7 @@ class JfrEmergencyDump : AllStatic {
|
||||
static const char* chunk_path(const char* repository_path);
|
||||
static void on_vm_error(const char* repository_path);
|
||||
static void on_vm_error_report(outputStream* st, const char* repository_path);
|
||||
static void on_vm_shutdown(bool emit_old_object_samples, bool emit_event_shutdown);
|
||||
static void on_vm_shutdown(bool exception_handler, bool oom);
|
||||
};
|
||||
|
||||
#endif // SHARE_JFR_RECORDER_REPOSITORY_JFREMERGENCYDUMP_HPP
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -34,7 +34,8 @@
|
||||
(MSGBIT(MSG_START)) | \
|
||||
(MSGBIT(MSG_CLONE_IN_MEMORY)) | \
|
||||
(MSGBIT(MSG_VM_ERROR)) | \
|
||||
(MSGBIT(MSG_FLUSHPOINT)) \
|
||||
(MSGBIT(MSG_FLUSHPOINT)) | \
|
||||
(MSGBIT(MSG_EMIT_LEAKP_REFCHAINS)) \
|
||||
)
|
||||
|
||||
static JfrPostBox* _instance = nullptr;
|
||||
@ -165,7 +166,7 @@ void JfrPostBox::notify_waiters() {
|
||||
assert(JfrMsg_lock->owned_by_self(), "incrementing _msg_handled_serial is protected by JfrMsg_lock.");
|
||||
// Update made visible on release of JfrMsg_lock via fence instruction in Monitor::IUnlock.
|
||||
++_msg_handled_serial;
|
||||
JfrMsg_lock->notify();
|
||||
JfrMsg_lock->notify_all();
|
||||
}
|
||||
|
||||
// safeguard to ensure no threads are left waiting
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -43,6 +43,7 @@ enum JFR_Msg {
|
||||
MSG_SHUTDOWN,
|
||||
MSG_VM_ERROR,
|
||||
MSG_FLUSHPOINT,
|
||||
MSG_EMIT_LEAKP_REFCHAINS,
|
||||
MSG_NO_OF_MSGS
|
||||
};
|
||||
|
||||
@ -51,23 +52,25 @@ enum JFR_Msg {
|
||||
*
|
||||
* Synchronous messages (posting thread waits for message completion):
|
||||
*
|
||||
* MSG_CLONE_IN_MEMORY (0) ; MSGBIT(MSG_CLONE_IN_MEMORY) == (1 << 0) == 0x1
|
||||
* MSG_START(1) ; MSGBIT(MSG_START) == (1 << 0x1) == 0x2
|
||||
* MSG_STOP (2) ; MSGBIT(MSG_STOP) == (1 << 0x2) == 0x4
|
||||
* MSG_ROTATE (3) ; MSGBIT(MSG_ROTATE) == (1 << 0x3) == 0x8
|
||||
* MSG_VM_ERROR (8) ; MSGBIT(MSG_VM_ERROR) == (1 << 0x8) == 0x100
|
||||
* MSG_FLUSHPOINT (9) ; MSGBIT(MSG_FLUSHPOINT) == (1 << 0x9) == 0x200
|
||||
* MSG_CLONE_IN_MEMORY (0) ; MSGBIT(MSG_CLONE_IN_MEMORY) == (1 << 0) == 0x1
|
||||
* MSG_START(1) ; MSGBIT(MSG_START) == (1 << 0x1) == 0x2
|
||||
* MSG_STOP (2) ; MSGBIT(MSG_STOP) == (1 << 0x2) == 0x4
|
||||
* MSG_ROTATE (3) ; MSGBIT(MSG_ROTATE) == (1 << 0x3) == 0x8
|
||||
* MSG_VM_ERROR (8) ; MSGBIT(MSG_VM_ERROR) == (1 << 0x8) == 0x100
|
||||
* MSG_FLUSHPOINT (9) ; MSGBIT(MSG_FLUSHPOINT) == (1 << 0x9) == 0x200
|
||||
* MSG_EMIT_LEAKP_REFCHAINS (10); MSGBIT(MSG_EMIT_LEAKP_REFCHAINS) == (1 << 0xa) == 0x400
|
||||
*
|
||||
* Asynchronous messages (posting thread returns immediately upon deposit):
|
||||
*
|
||||
* MSG_FULLBUFFER (4) ; MSGBIT(MSG_FULLBUFFER) == (1 << 0x4) == 0x10
|
||||
* MSG_CHECKPOINT (5) ; MSGBIT(CHECKPOINT) == (1 << 0x5) == 0x20
|
||||
* MSG_WAKEUP (6) ; MSGBIT(WAKEUP) == (1 << 0x6) == 0x40
|
||||
* MSG_SHUTDOWN (7) ; MSGBIT(MSG_SHUTDOWN) == (1 << 0x7) == 0x80
|
||||
* MSG_FULLBUFFER (4) ; MSGBIT(MSG_FULLBUFFER) == (1 << 0x4) == 0x10
|
||||
* MSG_CHECKPOINT (5) ; MSGBIT(CHECKPOINT) == (1 << 0x5) == 0x20
|
||||
* MSG_WAKEUP (6) ; MSGBIT(WAKEUP) == (1 << 0x6) == 0x40
|
||||
* MSG_SHUTDOWN (7) ; MSGBIT(MSG_SHUTDOWN) == (1 << 0x7) == 0x80
|
||||
*/
|
||||
|
||||
class JfrPostBox : public JfrCHeapObj {
|
||||
friend class JfrRecorder;
|
||||
friend class JfrRecorderService;
|
||||
public:
|
||||
void post(JFR_Msg msg);
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -55,6 +55,7 @@
|
||||
#include "runtime/safepoint.hpp"
|
||||
#include "runtime/vmOperations.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
|
||||
// incremented on each flushpoint
|
||||
static u8 flushpoint_id = 0;
|
||||
@ -391,6 +392,7 @@ class JfrSafepointWriteVMOperation : public VM_Operation {
|
||||
JfrRecorderService::JfrRecorderService() :
|
||||
_checkpoint_manager(JfrCheckpointManager::instance()),
|
||||
_chunkwriter(JfrRepository::chunkwriter()),
|
||||
_post_box(JfrPostBox::instance()),
|
||||
_repository(JfrRepository::instance()),
|
||||
_stack_trace_repository(JfrStackTraceRepository::instance()),
|
||||
_storage(JfrStorage::instance()),
|
||||
@ -670,17 +672,173 @@ void JfrRecorderService::evaluate_chunk_size_for_rotation() {
|
||||
JfrChunkRotation::evaluate(_chunkwriter);
|
||||
}
|
||||
|
||||
void JfrRecorderService::emit_leakprofiler_events(int64_t cutoff_ticks, bool emit_all, bool skip_bfs) {
|
||||
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(JavaThread::current()));
|
||||
// Take the rotation lock to exclude flush() during event emits. This is because event emit
|
||||
// also creates a number checkpoint events. Those checkpoint events require a future typeset checkpoint
|
||||
// event for completeness, i.e. to be generated before being flushed to a segment.
|
||||
// The upcoming flush() or rotation() after event emit completes this typeset checkpoint
|
||||
// and serializes all event emit checkpoint events to the same segment.
|
||||
JfrRotationLock lock;
|
||||
// Take the rotation lock before the transition.
|
||||
JavaThread* current_thread = JavaThread::current();
|
||||
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current_thread));
|
||||
ThreadInVMfromNative transition(current_thread);
|
||||
LeakProfiler::emit_events(cutoff_ticks, emit_all, skip_bfs);
|
||||
// LeakProfiler event serialization support.
|
||||
|
||||
struct JfrLeakProfilerEmitRequest {
|
||||
int64_t cutoff_ticks;
|
||||
bool emit_all;
|
||||
bool skip_bfs;
|
||||
bool oom;
|
||||
};
|
||||
|
||||
typedef GrowableArrayCHeap<JfrLeakProfilerEmitRequest, mtTracing> JfrLeakProfilerEmitRequestQueue;
|
||||
static JfrLeakProfilerEmitRequestQueue* _queue = nullptr;
|
||||
constexpr const static int64_t _no_path_to_gc_roots = 0;
|
||||
static bool _oom_emit_request_posted = false;
|
||||
static bool _oom_emit_request_delivered = false;
|
||||
|
||||
static inline bool exclude_paths_to_gc_roots(int64_t cutoff_ticks) {
|
||||
return cutoff_ticks <= _no_path_to_gc_roots;
|
||||
}
|
||||
|
||||
static void enqueue(const JfrLeakProfilerEmitRequest& request) {
|
||||
assert(JfrRotationLock::is_owner(), "invariant");
|
||||
if (_queue == nullptr) {
|
||||
_queue = new JfrLeakProfilerEmitRequestQueue(4);
|
||||
}
|
||||
assert(_queue != nullptr, "invariant");
|
||||
assert(!_oom_emit_request_posted, "invariant");
|
||||
if (request.oom) {
|
||||
_oom_emit_request_posted = true;
|
||||
}
|
||||
_queue->append(request);
|
||||
}
|
||||
|
||||
static JfrLeakProfilerEmitRequest dequeue() {
|
||||
assert(JfrRotationLock::is_owner(), "invariant");
|
||||
assert(_queue != nullptr, "invariant");
|
||||
assert(_queue->is_nonempty(), "invariant");
|
||||
const JfrLeakProfilerEmitRequest& request = _queue->first();
|
||||
_queue->remove_at(0);
|
||||
return request;
|
||||
}
|
||||
|
||||
// This version of emit excludes path-to-gc-roots, i.e. it skips reference chains.
static void emit_leakprofiler_events(bool emit_all, bool skip_bfs, JavaThread* jt) {
assert(jt != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
// Take the rotation lock to exclude flush() during event emits. This is because the event emit operation
// also creates a number of checkpoint events. Those checkpoint events require a future typeset checkpoint
// event for completeness, i.e., to be generated before being flushed to a segment.
// The upcoming flush() or rotation() after event emit completes this typeset checkpoint
// and serializes all checkpoint events to the same segment.
JfrRotationLock lock;
// Take the rotation lock before the thread transition, to avoid blocking safepoints.
if (_oom_emit_request_posted) {
// A request to emit leakprofiler events in response to CrashOnOutOfMemoryError
// is pending or has already been completed. We are about to crash at any time now.
assert(CrashOnOutOfMemoryError, "invariant");
return;
}
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, jt));
ThreadInVMfromNative transition(jt);
// Since we are not requesting path-to-gc-roots, i.e., reference chains, we need not issue a VM_Operation.
// Therefore, we can let the requesting thread process the request directly, since it already holds the requisite lock.
LeakProfiler::emit_events(_no_path_to_gc_roots, emit_all, skip_bfs);
}

void JfrRecorderService::transition_and_post_leakprofiler_emit_msg(JavaThread* jt) {
|
||||
assert(jt != nullptr, "invariant");
|
||||
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt);)
|
||||
assert(!JfrRotationLock::is_owner(), "invariant");
|
||||
// Transition to _thread_in_VM and post a synchronous message to the JFR Recorder Thread
|
||||
// for it to process our enqueued request, which includes paths-to-gc-roots, i.e., reference chains.
|
||||
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, jt));
|
||||
ThreadInVMfromNative transition(jt);
|
||||
_post_box.post(MSG_EMIT_LEAKP_REFCHAINS);
|
||||
}
|
||||
|
||||
// This version of emit includes path-to-gc-roots, i.e., it includes in the request traversing of reference chains.
|
||||
// Traversing reference chains is performed as part of a VM_Operation, and we initiate it from the JFR Recorder Thread.
|
||||
// Because multiple threads can concurrently report_on_java_out_of_memory(), having them all post a synchronous JFR msg,
|
||||
// they rendezvous at a safepoint in a convenient state, ThreadBlockInVM. This mechanism prevents any thread from racing past
|
||||
// this point and begin executing VMError::report_and_die(), until at least one oom request has been delivered.
|
||||
void JfrRecorderService::emit_leakprofiler_events_paths_to_gc_roots(int64_t cutoff_ticks,
|
||||
bool emit_all,
|
||||
bool skip_bfs,
|
||||
bool oom,
|
||||
JavaThread* jt) {
|
||||
assert(jt != nullptr, "invariant");
|
||||
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt);)
|
||||
assert(!exclude_paths_to_gc_roots(cutoff_ticks), "invariant");
|
||||
|
||||
{
|
||||
JfrRotationLock lock;
|
||||
// Take the rotation lock to read and post a request for the JFR Recorder Thread.
|
||||
if (_oom_emit_request_posted) {
|
||||
if (!oom) {
|
||||
// A request to emit leakprofiler events in response to CrashOnOutOfMemoryError
|
||||
// is pending or has already been completed. We are about to crash at any time now.
|
||||
assert(CrashOnOutOfMemoryError, "invariant");
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
assert(!_oom_emit_request_posted, "invariant");
|
||||
JfrLeakProfilerEmitRequest request = { cutoff_ticks, emit_all, skip_bfs, oom };
|
||||
enqueue(request);
|
||||
}
|
||||
}
|
||||
JfrRecorderService service;
|
||||
service.transition_and_post_leakprofiler_emit_msg(jt);
|
||||
}
|
||||
|
||||
// Leakprofiler serialization request, the jdk.jfr.internal.JVM.emitOldObjectSamples() Java entry point.
|
||||
void JfrRecorderService::emit_leakprofiler_events(int64_t cutoff_ticks,
|
||||
bool emit_all,
|
||||
bool skip_bfs) {
|
||||
JavaThread* const jt = JavaThread::current();
|
||||
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt);)
|
||||
if (exclude_paths_to_gc_roots(cutoff_ticks)) {
|
||||
::emit_leakprofiler_events(emit_all, skip_bfs, jt);
|
||||
return;
|
||||
}
|
||||
emit_leakprofiler_events_paths_to_gc_roots(cutoff_ticks, emit_all, skip_bfs, /* oom */ false, jt);
|
||||
}
|
||||
|
||||
// Leakprofiler serialization request, the report_on_java_out_of_memory VM entry point.
|
||||
void JfrRecorderService::emit_leakprofiler_events_on_oom() {
|
||||
assert(CrashOnOutOfMemoryError, "invariant");
|
||||
if (EventOldObjectSample::is_enabled()) {
|
||||
JavaThread* const jt = JavaThread::current();
|
||||
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt);)
|
||||
ThreadToNativeFromVM transition(jt);
|
||||
emit_leakprofiler_events_paths_to_gc_roots(max_jlong, false, false, /* oom */ true, jt);
|
||||
}
|
||||
}
|
||||
|
||||
// The worker routine for the JFR Recorder Thread when processing MSG_EMIT_LEAKP_REFCHAINS messages.
|
||||
void JfrRecorderService::emit_leakprofiler_events() {
|
||||
JavaThread* const jt = JavaThread::current();
|
||||
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
|
||||
// Take the rotation lock before the transition.
|
||||
JfrRotationLock lock;
|
||||
if (_oom_emit_request_delivered) {
|
||||
// A request to emit leakprofiler events in response to CrashOnOutOfMemoryError
|
||||
// has already been completed. We are about to crash at any time now.
|
||||
assert(_oom_emit_request_posted, "invariant");
|
||||
assert(CrashOnOutOfMemoryError, "invariant");
|
||||
return;
|
||||
}
|
||||
|
||||
assert(_queue->is_nonempty(), "invariant");
|
||||
|
||||
{
|
||||
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, jt));
|
||||
ThreadInVMfromNative transition(jt);
|
||||
while (_queue->is_nonempty()) {
|
||||
const JfrLeakProfilerEmitRequest& request = dequeue();
|
||||
LeakProfiler::emit_events(request.cutoff_ticks, request.emit_all, request.skip_bfs);
|
||||
if (_oom_emit_request_posted && request.oom) {
|
||||
assert(CrashOnOutOfMemoryError, "invariant");
|
||||
_oom_emit_request_delivered = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If processing involved an out-of-memory request, issue an immediate flush operation.
|
||||
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
|
||||
if (_chunkwriter.is_valid() && _oom_emit_request_delivered) {
|
||||
invoke_flush();
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,19 +27,23 @@
|
||||
|
||||
#include "jfr/utilities/jfrAllocation.hpp"
|
||||
|
||||
class JavaThread;
|
||||
class JfrCheckpointManager;
|
||||
class JfrChunkWriter;
|
||||
class JfrPostBox;
|
||||
class JfrRepository;
|
||||
class JfrStackTraceRepository;
|
||||
class JfrStorage;
|
||||
class JfrStringPool;
|
||||
|
||||
class JfrRecorderService : public StackObj {
|
||||
friend class Jfr;
|
||||
friend class JfrSafepointClearVMOperation;
|
||||
friend class JfrSafepointWriteVMOperation;
|
||||
private:
|
||||
JfrCheckpointManager& _checkpoint_manager;
|
||||
JfrChunkWriter& _chunkwriter;
|
||||
JfrPostBox& _post_box;
|
||||
JfrRepository& _repository;
|
||||
JfrStackTraceRepository& _stack_trace_repository;
|
||||
JfrStorage& _storage;
|
||||
@ -64,6 +68,14 @@ class JfrRecorderService : public StackObj {
|
||||
void invoke_safepoint_write();
|
||||
void post_safepoint_write();
|
||||
|
||||
void transition_and_post_leakprofiler_emit_msg(JavaThread* jt);
|
||||
|
||||
static void emit_leakprofiler_events_on_oom();
|
||||
static void emit_leakprofiler_events_paths_to_gc_roots(int64_t cutoff_ticks,
|
||||
bool emit_all,
|
||||
bool skip_bfs,
|
||||
bool oom,
|
||||
JavaThread* jt);
|
||||
public:
|
||||
JfrRecorderService();
|
||||
void start();
|
||||
@ -72,8 +84,12 @@ class JfrRecorderService : public StackObj {
|
||||
void flushpoint();
|
||||
void process_full_buffers();
|
||||
void evaluate_chunk_size_for_rotation();
|
||||
void emit_leakprofiler_events(int64_t cutoff_ticks, bool emit_all, bool skip_bfs);
|
||||
void emit_leakprofiler_events();
|
||||
|
||||
static bool is_recording();
|
||||
static void emit_leakprofiler_events(int64_t cutoff_ticks,
|
||||
bool emit_all,
|
||||
bool skip_bfs);
|
||||
};
|
||||
|
||||
#endif // SHARE_JFR_RECORDER_SERVICE_JFRRECORDERSERVICE_HPP
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -44,6 +44,7 @@ void recorderthread_entry(JavaThread* thread, JavaThread* unused) {
|
||||
#define ROTATE (msgs & (MSGBIT(MSG_ROTATE)|MSGBIT(MSG_STOP)))
|
||||
#define FLUSHPOINT (msgs & (MSGBIT(MSG_FLUSHPOINT)))
|
||||
#define PROCESS_FULL_BUFFERS (msgs & (MSGBIT(MSG_ROTATE)|MSGBIT(MSG_STOP)|MSGBIT(MSG_FULLBUFFER)))
|
||||
#define LEAKPROFILER_REFCHAINS (msgs & MSGBIT(MSG_EMIT_LEAKP_REFCHAINS))
|
||||
|
||||
JfrPostBox& post_box = JfrRecorderThreadEntry::post_box();
|
||||
log_debug(jfr, system)("Recorder thread STARTED");
|
||||
@ -70,6 +71,9 @@ void recorderthread_entry(JavaThread* thread, JavaThread* unused) {
|
||||
if (PROCESS_FULL_BUFFERS) {
|
||||
service.process_full_buffers();
|
||||
}
|
||||
if (LEAKPROFILER_REFCHAINS) {
|
||||
service.emit_leakprofiler_events();
|
||||
}
|
||||
// Check amount of data written to chunk already
|
||||
// if it warrants asking for a new chunk.
|
||||
service.evaluate_chunk_size_for_rotation();
|
||||
@ -98,5 +102,5 @@ void recorderthread_entry(JavaThread* thread, JavaThread* unused) {
|
||||
#undef ROTATE
|
||||
#undef FLUSHPOINT
|
||||
#undef PROCESS_FULL_BUFFERS
|
||||
#undef SCAVENGE
|
||||
#undef LEAKPROFILER_REFCHAINS
|
||||
}
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,6 +25,7 @@
|
||||
#ifndef SHARE_JFR_UTILITIES_JFRSET_HPP
|
||||
#define SHARE_JFR_UTILITIES_JFRSET_HPP
|
||||
|
||||
#include "cppstdlib/new.hpp"
|
||||
#include "jfr/utilities/jfrTypes.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
|
||||
@ -67,7 +68,9 @@ class JfrSetStorage : public AnyObj {
|
||||
} else {
|
||||
table = NEW_RESOURCE_ARRAY(K, table_size);
|
||||
}
|
||||
memset(table, 0, table_size * sizeof(K));
|
||||
for (unsigned i = 0; i < table_size; ++i) {
|
||||
::new (&table[i]) K{};
|
||||
}
|
||||
return table;
|
||||
}
|
||||
|
||||
@ -88,7 +91,7 @@ class JfrSetStorage : public AnyObj {
|
||||
assert(is_nonempty(), "invariant");
|
||||
for (unsigned i = 0; i < _table_size; ++i) {
|
||||
K k = _table[i];
|
||||
if (k != 0) {
|
||||
if (k != K{}) {
|
||||
functor(k);
|
||||
}
|
||||
}
|
||||
@ -136,11 +139,11 @@ class JfrSet : public JfrSetStorage<CONFIG> {
|
||||
_resize_threshold = old_table_size;
|
||||
for (unsigned i = 0; i < old_table_size; ++i) {
|
||||
const K k = old_table[i];
|
||||
if (k != 0) {
|
||||
if (k != K{}) {
|
||||
uint32_t idx = slot_idx(CONFIG::hash(k));
|
||||
do {
|
||||
K v = this->_table[idx];
|
||||
if (v == 0) {
|
||||
if (v == K{}) {
|
||||
this->_table[idx] = k;
|
||||
break;
|
||||
}
|
||||
@ -161,7 +164,7 @@ class JfrSet : public JfrSetStorage<CONFIG> {
|
||||
K* result = nullptr;
|
||||
while (true) {
|
||||
K v = this->_table[idx];
|
||||
if (v == 0) {
|
||||
if (v == K{}) {
|
||||
result = &this->_table[idx];
|
||||
break;
|
||||
}
|
||||
@ -196,7 +199,7 @@ class JfrSet : public JfrSetStorage<CONFIG> {
|
||||
// Already exists.
|
||||
return false;
|
||||
}
|
||||
assert(*slot == 0, "invariant");
|
||||
assert(*slot == K{}, "invariant");
|
||||
*slot = k;
|
||||
if (++this->_elements == _resize_threshold) {
|
||||
resize();
|
||||
|
||||
@ -135,17 +135,16 @@ class ObjArrayKlass : public ArrayKlass {
|
||||
template <typename T, typename OopClosureType>
|
||||
inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
// Iterate over oop elements within [start, end), and metadata.
template <typename T, class OopClosureType>
inline void oop_oop_iterate_range(objArrayOop a, OopClosureType* closure, int start, int end);

public:
// Iterate over all oop elements.
// Iterate over all oop elements, and no metadata.
template <typename T, class OopClosureType>
inline void oop_oop_iterate_elements(objArrayOop a, OopClosureType* closure);

// Iterate over oop elements within index range [start, end), and no metadata.
template <typename T, class OopClosureType>
inline void oop_oop_iterate_elements_range(objArrayOop a, OopClosureType* closure, int start, int end);

private:
// Iterate over all oop elements with indices within mr.
// Iterate over all oop elements bounded by addresses [low, high), and no metadata.
template <typename T, class OopClosureType>
inline void oop_oop_iterate_elements_bounded(objArrayOop a, OopClosureType* closure, void* low, void* high);


@ -38,10 +38,18 @@

template <typename T, class OopClosureType>
void ObjArrayKlass::oop_oop_iterate_elements(objArrayOop a, OopClosureType* closure) {
T* p = (T*)a->base();
T* const end = p + a->length();
oop_oop_iterate_elements_range<T>(a, closure, 0, a->length());
}

for (;p < end; p++) {
// Like oop_oop_iterate but only iterates over a specified range and only used
// for objArrayOops.
template <typename T, class OopClosureType>
void ObjArrayKlass::oop_oop_iterate_elements_range(objArrayOop a, OopClosureType* closure, int start, int end) {
T* base = (T*)a->base();
T* p = base + start;
T* const end_p = base + end;

for (;p < end_p; ++p) {
Devirtualizer::do_oop(closure, p);
}
}
@ -98,24 +106,4 @@ void ObjArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, Me
|
||||
oop_oop_iterate_elements_bounded<T>(a, closure, mr.start(), mr.end());
|
||||
}
|
||||
|
||||
// Like oop_oop_iterate but only iterates over a specified range and only used
|
||||
// for objArrayOops.
|
||||
template <typename T, class OopClosureType>
|
||||
void ObjArrayKlass::oop_oop_iterate_range(objArrayOop a, OopClosureType* closure, int start, int end) {
|
||||
T* low = (T*)a->base() + start;
|
||||
T* high = (T*)a->base() + end;
|
||||
|
||||
oop_oop_iterate_elements_bounded<T>(a, closure, low, high);
|
||||
}
|
||||
|
||||
// Placed here to resolve include cycle between objArrayKlass.inline.hpp and objArrayOop.inline.hpp
|
||||
template <typename OopClosureType>
|
||||
void objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) {
|
||||
if (UseCompressedOops) {
|
||||
((ObjArrayKlass*)klass())->oop_oop_iterate_range<narrowOop>(this, blk, start, end);
|
||||
} else {
|
||||
((ObjArrayKlass*)klass())->oop_oop_iterate_range<oop>(this, blk, start, end);
|
||||
}
|
||||
}
|
||||
|
||||
#endif // SHARE_OOPS_OBJARRAYKLASS_INLINE_HPP
|
||||
|
||||
@ -83,9 +83,9 @@ class objArrayOopDesc : public arrayOopDesc {
Klass* element_klass();

public:
// special iterators for index ranges, returns size of object
// Special iterators for an element index range.
template <typename OopClosureType>
void oop_iterate_range(OopClosureType* blk, int start, int end);
void oop_iterate_elements_range(OopClosureType* blk, int start, int end);
};

// See similar requirement for oopDesc.

@ -29,6 +29,7 @@
|
||||
|
||||
#include "oops/access.hpp"
|
||||
#include "oops/arrayOop.hpp"
|
||||
#include "oops/objArrayKlass.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
|
||||
@ -51,4 +52,13 @@ inline void objArrayOopDesc::obj_at_put(int index, oop value) {
|
||||
HeapAccess<IS_ARRAY>::oop_store_at(as_oop(), offset, value);
|
||||
}
|
||||
|
||||
template <typename OopClosureType>
|
||||
void objArrayOopDesc::oop_iterate_elements_range(OopClosureType* blk, int start, int end) {
|
||||
if (UseCompressedOops) {
|
||||
((ObjArrayKlass*)klass())->oop_oop_iterate_elements_range<narrowOop>(this, blk, start, end);
|
||||
} else {
|
||||
((ObjArrayKlass*)klass())->oop_oop_iterate_elements_range<oop>(this, blk, start, end);
|
||||
}
|
||||
}
|
||||
|
||||
#endif // SHARE_OOPS_OBJARRAYOOP_INLINE_HPP

@ -1195,7 +1195,7 @@ const Type* XorLNode::Value(PhaseGVN* phase) const {
return AddNode::Value(phase);
}

Node* MaxNode::build_min_max_int(Node* a, Node* b, bool is_max) {
Node* MinMaxNode::build_min_max_int(Node* a, Node* b, bool is_max) {
if (is_max) {
return new MaxINode(a, b);
} else {
@ -1203,7 +1203,7 @@ Node* MaxNode::build_min_max_int(Node* a, Node* b, bool is_max) {
}
}

Node* MaxNode::build_min_max_long(PhaseGVN* phase, Node* a, Node* b, bool is_max) {
Node* MinMaxNode::build_min_max_long(PhaseGVN* phase, Node* a, Node* b, bool is_max) {
if (is_max) {
return new MaxLNode(phase->C, a, b);
} else {
@ -1211,7 +1211,7 @@ Node* MaxNode::build_min_max_long(PhaseGVN* phase, Node* a, Node* b, bool is_max
}
}

Node* MaxNode::build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, const Type* t, PhaseGVN& gvn) {
Node* MinMaxNode::build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, const Type* t, PhaseGVN& gvn) {
bool is_int = gvn.type(a)->isa_int();
assert(is_int || gvn.type(a)->isa_long(), "int or long inputs");
assert(is_int == (gvn.type(b)->isa_int() != nullptr), "inconsistent inputs");
@ -1243,7 +1243,7 @@ Node* MaxNode::build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, co
return res;
}

Node* MaxNode::build_min_max_diff_with_zero(Node* a, Node* b, bool is_max, const Type* t, PhaseGVN& gvn) {
Node* MinMaxNode::build_min_max_diff_with_zero(Node* a, Node* b, bool is_max, const Type* t, PhaseGVN& gvn) {
bool is_int = gvn.type(a)->isa_int();
assert(is_int || gvn.type(a)->isa_long(), "int or long inputs");
assert(is_int == (gvn.type(b)->isa_int() != nullptr), "inconsistent inputs");
@ -1290,7 +1290,7 @@ static bool can_overflow(const TypeLong* t, jlong c) {
// Let <x, x_off> = x_operands and <y, y_off> = y_operands.
// If x == y and neither add(x, x_off) nor add(y, y_off) overflow, return
// add(x, op(x_off, y_off)). Otherwise, return nullptr.
Node* MaxNode::extract_add(PhaseGVN* phase, ConstAddOperands x_operands, ConstAddOperands y_operands) {
Node* MinMaxNode::extract_add(PhaseGVN* phase, ConstAddOperands x_operands, ConstAddOperands y_operands) {
Node* x = x_operands.first;
Node* y = y_operands.first;
int opcode = Opcode();
@ -1327,7 +1327,7 @@ static ConstAddOperands as_add_with_constant(Node* n) {
return ConstAddOperands(x, c_type->is_int()->get_con());
}

Node* MaxNode::IdealI(PhaseGVN* phase, bool can_reshape) {
Node* MinMaxNode::IdealI(PhaseGVN* phase, bool can_reshape) {
Node* n = AddNode::Ideal(phase, can_reshape);
if (n != nullptr) {
return n;
@ -1401,7 +1401,7 @@ Node* MaxINode::Identity(PhaseGVN* phase) {
return in(2);
}

return MaxNode::Identity(phase);
return MinMaxNode::Identity(phase);
}

//=============================================================================
@ -1434,7 +1434,7 @@ Node* MinINode::Identity(PhaseGVN* phase) {
return in(1);
}

return MaxNode::Identity(phase);
return MinMaxNode::Identity(phase);
}

//------------------------------add_ring---------------------------------------
@ -1564,7 +1564,7 @@ Node* MaxLNode::Identity(PhaseGVN* phase) {
return in(2);
}

return MaxNode::Identity(phase);
return MinMaxNode::Identity(phase);
}

Node* MaxLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
@ -1596,7 +1596,7 @@ Node* MinLNode::Identity(PhaseGVN* phase) {
return in(1);
}

return MaxNode::Identity(phase);
return MinMaxNode::Identity(phase);
}

Node* MinLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
@ -1610,7 +1610,7 @@ Node* MinLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
return nullptr;
}

int MaxNode::opposite_opcode() const {
int MinMaxNode::opposite_opcode() const {
if (Opcode() == max_opcode()) {
return min_opcode();
} else {
@ -1621,7 +1621,7 @@ int MaxNode::opposite_opcode() const {

// Given a redundant structure such as Max/Min(A, Max/Min(B, C)) where A == B or A == C, return the useful part of the structure.
// 'operation' is the node expected to be the inner 'Max/Min(B, C)', and 'operand' is the node expected to be the 'A' operand of the outer node.
Node* MaxNode::find_identity_operation(Node* operation, Node* operand) {
Node* MinMaxNode::find_identity_operation(Node* operation, Node* operand) {
if (operation->Opcode() == Opcode() || operation->Opcode() == opposite_opcode()) {
Node* n1 = operation->in(1);
Node* n2 = operation->in(2);
@ -1645,17 +1645,17 @@ Node* MaxNode::find_identity_operation(Node* operation, Node* operand) {
return nullptr;
}

Node* MaxNode::Identity(PhaseGVN* phase) {
Node* MinMaxNode::Identity(PhaseGVN* phase) {
if (in(1) == in(2)) {
return in(1);
}

Node* identity_1 = MaxNode::find_identity_operation(in(2), in(1));
Node* identity_1 = MinMaxNode::find_identity_operation(in(2), in(1));
if (identity_1 != nullptr) {
return identity_1;
}

Node* identity_2 = MaxNode::find_identity_operation(in(1), in(2));
Node* identity_2 = MinMaxNode::find_identity_operation(in(1), in(2));
if (identity_2 != nullptr) {
return identity_2;
}
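The find_identity_operation()/Identity() logic above relies on ordinary min/max algebra. A minimal standalone sketch of the identities involved (plain C++, illustrative only, not HotSpot code):

    #include <algorithm>
    #include <cassert>
    // Same opcode:      max(a, max(a, b)) == max(a, b), so the outer node is redundant.
    // Opposite opcode:  max(a, min(a, b)) == a, so the structure collapses to the operand.
    void min_max_identity_demo(int a, int b) {
      assert(std::max(a, std::max(a, b)) == std::max(a, b));
      assert(std::min(a, std::min(a, b)) == std::min(a, b));
      assert(std::max(a, std::min(a, b)) == a);
      assert(std::min(a, std::max(a, b)) == a);
    }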

@ -324,14 +324,16 @@ public:
//------------------------------MaxNode----------------------------------------
// Max (or min) of 2 values. Included with the ADD nodes because it inherits
// all the behavior of addition on a ring.
class MaxNode : public AddNode {
class MinMaxNode : public AddNode {
private:
static Node* build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, const Type* t, PhaseGVN& gvn);
static Node* build_min_max_diff_with_zero(Node* a, Node* b, bool is_max, const Type* t, PhaseGVN& gvn);
Node* extract_add(PhaseGVN* phase, ConstAddOperands x_operands, ConstAddOperands y_operands);

public:
MaxNode( Node *in1, Node *in2 ) : AddNode(in1,in2) {}
MinMaxNode(Node* in1, Node* in2) : AddNode(in1, in2) {
init_class_id(Class_MinMax);
}
virtual int Opcode() const = 0;
virtual int max_opcode() const = 0;
virtual int min_opcode() const = 0;
@ -373,9 +375,9 @@ public:
//------------------------------MaxINode---------------------------------------
// Maximum of 2 integers. Included with the ADD nodes because it inherits
// all the behavior of addition on a ring.
class MaxINode : public MaxNode {
class MaxINode : public MinMaxNode {
public:
MaxINode( Node *in1, Node *in2 ) : MaxNode(in1,in2) {}
MaxINode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type *add_ring( const Type *, const Type * ) const;
virtual const Type *add_id() const { return TypeInt::make(min_jint); }
@ -390,9 +392,9 @@ public:
//------------------------------MinINode---------------------------------------
// MINimum of 2 integers. Included with the ADD nodes because it inherits
// all the behavior of addition on a ring.
class MinINode : public MaxNode {
class MinINode : public MinMaxNode {
public:
MinINode( Node *in1, Node *in2 ) : MaxNode(in1,in2) {}
MinINode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type *add_ring( const Type *, const Type * ) const;
virtual const Type *add_id() const { return TypeInt::make(max_jint); }
@ -406,9 +408,9 @@ public:

//------------------------------MaxLNode---------------------------------------
// MAXimum of 2 longs.
class MaxLNode : public MaxNode {
class MaxLNode : public MinMaxNode {
public:
MaxLNode(Compile* C, Node* in1, Node* in2) : MaxNode(in1, in2) {
MaxLNode(Compile* C, Node* in1, Node* in2) : MinMaxNode(in1, in2) {
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
@ -425,9 +427,9 @@ public:

//------------------------------MinLNode---------------------------------------
// MINimum of 2 longs.
class MinLNode : public MaxNode {
class MinLNode : public MinMaxNode {
public:
MinLNode(Compile* C, Node* in1, Node* in2) : MaxNode(in1, in2) {
MinLNode(Compile* C, Node* in1, Node* in2) : MinMaxNode(in1, in2) {
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
@ -444,9 +446,9 @@ public:

//------------------------------MaxFNode---------------------------------------
// Maximum of 2 floats.
class MaxFNode : public MaxNode {
class MaxFNode : public MinMaxNode {
public:
MaxFNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
MaxFNode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type *add_ring(const Type*, const Type*) const;
virtual const Type *add_id() const { return TypeF::NEG_INF; }
@ -458,9 +460,9 @@ public:

//------------------------------MinFNode---------------------------------------
// Minimum of 2 floats.
class MinFNode : public MaxNode {
class MinFNode : public MinMaxNode {
public:
MinFNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
MinFNode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type *add_ring(const Type*, const Type*) const;
virtual const Type *add_id() const { return TypeF::POS_INF; }
@ -472,9 +474,9 @@ public:

//------------------------------MaxHFNode--------------------------------------
// Maximum of 2 half floats.
class MaxHFNode : public MaxNode {
class MaxHFNode : public MinMaxNode {
public:
MaxHFNode(Node* in1, Node* in2) : MaxNode(in1, in2) {}
MaxHFNode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type* add_ring(const Type*, const Type*) const;
virtual const Type* add_id() const { return TypeH::NEG_INF; }
@ -486,9 +488,9 @@ public:

//------------------------------MinHFNode---------------------------------------
// Minimum of 2 half floats.
class MinHFNode : public MaxNode {
class MinHFNode : public MinMaxNode {
public:
MinHFNode(Node* in1, Node* in2) : MaxNode(in1, in2) {}
MinHFNode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type* add_ring(const Type*, const Type*) const;
virtual const Type* add_id() const { return TypeH::POS_INF; }
@ -500,9 +502,9 @@ public:

//------------------------------MaxDNode---------------------------------------
// Maximum of 2 doubles.
class MaxDNode : public MaxNode {
class MaxDNode : public MinMaxNode {
public:
MaxDNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
MaxDNode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type *add_ring(const Type*, const Type*) const;
virtual const Type *add_id() const { return TypeD::NEG_INF; }
@ -514,9 +516,9 @@ public:

//------------------------------MinDNode---------------------------------------
// Minimum of 2 doubles.
class MinDNode : public MaxNode {
class MinDNode : public MinMaxNode {
public:
MinDNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
MinDNode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type *add_ring(const Type*, const Type*) const;
virtual const Type *add_id() const { return TypeD::POS_INF; }

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1058,6 +1058,39 @@ void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_lo
// "new_load" might actually be a constant, parameter, etc.
if (new_load->is_Load()) {
Node* new_addp = new_load->in(MemNode::Address);

// If new_load is a Load but not from an AddP, it means that the load is folded into another
// load. And since this load is not from a field, we cannot create a unique type for it.
// For example:
//
// if (b) {
// Holder h1 = new Holder();
// Object o = ...;
// h.o = o.getClass();
// } else {
// Holder h2 = ...;
// }
// Holder h = Phi(h1, h2);
// Object r = h.o;
//
// Then, splitting r through the merge point results in:
//
// if (b) {
// Holder h1 = new Holder();
// Object o = ...;
// h.o = o.getClass();
// Object o1 = h.o;
// } else {
// Holder h2 = ...;
// Object o2 = h2.o;
// }
// Object r = Phi(o1, o2);
//
// In this case, o1 is folded to o.getClass() which is a Load but not from an AddP, but from
// an OopHandle that is loaded from the Klass of o.
if (!new_addp->is_AddP()) {
continue;
}
Node* base = get_addp_base(new_addp);

// The base might not be something that we can create an unique

@ -979,9 +979,9 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {

Node* inner_iters_max = nullptr;
if (stride_con > 0) {
inner_iters_max = MaxNode::max_diff_with_zero(limit, outer_phi, TypeInteger::bottom(bt), _igvn);
inner_iters_max = MinMaxNode::max_diff_with_zero(limit, outer_phi, TypeInteger::bottom(bt), _igvn);
} else {
inner_iters_max = MaxNode::max_diff_with_zero(outer_phi, limit, TypeInteger::bottom(bt), _igvn);
inner_iters_max = MinMaxNode::max_diff_with_zero(outer_phi, limit, TypeInteger::bottom(bt), _igvn);
}

Node* inner_iters_limit = _igvn.integercon(iters_limit, bt);
@ -989,7 +989,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
// Long.MIN_VALUE to Long.MAX_VALUE for instance). Use an unsigned
// min.
const TypeInteger* inner_iters_actual_range = TypeInteger::make(0, iters_limit, Type::WidenMin, bt);
Node* inner_iters_actual = MaxNode::unsigned_min(inner_iters_max, inner_iters_limit, inner_iters_actual_range, _igvn);
Node* inner_iters_actual = MinMaxNode::unsigned_min(inner_iters_max, inner_iters_limit, inner_iters_actual_range, _igvn);

Node* inner_iters_actual_int;
if (bt == T_LONG) {
@ -1618,7 +1618,7 @@ void PhaseIdealLoop::transform_long_range_checks(int stride_con, const Node_List
Node* max_jint_plus_one_long = longcon((jlong)max_jint + 1);
Node* max_range = new AddLNode(max_jint_plus_one_long, L);
register_new_node(max_range, entry_control);
R = MaxNode::unsigned_min(R, max_range, TypeLong::POS, _igvn);
R = MinMaxNode::unsigned_min(R, max_range, TypeLong::POS, _igvn);
set_subtree_ctrl(R, true);
}

@ -1717,9 +1717,9 @@ void PhaseIdealLoop::transform_long_range_checks(int stride_con, const Node_List
}

Node* PhaseIdealLoop::clamp(Node* R, Node* L, Node* H) {
Node* min = MaxNode::signed_min(R, H, TypeLong::LONG, _igvn);
Node* min = MinMaxNode::signed_min(R, H, TypeLong::LONG, _igvn);
set_subtree_ctrl(min, true);
Node* max = MaxNode::signed_max(L, min, TypeLong::LONG, _igvn);
Node* max = MinMaxNode::signed_max(L, min, TypeLong::LONG, _igvn);
set_subtree_ctrl(max, true);
return max;
}
@ -3485,14 +3485,14 @@ void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) {
// the loop body to be run for LoopStripMiningIter.
Node* max = nullptr;
if (stride > 0) {
max = MaxNode::max_diff_with_zero(limit, iv_phi, TypeInt::INT, *igvn);
max = MinMaxNode::max_diff_with_zero(limit, iv_phi, TypeInt::INT, *igvn);
} else {
max = MaxNode::max_diff_with_zero(iv_phi, limit, TypeInt::INT, *igvn);
max = MinMaxNode::max_diff_with_zero(iv_phi, limit, TypeInt::INT, *igvn);
}
// sub is positive and can be larger than the max signed int
// value. Use an unsigned min.
Node* const_iters = igvn->intcon(scaled_iters);
Node* min = MaxNode::unsigned_min(max, const_iters, TypeInt::make(0, scaled_iters, Type::WidenMin), *igvn);
Node* min = MinMaxNode::unsigned_min(max, const_iters, TypeInt::make(0, scaled_iters, Type::WidenMin), *igvn);
// min is the number of iterations for the next inner loop execution:
// unsigned_min(max(limit - iv_phi, 0), scaled_iters) if stride > 0
// unsigned_min(max(iv_phi - limit, 0), scaled_iters) if stride < 0
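A self-contained numeric sketch of the formula in the comment above (plain C++, not HotSpot code); it only illustrates why the clamp must be an unsigned min when the difference can exceed the signed range:

    #include <cassert>
    #include <cstdint>
    // Remaining trip count for the next inner-loop execution, stride > 0 case:
    // unsigned_min(max(limit - iv_phi, 0), scaled_iters).
    uint32_t next_inner_iters(int32_t limit, int32_t iv_phi, uint32_t scaled_iters) {
      // max(limit - iv_phi, 0), computed so that a difference larger than
      // INT32_MAX (e.g. limit = INT32_MAX, iv_phi = INT32_MIN) is not lost.
      uint32_t diff = (limit > iv_phi) ? (uint32_t)((int64_t)limit - iv_phi) : 0u;
      return diff < scaled_iters ? diff : scaled_iters;  // unsigned min
    }
    // e.g. next_inner_iters(INT32_MAX, INT32_MIN, 1000) == 1000, even though the
    // signed 32-bit difference would overflow.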

@ -2577,11 +2577,11 @@ void PhaseMacroExpand::eliminate_opaque_looplimit_macro_nodes() {
// a CMoveL construct now. At least until here, the type could be computed
// precisely. CMoveL is not so smart, but we can give it at least the best
// type we know about n now.
Node* repl = MaxNode::signed_max(n->in(1), n->in(2), _igvn.type(n), _igvn);
Node* repl = MinMaxNode::signed_max(n->in(1), n->in(2), _igvn.type(n), _igvn);
_igvn.replace_node(n, repl);
success = true;
} else if (n->Opcode() == Op_MinL) {
Node* repl = MaxNode::signed_min(n->in(1), n->in(2), _igvn.type(n), _igvn);
Node* repl = MinMaxNode::signed_min(n->in(1), n->in(2), _igvn.type(n), _igvn);
_igvn.replace_node(n, repl);
success = true;
}

@ -3913,7 +3913,6 @@ const Type* SCMemProjNode::Value(PhaseGVN* phase) const
LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required )
: Node(required),
_type(rt),
_adr_type(at),
_barrier_data(0)
{
init_req(MemNode::Control, c );
@ -3921,6 +3920,7 @@ LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const Ty
init_req(MemNode::Address, adr);
init_req(MemNode::ValueIn, val);
init_class_id(Class_LoadStore);
DEBUG_ONLY(_adr_type = at; adr_type();)
}

//------------------------------Value-----------------------------------------
@ -3944,6 +3944,11 @@ const Type* LoadStoreNode::Value(PhaseGVN* phase) const {
return bottom_type();
}

const TypePtr* LoadStoreNode::adr_type() const {
const TypePtr* cross_check = DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
return MemNode::calculate_adr_type(in(MemNode::Address)->bottom_type(), cross_check);
}

uint LoadStoreNode::ideal_reg() const {
return _type->ideal_reg();
}

@ -797,11 +797,6 @@ public:
virtual int Opcode() const;
virtual bool is_CFG() const { return false; }
virtual const Type *bottom_type() const {return Type::MEMORY;}
virtual const TypePtr *adr_type() const {
Node* ctrl = in(0);
if (ctrl == nullptr) return nullptr; // node is dead
return ctrl->in(MemNode::Memory)->adr_type();
}
virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
@ -814,9 +809,11 @@ public:
class LoadStoreNode : public Node {
private:
const Type* const _type; // What kind of value is loaded?
const TypePtr* _adr_type; // What kind of memory is being addressed?
uint8_t _barrier_data; // Bit field with barrier information
virtual uint size_of() const; // Size is bigger
#ifdef ASSERT
const TypePtr* _adr_type; // What kind of memory is being addressed?
#endif // ASSERT
public:
LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
virtual bool depends_only_on_test() const { return false; }
@ -824,7 +821,7 @@ public:

virtual const Type *bottom_type() const { return _type; }
virtual uint ideal_reg() const;
virtual const class TypePtr *adr_type() const { return _adr_type; } // returns bottom_type of address
virtual const TypePtr* adr_type() const;
virtual const Type* Value(PhaseGVN* phase) const;

bool result_not_used() const;

@ -271,9 +271,9 @@ Node* CMoveNode::Ideal_minmax(PhaseGVN* phase, CMoveNode* cmove) {

// Create the Min/Max node based on the type and kind
if (cmp_op == Op_CmpL) {
return MaxNode::build_min_max_long(phase, cmp_l, cmp_r, is_max);
return MinMaxNode::build_min_max_long(phase, cmp_l, cmp_r, is_max);
} else {
return MaxNode::build_min_max_int(cmp_l, cmp_r, is_max);
return MinMaxNode::build_min_max_int(cmp_l, cmp_r, is_max);
}
}

@ -130,6 +130,7 @@ class MemBarNode;
class MemBarStoreStoreNode;
class MemNode;
class MergeMemNode;
class MinMaxNode;
class MoveNode;
class MulNode;
class MultiNode;
@ -809,6 +810,7 @@ public:
DEFINE_CLASS_ID(AddP, Node, 9)
DEFINE_CLASS_ID(BoxLock, Node, 10)
DEFINE_CLASS_ID(Add, Node, 11)
DEFINE_CLASS_ID(MinMax, Add, 0)
DEFINE_CLASS_ID(Mul, Node, 12)
DEFINE_CLASS_ID(ClearArray, Node, 14)
DEFINE_CLASS_ID(Halt, Node, 15)
@ -986,6 +988,7 @@ public:
DEFINE_CLASS_QUERY(MemBar)
DEFINE_CLASS_QUERY(MemBarStoreStore)
DEFINE_CLASS_QUERY(MergeMem)
DEFINE_CLASS_QUERY(MinMax)
DEFINE_CLASS_QUERY(Move)
DEFINE_CLASS_QUERY(Mul)
DEFINE_CLASS_QUERY(Multi)

@ -762,26 +762,36 @@ bool PhaseGVN::is_dominator_helper(Node *d, Node *n, bool linear_only) {
//------------------------------dead_loop_check--------------------------------
// Check for a simple dead loop when a data node references itself directly
// or through an other data node excluding cons and phis.
void PhaseGVN::dead_loop_check( Node *n ) {
// Phi may reference itself in a loop
if (n != nullptr && !n->is_dead_loop_safe() && !n->is_CFG()) {
// Do 2 levels check and only data inputs.
bool no_dead_loop = true;
uint cnt = n->req();
for (uint i = 1; i < cnt && no_dead_loop; i++) {
Node *in = n->in(i);
if (in == n) {
no_dead_loop = false;
} else if (in != nullptr && !in->is_dead_loop_safe()) {
uint icnt = in->req();
for (uint j = 1; j < icnt && no_dead_loop; j++) {
if (in->in(j) == n || in->in(j) == in)
no_dead_loop = false;
}
void PhaseGVN::dead_loop_check(Node* n) {
// Phi may reference itself in a loop.
if (n == nullptr || n->is_dead_loop_safe() || n->is_CFG()) {
return;
}

// Do 2 levels check and only data inputs.
for (uint i = 1; i < n->req(); i++) {
Node* in = n->in(i);
if (in == n) {
n->dump_bfs(100, nullptr, "");
fatal("Dead loop detected, node references itself: %s (%d)",
n->Name(), n->_idx);
}

if (in == nullptr || in->is_dead_loop_safe()) {
continue;
}
for (uint j = 1; j < in->req(); j++) {
if (in->in(j) == n) {
n->dump_bfs(100, nullptr, "");
fatal("Dead loop detected, node input references current node: %s (%d) -> %s (%d)",
in->Name(), in->_idx, n->Name(), n->_idx);
}
if (in->in(j) == in) {
n->dump_bfs(100, nullptr, "");
fatal("Dead loop detected, node input references itself: %s (%d)",
in->Name(), in->_idx);
}
}
if (!no_dead_loop) { n->dump_bfs(100, nullptr, ""); }
assert(no_dead_loop, "dead loop detected");
}
}

@ -2623,6 +2633,15 @@ void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_
}
}
}
// Check for Max/Min(A, Max/Min(B, C)) where A == B or A == C
if (use->is_MinMax()) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
Node* u = use->fast_out(i2);
if (u->Opcode() == use->Opcode()) {
worklist.push(u);
}
}
}
auto enqueue_init_mem_projs = [&](ProjNode* proj) {
add_users_to_worklist0(proj, worklist);
};

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -147,7 +147,7 @@ public:
static const Type* int_type_xmeet(const CT* i1, const Type* t2);

template <class CTP>
static CTP int_type_union(CTP t1, CTP t2) {
static auto int_type_union(CTP t1, CTP t2) {
using CT = std::conditional_t<std::is_pointer_v<CTP>, std::remove_pointer_t<CTP>, CTP>;
using S = std::remove_const_t<decltype(CT::_lo)>;
using U = std::remove_const_t<decltype(CT::_ulo)>;
@ -209,7 +209,7 @@ public:
KnownBits<U> _bits;
int _widen = 0; // dummy field to mimic the same field in TypeInt, useful in testing

static TypeIntMirror make(const TypeIntPrototype<S, U>& t, int widen) {
static TypeIntMirror make(const TypeIntPrototype<S, U>& t, int widen = 0) {
auto canonicalized_t = t.canonicalize_constraints();
assert(!canonicalized_t.empty(), "must not be empty");
return TypeIntMirror{canonicalized_t._data._srange._lo, canonicalized_t._data._srange._hi,
@ -217,11 +217,15 @@ public:
canonicalized_t._data._bits};
}

TypeIntMirror meet(const TypeIntMirror& o) const {
return TypeIntHelper::int_type_union(this, &o);
}

// These allow TypeIntMirror to mimic the behaviors of TypeInt* and TypeLong*, so they can be
// passed into RangeInference methods. These are only used in testing, so they are implemented in
// the test file.
static TypeIntMirror make(const TypeIntMirror& t, int widen);
const TypeIntMirror* operator->() const;
TypeIntMirror meet(const TypeIntMirror& o) const;
bool contains(U u) const;
bool contains(const TypeIntMirror& o) const;
bool operator==(const TypeIntMirror& o) const;
@ -322,7 +326,7 @@ private:
// Infer a result given the input types of a binary operation
template <class CTP, class Inference>
static CTP infer_binary(CTP t1, CTP t2, Inference infer) {
CTP res;
TypeIntMirror<S<CTP>, U<CTP>> res;
bool is_init = false;

SimpleIntervalIterable<CTP> t1_simple_intervals(t1);
@ -330,10 +334,10 @@

for (auto& st1 : t1_simple_intervals) {
for (auto& st2 : t2_simple_intervals) {
CTP current = infer(st1, st2);
TypeIntMirror<S<CTP>, U<CTP>> current = infer(st1, st2);

if (is_init) {
res = res->meet(current)->template cast<CT<CTP>>();
res = res.meet(current);
} else {
is_init = true;
res = current;
@ -342,7 +346,22 @@
}

assert(is_init, "must be initialized");
return res;
// It is important that widen is computed on the whole result instead of during each step. This
// is because we normalize the widen of small Type instances to 0, so computing the widen value
// for each step and taking the union of them may return a widen value that conflicts with
// other computations, triggering the monotonicity assert during CCP.
//
// For example, let us consider the operation r = x ^ y:
// - During the first step of CCP, type(x) = {0}, type(y) = [-2, 2], w = 3.
// Since x is a constant that is the identity element of the xor operation, type(r) = type(y) = [-2, 2], w = 3
// - During the second step, type(x) is widened to [0, 2], w = 0.
// We then compute the range for:
// r1 = x ^ y1, type(x) = [0, 2], w = 0, type(y1) = [0, 2], w = 0
// r2 = x ^ y2, type(x) = [0, 2], w = 0, type(y2) = [-2, -1], w = 0
// This results in type(r1) = [0, 3], w = 0, and type(r2) = [-4, -1], w = 0
// So the union of type(r1) and type(r2) is [-4, 3], w = 0. This widen value is smaller than
// that of the previous step, triggering the monotonicity assert.
return CT<CTP>::make(res, MAX2(t1->_widen, t2->_widen));
}
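The xor example in the comment above can be checked by brute force; a throwaway sketch (plain C++, not HotSpot code):

    #include <algorithm>
    #include <cassert>
    int main() {
      // type(x) = [0, 2]; type(y1) = [0, 2]; type(y2) = [-2, -1]
      int lo1 = 100, hi1 = -100, lo2 = 100, hi2 = -100;
      for (int x = 0; x <= 2; x++) {
        for (int y1 = 0; y1 <= 2; y1++)   { lo1 = std::min(lo1, x ^ y1); hi1 = std::max(hi1, x ^ y1); }
        for (int y2 = -2; y2 <= -1; y2++) { lo2 = std::min(lo2, x ^ y2); hi2 = std::max(hi2, x ^ y2); }
      }
      assert(lo1 == 0 && hi1 == 3);    // type(r1) = [0, 3]
      assert(lo2 == -4 && hi2 == -1);  // type(r2) = [-4, -1]
      // union: [-4, 3], as stated in the comment above
      return 0;
    }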

public:
@ -357,7 +376,7 @@ public:
U<CTP> uhi = MIN2(st1._uhi, st2._uhi);
U<CTP> zeros = st1._bits._zeros | st2._bits._zeros;
U<CTP> ones = st1._bits._ones & st2._bits._ones;
return CT<CTP>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}}, MAX2(t1->_widen, t2->_widen));
return TypeIntMirror<S<CTP>, U<CTP>>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}});
});
}

@ -372,7 +391,7 @@ public:
U<CTP> uhi = std::numeric_limits<U<CTP>>::max();
U<CTP> zeros = st1._bits._zeros & st2._bits._zeros;
U<CTP> ones = st1._bits._ones | st2._bits._ones;
return CT<CTP>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}}, MAX2(t1->_widen, t2->_widen));
return TypeIntMirror<S<CTP>, U<CTP>>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}});
});
}

@ -385,7 +404,7 @@ public:
U<CTP> uhi = std::numeric_limits<U<CTP>>::max();
U<CTP> zeros = (st1._bits._zeros & st2._bits._zeros) | (st1._bits._ones & st2._bits._ones);
U<CTP> ones = (st1._bits._zeros & st2._bits._ones) | (st1._bits._ones & st2._bits._zeros);
return CT<CTP>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}}, MAX2(t1->_widen, t2->_widen));
return TypeIntMirror<S<CTP>, U<CTP>>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}});
});
}
};

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -799,6 +799,9 @@ public:
static const TypeInt* make(jint lo, jint hi, int widen);
static const Type* make_or_top(const TypeIntPrototype<jint, juint>& t, int widen);
static const TypeInt* make(const TypeIntPrototype<jint, juint>& t, int widen) { return make_or_top(t, widen)->is_int(); }
static const TypeInt* make(const TypeIntMirror<jint, juint>& t, int widen) {
return (new TypeInt(TypeIntPrototype<jint, juint>{{t._lo, t._hi}, {t._ulo, t._uhi}, t._bits}, widen, false))->hashcons()->is_int();
}

// Check for single integer
bool is_con() const { return _lo == _hi; }

@ -881,6 +884,9 @@ public:
static const TypeLong* make(jlong lo, jlong hi, int widen);
static const Type* make_or_top(const TypeIntPrototype<jlong, julong>& t, int widen);
static const TypeLong* make(const TypeIntPrototype<jlong, julong>& t, int widen) { return make_or_top(t, widen)->is_long(); }
static const TypeLong* make(const TypeIntMirror<jlong, julong>& t, int widen) {
return (new TypeLong(TypeIntPrototype<jlong, julong>{{t._lo, t._hi}, {t._ulo, t._uhi}, t._bits}, widen, false))->hashcons()->is_long();
}

// Check for single integer
bool is_con() const { return _lo == _hi; }

@ -1122,7 +1122,7 @@ Node* make_last(Node* initL, jint stride, Node* limitL, PhaseIdealLoop* phase) {
Node* last = new AddLNode(initL, k_mul_stride);

// Make sure that the last does not lie "before" init.
Node* last_clamped = MaxNode::build_min_max_long(&igvn, initL, last, stride > 0);
Node* last_clamped = MinMaxNode::build_min_max_long(&igvn, initL, last, stride > 0);

phase->register_new_node_with_ctrl_of(diffL, initL);
phase->register_new_node_with_ctrl_of(diffL_m1, initL);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -115,9 +115,6 @@
#if INCLUDE_MANAGEMENT
#include "services/finalizerService.hpp"
#endif
#ifdef LINUX
#include "osContainer_linux.hpp"
#endif

#include <errno.h>

@ -500,11 +497,9 @@ JVM_LEAF(jboolean, JVM_IsUseContainerSupport(void))
JVM_END

JVM_LEAF(jboolean, JVM_IsContainerized(void))
#ifdef LINUX
if (OSContainer::is_containerized()) {
if (os::is_containerized()) {
return JNI_TRUE;
}
#endif
return JNI_FALSE;
JVM_END

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -129,7 +129,6 @@
#ifdef LINUX
#include "cgroupSubsystem_linux.hpp"
#include "os_linux.hpp"
#include "osContainer_linux.hpp"
#endif

#define CHECK_JNI_EXCEPTION_(env, value) \

@ -2582,14 +2581,12 @@ WB_ENTRY(jboolean, WB_CheckLibSpecifiesNoexecstack(JNIEnv* env, jobject o, jstri
WB_END

WB_ENTRY(jboolean, WB_IsContainerized(JNIEnv* env, jobject o))
LINUX_ONLY(return OSContainer::is_containerized();)
return false;
return os::is_containerized();
WB_END

// Physical memory of the host machine (including containers)
WB_ENTRY(jlong, WB_HostPhysicalMemory(JNIEnv* env, jobject o))
LINUX_ONLY(return static_cast<jlong>(os::Linux::physical_memory());)
return static_cast<jlong>(os::physical_memory());
return static_cast<jlong>(os::Machine::physical_memory());
WB_END

// Available memory of the host machine (container-aware)

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -64,7 +64,6 @@
#include "runtime/vm_version.hpp"
#include "services/management.hpp"
#include "utilities/align.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/macros.hpp"

@ -1207,16 +1206,22 @@ bool Arguments::process_settings_file(const char* file_name, bool should_exist,
}

char token[1024];
int pos = 0;
size_t pos = 0;

bool in_white_space = true;
bool in_comment = false;
bool in_quote = false;
int quote_c = 0;
char quote_c = 0;
bool result = true;

int c = getc(stream);
while(c != EOF && pos < (int)(sizeof(token)-1)) {
int c_or_eof = getc(stream);
while (c_or_eof != EOF && pos < (sizeof(token) - 1)) {
// We have checked the c_or_eof for EOF. getc should only ever return the
// EOF or an unsigned char converted to an int. We cast down to a char to
// avoid the char to int promotions we would otherwise do in the comparisons
// below (which would be incorrect if we ever compared to a non-ascii char),
// and the int to char conversions we would otherwise do in the assignments.
const char c = static_cast<char>(c_or_eof);
if (in_white_space) {
if (in_comment) {
if (c == '\n') in_comment = false;
@ -1224,7 +1229,7 @@ bool Arguments::process_settings_file(const char* file_name, bool should_exist,
if (c == '#') in_comment = true;
else if (!isspace((unsigned char) c)) {
in_white_space = false;
token[pos++] = checked_cast<char>(c);
token[pos++] = c;
}
}
} else {
@ -1244,10 +1249,10 @@ bool Arguments::process_settings_file(const char* file_name, bool should_exist,
} else if (in_quote && (c == quote_c)) {
in_quote = false;
} else {
token[pos++] = checked_cast<char>(c);
token[pos++] = c;
}
}
c = getc(stream);
c_or_eof = getc(stream);
}
if (pos > 0) {
token[pos] = '\0';

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -466,10 +466,7 @@ void before_exit(JavaThread* thread, bool halt) {
event.commit();
}

// 2nd argument (emit_event_shutdown) should be set to false
// because EventShutdown would be emitted at Threads::destroy_vm().
// (one of the callers of before_exit())
JFR_ONLY(Jfr::on_vm_shutdown(true, false, halt);)
JFR_ONLY(Jfr::on_vm_shutdown(false, halt);)

// Stop the WatcherThread. We do this before disenrolling various
// PeriodicTasks to reduce the likelihood of races.

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -81,10 +81,6 @@
#include "utilities/permitForbiddenFunctions.hpp"
#include "utilities/powerOfTwo.hpp"

#ifdef LINUX
#include "osContainer_linux.hpp"
#endif

#ifndef _WINDOWS
# include <poll.h>
#endif

@ -2205,11 +2201,14 @@ static void assert_nonempty_range(const char* addr, size_t bytes) {
}

bool os::used_memory(physical_memory_size_type& value) {
#ifdef LINUX
if (OSContainer::is_containerized()) {
return OSContainer::memory_usage_in_bytes(value);
if (is_containerized()) {
return Container::used_memory(value);
}
#endif

return Machine::used_memory(value);
}

bool os::Machine::used_memory(physical_memory_size_type& value) {
physical_memory_size_type avail_mem = 0;
// Return value ignored - defaulting to 0 on failure.
(void)os::available_memory(avail_mem);
@ -2218,6 +2217,44 @@ bool os::used_memory(physical_memory_size_type& value) {
return true;
}

#ifndef LINUX
bool os::is_containerized() {
return false;
}

bool os::Container::processor_count(double& value) {
return false;
}

bool os::Container::available_memory(physical_memory_size_type& value) {
return false;
}

bool os::Container::used_memory(physical_memory_size_type& value) {
return false;
}

bool os::Container::total_swap_space(physical_memory_size_type& value) {
return false;
}

bool os::Container::free_swap_space(physical_memory_size_type& value) {
return false;
}

bool os::Container::memory_limit(physical_memory_size_type& value) {
return false;
}

bool os::Container::memory_soft_limit(physical_memory_size_type& value) {
return false;
}

bool os::Container::memory_throttle_limit(physical_memory_size_type& value) {
return false;
}
#endif

bool os::commit_memory(char* addr, size_t bytes, bool executable) {
assert_nonempty_range(addr, bytes);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -342,6 +342,52 @@ class os: AllStatic {
static bool is_server_class_machine();
static size_t rss();

// On platforms with container support (currently only Linux) we combine machine values with
// potential container values in os:: methods, abstracting which value is actually used.
// The os::Machine and os::Container classes and containing methods are used to get machine
// and container values (when available) separately.
static bool is_containerized();

// The os::Machine class reports system resource metrics from the perspective of the operating
// system, without considering container-imposed limits. The values returned by these methods
// reflect the resources visible to the process as reported by the OS, and may already be
// affected by mechanisms such as virtualization, hypervisor limits, or process affinity,
// but do NOT consider further restrictions imposed by container runtimes (e.g., cgroups)
class Machine : AllStatic {
public:
static int active_processor_count();

[[nodiscard]] static bool available_memory(physical_memory_size_type& value);
[[nodiscard]] static bool used_memory(physical_memory_size_type& value);
[[nodiscard]] static bool free_memory(physical_memory_size_type& value);

[[nodiscard]] static bool total_swap_space(physical_memory_size_type& value);
[[nodiscard]] static bool free_swap_space(physical_memory_size_type& value);

static physical_memory_size_type physical_memory();
};

// The os::Container class reports resource limits as imposed by a supported container runtime
// (currently only cgroup-based Linux runtimes). If the process is running inside a
// containerized environment, methods from this class report the effective limits imposed
// by the container, which may be more restrictive than what os::Machine reports.
// Methods return true and set the out-parameter if a limit is found,
// or false if no limit exists or it cannot be determined.
class Container : AllStatic {
public:
[[nodiscard]] static bool processor_count(double& value); // Returns the core-equivalent CPU quota

[[nodiscard]] static bool available_memory(physical_memory_size_type& value);
[[nodiscard]] static bool used_memory(physical_memory_size_type& value);

[[nodiscard]] static bool total_swap_space(physical_memory_size_type& value);
[[nodiscard]] static bool free_swap_space(physical_memory_size_type& value);

[[nodiscard]] static bool memory_limit(physical_memory_size_type& value);
[[nodiscard]] static bool memory_soft_limit(physical_memory_size_type& value);
[[nodiscard]] static bool memory_throttle_limit(physical_memory_size_type& value);
};

// Returns the id of the processor on which the calling thread is currently executing.
// The returned value is guaranteed to be between 0 and (os::processor_count() - 1).
static uint processor_id();
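A sketch of the intended query pattern for the os::Machine/os::Container declarations above (plain C++ against those declarations; the helper name and the fallback policy are illustrative assumptions, not part of the patch):

    // Prefer the container-imposed limit when one is reported, otherwise fall
    // back to the machine-wide value.
    physical_memory_size_type effective_memory_limit() {
      physical_memory_size_type limit = 0;
      if (os::is_containerized() && os::Container::memory_limit(limit)) {
        return limit;                         // cgroup-imposed limit found
      }
      return os::Machine::physical_memory();  // no container limit known
    }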
Some files were not shown because too many files have changed in this diff.