Merge commit d7c258f0ab (mirror of https://github.com/openjdk/jdk.git)
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,8 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBGTEST, \
     INCLUDE_FILES := gtest-all.cc gmock-all.cc, \
     DISABLED_WARNINGS_gcc := format-nonliteral maybe-uninitialized undef \
         unused-result zero-as-null-pointer-constant, \
-    DISABLED_WARNINGS_clang := format-nonliteral undef unused-result, \
+    DISABLED_WARNINGS_clang := format-nonliteral undef unused-result \
+        zero-as-null-pointer-constant, \
     DISABLED_WARNINGS_microsoft := 4530, \
     DEFAULT_CFLAGS := false, \
    CFLAGS := $(JVM_CFLAGS) \
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -722,22 +722,20 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,

   // Class initialization barrier for static methods
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
-  if (VM_Version::supports_fast_class_init_checks()) {
-    Label L_skip_barrier;
+  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+  Label L_skip_barrier;

-    { // Bypass the barrier for non-static methods
-      __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
-      __ andsw(zr, rscratch1, JVM_ACC_STATIC);
-      __ br(Assembler::EQ, L_skip_barrier); // non-static
-    }
+  // Bypass the barrier for non-static methods
+  __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
+  __ andsw(zr, rscratch1, JVM_ACC_STATIC);
+  __ br(Assembler::EQ, L_skip_barrier); // non-static

-    __ load_method_holder(rscratch2, rmethod);
-    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
-    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+  __ load_method_holder(rscratch2, rmethod);
+  __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
+  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

-    __ bind(L_skip_barrier);
-    entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
-  }
+  __ bind(L_skip_barrier);
+  entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();

   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
   bs->c2i_entry_barrier(masm);
@@ -1508,7 +1506,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // SVC, HVC, or SMC. Make it a NOP.
   __ nop();

-  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+  if (method->needs_clinit_barrier()) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     Label L_skip_barrier;
     __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
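Note: the same reshaping repeats for every port in this commit. The runtime feature test around the c2i class-initialization barrier becomes an assert, so the C2I_No_Clinit_Check entry is now emitted unconditionally. A minimal standalone C++ analogue of that control-flow change, with placeholder emit steps rather than the real MacroAssembler calls:

    #include <cassert>
    #include <cstdio>

    static bool cpu_supports_fast_checks() { return true; }                   // stand-in for VM_Version
    static bool is_static(int flags)       { return (flags & 0x0008) != 0; }  // JVM_ACC_STATIC

    // Old shape: the barrier and its adapter entry exist only if the CPU feature is present.
    const char* old_c2i_entry(int flags) {
      const char* entry = nullptr;
      if (cpu_supports_fast_checks()) {
        if (is_static(flags)) { /* emit clinit barrier + slow path */ }
        entry = "c2i_no_clinit_check";
      }
      return entry;
    }

    // New shape: the feature is asserted and the entry is always produced.
    const char* new_c2i_entry(int flags) {
      assert(cpu_supports_fast_checks() && "sanity");
      if (is_static(flags)) { /* emit clinit barrier + slow path */ }
      return "c2i_no_clinit_check";
    }

    int main() {
      std::printf("old: %s, new: %s\n", old_c2i_entry(0x0008), new_c2i_entry(0x0008));
      return 0;
    }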
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -2290,7 +2290,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
   __ subs(zr, temp, (int) code); // have we resolved this bytecode?

   // Class initialization barrier for static methods
-  if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+  if (bytecode() == Bytecodes::_invokestatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     __ br(Assembler::NE, L_clinit_barrier_slow);
     __ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
     __ load_method_holder(temp, temp);
@@ -2340,8 +2341,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
   __ subs(zr, temp, (int) code); // have we resolved this bytecode?

   // Class initialization barrier for static fields
-  if (VM_Version::supports_fast_class_init_checks() &&
-      (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+  if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register field_holder = temp;

     __ br(Assembler::NE, L_clinit_barrier_slow);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -356,10 +356,10 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
 bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
   assert(is_interpreted_frame(), "Not an interpreted frame");
   // These are reasonable sanity checks
-  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
+  if (fp() == nullptr || (intptr_t(fp()) & (wordSize-1)) != 0) {
     return false;
   }
-  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
+  if (sp() == nullptr || (intptr_t(sp()) & (wordSize-1)) != 0) {
     return false;
   }
   if (fp() + interpreter_frame_initial_sp_offset < sp()) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -172,7 +172,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {

   address addr = oop_addr != nullptr ? (address)oop_addr : (address)metadata_addr;

-  if(pc == 0) {
+  if (pc == nullptr) {
     offset = addr - instruction_address() - 8;
   } else {
     offset = addr - pc - 8;
@@ -228,7 +228,7 @@ void NativeMovConstReg::set_data(intptr_t x, address pc) {

 void NativeMovConstReg::set_pc_relative_offset(address addr, address pc) {
   int offset;
-  if (pc == 0) {
+  if (pc == nullptr) {
     offset = addr - instruction_address() - 8;
   } else {
     offset = addr - pc - 8;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -371,7 +371,7 @@ class NativeMovConstReg: public NativeInstruction {
 public:

   intptr_t data() const;
-  void set_data(intptr_t x, address pc = 0);
+  void set_data(intptr_t x, address pc = nullptr);
   bool is_pc_relative() {
     return !is_movw();
   }
@@ -1109,11 +1109,11 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
   lhz(R11_scratch1, in_bytes(DataLayout::bci_offset()), R28_mdx);
   ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
   addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
-  add(R11_scratch1, R12_scratch2, R12_scratch2);
+  add(R11_scratch1, R11_scratch1, R12_scratch2);
   cmpd(CR0, R11_scratch1, R14_bcp);
   beq(CR0, verify_continue);

-  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);
+  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), R19_method, R14_bcp, R28_mdx);

   bind(verify_continue);
 #endif
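The interp_masm hunk above is a behavioral fix, not a cleanup: the expected bytecode pointer is ConstMethod* plus ConstMethod::codes_offset() plus the profiled bci, but the old instruction added R12_scratch2 (the ConstMethod*) to itself, discarding the value accumulated in R11_scratch1. A tiny standalone rendering of the corrected arithmetic, with illustrative names:

    #include <cassert>
    #include <cstdint>

    // expected_bcp = const_method + codes_offset + bci, as the fixed PPC sequence computes it
    uintptr_t expected_bcp(uintptr_t const_method, uintptr_t codes_offset, uintptr_t bci) {
      uintptr_t r11 = bci;            // lhz  R11, bci_offset(R28_mdx)
      uintptr_t r12 = const_method;   // ld   R12, const_offset(R19_method)
      r11 += codes_offset;            // addi R11, R11, codes_offset
      r11 = r11 + r12;                // add  R11, R11, R12   (previously R12 + R12)
      return r11;
    }

    int main() {
      assert(expected_bcp(0x1000, 8, 42) == 0x1000 + 8 + 42);
      return 0;
    }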
@@ -4535,7 +4535,7 @@ void MacroAssembler::push_cont_fastpath() {
   Label done;
   ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
   cmpld(CR0, R1_SP, R0);
-  ble(CR0, done);
+  ble(CR0, done); // if (SP <= _cont_fastpath) goto done;
   st_ptr(R1_SP, JavaThread::cont_fastpath_offset(), R16_thread);
   bind(done);
 }
@@ -4546,7 +4546,7 @@ void MacroAssembler::pop_cont_fastpath() {
   Label done;
   ld_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
   cmpld(CR0, R1_SP, R0);
-  ble(CR0, done);
+  blt(CR0, done); // if (SP < _cont_fastpath) goto done;
   li(R0, 0);
   st_ptr(R0, JavaThread::cont_fastpath_offset(), R16_thread);
   bind(done);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1237,26 +1237,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,

   // Class initialization barrier for static methods
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
-  if (VM_Version::supports_fast_class_init_checks()) {
-    Label L_skip_barrier;
+  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+  Label L_skip_barrier;

-    { // Bypass the barrier for non-static methods
-      __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
-      __ andi_(R0, R0, JVM_ACC_STATIC);
-      __ beq(CR0, L_skip_barrier); // non-static
-    }
+  // Bypass the barrier for non-static methods
+  __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
+  __ andi_(R0, R0, JVM_ACC_STATIC);
+  __ beq(CR0, L_skip_barrier); // non-static

-    Register klass = R11_scratch1;
-    __ load_method_holder(klass, R19_method);
-    __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
+  Register klass = R11_scratch1;
+  __ load_method_holder(klass, R19_method);
+  __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);

-    __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
-    __ mtctr(klass);
-    __ bctr();
+  __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
+  __ mtctr(klass);
+  __ bctr();

-    __ bind(L_skip_barrier);
-    entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
-  }
+  __ bind(L_skip_barrier);
+  entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();

   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
   bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);
@@ -2210,7 +2208,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // --------------------------------------------------------------------------
   vep_start_pc = (intptr_t)__ pc();

-  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+  if (method->needs_clinit_barrier()) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     Label L_skip_barrier;
     Register klass = r_temp_1;
     // Notify OOP recorder (don't need the relocation)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2013, 2025 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -2199,7 +2199,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no, Register Rca
   __ isync(); // Order load wrt. succeeding loads.

   // Class initialization barrier for static methods
-  if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+  if (bytecode() == Bytecodes::_invokestatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register method = Rscratch;
     const Register klass = Rscratch;

@@ -2244,8 +2245,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no, Register Rcac
   __ isync(); // Order load wrt. succeeding loads.

   // Class initialization barrier for static fields
-  if (VM_Version::supports_fast_class_init_checks() &&
-      (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+  if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register field_holder = R4_ARG2;

     // InterpreterRuntime::resolve_get_put sets field_holder and finally release-stores put_code.
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -213,7 +213,7 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
 // Is vector's size (in bytes) bigger than a size saved by default?
 // riscv does not ovlerlay the floating-point registers on vector registers like aarch64.
 bool SharedRuntime::is_wide_vector(int size) {
-  return UseRVV;
+  return UseRVV && size > 0;
 }

 // ---------------------------------------------------------------------------
@@ -637,22 +637,20 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,

   // Class initialization barrier for static methods
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
-  if (VM_Version::supports_fast_class_init_checks()) {
-    Label L_skip_barrier;
+  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+  Label L_skip_barrier;

-    { // Bypass the barrier for non-static methods
-      __ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
-      __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
-      __ beqz(t1, L_skip_barrier); // non-static
-    }
+  // Bypass the barrier for non-static methods
+  __ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
+  __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
+  __ beqz(t1, L_skip_barrier); // non-static

-    __ load_method_holder(t1, xmethod);
-    __ clinit_barrier(t1, t0, &L_skip_barrier);
-    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+  __ load_method_holder(t1, xmethod);
+  __ clinit_barrier(t1, t0, &L_skip_barrier);
+  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

-    __ bind(L_skip_barrier);
-    entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
-  }
+  __ bind(L_skip_barrier);
+  entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();

   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
   bs->c2i_entry_barrier(masm);
@@ -1443,7 +1441,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ nop(); // 4 bytes
   }

-  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+  if (method->needs_clinit_barrier()) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     Label L_skip_barrier;
     __ mov_metadata(t1, method->method_holder()); // InstanceKlass*
     __ clinit_barrier(t1, t0, &L_skip_barrier);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -2192,7 +2192,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
   __ mv(t0, (int) code);

   // Class initialization barrier for static methods
-  if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+  if (bytecode() == Bytecodes::_invokestatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     __ bne(temp, t0, L_clinit_barrier_slow); // have we resolved this bytecode?
     __ ld(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
     __ load_method_holder(temp, temp);
@@ -2243,8 +2244,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
   __ mv(t0, (int) code); // have we resolved this bytecode?

   // Class initialization barrier for static fields
-  if (VM_Version::supports_fast_class_init_checks() &&
-      (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+  if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register field_holder = temp;

     __ bne(temp, t0, L_clinit_barrier_slow);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2024 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1567,7 +1567,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   //---------------------------------------------------------------------
   wrapper_VEPStart = __ offset();

-  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+  if (method->needs_clinit_barrier()) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     Label L_skip_barrier;
     Register klass = Z_R1_scratch;
     // Notify OOP recorder (don't need the relocation)
@@ -2378,24 +2379,22 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,

   // Class initialization barrier for static methods
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
-  if (VM_Version::supports_fast_class_init_checks()) {
-    Label L_skip_barrier;
+  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+  Label L_skip_barrier;

-    { // Bypass the barrier for non-static methods
-      __ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
-      __ z_bfalse(L_skip_barrier); // non-static
-    }
+  // Bypass the barrier for non-static methods
+  __ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
+  __ z_bfalse(L_skip_barrier); // non-static

-    Register klass = Z_R11;
-    __ load_method_holder(klass, Z_method);
-    __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
+  Register klass = Z_R11;
+  __ load_method_holder(klass, Z_method);
+  __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);

-    __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
-    __ z_br(klass);
+  __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
+  __ z_br(klass);

-    __ bind(L_skip_barrier);
-    entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
-  }
+  __ bind(L_skip_barrier);
+  entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();

   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
   return;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2024 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -2377,7 +2377,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
   __ z_cli(Address(Rcache, bc_offset), code);

   // Class initialization barrier for static methods
-  if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+  if (bytecode() == Bytecodes::_invokestatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register method = Z_R1_scratch;
     const Register klass = Z_R1_scratch;
     __ z_brne(L_clinit_barrier_slow);
@@ -2427,8 +2428,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
   __ z_cli(Address(cache, code_offset), code);

   // Class initialization barrier for static fields
-  if (VM_Version::supports_fast_class_init_checks() &&
-      (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+  if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register field_holder = index;

     __ z_brne(L_clinit_barrier_slow);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2024, 2025, Intel Corporation. All rights reserved.
+ * Copyright (c) 2024, 2026, Intel Corporation. All rights reserved.
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1330,10 +1330,12 @@ static void big_case_loop_helper(bool sizeKnown, int size, Label &noMatch, Label
   // Clarification: The BYTE_K compare above compares haystack[(n-32):(n-1)]. We need to
   // compare haystack[(k-1):(k-1+31)]. Subtracting either index gives shift value of
   // (k + 31 - n): x = (k-1+31)-(n-1) = k-1+31-n+1 = k+31-n.
+  // When isU is set, similarly, shift is from haystack[(n-32):(n-1)] to [(k-2):(k-2+31)]
+
   if (sizeKnown) {
-    __ movl(temp2, 31 + size);
+    __ movl(temp2, (isU ? 30 : 31) + size);
   } else {
-    __ movl(temp2, 31);
+    __ movl(temp2, isU ? 30 : 31);
     __ addl(temp2, needleLen);
   }
   __ subl(temp2, hsLength);
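The new constant follows from the comment in the hunk: the previous 32-byte compare covered haystack[(n-32):(n-1)], and re-aligning to the window starting at k-1 (or k-2 for UTF-16) needs a shift of k+31-n (or k+30-n). A standalone restatement of the value the movl/addl/subl sequence leaves in temp2, for illustration only:

    #include <cassert>

    // temp2 = (isU ? 30 : 31) + needle_len - haystack_len
    int window_shift(int needle_len, int haystack_len, bool isU) {
      return (isU ? 30 : 31) + needle_len - haystack_len;
    }

    int main() {
      assert(window_shift(10, 32, false) == 9);  // Latin-1: k + 31 - n
      assert(window_shift(10, 32, true)  == 8);  // UTF-16:  k + 30 - n
      return 0;
    }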
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,14 +32,10 @@ const KRegister::KRegisterImpl all_KRegisterImpls [KRegister::number_

 const char * Register::RegisterImpl::name() const {
   static const char *const names[number_of_registers] = {
-#ifdef _LP64
     "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
     "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
     "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
     "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
-#else
-    "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
-#endif // _LP64
   };
   return is_valid() ? names[encoding()] : "noreg";
 }
@@ -54,11 +50,9 @@ const char* FloatRegister::FloatRegisterImpl::name() const {
 const char* XMMRegister::XMMRegisterImpl::name() const {
   static const char *const names[number_of_registers] = {
     "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
-#ifdef _LP64
     ,"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
     ,"xmm16", "xmm17", "xmm18", "xmm19", "xmm20", "xmm21", "xmm22", "xmm23"
     ,"xmm24", "xmm25", "xmm26", "xmm27", "xmm28", "xmm29", "xmm30", "xmm31"
-#endif // _LP64
   };
   return is_valid() ? names[encoding()] : "xnoreg";
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
 class VMRegImpl;
 typedef VMRegImpl* VMReg;

-// The implementation of integer registers for the x86/x64 architectures.
+// The implementation of integer registers for the x64 architectures.
 class Register {
 private:
   int _encoding;
@@ -44,11 +44,9 @@ private:
 public:
   inline friend constexpr Register as_Register(int encoding);

-  enum {
-    number_of_registers = LP64_ONLY( 32 ) NOT_LP64( 8 ),
-    number_of_byte_registers = LP64_ONLY( 32 ) NOT_LP64( 4 ),
-    max_slots_per_register = LP64_ONLY( 2 ) NOT_LP64( 1 )
-  };
+  static const int number_of_registers = 32;
+  static const int number_of_byte_registers = 32;
+  static const int max_slots_per_register = 2;

   class RegisterImpl: public AbstractRegisterImpl {
     friend class Register;
@@ -79,11 +77,9 @@ public:

   // Actually available GP registers for use, depending on actual CPU capabilities and flags.
   static int available_gp_registers() {
-#ifdef _LP64
     if (!UseAPX) {
       return number_of_registers / 2;
     }
-#endif // _LP64
     return number_of_registers;
   }
 };
@@ -116,9 +112,8 @@ constexpr Register rsp = as_Register(4);
 constexpr Register rbp = as_Register(5);
 constexpr Register rsi = as_Register(6);
 constexpr Register rdi = as_Register(7);
-#ifdef _LP64
-constexpr Register r8 = as_Register( 8);
-constexpr Register r9 = as_Register( 9);
+constexpr Register r8 = as_Register(8);
+constexpr Register r9 = as_Register(9);
 constexpr Register r10 = as_Register(10);
 constexpr Register r11 = as_Register(11);
 constexpr Register r12 = as_Register(12);
@@ -141,7 +136,6 @@ constexpr Register r28 = as_Register(28);
 constexpr Register r29 = as_Register(29);
 constexpr Register r30 = as_Register(30);
 constexpr Register r31 = as_Register(31);
-#endif // _LP64


 // The implementation of x87 floating point registers for the ia32 architecture.
@@ -154,10 +148,8 @@ private:
 public:
   inline friend constexpr FloatRegister as_FloatRegister(int encoding);

-  enum {
-    number_of_registers = 8,
-    max_slots_per_register = 2
-  };
+  static const int number_of_registers = 8;
+  static const int max_slots_per_register = 2;

   class FloatRegisterImpl: public AbstractRegisterImpl {
     friend class FloatRegister;
@@ -217,10 +209,8 @@ private:
 public:
   inline friend constexpr XMMRegister as_XMMRegister(int encoding);

-  enum {
-    number_of_registers = LP64_ONLY( 32 ) NOT_LP64( 8 ),
-    max_slots_per_register = LP64_ONLY( 16 ) NOT_LP64( 16 ) // 512-bit
-  };
+  static const int number_of_registers = 32;
+  static const int max_slots_per_register = 16; // 512-bit

   class XMMRegisterImpl: public AbstractRegisterImpl {
     friend class XMMRegister;
@@ -250,11 +240,9 @@ public:

   // Actually available XMM registers for use, depending on actual CPU capabilities and flags.
   static int available_xmm_registers() {
-#ifdef _LP64
     if (UseAVX < 3) {
       return number_of_registers / 2;
     }
-#endif // _LP64
     return number_of_registers;
   }
 };
@@ -287,7 +275,6 @@ constexpr XMMRegister xmm4 = as_XMMRegister( 4);
 constexpr XMMRegister xmm5 = as_XMMRegister( 5);
 constexpr XMMRegister xmm6 = as_XMMRegister( 6);
 constexpr XMMRegister xmm7 = as_XMMRegister( 7);
-#ifdef _LP64
 constexpr XMMRegister xmm8 = as_XMMRegister( 8);
 constexpr XMMRegister xmm9 = as_XMMRegister( 9);
 constexpr XMMRegister xmm10 = as_XMMRegister(10);
@@ -312,7 +299,6 @@ constexpr XMMRegister xmm28 = as_XMMRegister(28);
 constexpr XMMRegister xmm29 = as_XMMRegister(29);
 constexpr XMMRegister xmm30 = as_XMMRegister(30);
 constexpr XMMRegister xmm31 = as_XMMRegister(31);
-#endif // _LP64


 // The implementation of AVX-512 opmask registers.
@@ -394,25 +380,17 @@ constexpr KRegister k7 = as_KRegister(7);
 // Define a class that exports it.
 class ConcreteRegisterImpl : public AbstractRegisterImpl {
  public:
-  enum {
-    max_gpr = Register::number_of_registers * Register::max_slots_per_register,
-    max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register,
-    max_xmm = max_fpr + XMMRegister::number_of_registers * XMMRegister::max_slots_per_register,
-    max_kpr = max_xmm + KRegister::number_of_registers * KRegister::max_slots_per_register,
+  static const int max_gpr = Register::number_of_registers * Register::max_slots_per_register;
+  static const int max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
+  static const int max_xmm = max_fpr + XMMRegister::number_of_registers * XMMRegister::max_slots_per_register;
+  static const int max_kpr = max_xmm + KRegister::number_of_registers * KRegister::max_slots_per_register;

   // A big enough number for C2: all the registers plus flags
   // This number must be large enough to cover REG_COUNT (defined by c2) registers.
   // There is no requirement that any ordering here matches any ordering c2 gives
   // it's optoregs.
-
-  // x86_32.ad defines additional dummy FILL0-FILL7 registers, in order to tally
-  // REG_COUNT (computed by ADLC based on the number of reg_defs seen in .ad files)
-  // with ConcreteRegisterImpl::number_of_registers additional count of 8 is being
-  // added for 32 bit jvm.
-    number_of_registers = max_kpr +       // gpr/fpr/xmm/kpr
-                          NOT_LP64( 8 + ) // FILL0-FILL7 in x86_32.ad
-                          1               // eflags
-  };
+  static const int number_of_registers = max_kpr + // gpr/fpr/xmm/kpr
+                                         1;        // eflags
 };

 template <>
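All of the register_x86.hpp hunks apply the same two simplifications: the NOT_LP64 alternatives go away now that only 64-bit x86 remains, and the anonymous enums holding register counts become static const int members. A small standalone illustration of the idiom change (not the actual HotSpot declarations):

    // Before: counts as enumerators, with the 32-bit value selected by NOT_LP64.
    class OldStyle {
    public:
      enum {
        number_of_registers    = 32,  // was LP64_ONLY( 32 ) NOT_LP64( 8 )
        max_slots_per_register = 2
      };
    };

    // After: plain integral constants with the 64-bit values baked in.
    class NewStyle {
    public:
      static const int number_of_registers    = 32;
      static const int max_slots_per_register = 2;
    };

    static_assert(OldStyle::number_of_registers == NewStyle::number_of_registers, "same value");

    int main() { return 0; }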
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1043,26 +1043,24 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,

   // Class initialization barrier for static methods
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
-  if (VM_Version::supports_fast_class_init_checks()) {
-    Label L_skip_barrier;
-    Register method = rbx;
+  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+  Label L_skip_barrier;
+  Register method = rbx;

-    { // Bypass the barrier for non-static methods
-      Register flags = rscratch1;
-      __ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
-      __ testl(flags, JVM_ACC_STATIC);
-      __ jcc(Assembler::zero, L_skip_barrier); // non-static
-    }
+  // Bypass the barrier for non-static methods
+  Register flags = rscratch1;
+  __ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
+  __ testl(flags, JVM_ACC_STATIC);
+  __ jcc(Assembler::zero, L_skip_barrier); // non-static

-    Register klass = rscratch1;
-    __ load_method_holder(klass, method);
-    __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
+  Register klass = rscratch1;
+  __ load_method_holder(klass, method);
+  __ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);

-    __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
+  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path

-    __ bind(L_skip_barrier);
-    entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
-  }
+  __ bind(L_skip_barrier);
+  entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();

   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
   bs->c2i_entry_barrier(masm);
@@ -1904,7 +1902,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

   int vep_offset = ((intptr_t)__ pc()) - start;

-  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+  if (method->needs_clinit_barrier()) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     Label L_skip_barrier;
     Register klass = r10;
     __ mov_metadata(klass, method->method_holder()); // InstanceKlass*
@@ -3602,4 +3601,3 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
 }

 #endif // INCLUDE_JFR
-
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,39 @@ static address kyberAvx512ConstsAddr(int offset) {

 const Register scratch = r10;

+ATTRIBUTE_ALIGNED(64) static const uint8_t kyberAvx512_12To16Dup[] = {
+    // 0 - 63
+    0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15, 16,
+    16, 17, 18, 19, 19, 20, 21, 22, 22, 23, 24, 25, 25, 26, 27, 28, 28, 29, 30,
+    31, 31, 32, 33, 34, 34, 35, 36, 37, 37, 38, 39, 40, 40, 41, 42, 43, 43, 44,
+    45, 46, 46, 47
+};
+
+static address kyberAvx512_12To16DupAddr() {
+  return (address) kyberAvx512_12To16Dup;
+}
+
+ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512_12To16Shift[] = {
+    // 0 - 31
+    0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0,
+    4, 0, 4, 0, 4, 0, 4
+};
+
+static address kyberAvx512_12To16ShiftAddr() {
+  return (address) kyberAvx512_12To16Shift;
+}
+
+ATTRIBUTE_ALIGNED(64) static const uint64_t kyberAvx512_12To16And[] = {
+    // 0 - 7
+    0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
+    0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
+    0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF
+};
+
+static address kyberAvx512_12To16AndAddr() {
+  return (address) kyberAvx512_12To16And;
+}
+
 ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512NttPerms[] = {
     // 0
     0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
@@ -822,10 +855,65 @@ address generate_kyber12To16_avx512(StubGenerator *stubgen,

   const Register perms = r11;

-  Label Loop;
+  Label Loop, VBMILoop;

   __ addptr(condensed, condensedOffs);

+  if (VM_Version::supports_avx512_vbmi()) {
+    // mask load for the first 48 bytes of each vector
+    __ mov64(rax, 0x0000FFFFFFFFFFFF);
+    __ kmovql(k1, rax);
+
+    __ lea(perms, ExternalAddress(kyberAvx512_12To16DupAddr()));
+    __ evmovdqub(xmm20, Address(perms), Assembler::AVX_512bit);
+
+    __ lea(perms, ExternalAddress(kyberAvx512_12To16ShiftAddr()));
+    __ evmovdquw(xmm21, Address(perms), Assembler::AVX_512bit);
+
+    __ lea(perms, ExternalAddress(kyberAvx512_12To16AndAddr()));
+    __ evmovdquq(xmm22, Address(perms), Assembler::AVX_512bit);
+
+    __ align(OptoLoopAlignment);
+    __ BIND(VBMILoop);
+
+    __ evmovdqub(xmm0, k1, Address(condensed, 0), false,
+                 Assembler::AVX_512bit);
+    __ evmovdqub(xmm1, k1, Address(condensed, 48), false,
+                 Assembler::AVX_512bit);
+    __ evmovdqub(xmm2, k1, Address(condensed, 96), false,
+                 Assembler::AVX_512bit);
+    __ evmovdqub(xmm3, k1, Address(condensed, 144), false,
+                 Assembler::AVX_512bit);
+
+    __ evpermb(xmm4, k0, xmm20, xmm0, false, Assembler::AVX_512bit);
+    __ evpermb(xmm5, k0, xmm20, xmm1, false, Assembler::AVX_512bit);
+    __ evpermb(xmm6, k0, xmm20, xmm2, false, Assembler::AVX_512bit);
+    __ evpermb(xmm7, k0, xmm20, xmm3, false, Assembler::AVX_512bit);
+
+    __ evpsrlvw(xmm4, xmm4, xmm21, Assembler::AVX_512bit);
+    __ evpsrlvw(xmm5, xmm5, xmm21, Assembler::AVX_512bit);
+    __ evpsrlvw(xmm6, xmm6, xmm21, Assembler::AVX_512bit);
+    __ evpsrlvw(xmm7, xmm7, xmm21, Assembler::AVX_512bit);
+
+    __ evpandq(xmm0, xmm22, xmm4, Assembler::AVX_512bit);
+    __ evpandq(xmm1, xmm22, xmm5, Assembler::AVX_512bit);
+    __ evpandq(xmm2, xmm22, xmm6, Assembler::AVX_512bit);
+    __ evpandq(xmm3, xmm22, xmm7, Assembler::AVX_512bit);
+
+    store4regs(parsed, 0, xmm0_3, _masm);
+
+    __ addptr(condensed, 192);
+    __ addptr(parsed, 256);
+    __ subl(parsedLength, 128);
+    __ jcc(Assembler::greater, VBMILoop);
+
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ mov64(rax, 0); // return 0
+    __ ret(0);
+
+    return start;
+  }
+
   __ lea(perms, ExternalAddress(kyberAvx512_12To16PermsAddr()));

   load4regs(xmm24_27, perms, 0, _masm);
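The new constants plus VBMILoop implement a 12-bit to 16-bit unpack: each 48-byte group of packed coefficients is byte-permuted (kyberAvx512_12To16Dup) so every 16-bit lane holds the two bytes containing one coefficient, shifted right by 0 or 4 (kyberAvx512_12To16Shift), and masked with 0x0FFF (kyberAvx512_12To16And); one iteration consumes 192 condensed bytes and produces 128 coefficients. A scalar sketch of the same transformation, assuming the standard ML-KEM little-endian 12-bit packing, one byte triple at a time:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Expand packed 12-bit values (3 bytes -> 2 coefficients) into 16-bit values,
    // mirroring the permute / variable shift / and-0x0FFF steps of the stub.
    std::vector<uint16_t> unpack12to16(const std::vector<uint8_t>& in) {
      std::vector<uint16_t> out;
      for (size_t i = 0; i + 2 < in.size(); i += 3) {
        uint16_t lo = static_cast<uint16_t>(in[i]     | (in[i + 1] << 8));  // bytes 0,1 -> shift 0
        uint16_t hi = static_cast<uint16_t>(in[i + 1] | (in[i + 2] << 8));  // bytes 1,2 -> shift 4
        out.push_back(lo & 0x0FFF);
        out.push_back((hi >> 4) & 0x0FFF);
      }
      return out;
    }

    int main() {
      // Coefficients 0xABC and 0xDEF pack into the bytes BC FA DE.
      std::vector<uint8_t> packed = {0xBC, 0xFA, 0xDE};
      auto v = unpack12to16(packed);
      assert(v.size() == 2 && v[0] == 0x0ABC && v[1] == 0x0DEF);
      return 0;
    }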
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2216,7 +2216,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
   __ cmpl(temp, code); // have we resolved this bytecode?

   // Class initialization barrier for static methods
-  if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+  if (bytecode() == Bytecodes::_invokestatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register method = temp;
     const Register klass = temp;

@@ -2264,8 +2265,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
   __ cmpl(temp, code); // have we resolved this bytecode?

   // Class initialization barrier for static fields
-  if (VM_Version::supports_fast_class_init_checks() &&
-      (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+  if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register field_holder = temp;

     __ jcc(Assembler::notEqual, L_clinit_barrier_slow);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,7 @@ void VMRegImpl::set_regName() {
   int i;
   for (i = 0; i < ConcreteRegisterImpl::max_gpr ; ) {
     regName[i++] = reg->name();
-#ifdef AMD64
     regName[i++] = reg->name();
-#endif // AMD64
     reg = reg->successor();
   }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,14 +52,8 @@ inline bool is_KRegister() {
 }

 inline Register as_Register() {
-
-  assert( is_Register(), "must be");
-  // Yuk
-#ifdef AMD64
+  assert(is_Register(), "must be");
   return ::as_Register(value() >> 1);
-#else
-  return ::as_Register(value());
-#endif // AMD64
 }

 inline FloatRegister as_FloatRegister() {
@@ -82,9 +76,6 @@ inline KRegister as_KRegister() {

 inline bool is_concrete() {
   assert(is_reg(), "must be");
-#ifndef AMD64
-  if (is_Register()) return true;
-#endif // AMD64
   // Do not use is_XMMRegister() here as it depends on the UseAVX setting.
   if (value() >= ConcreteRegisterImpl::max_fpr && value() < ConcreteRegisterImpl::max_xmm) {
     int base = value() - ConcreteRegisterImpl::max_fpr;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #define CPU_X86_VMREG_X86_INLINE_HPP

 inline VMReg Register::RegisterImpl::as_VMReg() const {
-  return VMRegImpl::as_VMReg(encoding() LP64_ONLY( << 1 ));
+  return VMRegImpl::as_VMReg(encoding() << 1);
 }

 inline VMReg FloatRegister::FloatRegisterImpl::as_VMReg() const {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -258,10 +258,18 @@ bool os::free_memory(physical_memory_size_type& value) {
   return Aix::available_memory(value);
 }

+bool os::Machine::free_memory(physical_memory_size_type& value) {
+  return Aix::available_memory(value);
+}
+
 bool os::available_memory(physical_memory_size_type& value) {
   return Aix::available_memory(value);
 }

+bool os::Machine::available_memory(physical_memory_size_type& value) {
+  return Aix::available_memory(value);
+}
+
 bool os::Aix::available_memory(physical_memory_size_type& value) {
   os::Aix::meminfo_t mi;
   if (os::Aix::get_meminfo(&mi)) {
@@ -273,6 +281,10 @@ bool os::Aix::available_memory(physical_memory_size_type& value) {
 }

 bool os::total_swap_space(physical_memory_size_type& value) {
+  return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
   perfstat_memory_total_t memory_info;
   if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
     return false;
@@ -282,6 +294,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
 }

 bool os::free_swap_space(physical_memory_size_type& value) {
+  return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
   perfstat_memory_total_t memory_info;
   if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
     return false;
@@ -294,6 +310,10 @@ physical_memory_size_type os::physical_memory() {
   return Aix::physical_memory();
 }

+physical_memory_size_type os::Machine::physical_memory() {
+  return Aix::physical_memory();
+}
+
 size_t os::rss() { return (size_t)0; }

 // Cpu architecture string
@@ -2264,6 +2284,10 @@ int os::active_processor_count() {
     return ActiveProcessorCount;
   }

+  return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
   return online_cpus;
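The AIX and BSD hunks introduce os::Machine variants of the memory and CPU queries; the existing os:: entry points for swap space and active_processor_count now simply delegate to them, while the platform backends remain the single implementation. A minimal standalone sketch of that layering (namespaces used here for brevity; the names are illustrative, not the HotSpot declarations):

    #include <cstdio>

    using physical_memory_size_type = unsigned long long;

    namespace platform {                 // stands in for os::Aix / os::Bsd
      bool total_swap_space(physical_memory_size_type& value) { value = 4ull << 30; return true; }
    }

    namespace os {
      namespace Machine {                // new host-level API
        bool total_swap_space(physical_memory_size_type& value) {
          return platform::total_swap_space(value);
        }
      }
      bool total_swap_space(physical_memory_size_type& value) {  // existing entry point delegates
        return Machine::total_swap_space(value);
      }
    }

    int main() {
      physical_memory_size_type v = 0;
      if (os::total_swap_space(v)) std::printf("%llu bytes of swap\n", v);
      return 0;
    }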
@@ -132,7 +132,7 @@ public:
   static const char* tagToStr(uint32_t user_tag) {
     switch (user_tag) {
       case 0:
-        return 0;
+        return nullptr;
       X1(MALLOC, malloc);
       X1(MALLOC_SMALL, malloc_small);
       X1(MALLOC_LARGE, malloc_large);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -137,10 +137,18 @@ bool os::available_memory(physical_memory_size_type& value) {
   return Bsd::available_memory(value);
 }

+bool os::Machine::available_memory(physical_memory_size_type& value) {
+  return Bsd::available_memory(value);
+}
+
 bool os::free_memory(physical_memory_size_type& value) {
   return Bsd::available_memory(value);
 }

+bool os::Machine::free_memory(physical_memory_size_type& value) {
+  return Bsd::available_memory(value);
+}
+
 // Available here means free. Note that this number is of no much use. As an estimate
 // for future memory pressure it is far too conservative, since MacOS will use a lot
 // of unused memory for caches, and return it willingly in case of needs.
@@ -181,6 +189,10 @@ void os::Bsd::print_uptime_info(outputStream* st) {
 }

 bool os::total_swap_space(physical_memory_size_type& value) {
+  return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
 #if defined(__APPLE__)
   struct xsw_usage vmusage;
   size_t size = sizeof(vmusage);
@@ -195,6 +207,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
 }

 bool os::free_swap_space(physical_memory_size_type& value) {
+  return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
 #if defined(__APPLE__)
   struct xsw_usage vmusage;
   size_t size = sizeof(vmusage);
@@ -212,6 +228,10 @@ physical_memory_size_type os::physical_memory() {
   return Bsd::physical_memory();
 }

+physical_memory_size_type os::Machine::physical_memory() {
+  return Bsd::physical_memory();
+}
+
 size_t os::rss() {
   size_t rss = 0;
 #ifdef __APPLE__
@@ -608,7 +628,7 @@ static void *thread_native_entry(Thread *thread) {
   log_info(os, thread)("Thread finished (tid: %zu, pthread id: %zu).",
     os::current_thread_id(), (uintx) pthread_self());

-  return 0;
+  return nullptr;
 }

 bool os::create_thread(Thread* thread, ThreadType thr_type,
@@ -1400,7 +1420,7 @@ int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *pa
 #elif defined(__APPLE__)
   for (uint32_t i = 1; i < _dyld_image_count(); i++) {
     // Value for top_address is returned as 0 since we don't have any information about module size
-    if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), (address)0, param)) {
+    if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), nullptr, param)) {
      return 1;
     }
   }
@@ -2189,6 +2209,10 @@ int os::active_processor_count() {
     return ActiveProcessorCount;
   }

+  return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
   return _processor_count;
 }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -631,22 +631,20 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
 * return:
 *          true if there were no errors. false otherwise.
 */
-bool CgroupSubsystem::active_processor_count(int& value) {
-  int cpu_count;
-  int result = -1;
-
+bool CgroupSubsystem::active_processor_count(double& value) {
   // We use a cache with a timeout to avoid performing expensive
   // computations in the event this function is called frequently.
   // [See 8227006].
-  CachingCgroupController<CgroupCpuController>* contrl = cpu_controller();
-  CachedMetric* cpu_limit = contrl->metrics_cache();
+  CachingCgroupController<CgroupCpuController, double>* contrl = cpu_controller();
+  CachedMetric<double>* cpu_limit = contrl->metrics_cache();
   if (!cpu_limit->should_check_metric()) {
-    value = (int)cpu_limit->value();
-    log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", value);
+    value = cpu_limit->value();
+    log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %.2f", value);
     return true;
   }

-  cpu_count = os::Linux::active_processor_count();
+  int cpu_count = os::Linux::active_processor_count();
+  double result = -1;
   if (!CgroupUtil::processor_count(contrl->controller(), cpu_count, result)) {
     return false;
   }
@@ -671,8 +669,8 @@ bool CgroupSubsystem::active_processor_count(int& value) {
 */
 bool CgroupSubsystem::memory_limit_in_bytes(physical_memory_size_type upper_bound,
                                             physical_memory_size_type& value) {
-  CachingCgroupController<CgroupMemoryController>* contrl = memory_controller();
-  CachedMetric* memory_limit = contrl->metrics_cache();
+  CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* contrl = memory_controller();
+  CachedMetric<physical_memory_size_type>* memory_limit = contrl->metrics_cache();
   if (!memory_limit->should_check_metric()) {
     value = memory_limit->value();
     return true;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -181,20 +181,21 @@ class CgroupController: public CHeapObj<mtInternal> {
     static bool limit_from_str(char* limit_str, physical_memory_size_type& value);
 };

+template <typename MetricType>
 class CachedMetric : public CHeapObj<mtInternal>{
   private:
-    volatile physical_memory_size_type _metric;
+    volatile MetricType _metric;
     volatile jlong _next_check_counter;
   public:
     CachedMetric() {
-      _metric = value_unlimited;
+      _metric = static_cast<MetricType>(value_unlimited);
      _next_check_counter = min_jlong;
     }
     bool should_check_metric() {
      return os::elapsed_counter() > _next_check_counter;
     }
-    physical_memory_size_type value() { return _metric; }
-    void set_value(physical_memory_size_type value, jlong timeout) {
+    MetricType value() { return _metric; }
+    void set_value(MetricType value, jlong timeout) {
      _metric = value;
      // Metric is unlikely to change, but we want to remain
      // responsive to configuration changes. A very short grace time
@@ -205,19 +206,19 @@ class CachedMetric : public CHeapObj<mtInternal>{
     }
 };

-template <class T>
+template <class T, typename MetricType>
 class CachingCgroupController : public CHeapObj<mtInternal> {
   private:
     T* _controller;
-    CachedMetric* _metrics_cache;
+    CachedMetric<MetricType>* _metrics_cache;

   public:
     CachingCgroupController(T* cont) {
      _controller = cont;
-      _metrics_cache = new CachedMetric();
+      _metrics_cache = new CachedMetric<MetricType>();
     }

-    CachedMetric* metrics_cache() { return _metrics_cache; }
+    CachedMetric<MetricType>* metrics_cache() { return _metrics_cache; }
     T* controller() { return _controller; }
 };

@@ -277,7 +278,7 @@ class CgroupMemoryController: public CHeapObj<mtInternal> {
 class CgroupSubsystem: public CHeapObj<mtInternal> {
   public:
     bool memory_limit_in_bytes(physical_memory_size_type upper_bound, physical_memory_size_type& value);
-    bool active_processor_count(int& value);
+    bool active_processor_count(double& value);

     virtual bool pids_max(uint64_t& value) = 0;
     virtual bool pids_current(uint64_t& value) = 0;
@@ -286,8 +287,8 @@ class CgroupSubsystem: public CHeapObj<mtInternal> {
     virtual char * cpu_cpuset_cpus() = 0;
     virtual char * cpu_cpuset_memory_nodes() = 0;
     virtual const char * container_type() = 0;
-    virtual CachingCgroupController<CgroupMemoryController>* memory_controller() = 0;
-    virtual CachingCgroupController<CgroupCpuController>* cpu_controller() = 0;
+    virtual CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() = 0;
+    virtual CachingCgroupController<CgroupCpuController, double>* cpu_controller() = 0;
     virtual CgroupCpuacctController* cpuacct_controller() = 0;

     bool cpu_quota(int& value);
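CachedMetric and CachingCgroupController now take the metric type as a template parameter, so the CPU cache can hold a double while the memory cache keeps physical_memory_size_type. A self-contained sketch of the parameterized cache, simplified so a plain counter replaces os::elapsed_counter() and -1 stands in for value_unlimited:

    #include <cstdio>

    template <typename MetricType>
    class CachedMetric {
     private:
      MetricType _metric;
      long long  _next_check;   // deadline expressed in the caller's counter units
     public:
      CachedMetric() : _metric(static_cast<MetricType>(-1)), _next_check(-1) {}
      bool should_check_metric(long long now) const { return now > _next_check; }
      MetricType value() const { return _metric; }
      void set_value(MetricType value, long long now, long long timeout) {
        _metric = value;
        _next_check = now + timeout;
      }
    };

    int main() {
      CachedMetric<double> cpu_limit;                  // fractional CPUs
      CachedMetric<unsigned long long> memory_limit;   // bytes
      cpu_limit.set_value(1.5, /*now=*/0, /*timeout=*/20);
      memory_limit.set_value(512ull << 20, 0, 20);
      std::printf("cpu %.2f, mem %llu, recheck at 10? %d\n",
                  cpu_limit.value(), memory_limit.value(), (int)cpu_limit.should_check_metric(10));
      return 0;
    }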
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2024, 2025, Red Hat, Inc.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +26,8 @@
 #include "cgroupUtil_linux.hpp"
 #include "os_linux.hpp"

-bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, int& value) {
+bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, double& value) {
   assert(upper_bound > 0, "upper bound of cpus must be positive");
-  int limit_count = upper_bound;
   int quota = -1;
   int period = -1;
   if (!cpu_ctrl->cpu_quota(quota)) {
@@ -37,20 +37,15 @@ bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound,
     return false;
   }
-  int quota_count = 0;
-  int result = upper_bound;
+  double result = upper_bound;

-  if (quota > -1 && period > 0) {
-    quota_count = ceilf((float)quota / (float)period);
-    log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count);
+  if (quota > 0 && period > 0) { // Use quotas
+    double cpu_quota = static_cast<double>(quota) / period;
+    log_trace(os, container)("CPU Quota based on quota/period: %.2f", cpu_quota);
+    result = MIN2(result, cpu_quota);
   }

-  // Use quotas
-  if (quota_count != 0) {
-    limit_count = quota_count;
-  }
-
-  result = MIN2(upper_bound, limit_count);
-  log_trace(os, container)("OSContainer::active_processor_count: %d", result);
+  log_trace(os, container)("OSContainer::active_processor_count: %.2f", result);
   value = result;
   return true;
 }
@@ -73,11 +68,11 @@ physical_memory_size_type CgroupUtil::get_updated_mem_limit(CgroupMemoryControll

 // Get an updated cpu limit. The return value is strictly less than or equal to the
 // passed in 'lowest' value.
-int CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
+double CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
                                       int lowest,
                                       int upper_bound) {
   assert(lowest > 0 && lowest <= upper_bound, "invariant");
-  int cpu_limit_val = -1;
+  double cpu_limit_val = -1;
   if (CgroupUtil::processor_count(cpu, upper_bound, cpu_limit_val) && cpu_limit_val != upper_bound) {
     assert(cpu_limit_val <= upper_bound, "invariant");
     if (lowest > cpu_limit_val) {
@@ -172,7 +167,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
   assert(cg_path[0] == '/', "cgroup path must start with '/'");
   int host_cpus = os::Linux::active_processor_count();
   int lowest_limit = host_cpus;
-  int cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
+  double cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
   int orig_limit = lowest_limit != host_cpus ? lowest_limit : host_cpus;
   char* limit_cg_path = nullptr;
   while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
@ -1,5 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2024, Red Hat, Inc.
|
||||
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -31,7 +32,7 @@
|
||||
class CgroupUtil: AllStatic {
|
||||
|
||||
public:
|
||||
static bool processor_count(CgroupCpuController* cpu, int upper_bound, int& value);
|
||||
static bool processor_count(CgroupCpuController* cpu, int upper_bound, double& value);
|
||||
// Given a memory controller, adjust its path to a point in the hierarchy
|
||||
// that represents the closest memory limit.
|
||||
static void adjust_controller(CgroupMemoryController* m);
|
||||
@ -42,9 +43,7 @@ class CgroupUtil: AllStatic {
|
||||
static physical_memory_size_type get_updated_mem_limit(CgroupMemoryController* m,
|
||||
physical_memory_size_type lowest,
|
||||
physical_memory_size_type upper_bound);
|
||||
static int get_updated_cpu_limit(CgroupCpuController* c,
|
||||
int lowest,
|
||||
int upper_bound);
|
||||
static double get_updated_cpu_limit(CgroupCpuController* c, int lowest, int upper_bound);
|
||||
};
|
||||
|
||||
#endif // CGROUP_UTIL_LINUX_HPP
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -328,8 +328,8 @@ CgroupV1Subsystem::CgroupV1Subsystem(CgroupV1Controller* cpuset,
|
||||
_pids(pids) {
|
||||
CgroupUtil::adjust_controller(memory);
|
||||
CgroupUtil::adjust_controller(cpu);
|
||||
_memory = new CachingCgroupController<CgroupMemoryController>(memory);
|
||||
_cpu = new CachingCgroupController<CgroupCpuController>(cpu);
|
||||
_memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
|
||||
_cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
|
||||
}
|
||||
|
||||
bool CgroupV1Subsystem::is_containerized() {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -214,15 +214,15 @@ class CgroupV1Subsystem: public CgroupSubsystem {
|
||||
const char * container_type() override {
|
||||
return "cgroupv1";
|
||||
}
|
||||
CachingCgroupController<CgroupMemoryController>* memory_controller() override { return _memory; }
|
||||
CachingCgroupController<CgroupCpuController>* cpu_controller() override { return _cpu; }
|
||||
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
|
||||
CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
|
||||
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; }
|
||||
|
||||
private:
|
||||
/* controllers */
|
||||
CachingCgroupController<CgroupMemoryController>* _memory = nullptr;
|
||||
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
|
||||
CgroupV1Controller* _cpuset = nullptr;
|
||||
CachingCgroupController<CgroupCpuController>* _cpu = nullptr;
|
||||
CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
|
||||
CgroupV1CpuacctController* _cpuacct = nullptr;
|
||||
CgroupV1Controller* _pids = nullptr;
|
||||
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2020, 2025, Red Hat Inc.
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -156,8 +156,8 @@ CgroupV2Subsystem::CgroupV2Subsystem(CgroupV2MemoryController * memory,
|
||||
_unified(unified) {
|
||||
CgroupUtil::adjust_controller(memory);
|
||||
CgroupUtil::adjust_controller(cpu);
|
||||
_memory = new CachingCgroupController<CgroupMemoryController>(memory);
|
||||
_cpu = new CachingCgroupController<CgroupCpuController>(cpu);
|
||||
_memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
|
||||
_cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
|
||||
_cpuacct = cpuacct;
|
||||
}
|
||||
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2020, 2024, Red Hat Inc.
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -152,8 +152,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
|
||||
/* One unified controller */
|
||||
CgroupV2Controller _unified;
|
||||
/* Caching wrappers for cpu/memory metrics */
|
||||
CachingCgroupController<CgroupMemoryController>* _memory = nullptr;
|
||||
CachingCgroupController<CgroupCpuController>* _cpu = nullptr;
|
||||
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
|
||||
CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
|
||||
|
||||
CgroupCpuacctController* _cpuacct = nullptr;
|
||||
|
||||
@ -175,8 +175,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
|
||||
const char * container_type() override {
|
||||
return "cgroupv2";
|
||||
}
|
||||
CachingCgroupController<CgroupMemoryController>* memory_controller() override { return _memory; }
|
||||
CachingCgroupController<CgroupCpuController>* cpu_controller() override { return _cpu; }
|
||||
CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
|
||||
CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
|
||||
CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; };
|
||||
};
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -86,8 +86,8 @@ void OSContainer::init() {
|
||||
// 2.) On a physical Linux system with a limit enforced by other means (like systemd slice)
|
||||
physical_memory_size_type mem_limit_val = value_unlimited;
|
||||
(void)memory_limit_in_bytes(mem_limit_val); // discard error and use default
|
||||
int host_cpus = os::Linux::active_processor_count();
|
||||
int cpus = host_cpus;
|
||||
double host_cpus = os::Linux::active_processor_count();
|
||||
double cpus = host_cpus;
|
||||
(void)active_processor_count(cpus); // discard error and use default
|
||||
any_mem_cpu_limit_present = mem_limit_val != value_unlimited || host_cpus != cpus;
|
||||
if (any_mem_cpu_limit_present) {
|
||||
@ -127,8 +127,7 @@ bool OSContainer::available_memory_in_bytes(physical_memory_size_type& value) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_swap,
|
||||
physical_memory_size_type& value) {
|
||||
bool OSContainer::available_swap_in_bytes(physical_memory_size_type& value) {
|
||||
physical_memory_size_type mem_limit = 0;
|
||||
physical_memory_size_type mem_swap_limit = 0;
|
||||
if (memory_limit_in_bytes(mem_limit) &&
|
||||
@ -179,8 +178,7 @@ bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_sw
|
||||
assert(num < 25, "buffer too small");
|
||||
mem_limit_buf[num] = '\0';
|
||||
log_trace(os,container)("OSContainer::available_swap_in_bytes: container_swap_limit=%s"
|
||||
" container_mem_limit=%s, host_free_swap: " PHYS_MEM_TYPE_FORMAT,
|
||||
mem_swap_buf, mem_limit_buf, host_free_swap);
|
||||
" container_mem_limit=%s", mem_swap_buf, mem_limit_buf);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
@ -252,7 +250,7 @@ char * OSContainer::cpu_cpuset_memory_nodes() {
|
||||
return cgroup_subsystem->cpu_cpuset_memory_nodes();
|
||||
}
|
||||
|
||||
bool OSContainer::active_processor_count(int& value) {
|
||||
bool OSContainer::active_processor_count(double& value) {
|
||||
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
|
||||
return cgroup_subsystem->active_processor_count(value);
|
||||
}
|
||||
@ -291,11 +289,13 @@ template<typename T> struct metric_fmt;
|
||||
template<> struct metric_fmt<unsigned long long int> { static constexpr const char* fmt = "%llu"; };
|
||||
template<> struct metric_fmt<unsigned long int> { static constexpr const char* fmt = "%lu"; };
|
||||
template<> struct metric_fmt<int> { static constexpr const char* fmt = "%d"; };
|
||||
template<> struct metric_fmt<double> { static constexpr const char* fmt = "%.2f"; };
|
||||
template<> struct metric_fmt<const char*> { static constexpr const char* fmt = "%s"; };
|
||||
|
||||
template void OSContainer::print_container_metric<unsigned long long int>(outputStream*, const char*, unsigned long long int, const char*);
|
||||
template void OSContainer::print_container_metric<unsigned long int>(outputStream*, const char*, unsigned long int, const char*);
|
||||
template void OSContainer::print_container_metric<int>(outputStream*, const char*, int, const char*);
|
||||
template void OSContainer::print_container_metric<double>(outputStream*, const char*, double, const char*);
|
||||
template void OSContainer::print_container_metric<const char*>(outputStream*, const char*, const char*, const char*);
|
||||
|
||||
template <typename T>
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -72,8 +72,7 @@ class OSContainer: AllStatic {
|
||||
static const char * container_type();
|
||||
|
||||
static bool available_memory_in_bytes(physical_memory_size_type& value);
|
||||
static bool available_swap_in_bytes(physical_memory_size_type host_free_swap,
|
||||
physical_memory_size_type& value);
|
||||
static bool available_swap_in_bytes(physical_memory_size_type& value);
|
||||
static bool memory_limit_in_bytes(physical_memory_size_type& value);
|
||||
static bool memory_and_swap_limit_in_bytes(physical_memory_size_type& value);
|
||||
static bool memory_and_swap_usage_in_bytes(physical_memory_size_type& value);
|
||||
@ -84,7 +83,7 @@ class OSContainer: AllStatic {
|
||||
static bool rss_usage_in_bytes(physical_memory_size_type& value);
|
||||
static bool cache_usage_in_bytes(physical_memory_size_type& value);
|
||||
|
||||
static bool active_processor_count(int& value);
|
||||
static bool active_processor_count(double& value);
|
||||
|
||||
static char * cpu_cpuset_cpus();
|
||||
static char * cpu_cpuset_memory_nodes();
|
||||
|
||||
@ -211,15 +211,58 @@ static bool suppress_primordial_thread_resolution = false;

// utility functions

bool os::is_containerized() {
return OSContainer::is_containerized();
}

bool os::Container::memory_limit(physical_memory_size_type& value) {
physical_memory_size_type result = 0;
if (OSContainer::memory_limit_in_bytes(result) && result != value_unlimited) {
value = result;
return true;
}
return false;
}

bool os::Container::memory_soft_limit(physical_memory_size_type& value) {
physical_memory_size_type result = 0;
if (OSContainer::memory_soft_limit_in_bytes(result) && result != 0 && result != value_unlimited) {
value = result;
return true;
}
return false;
}

bool os::Container::memory_throttle_limit(physical_memory_size_type& value) {
physical_memory_size_type result = 0;
if (OSContainer::memory_throttle_limit_in_bytes(result) && result != value_unlimited) {
value = result;
return true;
}
return false;
}

bool os::Container::used_memory(physical_memory_size_type& value) {
return OSContainer::memory_usage_in_bytes(value);
}

bool os::available_memory(physical_memory_size_type& value) {
if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
if (is_containerized() && Container::available_memory(value)) {
log_trace(os)("available container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}

return Machine::available_memory(value);
}

bool os::Machine::available_memory(physical_memory_size_type& value) {
return Linux::available_memory(value);
}

bool os::Container::available_memory(physical_memory_size_type& value) {
return OSContainer::available_memory_in_bytes(value);
}

bool os::Linux::available_memory(physical_memory_size_type& value) {
physical_memory_size_type avail_mem = 0;

@ -251,11 +294,15 @@ bool os::Linux::available_memory(physical_memory_size_type& value) {
}

bool os::free_memory(physical_memory_size_type& value) {
if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
if (is_containerized() && Container::available_memory(value)) {
log_trace(os)("free container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}

return Machine::free_memory(value);
}

bool os::Machine::free_memory(physical_memory_size_type& value) {
return Linux::free_memory(value);
}

@ -274,21 +321,30 @@ bool os::Linux::free_memory(physical_memory_size_type& value) {
}

bool os::total_swap_space(physical_memory_size_type& value) {
if (OSContainer::is_containerized()) {
physical_memory_size_type mem_swap_limit = value_unlimited;
physical_memory_size_type memory_limit = value_unlimited;
if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
OSContainer::memory_limit_in_bytes(memory_limit)) {
if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
value = mem_swap_limit - memory_limit;
return true;
}
}
} // fallback to the host swap space if the container returned unlimited
if (is_containerized() && Container::total_swap_space(value)) {
return true;
} // fallback to the host swap space if the container value fails
return Machine::total_swap_space(value);
}

bool os::Machine::total_swap_space(physical_memory_size_type& value) {
return Linux::host_swap(value);
}

bool os::Container::total_swap_space(physical_memory_size_type& value) {
physical_memory_size_type mem_swap_limit = value_unlimited;
physical_memory_size_type memory_limit = value_unlimited;
if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
OSContainer::memory_limit_in_bytes(memory_limit)) {
if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
value = mem_swap_limit - memory_limit;
return true;
}
}
return false;
}

static bool host_free_swap_f(physical_memory_size_type& value) {
struct sysinfo si;
int ret = sysinfo(&si);
@ -309,32 +365,45 @@ bool os::free_swap_space(physical_memory_size_type& value) {
return false;
}
physical_memory_size_type host_free_swap_val = MIN2(total_swap_space, host_free_swap);
if (OSContainer::is_containerized()) {
if (OSContainer::available_swap_in_bytes(host_free_swap_val, value)) {
if (is_containerized()) {
if (Container::free_swap_space(value)) {
return true;
}
// Fall through to use host value
log_trace(os,container)("os::free_swap_space: containerized value unavailable"
" returning host value: " PHYS_MEM_TYPE_FORMAT, host_free_swap_val);
}

value = host_free_swap_val;
return true;
}

bool os::Machine::free_swap_space(physical_memory_size_type& value) {
return host_free_swap_f(value);
}

bool os::Container::free_swap_space(physical_memory_size_type& value) {
return OSContainer::available_swap_in_bytes(value);
}

physical_memory_size_type os::physical_memory() {
if (OSContainer::is_containerized()) {
if (is_containerized()) {
physical_memory_size_type mem_limit = value_unlimited;
if (OSContainer::memory_limit_in_bytes(mem_limit) && mem_limit != value_unlimited) {
if (Container::memory_limit(mem_limit) && mem_limit != value_unlimited) {
log_trace(os)("total container memory: " PHYS_MEM_TYPE_FORMAT, mem_limit);
return mem_limit;
}
}

physical_memory_size_type phys_mem = Linux::physical_memory();
physical_memory_size_type phys_mem = Machine::physical_memory();
log_trace(os)("total system memory: " PHYS_MEM_TYPE_FORMAT, phys_mem);
return phys_mem;
}

physical_memory_size_type os::Machine::physical_memory() {
return Linux::physical_memory();
}

// Returns the resident set size (RSS) of the process.
// Falls back to using VmRSS from /proc/self/status if /proc/self/smaps_rollup is unavailable.
// Note: On kernels with memory cgroups or shared memory, VmRSS may underreport RSS.
@ -2439,20 +2508,21 @@ bool os::Linux::print_container_info(outputStream* st) {
OSContainer::print_container_metric(st, "cpu_memory_nodes", p != nullptr ? p : "not supported");
free(p);

int i = -1;
bool supported = OSContainer::active_processor_count(i);
double cpus = -1;
bool supported = OSContainer::active_processor_count(cpus);
if (supported) {
assert(i > 0, "must be");
assert(cpus > 0, "must be");
if (ActiveProcessorCount > 0) {
OSContainer::print_container_metric(st, "active_processor_count", ActiveProcessorCount, "(from -XX:ActiveProcessorCount)");
} else {
OSContainer::print_container_metric(st, "active_processor_count", i);
OSContainer::print_container_metric(st, "active_processor_count", cpus);
}
} else {
OSContainer::print_container_metric(st, "active_processor_count", "not supported");
}


int i = -1;
supported = OSContainer::cpu_quota(i);
if (supported && i > 0) {
OSContainer::print_container_metric(st, "cpu_quota", i);
@ -4737,15 +4807,26 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}

int active_cpus = -1;
if (OSContainer::is_containerized() && OSContainer::active_processor_count(active_cpus)) {
log_trace(os)("active_processor_count: determined by OSContainer: %d",
active_cpus);
} else {
active_cpus = os::Linux::active_processor_count();
if (is_containerized()) {
double cpu_quota;
if (Container::processor_count(cpu_quota)) {
int active_cpus = ceilf(cpu_quota); // Round fractional CPU quota up.
assert(active_cpus <= Machine::active_processor_count(), "must be");
log_trace(os)("active_processor_count: determined by OSContainer: %d",
active_cpus);
return active_cpus;
}
}

return active_cpus;
return Machine::active_processor_count();
}

int os::Machine::active_processor_count() {
return os::Linux::active_processor_count();
}

bool os::Container::processor_count(double& value) {
return OSContainer::active_processor_count(value);
}

static bool should_warn_invalid_processor_id() {
@ -4882,9 +4963,14 @@ int os::open(const char *path, int oflag, int mode) {
oflag |= O_CLOEXEC;

int fd = ::open(path, oflag, mode);
if (fd == -1) return -1;
// No further checking is needed if open() returned an error or
// access mode is not read only.
if (fd == -1 || (oflag & O_ACCMODE) != O_RDONLY) {
return fd;
}

//If the open succeeded, the file might still be a directory
// If the open succeeded and is read only, the file might be a directory
// which the JVM doesn't allow to be read.
{
struct stat buf;
int ret = ::fstat(fd, &buf);

@ -112,6 +112,10 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
} else {
if (!successful_write) {
remove(destfile);
}
}
}
FREE_C_HEAP_ARRAY(char, destfile);
@ -949,6 +953,7 @@ static int create_sharedmem_file(const char* dirname, const char* filename, size
warning("Insufficient space for shared memory file: %s/%s\n", dirname, filename);
}
result = OS_ERR;
remove(filename);
break;
}
}
@ -839,10 +839,18 @@ bool os::available_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}

bool os::Machine::available_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}

bool os::free_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}

bool os::Machine::free_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}

bool os::win32::available_memory(physical_memory_size_type& value) {
// Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
// value if total memory is larger than 4GB
@ -858,7 +866,11 @@ bool os::win32::available_memory(physical_memory_size_type& value) {
}
}

bool os::total_swap_space(physical_memory_size_type& value) {
bool os::total_swap_space(physical_memory_size_type& value) {
return Machine::total_swap_space(value);
}

bool os::Machine::total_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@ -872,6 +884,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}

bool os::free_swap_space(physical_memory_size_type& value) {
return Machine::free_swap_space(value);
}

bool os::Machine::free_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@ -888,6 +904,10 @@ physical_memory_size_type os::physical_memory() {
return win32::physical_memory();
}

physical_memory_size_type os::Machine::physical_memory() {
return win32::physical_memory();
}

size_t os::rss() {
size_t rss = 0;
PROCESS_MEMORY_COUNTERS_EX pmex;
@ -911,6 +931,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}

return Machine::active_processor_count();
}

int os::Machine::active_processor_count() {
bool schedules_all_processor_groups = win32::is_windows_11_or_greater() || win32::is_windows_server_2022_or_greater();
if (UseAllWindowsProcessorGroups && !schedules_all_processor_groups && !win32::processor_group_warning_displayed()) {
win32::set_processor_group_warning_displayed(true);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -614,6 +614,10 @@ struct StringTableDeleteCheck : StackObj {
};

void StringTable::clean_dead_entries(JavaThread* jt) {
// BulkDeleteTask::prepare() may take ConcurrentHashTableResize_lock (nosafepoint-2).
// When NativeHeapTrimmer is enabled, SuspendMark may take NativeHeapTrimmer::_lock (nosafepoint).
// Take SuspendMark first to keep lock order and avoid deadlock.
NativeHeapTrimmer::SuspendMark sm("stringtable");
StringTableHash::BulkDeleteTask bdt(_local_table);
if (!bdt.prepare(jt)) {
return;
@ -621,7 +625,6 @@ void StringTable::clean_dead_entries(JavaThread* jt) {

StringTableDeleteCheck stdc;
StringTableDoDelete stdd;
NativeHeapTrimmer::SuspendMark sm("stringtable");
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, stringtable, perf));
while(bdt.do_task(jt, stdc, stdd)) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -763,6 +763,10 @@ struct SymbolTableDeleteCheck : StackObj {
};

void SymbolTable::clean_dead_entries(JavaThread* jt) {
// BulkDeleteTask::prepare() may take ConcurrentHashTableResize_lock (nosafepoint-2).
// When NativeHeapTrimmer is enabled, SuspendMark may take NativeHeapTrimmer::_lock (nosafepoint).
// Take SuspendMark first to keep lock order and avoid deadlock.
NativeHeapTrimmer::SuspendMark sm("symboltable");
SymbolTableHash::BulkDeleteTask bdt(_local_table);
if (!bdt.prepare(jt)) {
return;
@ -770,7 +774,6 @@ void SymbolTable::clean_dead_entries(JavaThread* jt) {

SymbolTableDeleteCheck stdc;
SymbolTableDoDelete stdd;
NativeHeapTrimmer::SuspendMark sm("symboltable");
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, symboltable, perf));
while (bdt.do_task(jt, stdc, stdd)) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1371,6 +1371,7 @@ void AOTCodeAddressTable::init_extrs() {
SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
#endif
#if INCLUDE_ZGC
SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
#if defined(AMD64)
SET_ADDRESS(_extrs, &ZPointerLoadShift);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -209,6 +209,17 @@ void G1Arguments::initialize() {
FLAG_SET_DEFAULT(GCTimeRatio, 24);
}

// Do not interfere with GC-Pressure driven heap resizing unless the user
// explicitly sets otherwise. G1 heap sizing should be free to grow or shrink
// the heap based on GC pressure, rather than being forced to satisfy
// MinHeapFreeRatio or MaxHeapFreeRatio defaults that the user did not set.
if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
}
if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
}

// Below, we might need to calculate the pause time interval based on
// the pause target. When we do so we are going to give G1 maximum
// flexibility and allow it to do pauses when it needs to. So, we'll

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1024,10 +1024,6 @@ size_t G1CardSet::num_containers() {
return cl._count;
}

G1CardSetCoarsenStats G1CardSet::coarsen_stats() {
return _coarsen_stats;
}

void G1CardSet::print_coarsen_stats(outputStream* out) {
_last_coarsen_stats.subtract_from(_coarsen_stats);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,20 +44,20 @@ G1CardTableClaimTable::~G1CardTableClaimTable() {

void G1CardTableClaimTable::initialize(uint max_reserved_regions) {
assert(_card_claims == nullptr, "Must not be initialized twice");
_card_claims = NEW_C_HEAP_ARRAY(uint, max_reserved_regions, mtGC);
_card_claims = NEW_C_HEAP_ARRAY(Atomic<uint>, max_reserved_regions, mtGC);
_max_reserved_regions = max_reserved_regions;
reset_all_to_unclaimed();
}

void G1CardTableClaimTable::reset_all_to_unclaimed() {
for (uint i = 0; i < _max_reserved_regions; i++) {
_card_claims[i] = 0;
_card_claims[i].store_relaxed(0);
}
}

void G1CardTableClaimTable::reset_all_to_claimed() {
for (uint i = 0; i < _max_reserved_regions; i++) {
_card_claims[i] = (uint)G1HeapRegion::CardsPerRegion;
_card_claims[i].store_relaxed((uint)G1HeapRegion::CardsPerRegion);
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@

#include "gc/g1/g1CardTable.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"

class G1HeapRegionClosure;

@ -45,7 +46,7 @@ class G1CardTableClaimTable : public CHeapObj<mtGC> {

// Card table iteration claim values for every heap region, from 0 (completely unclaimed)
// to (>=) G1HeapRegion::CardsPerRegion (completely claimed).
uint volatile* _card_claims;
Atomic<uint>* _card_claims;

uint _cards_per_chunk; // For conversion between card index and chunk index.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,26 +29,25 @@

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "runtime/atomicAccess.hpp"

bool G1CardTableClaimTable::has_unclaimed_cards(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return AtomicAccess::load(&_card_claims[region]) < G1HeapRegion::CardsPerRegion;
return _card_claims[region].load_relaxed() < G1HeapRegion::CardsPerRegion;
}

void G1CardTableClaimTable::reset_to_unclaimed(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
AtomicAccess::store(&_card_claims[region], 0u);
_card_claims[region].store_relaxed(0u);
}

uint G1CardTableClaimTable::claim_cards(uint region, uint increment) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return AtomicAccess::fetch_then_add(&_card_claims[region], increment, memory_order_relaxed);
return _card_claims[region].fetch_then_add(increment, memory_order_relaxed);
}

uint G1CardTableClaimTable::claim_chunk(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return AtomicAccess::fetch_then_add(&_card_claims[region], cards_per_chunk(), memory_order_relaxed);
return _card_claims[region].fetch_then_add(cards_per_chunk(), memory_order_relaxed);
}

uint G1CardTableClaimTable::claim_all_cards(uint region) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
#include "gc/g1/g1HeapRegion.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"

@ -60,7 +60,7 @@ class G1CodeRootSetHashTable : public CHeapObj<mtGC> {
HashTable _table;
HashTableScanTask _table_scanner;

size_t volatile _num_entries;
Atomic<size_t> _num_entries;

bool is_empty() const { return number_of_entries() == 0; }

@ -120,7 +120,7 @@ public:
bool grow_hint = false;
bool inserted = _table.insert(Thread::current(), lookup, method, &grow_hint);
if (inserted) {
AtomicAccess::inc(&_num_entries);
_num_entries.add_then_fetch(1u);
}
if (grow_hint) {
_table.grow(Thread::current());
@ -131,7 +131,7 @@ public:
HashTableLookUp lookup(method);
bool removed = _table.remove(Thread::current(), lookup);
if (removed) {
AtomicAccess::dec(&_num_entries);
_num_entries.sub_then_fetch(1u);
}
return removed;
}
@ -182,7 +182,7 @@ public:
guarantee(succeeded, "unable to clean table");

if (num_deleted != 0) {
size_t current_size = AtomicAccess::sub(&_num_entries, num_deleted);
size_t current_size = _num_entries.sub_then_fetch(num_deleted);
shrink_to_match(current_size);
}
}
@ -226,7 +226,7 @@ public:

size_t mem_size() { return sizeof(*this) + _table.get_mem_size(Thread::current()); }

size_t number_of_entries() const { return AtomicAccess::load(&_num_entries); }
size_t number_of_entries() const { return _num_entries.load_relaxed(); }
};

uintx G1CodeRootSetHashTable::HashTableLookUp::get_hash() const {

@ -103,7 +103,6 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
@ -687,8 +686,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
// before the allocation is that we avoid having to keep track of the newly
// allocated memory while we do a GC.
// Only try that if we can actually perform a GC.
if (is_init_completed() && policy()->need_to_start_conc_mark("concurrent humongous allocation",
word_size)) {
if (is_init_completed() &&
policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
try_collect(word_size, GCCause::_g1_humongous_allocation, collection_counters(this));
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,6 +54,7 @@
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/bitMap.hpp"
@ -124,7 +125,7 @@ class G1JavaThreadsListClaimer : public StackObj {
ThreadsListHandle _list;
uint _claim_step;

volatile uint _cur_claim;
Atomic<uint> _cur_claim;

// Attempts to claim _claim_step JavaThreads, returning an array of claimed
// JavaThread* with count elements. Returns null (and a zero count) if there

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,6 @@
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/stackChunkOop.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "utilities/bitMap.inline.hpp"

@ -53,10 +52,10 @@ inline bool G1STWIsAliveClosure::do_object_b(oop p) {

inline JavaThread* const* G1JavaThreadsListClaimer::claim(uint& count) {
count = 0;
if (AtomicAccess::load(&_cur_claim) >= _list.length()) {
if (_cur_claim.load_relaxed() >= _list.length()) {
return nullptr;
}
uint claim = AtomicAccess::fetch_then_add(&_cur_claim, _claim_step);
uint claim = _cur_claim.fetch_then_add(_claim_step);
if (claim >= _list.length()) {
return nullptr;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/shared/space.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "utilities/quickSort.hpp"

// Determine collection set candidates (from marking): For all regions determine
@ -50,7 +50,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {

G1HeapRegion** _data;

uint volatile _cur_claim_idx;
Atomic<uint> _cur_claim_idx;

static int compare_region_gc_efficiency(G1HeapRegion** rr1, G1HeapRegion** rr2) {
G1HeapRegion* r1 = *rr1;
@ -105,7 +105,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {

// Claim a new chunk, returning its bounds [from, to[.
void claim_chunk(uint& from, uint& to) {
uint result = AtomicAccess::add(&_cur_claim_idx, _chunk_size);
uint result = _cur_claim_idx.add_then_fetch(_chunk_size);
assert(_max_size > result - 1,
"Array too small, is %u should be %u with chunk size %u.",
_max_size, result, _chunk_size);
@ -121,14 +121,15 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
}

void sort_by_gc_efficiency() {
if (_cur_claim_idx == 0) {
uint length = _cur_claim_idx.load_relaxed();
if (length == 0) {
return;
}
for (uint i = _cur_claim_idx; i < _max_size; i++) {
for (uint i = length; i < _max_size; i++) {
assert(_data[i] == nullptr, "must be");
}
qsort(_data, _cur_claim_idx, sizeof(_data[0]), (_sort_Fn)compare_region_gc_efficiency);
for (uint i = _cur_claim_idx; i < _max_size; i++) {
qsort(_data, length, sizeof(_data[0]), (_sort_Fn)compare_region_gc_efficiency);
for (uint i = length; i < _max_size; i++) {
assert(_data[i] == nullptr, "must be");
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,6 @@
#include "nmt/memTracker.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
@ -148,25 +147,25 @@ bool G1CMMarkStack::initialize() {
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::ChunkAllocator::allocate_new_chunk() {
if (_size >= _max_capacity) {
if (_size.load_relaxed() >= _max_capacity) {
return nullptr;
}

size_t cur_idx = AtomicAccess::fetch_then_add(&_size, 1u);
size_t cur_idx = _size.fetch_then_add(1u);

if (cur_idx >= _max_capacity) {
return nullptr;
}

size_t bucket = get_bucket(cur_idx);
if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) {
if (_buckets[bucket].load_acquire() == nullptr) {
if (!_should_grow) {
// Prefer to restart the CM.
return nullptr;
}

MutexLocker x(G1MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) {
if (_buckets[bucket].load_acquire() == nullptr) {
size_t desired_capacity = bucket_size(bucket) * 2;
if (!try_expand_to(desired_capacity)) {
return nullptr;
@ -175,7 +174,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::ChunkAllocator::allocate_new_
}

size_t bucket_idx = get_bucket_index(cur_idx);
TaskQueueEntryChunk* result = ::new (&_buckets[bucket][bucket_idx]) TaskQueueEntryChunk;
TaskQueueEntryChunk* result = ::new (&_buckets[bucket].load_relaxed()[bucket_idx]) TaskQueueEntryChunk;
result->next = nullptr;
return result;
}
@ -197,10 +196,10 @@ bool G1CMMarkStack::ChunkAllocator::initialize(size_t initial_capacity, size_t m
_max_capacity = max_capacity;
_num_buckets = get_bucket(_max_capacity) + 1;

_buckets = NEW_C_HEAP_ARRAY(TaskQueueEntryChunk*, _num_buckets, mtGC);
_buckets = NEW_C_HEAP_ARRAY(Atomic<TaskQueueEntryChunk*>, _num_buckets, mtGC);

for (size_t i = 0; i < _num_buckets; i++) {
_buckets[i] = nullptr;
_buckets[i].store_relaxed(nullptr);
}

size_t new_capacity = bucket_size(0);
@ -240,9 +239,9 @@ G1CMMarkStack::ChunkAllocator::~ChunkAllocator() {
}

for (size_t i = 0; i < _num_buckets; i++) {
if (_buckets[i] != nullptr) {
MmapArrayAllocator<TaskQueueEntryChunk>::free(_buckets[i], bucket_size(i));
_buckets[i] = nullptr;
if (_buckets[i].load_relaxed() != nullptr) {
MmapArrayAllocator<TaskQueueEntryChunk>::free(_buckets[i].load_relaxed(), bucket_size(i));
_buckets[i].store_relaxed(nullptr);
}
}

@ -259,7 +258,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
// and the new capacity (new_capacity). This step ensures that there are no gaps in the
// array and that the capacity accurately reflects the reserved memory.
for (; i <= highest_bucket; i++) {
if (AtomicAccess::load_acquire(&_buckets[i]) != nullptr) {
if (_buckets[i].load_acquire() != nullptr) {
continue; // Skip over already allocated buckets.
}

@ -279,7 +278,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
return false;
}
_capacity += bucket_capacity;
AtomicAccess::release_store(&_buckets[i], bucket_base);
_buckets[i].release_store(bucket_base);
}
return true;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,6 +37,7 @@
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "utilities/compilerWarnings.hpp"
#include "utilities/numberSeq.hpp"

@ -172,9 +173,9 @@ private:
size_t _capacity;
size_t _num_buckets;
bool _should_grow;
TaskQueueEntryChunk* volatile* _buckets;
Atomic<TaskQueueEntryChunk*>* _buckets;
char _pad0[DEFAULT_PADDING_SIZE];
volatile size_t _size;
Atomic<size_t> _size;
char _pad4[DEFAULT_PADDING_SIZE - sizeof(size_t)];

size_t bucket_size(size_t bucket) {
@ -212,7 +213,7 @@ private:
bool initialize(size_t initial_capacity, size_t max_capacity);

void reset() {
_size = 0;
_size.store_relaxed(0);
_should_grow = false;
}

@ -556,14 +557,14 @@ public:
// mark_in_bitmap call. Updates various statistics data.
void add_to_liveness(uint worker_id, oop const obj, size_t size);
// Did the last marking find a live object between bottom and TAMS?
bool contains_live_object(uint region) const { return _region_mark_stats[region]._live_words != 0; }
bool contains_live_object(uint region) const { return _region_mark_stats[region].live_words() != 0; }
// Live bytes in the given region as determined by concurrent marking, i.e. the amount of
// live bytes between bottom and TAMS.
size_t live_bytes(uint region) const { return _region_mark_stats[region]._live_words * HeapWordSize; }
size_t live_bytes(uint region) const { return _region_mark_stats[region].live_words() * HeapWordSize; }
// Set live bytes for concurrent marking.
void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words = live_bytes / HeapWordSize; }
void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words.store_relaxed(live_bytes / HeapWordSize); }
// Approximate number of incoming references found during marking.
size_t incoming_refs(uint region) const { return _region_mark_stats[region]._incoming_refs; }
size_t incoming_refs(uint region) const { return _region_mark_stats[region].incoming_refs(); }

// Update the TAMS for the given region to the current top.
inline void update_top_at_mark_start(G1HeapRegion* r);

@ -1,5 +1,6 @@
/*
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +29,6 @@
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/bitMap.inline.hpp"

G1EvacFailureRegions::G1EvacFailureRegions() :
@ -43,7 +43,7 @@ G1EvacFailureRegions::~G1EvacFailureRegions() {
}

void G1EvacFailureRegions::pre_collection(uint max_regions) {
AtomicAccess::store(&_num_regions_evac_failed, 0u);
_num_regions_evac_failed.store_relaxed(0u);
_regions_evac_failed.resize(max_regions);
_regions_pinned.resize(max_regions);
_regions_alloc_failed.resize(max_regions);
@ -69,6 +69,6 @@ void G1EvacFailureRegions::par_iterate(G1HeapRegionClosure* closure,
G1CollectedHeap::heap()->par_iterate_regions_array(closure,
hrclaimer,
_evac_failed_regions,
AtomicAccess::load(&_num_regions_evac_failed),
num_regions_evac_failed(),
worker_id);
}

@ -1,5 +1,6 @@
/*
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +26,7 @@
#ifndef SHARE_GC_G1_G1EVACFAILUREREGIONS_HPP
#define SHARE_GC_G1_G1EVACFAILUREREGIONS_HPP

#include "runtime/atomic.hpp"
#include "utilities/bitMap.hpp"

class G1AbstractSubTask;
@ -53,14 +55,14 @@ class G1EvacFailureRegions {
// Evacuation failed regions (indexes) in the current collection.
uint* _evac_failed_regions;
// Number of regions evacuation failed in the current collection.
volatile uint _num_regions_evac_failed;
Atomic<uint> _num_regions_evac_failed;

public:
G1EvacFailureRegions();
~G1EvacFailureRegions();

uint get_region_idx(uint idx) const {
assert(idx < _num_regions_evac_failed, "precondition");
assert(idx < _num_regions_evac_failed.load_relaxed(), "precondition");
return _evac_failed_regions[idx];
}


@ -1,5 +1,6 @@
/*
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,10 +30,9 @@

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "runtime/atomicAccess.hpp"

uint G1EvacFailureRegions::num_regions_evac_failed() const {
return AtomicAccess::load(&_num_regions_evac_failed);
return _num_regions_evac_failed.load_relaxed();
}

bool G1EvacFailureRegions::has_regions_evac_failed() const {
@ -57,7 +57,7 @@ bool G1EvacFailureRegions::record(uint worker_id, uint region_idx, bool cause_pi
bool success = _regions_evac_failed.par_set_bit(region_idx,
memory_order_relaxed);
if (success) {
size_t offset = AtomicAccess::fetch_then_add(&_num_regions_evac_failed, 1u);
size_t offset = _num_regions_evac_failed.fetch_then_add(1u);
_evac_failed_regions[offset] = region_idx;

G1CollectedHeap* g1h = G1CollectedHeap::heap();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -122,7 +122,7 @@ public:
ReferenceProcessor* reference_processor();
size_t live_words(uint region_index) const {
assert(region_index < _heap->max_num_regions(), "sanity");
return _live_stats[region_index]._live_words;
return _live_stats[region_index].live_words();
}

void before_marking_update_attribute_table(G1HeapRegion* hr);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,6 @@
#include "gc/shared/weakProcessor.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/atomicAccess.hpp"

class G1AdjustLiveClosure : public StackObj {
G1AdjustClosure* _adjust_closure;

@ -108,7 +108,7 @@ void G1FullGCMarker::follow_array_chunk(objArrayOop array, int index) {
push_objarray(array, end_index);
}

array->oop_iterate_range(mark_closure(), beg_index, end_index);
array->oop_iterate_elements_range(mark_closure(), beg_index, end_index);
}

inline void G1FullGCMarker::follow_object(oop obj) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,6 @@
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,6 @@
#include "gc/g1/g1CodeRootSet.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/bitMap.hpp"
@ -123,9 +122,6 @@ public:

static void initialize(MemRegion reserved);

// Coarsening statistics since VM start.
static G1CardSetCoarsenStats coarsen_stats() { return G1CardSet::coarsen_stats(); }

inline uintptr_t to_card(OopOrNarrowOopStar from) const;

private:

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,6 @@
#include "gc/g1/g1CardSet.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/bitMap.inline.hpp"

void G1HeapRegionRemSet::set_state_untracked() {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,7 +24,6 @@

#include "gc/g1/g1MonotonicArena.inline.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/vmOperations.hpp"
|
||||
#include "utilities/globalCounter.inline.hpp"
|
||||
|
||||
@ -61,13 +60,13 @@ void G1MonotonicArena::SegmentFreeList::bulk_add(Segment& first,
|
||||
size_t num,
|
||||
size_t mem_size) {
|
||||
_list.prepend(first, last);
|
||||
AtomicAccess::add(&_num_segments, num, memory_order_relaxed);
|
||||
AtomicAccess::add(&_mem_size, mem_size, memory_order_relaxed);
|
||||
_num_segments.add_then_fetch(num, memory_order_relaxed);
|
||||
_mem_size.add_then_fetch(mem_size, memory_order_relaxed);
|
||||
}
|
||||
|
||||
void G1MonotonicArena::SegmentFreeList::print_on(outputStream* out, const char* prefix) {
|
||||
out->print_cr("%s: segments %zu size %zu",
|
||||
prefix, AtomicAccess::load(&_num_segments), AtomicAccess::load(&_mem_size));
|
||||
prefix, _num_segments.load_relaxed(), _mem_size.load_relaxed());
|
||||
}
|
||||
|
||||
G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get_all(size_t& num_segments,
|
||||
@ -75,12 +74,12 @@ G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get_all(size_t& nu
|
||||
GlobalCounter::CriticalSection cs(Thread::current());
|
||||
|
||||
Segment* result = _list.pop_all();
|
||||
num_segments = AtomicAccess::load(&_num_segments);
|
||||
mem_size = AtomicAccess::load(&_mem_size);
|
||||
num_segments = _num_segments.load_relaxed();
|
||||
mem_size = _mem_size.load_relaxed();
|
||||
|
||||
if (result != nullptr) {
|
||||
AtomicAccess::sub(&_num_segments, num_segments, memory_order_relaxed);
|
||||
AtomicAccess::sub(&_mem_size, mem_size, memory_order_relaxed);
|
||||
_num_segments.sub_then_fetch(num_segments, memory_order_relaxed);
|
||||
_mem_size.sub_then_fetch(mem_size, memory_order_relaxed);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
@ -96,8 +95,8 @@ void G1MonotonicArena::SegmentFreeList::free_all() {
|
||||
Segment::delete_segment(cur);
|
||||
}
|
||||
|
||||
AtomicAccess::sub(&_num_segments, num_freed, memory_order_relaxed);
|
||||
AtomicAccess::sub(&_mem_size, mem_size_freed, memory_order_relaxed);
|
||||
_num_segments.sub_then_fetch(num_freed, memory_order_relaxed);
|
||||
_mem_size.sub_then_fetch(mem_size_freed, memory_order_relaxed);
|
||||
}
|
||||
|
||||
G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
|
||||
@ -115,7 +114,7 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
|
||||
}
|
||||
|
||||
// Install it as current allocation segment.
|
||||
Segment* old = AtomicAccess::cmpxchg(&_first, prev, next);
|
||||
Segment* old = _first.compare_exchange(prev, next);
|
||||
if (old != prev) {
|
||||
// Somebody else installed the segment, use that one.
|
||||
Segment::delete_segment(next);
|
||||
@ -126,9 +125,9 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
|
||||
_last = next;
|
||||
}
|
||||
// Successfully installed the segment into the list.
|
||||
AtomicAccess::inc(&_num_segments, memory_order_relaxed);
|
||||
AtomicAccess::add(&_mem_size, next->mem_size(), memory_order_relaxed);
|
||||
AtomicAccess::add(&_num_total_slots, next->num_slots(), memory_order_relaxed);
|
||||
_num_segments.add_then_fetch(1u, memory_order_relaxed);
|
||||
_mem_size.add_then_fetch(next->mem_size(), memory_order_relaxed);
|
||||
_num_total_slots.add_then_fetch(next->num_slots(), memory_order_relaxed);
|
||||
return next;
|
||||
}
|
||||
}
|
||||
@ -155,7 +154,7 @@ uint G1MonotonicArena::slot_size() const {
|
||||
}
|
||||
|
||||
void G1MonotonicArena::drop_all() {
|
||||
Segment* cur = AtomicAccess::load_acquire(&_first);
|
||||
Segment* cur = _first.load_acquire();
|
||||
|
||||
if (cur != nullptr) {
|
||||
assert(_last != nullptr, "If there is at least one segment, there must be a last one.");
|
||||
@ -175,25 +174,25 @@ void G1MonotonicArena::drop_all() {
|
||||
cur = next;
|
||||
}
|
||||
#endif
|
||||
assert(num_segments == _num_segments, "Segment count inconsistent %u %u", num_segments, _num_segments);
|
||||
assert(mem_size == _mem_size, "Memory size inconsistent");
|
||||
assert(num_segments == _num_segments.load_relaxed(), "Segment count inconsistent %u %u", num_segments, _num_segments.load_relaxed());
|
||||
assert(mem_size == _mem_size.load_relaxed(), "Memory size inconsistent");
|
||||
assert(last == _last, "Inconsistent last segment");
|
||||
|
||||
_segment_free_list->bulk_add(*first, *_last, _num_segments, _mem_size);
|
||||
_segment_free_list->bulk_add(*first, *_last, _num_segments.load_relaxed(), _mem_size.load_relaxed());
|
||||
}
|
||||
|
||||
_first = nullptr;
|
||||
_first.store_relaxed(nullptr);
|
||||
_last = nullptr;
|
||||
_num_segments = 0;
|
||||
_mem_size = 0;
|
||||
_num_total_slots = 0;
|
||||
_num_allocated_slots = 0;
|
||||
_num_segments.store_relaxed(0);
|
||||
_mem_size.store_relaxed(0);
|
||||
_num_total_slots.store_relaxed(0);
|
||||
_num_allocated_slots.store_relaxed(0);
|
||||
}
|
||||
|
||||
void* G1MonotonicArena::allocate() {
|
||||
assert(slot_size() > 0, "instance size not set.");
|
||||
|
||||
Segment* cur = AtomicAccess::load_acquire(&_first);
|
||||
Segment* cur = _first.load_acquire();
|
||||
if (cur == nullptr) {
|
||||
cur = new_segment(cur);
|
||||
}
|
||||
@ -201,7 +200,7 @@ void* G1MonotonicArena::allocate() {
|
||||
while (true) {
|
||||
void* slot = cur->allocate_slot();
|
||||
if (slot != nullptr) {
|
||||
AtomicAccess::inc(&_num_allocated_slots, memory_order_relaxed);
|
||||
_num_allocated_slots.add_then_fetch(1u, memory_order_relaxed);
|
||||
guarantee(is_aligned(slot, _alloc_options->slot_alignment()),
|
||||
"result " PTR_FORMAT " not aligned at %u", p2i(slot), _alloc_options->slot_alignment());
|
||||
return slot;
|
||||
@ -213,7 +212,7 @@ void* G1MonotonicArena::allocate() {
|
||||
}
|
||||
|
||||
uint G1MonotonicArena::num_segments() const {
|
||||
return AtomicAccess::load(&_num_segments);
|
||||
return _num_segments.load_relaxed();
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
@ -238,7 +237,7 @@ uint G1MonotonicArena::calculate_length() const {
|
||||
|
||||
template <typename SegmentClosure>
|
||||
void G1MonotonicArena::iterate_segments(SegmentClosure& closure) const {
|
||||
Segment* cur = AtomicAccess::load_acquire(&_first);
|
||||
Segment* cur = _first.load_acquire();
|
||||
|
||||
assert((cur != nullptr) == (_last != nullptr),
|
||||
"If there is at least one segment, there must be a last one");
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -28,6 +28,7 @@
|
||||
|
||||
#include "gc/shared/freeListAllocator.hpp"
|
||||
#include "nmt/memTag.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/lockFreeStack.hpp"
|
||||
|
||||
@ -65,27 +66,27 @@ private:
|
||||
// AllocOptions provides parameters for Segment sizing and expansion.
|
||||
const AllocOptions* _alloc_options;
|
||||
|
||||
Segment* volatile _first; // The (start of the) list of all segments.
|
||||
Segment* _last; // The last segment of the list of all segments.
|
||||
volatile uint _num_segments; // Number of assigned segments to this allocator.
|
||||
volatile size_t _mem_size; // Memory used by all segments.
|
||||
Atomic<Segment*> _first; // The (start of the) list of all segments.
|
||||
Segment* _last; // The last segment of the list of all segments.
|
||||
Atomic<uint> _num_segments; // Number of assigned segments to this allocator.
|
||||
Atomic<size_t> _mem_size; // Memory used by all segments.
|
||||
|
||||
SegmentFreeList* _segment_free_list; // The global free segment list to preferentially
|
||||
// get new segments from.
|
||||
|
||||
volatile uint _num_total_slots; // Number of slots available in all segments (allocated + not yet used).
|
||||
volatile uint _num_allocated_slots; // Number of total slots allocated ever (including free and pending).
|
||||
Atomic<uint> _num_total_slots; // Number of slots available in all segments (allocated + not yet used).
|
||||
Atomic<uint> _num_allocated_slots; // Number of total slots allocated ever (including free and pending).
|
||||
|
||||
inline Segment* new_segment(Segment* const prev);
|
||||
|
||||
DEBUG_ONLY(uint calculate_length() const;)
|
||||
|
||||
public:
|
||||
const Segment* first_segment() const { return AtomicAccess::load(&_first); }
|
||||
const Segment* first_segment() const { return _first.load_relaxed(); }
|
||||
|
||||
uint num_total_slots() const { return AtomicAccess::load(&_num_total_slots); }
|
||||
uint num_total_slots() const { return _num_total_slots.load_relaxed(); }
|
||||
uint num_allocated_slots() const {
|
||||
uint allocated = AtomicAccess::load(&_num_allocated_slots);
|
||||
uint allocated = _num_allocated_slots.load_relaxed();
|
||||
assert(calculate_length() == allocated, "Must be");
|
||||
return allocated;
|
||||
}
|
||||
@ -116,11 +117,11 @@ static constexpr uint SegmentPayloadMaxAlignment = 8;
|
||||
class alignas(SegmentPayloadMaxAlignment) G1MonotonicArena::Segment {
|
||||
const uint _slot_size;
|
||||
const uint _num_slots;
|
||||
Segment* volatile _next;
|
||||
Atomic<Segment*> _next;
|
||||
// Index into the next free slot to allocate into. Full if equal (or larger)
|
||||
// to _num_slots (can be larger because we atomically increment this value and
|
||||
// check only afterwards if the allocation has been successful).
|
||||
uint volatile _next_allocate;
|
||||
Atomic<uint> _next_allocate;
|
||||
const MemTag _mem_tag;
|
||||
|
||||
static size_t header_size() { return align_up(sizeof(Segment), SegmentPayloadMaxAlignment); }
|
||||
@ -139,21 +140,21 @@ class alignas(SegmentPayloadMaxAlignment) G1MonotonicArena::Segment {
|
||||
Segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag);
|
||||
~Segment() = default;
|
||||
public:
|
||||
Segment* volatile* next_addr() { return &_next; }
|
||||
Atomic<Segment*>* next_addr() { return &_next; }
|
||||
|
||||
void* allocate_slot();
|
||||
|
||||
uint num_slots() const { return _num_slots; }
|
||||
|
||||
Segment* next() const { return _next; }
|
||||
Segment* next() const { return _next.load_relaxed(); }
|
||||
|
||||
void set_next(Segment* next) {
|
||||
assert(next != this, " loop condition");
|
||||
_next = next;
|
||||
_next.store_relaxed(next);
|
||||
}
|
||||
|
||||
void reset(Segment* next) {
|
||||
_next_allocate = 0;
|
||||
_next_allocate.store_relaxed(0);
|
||||
assert(next != this, " loop condition");
|
||||
set_next(next);
|
||||
memset(payload(0), 0, payload_size());
|
||||
@ -166,7 +167,7 @@ public:
|
||||
uint length() const {
|
||||
// _next_allocate might grow larger than _num_slots in multi-thread environments
|
||||
// due to races.
|
||||
return MIN2(_next_allocate, _num_slots);
|
||||
return MIN2(_next_allocate.load_relaxed(), _num_slots);
|
||||
}
|
||||
|
||||
static size_t size_in_bytes(uint slot_size, uint num_slots) {
|
||||
@ -176,7 +177,7 @@ public:
|
||||
static Segment* create_segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag);
|
||||
static void delete_segment(Segment* segment);
|
||||
|
||||
bool is_full() const { return _next_allocate >= _num_slots; }
|
||||
bool is_full() const { return _next_allocate.load_relaxed() >= _num_slots; }
|
||||
};
|
||||
|
||||
static_assert(alignof(G1MonotonicArena::Segment) >= SegmentPayloadMaxAlignment, "assert alignment of Segment (and indirectly its payload)");
|
||||
@ -186,15 +187,15 @@ static_assert(alignof(G1MonotonicArena::Segment) >= SegmentPayloadMaxAlignment,
|
||||
// performed by multiple threads concurrently.
|
||||
// Counts and memory usage are current on a best-effort basis if accessed concurrently.
|
||||
class G1MonotonicArena::SegmentFreeList {
|
||||
static Segment* volatile* next_ptr(Segment& segment) {
|
||||
static Atomic<Segment*>* next_ptr(Segment& segment) {
|
||||
return segment.next_addr();
|
||||
}
|
||||
using SegmentStack = LockFreeStack<Segment, &SegmentFreeList::next_ptr>;
|
||||
|
||||
SegmentStack _list;
|
||||
|
||||
volatile size_t _num_segments;
|
||||
volatile size_t _mem_size;
|
||||
Atomic<size_t> _num_segments;
|
||||
Atomic<size_t> _mem_size;
|
||||
|
||||
public:
|
||||
SegmentFreeList() : _list(), _num_segments(0), _mem_size(0) { }
|
||||
@ -210,8 +211,8 @@ public:
|
||||
|
||||
void print_on(outputStream* out, const char* prefix = "");
|
||||
|
||||
size_t num_segments() const { return AtomicAccess::load(&_num_segments); }
|
||||
size_t mem_size() const { return AtomicAccess::load(&_mem_size); }
|
||||
size_t num_segments() const { return _num_segments.load_relaxed(); }
|
||||
size_t mem_size() const { return _mem_size.load_relaxed(); }
|
||||
};
|
||||
|
||||
// Configuration for G1MonotonicArena, e.g slot size, slot number of next Segment.
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -28,14 +28,13 @@
|
||||
|
||||
#include "gc/g1/g1MonotonicArena.hpp"
|
||||
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "utilities/globalCounter.inline.hpp"
|
||||
|
||||
inline void* G1MonotonicArena::Segment::allocate_slot() {
|
||||
if (_next_allocate >= _num_slots) {
|
||||
if (_next_allocate.load_relaxed() >= _num_slots) {
|
||||
return nullptr;
|
||||
}
|
||||
uint result = AtomicAccess::fetch_then_add(&_next_allocate, 1u, memory_order_relaxed);
|
||||
uint result = _next_allocate.fetch_then_add(1u, memory_order_relaxed);
|
||||
if (result >= _num_slots) {
|
||||
return nullptr;
|
||||
}
|
||||
@ -48,8 +47,8 @@ inline G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get() {
|
||||
|
||||
Segment* result = _list.pop();
|
||||
if (result != nullptr) {
|
||||
AtomicAccess::dec(&_num_segments, memory_order_relaxed);
|
||||
AtomicAccess::sub(&_mem_size, result->mem_size(), memory_order_relaxed);
|
||||
_num_segments.sub_then_fetch(1u, memory_order_relaxed);
|
||||
_mem_size.sub_then_fetch(result->mem_size(), memory_order_relaxed);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,7 +28,6 @@
|
||||
#include "nmt/memTracker.hpp"
|
||||
#include "oops/markWord.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/bitMap.inline.hpp"
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -42,7 +42,6 @@
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/prefetch.inline.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
@ -238,9 +237,9 @@ void G1ParScanThreadState::do_partial_array(PartialArrayState* state, bool stole
|
||||
G1HeapRegionAttr dest_attr = _g1h->region_attr(to_array);
|
||||
G1SkipCardMarkSetter x(&_scanner, dest_attr.is_new_survivor());
|
||||
// Process claimed task.
|
||||
to_array->oop_iterate_range(&_scanner,
|
||||
checked_cast<int>(claim._start),
|
||||
checked_cast<int>(claim._end));
|
||||
to_array->oop_iterate_elements_range(&_scanner,
|
||||
checked_cast<int>(claim._start),
|
||||
checked_cast<int>(claim._end));
|
||||
}
|
||||
|
||||
MAYBE_INLINE_EVACUATION
|
||||
@ -260,7 +259,7 @@ void G1ParScanThreadState::start_partial_objarray(oop from_obj,
|
||||
// Process the initial chunk. No need to process the type in the
|
||||
// klass, as it will already be handled by processing the built-in
|
||||
// module.
|
||||
to_array->oop_iterate_range(&_scanner, 0, checked_cast<int>(initial_chunk_size));
|
||||
to_array->oop_iterate_elements_range(&_scanner, 0, checked_cast<int>(initial_chunk_size));
|
||||
}
|
||||
|
||||
MAYBE_INLINE_EVACUATION
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -943,7 +943,7 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
|
||||
phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergePSS, G1GCPhaseTimes::MergePSSToYoungGenCards));
|
||||
}
|
||||
|
||||
record_pause(this_pause, start_time_sec, end_time_sec, allocation_failure);
|
||||
record_pause(this_pause, start_time_sec, end_time_sec);
|
||||
|
||||
if (G1GCPauseTypeHelper::is_last_young_pause(this_pause)) {
|
||||
assert(!G1GCPauseTypeHelper::is_concurrent_start_pause(this_pause),
|
||||
@ -1389,16 +1389,13 @@ void G1Policy::update_gc_pause_time_ratios(G1GCPauseType gc_type, double start_t
|
||||
|
||||
void G1Policy::record_pause(G1GCPauseType gc_type,
|
||||
double start,
|
||||
double end,
|
||||
bool allocation_failure) {
|
||||
double end) {
|
||||
// Manage the MMU tracker. For some reason it ignores Full GCs.
|
||||
if (gc_type != G1GCPauseType::FullGC) {
|
||||
_mmu_tracker->add_pause(start, end);
|
||||
}
|
||||
|
||||
if (!allocation_failure) {
|
||||
update_gc_pause_time_ratios(gc_type, start, end);
|
||||
}
|
||||
update_gc_pause_time_ratios(gc_type, start, end);
|
||||
|
||||
update_time_to_mixed_tracking(gc_type, start, end);
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -275,8 +275,7 @@ private:
|
||||
// Record the given STW pause with the given start and end times (in s).
|
||||
void record_pause(G1GCPauseType gc_type,
|
||||
double start,
|
||||
double end,
|
||||
bool allocation_failure = false);
|
||||
double end);
|
||||
|
||||
void update_gc_pause_time_ratios(G1GCPauseType gc_type, double start_sec, double end_sec);
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,6 +27,7 @@
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/pair.hpp"
|
||||
@ -40,20 +41,23 @@
// * the number of incoming references found during marking. This is an approximate
// value because we do not mark through all objects.
struct G1RegionMarkStats {
size_t _live_words;
size_t _incoming_refs;
Atomic<size_t> _live_words;
Atomic<size_t> _incoming_refs;

// Clear all members.
void clear() {
_live_words = 0;
_incoming_refs = 0;
_live_words.store_relaxed(0);
_incoming_refs.store_relaxed(0);
}
// Clear all members after a marking overflow. Only needs to clear the number of
// incoming references as all objects will be rescanned, while the live words are
// gathered whenever a thread can mark an object, which is synchronized.
void clear_during_overflow() {
_incoming_refs = 0;
_incoming_refs.store_relaxed(0);
}

size_t live_words() const { return _live_words.load_relaxed(); }
size_t incoming_refs() const { return _incoming_refs.load_relaxed(); }
};

// Per-marking thread cache for the region mark statistics.
@ -112,12 +116,16 @@ public:
void add_live_words(oop obj);
void add_live_words(uint region_idx, size_t live_words) {
G1RegionMarkStatsCacheEntry* const cur = find_for_add(region_idx);
cur->_stats._live_words += live_words;
// This method is only ever called single-threaded, so we do not need atomic
// update here.
cur->_stats._live_words.store_relaxed(cur->_stats.live_words() + live_words);
}

void inc_incoming_refs(uint region_idx) {
G1RegionMarkStatsCacheEntry* const cur = find_for_add(region_idx);
cur->_stats._incoming_refs++;
// This method is only ever called single-threaded, so we do not need atomic
// update here.
cur->_stats._incoming_refs.store_relaxed(cur->_stats.incoming_refs() + 1u);
}

void reset(uint region_idx) {

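The comments above note that each cache entry is only ever updated by its owning marking thread, so relaxed stores are enough there; the shared G1RegionMarkStats target is updated atomically only when an entry is evicted (the add_then_fetch calls in the .inline.hpp hunk further down). A standalone sketch of that cache-then-flush split, with std::atomic standing in for Atomic<size_t> (a simplification, not the HotSpot code):

#include <atomic>
#include <cstdio>

// Shared per-region statistic, touched atomically only on eviction.
static std::atomic<size_t> region_live_words{0};

struct MarkStatsCacheEntry {
  size_t live_words = 0;                // owned by a single marking thread

  void add_live_words(size_t words) {   // no atomics needed on the hot path
    live_words += words;
  }

  void evict() {                        // flush into the shared statistic
    if (live_words != 0) {
      region_live_words.fetch_add(live_words, std::memory_order_relaxed);
      live_words = 0;
    }
  }
};

int main() {
  MarkStatsCacheEntry cache;
  cache.add_live_words(128);
  cache.add_live_words(64);
  cache.evict();
  std::printf("live words: %zu\n", region_live_words.load());
  return 0;
}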
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,8 +27,6 @@
|
||||
|
||||
#include "gc/g1/g1RegionMarkStatsCache.hpp"
|
||||
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
|
||||
inline G1RegionMarkStatsCache::G1RegionMarkStatsCacheEntry* G1RegionMarkStatsCache::find_for_add(uint region_idx) {
|
||||
uint const cache_idx = hash(region_idx);
|
||||
|
||||
@ -46,12 +44,12 @@ inline G1RegionMarkStatsCache::G1RegionMarkStatsCacheEntry* G1RegionMarkStatsCac
|
||||
|
||||
inline void G1RegionMarkStatsCache::evict(uint idx) {
|
||||
G1RegionMarkStatsCacheEntry* cur = &_cache[idx];
|
||||
if (cur->_stats._live_words != 0) {
|
||||
AtomicAccess::add(&_target[cur->_region_idx]._live_words, cur->_stats._live_words);
|
||||
if (cur->_stats.live_words() != 0) {
|
||||
_target[cur->_region_idx]._live_words.add_then_fetch(cur->_stats.live_words());
|
||||
}
|
||||
|
||||
if (cur->_stats._incoming_refs != 0) {
|
||||
AtomicAccess::add(&_target[cur->_region_idx]._incoming_refs, cur->_stats._incoming_refs);
|
||||
if (cur->_stats.incoming_refs() != 0) {
|
||||
_target[cur->_region_idx]._incoming_refs.add_then_fetch(cur->_stats.incoming_refs());
|
||||
}
|
||||
|
||||
cur->clear();
|
||||
|
||||
@ -412,7 +412,7 @@ void SerialFullGC::follow_array_chunk(objArrayOop array, int index) {
|
||||
const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
|
||||
const int end_index = beg_index + stride;
|
||||
|
||||
array->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);
|
||||
array->oop_iterate_elements_range(&mark_and_push_closure, beg_index, end_index);
|
||||
|
||||
if (end_index < len) {
|
||||
SerialFullGC::push_objarray(array, end_index); // Push the continuation.
|
||||
|
||||
@ -96,8 +96,22 @@ void WorkerThreads::initialize_workers() {
}
}

bool WorkerThreads::allow_inject_creation_failure() const {
if (!is_init_completed()) {
// Never allow creation failures during VM init
return false;
}

if (_created_workers == 0) {
// Never allow creation failures of the first worker, it will cause the VM to exit
return false;
}

return true;
}

WorkerThread* WorkerThreads::create_worker(uint name_suffix) {
if (is_init_completed() && InjectGCWorkerCreationFailure) {
if (InjectGCWorkerCreationFailure && allow_inject_creation_failure()) {
return nullptr;
}

@ -104,6 +104,7 @@ public:
WorkerThreads(const char* name, uint max_workers);

void initialize_workers();
bool allow_inject_creation_failure() const;

uint max_workers() const { return _max_workers; }
uint created_workers() const { return _created_workers; }

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -22,8 +22,9 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "cppstdlib/new.hpp"
|
||||
#include "gc/shared/workerUtils.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
|
||||
// *** WorkerThreadsBarrierSync
|
||||
@ -80,21 +81,21 @@ void WorkerThreadsBarrierSync::abort() {
|
||||
|
||||
SubTasksDone::SubTasksDone(uint n) :
|
||||
_tasks(nullptr), _n_tasks(n) {
|
||||
_tasks = NEW_C_HEAP_ARRAY(bool, n, mtInternal);
|
||||
_tasks = NEW_C_HEAP_ARRAY(Atomic<bool>, n, mtInternal);
|
||||
for (uint i = 0; i < _n_tasks; i++) {
|
||||
_tasks[i] = false;
|
||||
::new (&_tasks[i]) Atomic<bool>(false);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
void SubTasksDone::all_tasks_claimed_impl(uint skipped[], size_t skipped_size) {
|
||||
if (AtomicAccess::cmpxchg(&_verification_done, false, true)) {
|
||||
if (!_verification_done.compare_set(false, true)) {
|
||||
// another thread has done the verification
|
||||
return;
|
||||
}
|
||||
// all non-skipped tasks are claimed
|
||||
for (uint i = 0; i < _n_tasks; ++i) {
|
||||
if (!_tasks[i]) {
|
||||
if (!_tasks[i].load_relaxed()) {
|
||||
auto is_skipped = false;
|
||||
for (size_t j = 0; j < skipped_size; ++j) {
|
||||
if (i == skipped[j]) {
|
||||
@ -109,27 +110,27 @@ void SubTasksDone::all_tasks_claimed_impl(uint skipped[], size_t skipped_size) {
|
||||
for (size_t i = 0; i < skipped_size; ++i) {
|
||||
auto task_index = skipped[i];
|
||||
assert(task_index < _n_tasks, "Array in range.");
|
||||
assert(!_tasks[task_index], "%d is both claimed and skipped.", task_index);
|
||||
assert(!_tasks[task_index].load_relaxed(), "%d is both claimed and skipped.", task_index);
|
||||
}
|
||||
}
|
||||
#endif

bool SubTasksDone::try_claim_task(uint t) {
assert(t < _n_tasks, "bad task id.");
return !_tasks[t] && !AtomicAccess::cmpxchg(&_tasks[t], false, true);
return !_tasks[t].load_relaxed() && _tasks[t].compare_set(false, true);
}

SubTasksDone::~SubTasksDone() {
assert(_verification_done, "all_tasks_claimed must have been called.");
FREE_C_HEAP_ARRAY(bool, _tasks);
assert(_verification_done.load_relaxed(), "all_tasks_claimed must have been called.");
FREE_C_HEAP_ARRAY(Atomic<bool>, _tasks);
}

// *** SequentialSubTasksDone

bool SequentialSubTasksDone::try_claim_task(uint& t) {
t = _num_claimed;
t = _num_claimed.load_relaxed();
if (t < _num_tasks) {
t = AtomicAccess::add(&_num_claimed, 1u) - 1;
t = _num_claimed.fetch_then_add(1u);
}
return t < _num_tasks;
}

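try_claim_task above keeps the cheap racy pre-read and performs the actual claim with compare_set(false, true) on the Atomic<bool> slot; SequentialSubTasksDone claims indices with fetch_then_add. A standalone sketch of the one-shot claim, using std::atomic rather than HotSpot's Atomic<bool> (a simplification, not part of this changeset):

#include <atomic>
#include <cstdio>

static std::atomic<bool> tasks[8];    // zero-initialized: all tasks unclaimed

bool try_claim_task(unsigned t) {
  // Racy pre-read: if the task is already claimed, skip the more expensive CAS.
  if (tasks[t].load(std::memory_order_relaxed)) {
    return false;
  }
  bool expected = false;              // the claim succeeds only for the first caller
  return tasks[t].compare_exchange_strong(expected, true);
}

int main() {
  std::printf("first claim: %d\n", try_claim_task(3));   // prints 1
  std::printf("second claim: %d\n", try_claim_task(3));  // prints 0
  return 0;
}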
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,6 +28,7 @@
|
||||
#include "cppstdlib/type_traits.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "metaprogramming/enableIf.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/mutex.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
@ -79,11 +80,11 @@ public:
|
||||
// enumeration type.
|
||||
|
||||
class SubTasksDone: public CHeapObj<mtInternal> {
|
||||
volatile bool* _tasks;
|
||||
Atomic<bool>* _tasks;
|
||||
uint _n_tasks;
|
||||
|
||||
// make sure verification logic is run exactly once to avoid duplicate assertion failures
|
||||
DEBUG_ONLY(volatile bool _verification_done = false;)
|
||||
DEBUG_ONLY(Atomic<bool> _verification_done;)
|
||||
void all_tasks_claimed_impl(uint skipped[], size_t skipped_size) NOT_DEBUG_RETURN;
|
||||
|
||||
NONCOPYABLE(SubTasksDone);
|
||||
@ -127,7 +128,7 @@ public:
|
||||
class SequentialSubTasksDone : public CHeapObj<mtInternal> {
|
||||
|
||||
uint _num_tasks; // Total number of tasks available.
|
||||
volatile uint _num_claimed; // Number of tasks claimed.
|
||||
Atomic<uint> _num_claimed; // Number of tasks claimed.
|
||||
|
||||
NONCOPYABLE(SequentialSubTasksDone);
|
||||
|
||||
@ -135,7 +136,8 @@ public:
|
||||
SequentialSubTasksDone(uint num_tasks) : _num_tasks(num_tasks), _num_claimed(0) { }
|
||||
~SequentialSubTasksDone() {
|
||||
// Claiming may try to claim more tasks than there are.
|
||||
assert(_num_claimed >= _num_tasks, "Claimed %u tasks of %u", _num_claimed, _num_tasks);
|
||||
assert(_num_claimed.load_relaxed() >= _num_tasks,
|
||||
"Claimed %u tasks of %u", _num_claimed.load_relaxed(), _num_tasks);
|
||||
}
|
||||
|
||||
// Attempt to claim the next unclaimed task in the sequence,
|
||||
|
||||
@ -129,6 +129,13 @@ protected:
|
||||
#endif
|
||||
}
|
||||
|
||||
inline void update_livedata(size_t live) {
|
||||
_region_union._live_data = live;
|
||||
#ifdef ASSERT
|
||||
_union_tag = is_live_data;
|
||||
#endif
|
||||
}
|
||||
|
||||
inline ShenandoahHeapRegion* get_region() const {
|
||||
assert(_union_tag != is_uninitialized, "Cannot fetch region from uninitialized RegionData");
|
||||
return _region;
|
||||
|
||||
@ -89,6 +89,17 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
|
||||
return false;
|
||||
}
|
||||
|
||||
// Between consecutive mixed-evacuation cycles, the live data within each candidate region may change due to
|
||||
// promotions and old-gen evacuations. Re-sort the candidate regions in order to first evacuate regions that have
|
||||
// the smallest amount of live data. These are easiest to evacuate with least effort. Doing these first allows
|
||||
// us to more quickly replenish free memory with empty regions.
|
||||
for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; i++) {
|
||||
ShenandoahHeapRegion* r = _region_data[i].get_region();
|
||||
_region_data[i].update_livedata(r->get_mixed_candidate_live_data_bytes());
|
||||
}
|
||||
QuickSort::sort<RegionData>(_region_data + _next_old_collection_candidate, unprocessed_old_collection_candidates(),
|
||||
compare_by_live);
|
||||
|
||||
_first_pinned_candidate = NOT_FOUND;
|
||||
|
||||
uint included_old_regions = 0;
|
||||
@ -414,6 +425,8 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
|
||||
ShenandoahHeapRegion* r = candidates[i].get_region();
|
||||
size_t region_garbage = r->garbage();
|
||||
size_t region_free = r->free();
|
||||
|
||||
r->capture_mixed_candidate_garbage();
|
||||
candidates_garbage += region_garbage;
|
||||
unfragmented += region_free;
|
||||
}
|
||||
@ -456,6 +469,8 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
|
||||
r->index(), ShenandoahHeapRegion::region_state_to_string(r->state()));
|
||||
const size_t region_garbage = r->garbage();
|
||||
const size_t region_free = r->free();
|
||||
|
||||
r->capture_mixed_candidate_garbage();
|
||||
candidates_garbage += region_garbage;
|
||||
unfragmented += region_free;
|
||||
defrag_count++;
|
||||
|
||||
@ -104,17 +104,6 @@ void ShenandoahGenerationalHeap::initialize_heuristics() {
|
||||
// Initialize global generation and heuristics even in generational mode.
|
||||
ShenandoahHeap::initialize_heuristics();
|
||||
|
||||
// Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
|
||||
// for old would be total heap - minimum capacity of young. This means the sum of the maximum
|
||||
// allowed for old and young could exceed the total heap size. It remains the case that the
|
||||
// _actual_ capacity of young + old = total.
|
||||
size_t region_count = num_regions();
|
||||
size_t max_young_regions = MAX2((region_count * ShenandoahMaxYoungPercentage) / 100, (size_t) 1U);
|
||||
size_t initial_capacity_young = max_young_regions * ShenandoahHeapRegion::region_size_bytes();
|
||||
size_t max_capacity_young = initial_capacity_young;
|
||||
size_t initial_capacity_old = max_capacity() - max_capacity_young;
|
||||
size_t max_capacity_old = max_capacity() - initial_capacity_young;
|
||||
|
||||
_young_generation = new ShenandoahYoungGeneration(max_workers());
|
||||
_old_generation = new ShenandoahOldGeneration(max_workers());
|
||||
_young_generation->initialize_heuristics(mode());
|
||||
|
||||
@ -75,6 +75,7 @@ ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool c
|
||||
_plab_allocs(0),
|
||||
_live_data(0),
|
||||
_critical_pins(0),
|
||||
_mixed_candidate_garbage_words(0),
|
||||
_update_watermark(start),
|
||||
_age(0),
|
||||
#ifdef SHENANDOAH_CENSUS_NOISE
|
||||
@ -565,6 +566,7 @@ void ShenandoahHeapRegion::recycle_internal() {
|
||||
assert(_recycling.is_set() && is_trash(), "Wrong state");
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
|
||||
_mixed_candidate_garbage_words = 0;
|
||||
set_top(bottom());
|
||||
clear_live_data();
|
||||
reset_alloc_metadata();
|
||||
|
||||
@ -43,6 +43,7 @@ class ShenandoahHeapRegion {
|
||||
friend class VMStructs;
|
||||
friend class ShenandoahHeapRegionStateConstant;
|
||||
private:
|
||||
|
||||
/*
|
||||
Region state is described by a state machine. Transitions are guarded by
|
||||
heap lock, which allows changing the state of several regions atomically.
|
||||
@ -259,6 +260,8 @@ private:
|
||||
volatile size_t _live_data;
|
||||
volatile size_t _critical_pins;
|
||||
|
||||
size_t _mixed_candidate_garbage_words;
|
||||
|
||||
HeapWord* volatile _update_watermark;
|
||||
|
||||
uint _age;
|
||||
@ -398,6 +401,14 @@ public:
|
||||
// above TAMS.
|
||||
inline size_t get_live_data_words() const;
|
||||
|
||||
inline size_t get_mixed_candidate_live_data_bytes() const;
|
||||
inline size_t get_mixed_candidate_live_data_words() const;
|
||||
|
||||
inline void capture_mixed_candidate_garbage();
|
||||
|
||||
// Returns garbage by calculating difference between used and get_live_data_words. The value returned is only
|
||||
// meaningful immediately following completion of marking. If there have been subsequent allocations in this region,
|
||||
// use a different approach to determine garbage, such as (used() - get_mixed_candidate_live_data_bytes())
|
||||
inline size_t garbage() const;
|
||||
|
||||
void print_on(outputStream* st) const;
|
||||
|
||||
@ -163,6 +163,23 @@ inline size_t ShenandoahHeapRegion::get_live_data_bytes() const {
|
||||
return get_live_data_words() * HeapWordSize;
|
||||
}
|
||||
|
||||
inline size_t ShenandoahHeapRegion::get_mixed_candidate_live_data_bytes() const {
|
||||
shenandoah_assert_heaplocked_or_safepoint();
|
||||
assert(used() >= _mixed_candidate_garbage_words * HeapWordSize, "used must exceed garbage");
|
||||
return used() - _mixed_candidate_garbage_words * HeapWordSize;
|
||||
}
|
||||
|
||||
inline size_t ShenandoahHeapRegion::get_mixed_candidate_live_data_words() const {
|
||||
shenandoah_assert_heaplocked_or_safepoint();
|
||||
assert(used() >= _mixed_candidate_garbage_words * HeapWordSize, "used must exceed garbage");
|
||||
return used() / HeapWordSize - _mixed_candidate_garbage_words;
|
||||
}
|
||||
|
||||
inline void ShenandoahHeapRegion::capture_mixed_candidate_garbage() {
|
||||
shenandoah_assert_heaplocked_or_safepoint();
|
||||
_mixed_candidate_garbage_words = garbage() / HeapWordSize;
|
||||
}
|
||||
|
||||
inline bool ShenandoahHeapRegion::has_live() const {
|
||||
return get_live_data_words() != 0;
|
||||
}
|
||||
|
||||
@ -167,7 +167,7 @@ inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q,
|
||||
|
||||
if (len <= (int) ObjArrayMarkingStride*2) {
|
||||
// A few slices only, process directly
|
||||
array->oop_iterate_range(cl, 0, len);
|
||||
array->oop_iterate_elements_range(cl, 0, len);
|
||||
} else {
|
||||
int bits = log2i_graceful(len);
|
||||
// Compensate for non-power-of-two arrays, cover the array in excess:
|
||||
@ -216,7 +216,7 @@ inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q,
|
||||
// Process the irregular tail, if present
|
||||
int from = last_idx;
|
||||
if (from < len) {
|
||||
array->oop_iterate_range(cl, from, len);
|
||||
array->oop_iterate_elements_range(cl, from, len);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -248,7 +248,7 @@ inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl,
|
||||
assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
|
||||
#endif
|
||||
|
||||
array->oop_iterate_range(cl, from, to);
|
||||
array->oop_iterate_elements_range(cl, from, to);
|
||||
}
|
||||
|
||||
template <ShenandoahGenerationType GENERATION>
|
||||
|
||||
@ -430,18 +430,6 @@
|
||||
"by thread type (worker or mutator) and evacuation type (young, " \
|
||||
"old, or promotion.") \
|
||||
\
|
||||
product(uintx, ShenandoahMinYoungPercentage, 20, EXPERIMENTAL, \
|
||||
"The minimum percentage of the heap to use for the young " \
|
||||
"generation. Heuristics will not adjust the young generation " \
|
||||
"to be less than this.") \
|
||||
range(0, 100) \
|
||||
\
|
||||
product(uintx, ShenandoahMaxYoungPercentage, 100, EXPERIMENTAL, \
|
||||
"The maximum percentage of the heap to use for the young " \
|
||||
"generation. Heuristics will not adjust the young generation " \
|
||||
"to be more than this.") \
|
||||
range(0, 100) \
|
||||
\
|
||||
product(uintx, ShenandoahCriticalFreeThreshold, 1, EXPERIMENTAL, \
|
||||
"How much of the heap needs to be free after recovery cycles, " \
|
||||
"either Degenerated or Full GC to be claimed successful. If this "\
|
||||
|
||||
@ -456,7 +456,7 @@ void ZHeapIterator::follow_array_chunk(const ZHeapIteratorContext& context, cons
|
||||
|
||||
// Follow array chunk
|
||||
ZHeapIteratorOopClosure<false /* VisitReferents */> cl(this, context, obj);
|
||||
ZIterator::oop_iterate_range(obj, &cl, start, end);
|
||||
ZIterator::oop_iterate_elements_range(obj, &cl, start, end);
|
||||
}
|
||||
|
||||
template <bool VisitWeaks>
|
||||
|
||||
@ -41,7 +41,7 @@ public:
|
||||
static void oop_iterate(oop obj, OopClosureT* cl);
|
||||
|
||||
template <typename OopClosureT>
|
||||
static void oop_iterate_range(objArrayOop obj, OopClosureT* cl, int start, int end);
|
||||
static void oop_iterate_elements_range(objArrayOop obj, OopClosureT* cl, int start, int end);
|
||||
|
||||
// This function skips invisible roots
|
||||
template <typename Function>
|
||||
|
||||
@ -66,9 +66,9 @@ void ZIterator::oop_iterate(oop obj, OopClosureT* cl) {
|
||||
}
|
||||
|
||||
template <typename OopClosureT>
|
||||
void ZIterator::oop_iterate_range(objArrayOop obj, OopClosureT* cl, int start, int end) {
|
||||
void ZIterator::oop_iterate_elements_range(objArrayOop obj, OopClosureT* cl, int start, int end) {
|
||||
assert(!is_invisible_object_array(obj), "not safe");
|
||||
obj->oop_iterate_range(cl, start, end);
|
||||
obj->oop_iterate_elements_range(cl, start, end);
|
||||
}
|
||||
|
||||
template <typename Function>
|
||||
|
||||
@ -66,10 +66,6 @@
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#ifdef LINUX
|
||||
#include "os_linux.hpp"
|
||||
#include "osContainer_linux.hpp"
|
||||
#endif
|
||||
|
||||
#define NO_TRANSITION(result_type, header) extern "C" { result_type JNICALL header {
|
||||
#define NO_TRANSITION_END } }
|
||||
@ -400,35 +396,18 @@ JVM_ENTRY_NO_ENV(jboolean, jfr_is_class_instrumented(JNIEnv* env, jclass jvm, jc
|
||||
JVM_END
|
||||
|
||||
JVM_ENTRY_NO_ENV(jboolean, jfr_is_containerized(JNIEnv* env, jclass jvm))
|
||||
#ifdef LINUX
|
||||
return OSContainer::is_containerized();
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
return os::is_containerized();
|
||||
JVM_END
|
||||
|
||||
JVM_ENTRY_NO_ENV(jlong, jfr_host_total_memory(JNIEnv* env, jclass jvm))
|
||||
#ifdef LINUX
|
||||
// We want the host memory, not the container limit.
|
||||
// os::physical_memory() would return the container limit.
|
||||
return static_cast<jlong>(os::Linux::physical_memory());
|
||||
#else
|
||||
return static_cast<jlong>(os::physical_memory());
|
||||
#endif
|
||||
return static_cast<jlong>(os::Machine::physical_memory());
|
||||
JVM_END
|
||||
|
||||
JVM_ENTRY_NO_ENV(jlong, jfr_host_total_swap_memory(JNIEnv* env, jclass jvm))
|
||||
#ifdef LINUX
|
||||
// We want the host swap memory, not the container value.
|
||||
physical_memory_size_type host_swap = 0;
|
||||
(void)os::Linux::host_swap(host_swap); // Discard return value and treat as no swap
|
||||
return static_cast<jlong>(host_swap);
|
||||
#else
|
||||
physical_memory_size_type total_swap_space = 0;
|
||||
// Return value ignored - defaulting to 0 on failure.
|
||||
(void)os::total_swap_space(total_swap_space);
|
||||
(void)os::Machine::total_swap_space(total_swap_space);
|
||||
return static_cast<jlong>(total_swap_space);
|
||||
#endif
|
||||
JVM_END
|
||||
|
||||
JVM_ENTRY_NO_ENV(void, jfr_emit_data_loss(JNIEnv* env, jclass jvm, jlong bytes))
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,6 +25,8 @@
|
||||
#ifndef SHARE_JFR_UTILITIES_JFRSET_HPP
|
||||
#define SHARE_JFR_UTILITIES_JFRSET_HPP
|
||||
|
||||
#include "cppstdlib/new.hpp"
|
||||
#include "cppstdlib/type_traits.hpp"
|
||||
#include "jfr/utilities/jfrTypes.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
|
||||
@ -67,7 +69,9 @@ class JfrSetStorage : public AnyObj {
|
||||
} else {
|
||||
table = NEW_RESOURCE_ARRAY(K, table_size);
|
||||
}
|
||||
memset(table, 0, table_size * sizeof(K));
|
||||
for (unsigned i = 0; i < table_size; ++i) {
|
||||
::new (&table[i]) K{};
|
||||
}
|
||||
return table;
|
||||
}
|
||||
|
||||
@ -88,7 +92,7 @@ class JfrSetStorage : public AnyObj {
|
||||
assert(is_nonempty(), "invariant");
|
||||
for (unsigned i = 0; i < _table_size; ++i) {
|
||||
K k = _table[i];
|
||||
if (k != 0) {
|
||||
if (k != K{}) {
|
||||
functor(k);
|
||||
}
|
||||
}
|
||||
@ -107,7 +111,14 @@ class JfrSetStorage : public AnyObj {
|
||||
}
|
||||
|
||||
void clear() {
|
||||
memset(_table, 0, _table_size * sizeof(K));
|
||||
for (unsigned i = 0; i < _table_size; ++i) {
|
||||
if constexpr (std::is_copy_assignable_v<K>) {
|
||||
_table[i] = K{};
|
||||
} else {
|
||||
_table[i].~K();
|
||||
::new (&_table[i]) K{};
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@ -136,11 +147,11 @@ class JfrSet : public JfrSetStorage<CONFIG> {
|
||||
_resize_threshold = old_table_size;
|
||||
for (unsigned i = 0; i < old_table_size; ++i) {
|
||||
const K k = old_table[i];
|
||||
if (k != 0) {
|
||||
if (k != K{}) {
|
||||
uint32_t idx = slot_idx(CONFIG::hash(k));
|
||||
do {
|
||||
K v = this->_table[idx];
|
||||
if (v == 0) {
|
||||
if (v == K{}) {
|
||||
this->_table[idx] = k;
|
||||
break;
|
||||
}
|
||||
@ -161,7 +172,7 @@ class JfrSet : public JfrSetStorage<CONFIG> {
|
||||
K* result = nullptr;
|
||||
while (true) {
|
||||
K v = this->_table[idx];
|
||||
if (v == 0) {
|
||||
if (v == K{}) {
|
||||
result = &this->_table[idx];
|
||||
break;
|
||||
}
|
||||
@ -196,7 +207,7 @@ class JfrSet : public JfrSetStorage<CONFIG> {
|
||||
// Already exists.
|
||||
return false;
|
||||
}
|
||||
assert(*slot == 0, "invariant");
|
||||
assert(*slot == K{}, "invariant");
|
||||
*slot = k;
|
||||
if (++this->_elements == _resize_threshold) {
|
||||
resize();
|
||||
|
||||
@ -135,17 +135,16 @@ class ObjArrayKlass : public ArrayKlass {
|
||||
template <typename T, typename OopClosureType>
|
||||
inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
// Iterate over oop elements within [start, end), and metadata.
|
||||
template <typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_range(objArrayOop a, OopClosureType* closure, int start, int end);
|
||||
|
||||
public:
|
||||
// Iterate over all oop elements.
|
||||
// Iterate over all oop elements, and no metadata.
|
||||
template <typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_elements(objArrayOop a, OopClosureType* closure);
|
||||
|
||||
// Iterate over oop elements within index range [start, end), and no metadata.
|
||||
template <typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_elements_range(objArrayOop a, OopClosureType* closure, int start, int end);
|
||||
|
||||
private:
|
||||
// Iterate over all oop elements with indices within mr.
|
||||
// Iterate over all oop elements bounded by addresses [low, high), and no metadata.
|
||||
template <typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_elements_bounded(objArrayOop a, OopClosureType* closure, void* low, void* high);
|
||||
|
||||
|
||||
@ -38,10 +38,18 @@

template <typename T, class OopClosureType>
void ObjArrayKlass::oop_oop_iterate_elements(objArrayOop a, OopClosureType* closure) {
T* p = (T*)a->base();
T* const end = p + a->length();
oop_oop_iterate_elements_range<T>(a, closure, 0, a->length());
}

for (;p < end; p++) {
// Like oop_oop_iterate but only iterates over a specified range and only used
// for objArrayOops.
template <typename T, class OopClosureType>
void ObjArrayKlass::oop_oop_iterate_elements_range(objArrayOop a, OopClosureType* closure, int start, int end) {
T* base = (T*)a->base();
T* p = base + start;
T* const end_p = base + end;

for (;p < end_p; ++p) {
Devirtualizer::do_oop(closure, p);
}
}
@ -98,24 +106,4 @@ void ObjArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, Me
oop_oop_iterate_elements_bounded<T>(a, closure, mr.start(), mr.end());
}

// Like oop_oop_iterate but only iterates over a specified range and only used
// for objArrayOops.
template <typename T, class OopClosureType>
void ObjArrayKlass::oop_oop_iterate_range(objArrayOop a, OopClosureType* closure, int start, int end) {
T* low = (T*)a->base() + start;
T* high = (T*)a->base() + end;

oop_oop_iterate_elements_bounded<T>(a, closure, low, high);
}

// Placed here to resolve include cycle between objArrayKlass.inline.hpp and objArrayOop.inline.hpp
template <typename OopClosureType>
void objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) {
if (UseCompressedOops) {
((ObjArrayKlass*)klass())->oop_oop_iterate_range<narrowOop>(this, blk, start, end);
} else {
((ObjArrayKlass*)klass())->oop_oop_iterate_range<oop>(this, blk, start, end);
}
}

#endif // SHARE_OOPS_OBJARRAYKLASS_INLINE_HPP

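The rename above makes explicit that the range walk visits element slots only, with no metadata: oop_oop_iterate_elements_range() applies the closure to every slot in [start, end). A standalone sketch of that shape, with plain ints and a toy closure standing in for oops and Devirtualizer (illustration only, not the HotSpot types):

#include <cstdio>
#include <vector>

struct PrintClosure {
  void do_elem(const int* p) { std::printf("%d\n", *p); }
};

// Visit only the element slots in [start, end), analogous to
// oop_oop_iterate_elements_range() above; no header or metadata is touched.
template <typename T, typename Closure>
void iterate_elements_range(const std::vector<T>& a, Closure* cl, int start, int end) {
  const T* base = a.data();
  const T* p = base + start;
  const T* const end_p = base + end;
  for (; p < end_p; ++p) {
    cl->do_elem(p);
  }
}

int main() {
  std::vector<int> a{10, 20, 30, 40, 50};
  PrintClosure cl;
  iterate_elements_range(a, &cl, 1, 4);  // prints 20 30 40
  return 0;
}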
@ -83,9 +83,9 @@ class objArrayOopDesc : public arrayOopDesc {
|
||||
Klass* element_klass();
|
||||
|
||||
public:
|
||||
// special iterators for index ranges, returns size of object
|
||||
// Special iterators for an element index range.
|
||||
template <typename OopClosureType>
|
||||
void oop_iterate_range(OopClosureType* blk, int start, int end);
|
||||
void oop_iterate_elements_range(OopClosureType* blk, int start, int end);
|
||||
};
|
||||
|
||||
// See similar requirement for oopDesc.
|
||||
|
||||
@ -29,6 +29,7 @@
|
||||
|
||||
#include "oops/access.hpp"
|
||||
#include "oops/arrayOop.hpp"
|
||||
#include "oops/objArrayKlass.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
|
||||
@ -51,4 +52,13 @@ inline void objArrayOopDesc::obj_at_put(int index, oop value) {
|
||||
HeapAccess<IS_ARRAY>::oop_store_at(as_oop(), offset, value);
|
||||
}
|
||||
|
||||
template <typename OopClosureType>
|
||||
void objArrayOopDesc::oop_iterate_elements_range(OopClosureType* blk, int start, int end) {
|
||||
if (UseCompressedOops) {
|
||||
((ObjArrayKlass*)klass())->oop_oop_iterate_elements_range<narrowOop>(this, blk, start, end);
|
||||
} else {
|
||||
((ObjArrayKlass*)klass())->oop_oop_iterate_elements_range<oop>(this, blk, start, end);
|
||||
}
|
||||
}
|
||||
|
||||
#endif // SHARE_OOPS_OBJARRAYOOP_INLINE_HPP
|
||||
|
||||
@ -1195,7 +1195,7 @@ const Type* XorLNode::Value(PhaseGVN* phase) const {
|
||||
return AddNode::Value(phase);
|
||||
}
|
||||
|
||||
Node* MaxNode::build_min_max_int(Node* a, Node* b, bool is_max) {
|
||||
Node* MinMaxNode::build_min_max_int(Node* a, Node* b, bool is_max) {
|
||||
if (is_max) {
|
||||
return new MaxINode(a, b);
|
||||
} else {
|
||||
@ -1203,7 +1203,7 @@ Node* MaxNode::build_min_max_int(Node* a, Node* b, bool is_max) {
|
||||
}
|
||||
}
|
||||
|
||||
Node* MaxNode::build_min_max_long(PhaseGVN* phase, Node* a, Node* b, bool is_max) {
|
||||
Node* MinMaxNode::build_min_max_long(PhaseGVN* phase, Node* a, Node* b, bool is_max) {
|
||||
if (is_max) {
|
||||
return new MaxLNode(phase->C, a, b);
|
||||
} else {
|
||||
@ -1211,7 +1211,7 @@ Node* MaxNode::build_min_max_long(PhaseGVN* phase, Node* a, Node* b, bool is_max
|
||||
}
|
||||
}
|
||||
|
||||
Node* MaxNode::build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, const Type* t, PhaseGVN& gvn) {
|
||||
Node* MinMaxNode::build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, const Type* t, PhaseGVN& gvn) {
|
||||
bool is_int = gvn.type(a)->isa_int();
|
||||
assert(is_int || gvn.type(a)->isa_long(), "int or long inputs");
|
||||
assert(is_int == (gvn.type(b)->isa_int() != nullptr), "inconsistent inputs");
|
||||
@ -1243,7 +1243,7 @@ Node* MaxNode::build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, co
|
||||
return res;
|
||||
}
|
||||
|
||||
Node* MaxNode::build_min_max_diff_with_zero(Node* a, Node* b, bool is_max, const Type* t, PhaseGVN& gvn) {
|
||||
Node* MinMaxNode::build_min_max_diff_with_zero(Node* a, Node* b, bool is_max, const Type* t, PhaseGVN& gvn) {
|
||||
bool is_int = gvn.type(a)->isa_int();
|
||||
assert(is_int || gvn.type(a)->isa_long(), "int or long inputs");
|
||||
assert(is_int == (gvn.type(b)->isa_int() != nullptr), "inconsistent inputs");
|
||||
@ -1290,7 +1290,7 @@ static bool can_overflow(const TypeLong* t, jlong c) {
|
||||
// Let <x, x_off> = x_operands and <y, y_off> = y_operands.
|
||||
// If x == y and neither add(x, x_off) nor add(y, y_off) overflow, return
|
||||
// add(x, op(x_off, y_off)). Otherwise, return nullptr.
|
||||
Node* MaxNode::extract_add(PhaseGVN* phase, ConstAddOperands x_operands, ConstAddOperands y_operands) {
|
||||
Node* MinMaxNode::extract_add(PhaseGVN* phase, ConstAddOperands x_operands, ConstAddOperands y_operands) {
|
||||
Node* x = x_operands.first;
|
||||
Node* y = y_operands.first;
|
||||
int opcode = Opcode();
|
||||
@ -1327,7 +1327,7 @@ static ConstAddOperands as_add_with_constant(Node* n) {
  return ConstAddOperands(x, c_type->is_int()->get_con());
}

Node* MaxNode::IdealI(PhaseGVN* phase, bool can_reshape) {
Node* MinMaxNode::IdealI(PhaseGVN* phase, bool can_reshape) {
  Node* n = AddNode::Ideal(phase, can_reshape);
  if (n != nullptr) {
    return n;
@ -1401,7 +1401,7 @@ Node* MaxINode::Identity(PhaseGVN* phase) {
    return in(2);
  }

  return MaxNode::Identity(phase);
  return MinMaxNode::Identity(phase);
}

//=============================================================================
@ -1434,7 +1434,7 @@ Node* MinINode::Identity(PhaseGVN* phase) {
    return in(1);
  }

  return MaxNode::Identity(phase);
  return MinMaxNode::Identity(phase);
}

//------------------------------add_ring---------------------------------------
@ -1564,7 +1564,7 @@ Node* MaxLNode::Identity(PhaseGVN* phase) {
    return in(2);
  }

  return MaxNode::Identity(phase);
  return MinMaxNode::Identity(phase);
}

Node* MaxLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
@ -1596,7 +1596,7 @@ Node* MinLNode::Identity(PhaseGVN* phase) {
    return in(1);
  }

  return MaxNode::Identity(phase);
  return MinMaxNode::Identity(phase);
}

Node* MinLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
@ -1610,7 +1610,7 @@ Node* MinLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  return nullptr;
}

int MaxNode::opposite_opcode() const {
int MinMaxNode::opposite_opcode() const {
  if (Opcode() == max_opcode()) {
    return min_opcode();
  } else {
@ -1621,7 +1621,7 @@ int MaxNode::opposite_opcode() const {

// Given a redundant structure such as Max/Min(A, Max/Min(B, C)) where A == B or A == C, return the useful part of the structure.
// 'operation' is the node expected to be the inner 'Max/Min(B, C)', and 'operand' is the node expected to be the 'A' operand of the outer node.
Node* MaxNode::find_identity_operation(Node* operation, Node* operand) {
Node* MinMaxNode::find_identity_operation(Node* operation, Node* operand) {
  if (operation->Opcode() == Opcode() || operation->Opcode() == opposite_opcode()) {
    Node* n1 = operation->in(1);
    Node* n2 = operation->in(2);
@ -1645,17 +1645,17 @@ Node* MaxNode::find_identity_operation(Node* operation, Node* operand) {
  return nullptr;
}

Node* MaxNode::Identity(PhaseGVN* phase) {
Node* MinMaxNode::Identity(PhaseGVN* phase) {
  if (in(1) == in(2)) {
    return in(1);
  }

  Node* identity_1 = MaxNode::find_identity_operation(in(2), in(1));
  Node* identity_1 = MinMaxNode::find_identity_operation(in(2), in(1));
  if (identity_1 != nullptr) {
    return identity_1;
  }

  Node* identity_2 = MaxNode::find_identity_operation(in(1), in(2));
  Node* identity_2 = MinMaxNode::find_identity_operation(in(1), in(2));
  if (identity_2 != nullptr) {
    return identity_2;
  }
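The identities that find_identity_operation and Identity rely on, checked on plain ints as a hedged illustration (in C2 the operands are IR nodes and "A == B" means the same node, but the arithmetic is identical):

#include <algorithm>
#include <cassert>

int main() {
  int a = 5, b = 9;
  // Same operation nested: the outer node is redundant.
  assert(std::max(a, std::max(a, b)) == std::max(a, b));
  assert(std::min(a, std::min(a, b)) == std::min(a, b));
  // Opposite operation nested: the expression collapses to the shared operand.
  assert(std::max(a, std::min(a, b)) == a);
  assert(std::min(a, std::max(a, b)) == a);
  return 0;
}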
@ -324,14 +324,16 @@ public:
//------------------------------MaxNode----------------------------------------
// Max (or min) of 2 values. Included with the ADD nodes because it inherits
// all the behavior of addition on a ring.
class MaxNode : public AddNode {
class MinMaxNode : public AddNode {
private:
  static Node* build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, const Type* t, PhaseGVN& gvn);
  static Node* build_min_max_diff_with_zero(Node* a, Node* b, bool is_max, const Type* t, PhaseGVN& gvn);
  Node* extract_add(PhaseGVN* phase, ConstAddOperands x_operands, ConstAddOperands y_operands);

public:
  MaxNode( Node *in1, Node *in2 ) : AddNode(in1,in2) {}
  MinMaxNode(Node* in1, Node* in2) : AddNode(in1, in2) {
    init_class_id(Class_MinMax);
  }
  virtual int Opcode() const = 0;
  virtual int max_opcode() const = 0;
  virtual int min_opcode() const = 0;
@ -373,9 +375,9 @@ public:
//------------------------------MaxINode---------------------------------------
// Maximum of 2 integers. Included with the ADD nodes because it inherits
// all the behavior of addition on a ring.
class MaxINode : public MaxNode {
class MaxINode : public MinMaxNode {
public:
  MaxINode( Node *in1, Node *in2 ) : MaxNode(in1,in2) {}
  MaxINode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
  virtual int Opcode() const;
  virtual const Type *add_ring( const Type *, const Type * ) const;
  virtual const Type *add_id() const { return TypeInt::make(min_jint); }
@ -390,9 +392,9 @@ public:
//------------------------------MinINode---------------------------------------
// MINimum of 2 integers. Included with the ADD nodes because it inherits
// all the behavior of addition on a ring.
class MinINode : public MaxNode {
class MinINode : public MinMaxNode {
public:
  MinINode( Node *in1, Node *in2 ) : MaxNode(in1,in2) {}
  MinINode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
  virtual int Opcode() const;
  virtual const Type *add_ring( const Type *, const Type * ) const;
  virtual const Type *add_id() const { return TypeInt::make(max_jint); }
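A small sketch of why add_id() is min_jint for the max node and max_jint for the min node: treating max/min as the ring "addition" mentioned in the class comment, these values are its identity elements, so folding against them is a no-op. Plain-int illustration, assuming nothing beyond the standard library:

#include <algorithm>
#include <cassert>
#include <climits>

int main() {
  for (int x : {INT_MIN, -7, 0, 42, INT_MAX}) {
    assert(std::max(x, INT_MIN) == x);  // identity element of max
    assert(std::min(x, INT_MAX) == x);  // identity element of min
  }
  return 0;
}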
@ -406,9 +408,9 @@ public:

//------------------------------MaxLNode---------------------------------------
// MAXimum of 2 longs.
class MaxLNode : public MaxNode {
class MaxLNode : public MinMaxNode {
public:
  MaxLNode(Compile* C, Node* in1, Node* in2) : MaxNode(in1, in2) {
  MaxLNode(Compile* C, Node* in1, Node* in2) : MinMaxNode(in1, in2) {
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
@ -425,9 +427,9 @@ public:

//------------------------------MinLNode---------------------------------------
// MINimum of 2 longs.
class MinLNode : public MaxNode {
class MinLNode : public MinMaxNode {
public:
  MinLNode(Compile* C, Node* in1, Node* in2) : MaxNode(in1, in2) {
  MinLNode(Compile* C, Node* in1, Node* in2) : MinMaxNode(in1, in2) {
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
@ -444,9 +446,9 @@ public:

//------------------------------MaxFNode---------------------------------------
// Maximum of 2 floats.
class MaxFNode : public MaxNode {
class MaxFNode : public MinMaxNode {
public:
  MaxFNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
  MaxFNode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
  virtual int Opcode() const;
  virtual const Type *add_ring(const Type*, const Type*) const;
  virtual const Type *add_id() const { return TypeF::NEG_INF; }
@ -458,9 +460,9 @@ public:

//------------------------------MinFNode---------------------------------------
// Minimum of 2 floats.
class MinFNode : public MaxNode {
class MinFNode : public MinMaxNode {
public:
  MinFNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
  MinFNode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
  virtual int Opcode() const;
  virtual const Type *add_ring(const Type*, const Type*) const;
  virtual const Type *add_id() const { return TypeF::POS_INF; }
@ -472,9 +474,9 @@ public:

//------------------------------MaxHFNode--------------------------------------
// Maximum of 2 half floats.
class MaxHFNode : public MaxNode {
class MaxHFNode : public MinMaxNode {
public:
  MaxHFNode(Node* in1, Node* in2) : MaxNode(in1, in2) {}
  MaxHFNode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
  virtual int Opcode() const;
  virtual const Type* add_ring(const Type*, const Type*) const;
  virtual const Type* add_id() const { return TypeH::NEG_INF; }
@ -486,9 +488,9 @@ public:

//------------------------------MinHFNode---------------------------------------
// Minimum of 2 half floats.
class MinHFNode : public MaxNode {
class MinHFNode : public MinMaxNode {
public:
  MinHFNode(Node* in1, Node* in2) : MaxNode(in1, in2) {}
  MinHFNode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
  virtual int Opcode() const;
  virtual const Type* add_ring(const Type*, const Type*) const;
  virtual const Type* add_id() const { return TypeH::POS_INF; }
@ -500,9 +502,9 @@ public:

//------------------------------MaxDNode---------------------------------------
// Maximum of 2 doubles.
class MaxDNode : public MaxNode {
class MaxDNode : public MinMaxNode {
public:
  MaxDNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
  MaxDNode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
  virtual int Opcode() const;
  virtual const Type *add_ring(const Type*, const Type*) const;
  virtual const Type *add_id() const { return TypeD::NEG_INF; }
@ -514,9 +516,9 @@ public:

//------------------------------MinDNode---------------------------------------
// Minimum of 2 doubles.
class MinDNode : public MaxNode {
class MinDNode : public MinMaxNode {
public:
  MinDNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
  MinDNode(Node* in1, Node* in2) : MinMaxNode(in1, in2) {}
  virtual int Opcode() const;
  virtual const Type *add_ring(const Type*, const Type*) const;
  virtual const Type *add_id() const { return TypeD::POS_INF; }
@ -1489,8 +1489,7 @@ Node* GraphKit::must_be_not_null(Node* value, bool do_replace_in_map) {
  }
  Node *if_f = _gvn.transform(new IfFalseNode(iff));
  Node *frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr));
  Node* halt = _gvn.transform(new HaltNode(if_f, frame, "unexpected null in intrinsic"));
  C->root()->add_req(halt);
  halt(if_f, frame, "unexpected null in intrinsic");
  Node *if_t = _gvn.transform(new IfTrueNode(iff));
  set_control(if_t);
  return cast_not_null(value, do_replace_in_map);
@ -2073,6 +2072,12 @@ void GraphKit::increment_counter(Node* counter_addr) {
  store_to_memory(ctrl, counter_addr, incr, T_LONG, MemNode::unordered);
}

void GraphKit::halt(Node* ctrl, Node* frameptr, const char* reason, bool generate_code_in_product) {
  Node* halt = new HaltNode(ctrl, frameptr, reason
                            PRODUCT_ONLY(COMMA generate_code_in_product));
  halt = _gvn.transform(halt);
  root()->add_req(halt);
}

//------------------------------uncommon_trap----------------------------------
// Bail out to the interpreter in mid-method. Implemented by calling the
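A rough sketch of the shape of this refactoring, using invented names (Graph, Node, root_inputs) rather than the real GraphKit API: the repeated build/transform/attach-to-root sequence moves into one helper with a defaulted flag, so existing call sites stay one-liners and only the uncommon-trap path passes false:

#include <memory>
#include <string>
#include <vector>

struct Node {
  std::string reason;
  bool emit_in_product;
};

struct Graph {
  std::vector<std::unique_ptr<Node>> root_inputs;

  // The extracted helper: the one place that knows the full idiom.
  void halt(const std::string& reason, bool generate_code_in_product = true) {
    root_inputs.push_back(std::make_unique<Node>(Node{reason, generate_code_in_product}));
  }
};

int main() {
  Graph g;
  g.halt("unexpected null in intrinsic");                       // default: emit everywhere
  g.halt("uncommon trap returned which should never happen",
         /*generate_code_in_product=*/false);                   // debug-only emission
  return (g.root_inputs.size() == 2) ? 0 : 1;
}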
@ -2195,11 +2200,15 @@ Node* GraphKit::uncommon_trap(int trap_request,
  // The debug info is the only real input to this call.

  // Halt-and-catch fire here. The above call should never return!
  HaltNode* halt = new HaltNode(control(), frameptr(), "uncommon trap returned which should never happen"
                                PRODUCT_ONLY(COMMA /*reachable*/false));
  _gvn.set_type_bottom(halt);
  root()->add_req(halt);

  // We only emit code for the HaltNode in debug, which is enough for
  // verifying correctness. In product, we don't want to emit it so
  // that we can save on code space. HaltNodes often get folded because
  // the compiler can prove that the unreachable path is dead. But we
  // cannot generally expect that for uncommon traps, which are often
  // reachable and occasionally taken.
  halt(control(), frameptr(),
       "uncommon trap returned which should never happen",
       false /* don't emit code in product */);
  stop_and_kill_map();
  return call;
}
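A hedged sketch of the PRODUCT_ONLY(COMMA ...) idiom used above, with simplified stand-in macros rather than the real HotSpot definitions: the extra argument, and the early return it controls, exist only in product builds, while debug builds always emit the halt code:

#include <cstdio>

#ifdef PRODUCT
  #define PRODUCT_ARG(x) , x      // product builds pass the extra argument
#else
  #define PRODUCT_ARG(x)          // debug builds drop it; code is always emitted
#endif

void emit_halt(const char* reason
#ifdef PRODUCT
               , bool generate_code
#endif
               ) {
#ifdef PRODUCT
  if (!generate_code) { return; } // save code space in product
#endif
  std::printf("halt: %s\n", reason);
}

int main() {
  emit_halt("should never happen" PRODUCT_ARG(false));
  return 0;
}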
@ -709,6 +709,8 @@ class GraphKit : public Phase {
  void increment_counter(address counter_addr); // increment a debug counter
  void increment_counter(Node* counter_addr);   // increment a debug counter

  void halt(Node* ctrl, Node* frameptr, const char* reason, bool generate_code_in_product = true);

  // Bail out to the interpreter right now
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
@ -979,9 +979,9 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {

  Node* inner_iters_max = nullptr;
  if (stride_con > 0) {
    inner_iters_max = MaxNode::max_diff_with_zero(limit, outer_phi, TypeInteger::bottom(bt), _igvn);
    inner_iters_max = MinMaxNode::max_diff_with_zero(limit, outer_phi, TypeInteger::bottom(bt), _igvn);
  } else {
    inner_iters_max = MaxNode::max_diff_with_zero(outer_phi, limit, TypeInteger::bottom(bt), _igvn);
    inner_iters_max = MinMaxNode::max_diff_with_zero(outer_phi, limit, TypeInteger::bottom(bt), _igvn);
  }

  Node* inner_iters_limit = _igvn.integercon(iters_limit, bt);
@ -989,7 +989,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
  // Long.MIN_VALUE to Long.MAX_VALUE for instance). Use an unsigned
  // min.
  const TypeInteger* inner_iters_actual_range = TypeInteger::make(0, iters_limit, Type::WidenMin, bt);
  Node* inner_iters_actual = MaxNode::unsigned_min(inner_iters_max, inner_iters_limit, inner_iters_actual_range, _igvn);
  Node* inner_iters_actual = MinMaxNode::unsigned_min(inner_iters_max, inner_iters_limit, inner_iters_actual_range, _igvn);

  Node* inner_iters_actual_int;
  if (bt == T_LONG) {
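The same arithmetic as the hunk above, sketched on plain 64-bit scalars instead of IR nodes (the function names here are invented for the illustration): the distance limit - outer_phi can exceed the positive signed range, so the clamp against iters_limit has to be an unsigned min:

#include <cassert>
#include <cstdint>

uint64_t max_diff_with_zero(int64_t limit, int64_t phi) {
  // max(limit - phi, 0), with the subtraction carried out as an unsigned quantity
  return (limit > phi) ? (uint64_t)limit - (uint64_t)phi : 0;
}

int64_t inner_iters(int64_t limit, int64_t outer_phi, int64_t iters_limit) {
  uint64_t diff = max_diff_with_zero(limit, outer_phi);
  uint64_t actual = diff < (uint64_t)iters_limit ? diff : (uint64_t)iters_limit; // unsigned min
  return (int64_t)actual;  // always in [0, iters_limit], so the narrowing is safe
}

int main() {
  assert(inner_iters(1000, 0, 10000) == 1000);                // short loop: run it in one strip
  assert(inner_iters(INT64_MAX, INT64_MIN, 10000) == 10000);  // huge range: clamp to the strip size
  return 0;
}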
@ -1618,7 +1618,7 @@ void PhaseIdealLoop::transform_long_range_checks(int stride_con, const Node_List
    Node* max_jint_plus_one_long = longcon((jlong)max_jint + 1);
    Node* max_range = new AddLNode(max_jint_plus_one_long, L);
    register_new_node(max_range, entry_control);
    R = MaxNode::unsigned_min(R, max_range, TypeLong::POS, _igvn);
    R = MinMaxNode::unsigned_min(R, max_range, TypeLong::POS, _igvn);
    set_subtree_ctrl(R, true);
  }

@ -1717,9 +1717,9 @@ void PhaseIdealLoop::transform_long_range_checks(int stride_con, const Node_List
}

Node* PhaseIdealLoop::clamp(Node* R, Node* L, Node* H) {
  Node* min = MaxNode::signed_min(R, H, TypeLong::LONG, _igvn);
  Node* min = MinMaxNode::signed_min(R, H, TypeLong::LONG, _igvn);
  set_subtree_ctrl(min, true);
  Node* max = MaxNode::signed_max(L, min, TypeLong::LONG, _igvn);
  Node* max = MinMaxNode::signed_max(L, min, TypeLong::LONG, _igvn);
  set_subtree_ctrl(max, true);
  return max;
}
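On plain values, the IR built by PhaseIdealLoop::clamp is the usual clamp-into-[L, H] pattern, max(L, min(R, H)); a minimal sketch with an invented clamp_value helper:

#include <algorithm>
#include <cassert>

long clamp_value(long R, long L, long H) {
  return std::max(L, std::min(R, H));
}

int main() {
  assert(clamp_value(5, 0, 10) == 5);    // already inside the range
  assert(clamp_value(-3, 0, 10) == 0);   // raised to the lower bound
  assert(clamp_value(42, 0, 10) == 10);  // lowered to the upper bound
  return 0;
}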
@ -3485,14 +3485,14 @@ void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) {
    // the loop body to be run for LoopStripMiningIter.
    Node* max = nullptr;
    if (stride > 0) {
      max = MaxNode::max_diff_with_zero(limit, iv_phi, TypeInt::INT, *igvn);
      max = MinMaxNode::max_diff_with_zero(limit, iv_phi, TypeInt::INT, *igvn);
    } else {
      max = MaxNode::max_diff_with_zero(iv_phi, limit, TypeInt::INT, *igvn);
      max = MinMaxNode::max_diff_with_zero(iv_phi, limit, TypeInt::INT, *igvn);
    }
    // sub is positive and can be larger than the max signed int
    // value. Use an unsigned min.
    Node* const_iters = igvn->intcon(scaled_iters);
    Node* min = MaxNode::unsigned_min(max, const_iters, TypeInt::make(0, scaled_iters, Type::WidenMin), *igvn);
    Node* min = MinMaxNode::unsigned_min(max, const_iters, TypeInt::make(0, scaled_iters, Type::WidenMin), *igvn);
    // min is the number of iterations for the next inner loop execution:
    // unsigned_min(max(limit - iv_phi, 0), scaled_iters) if stride > 0
    // unsigned_min(max(iv_phi - limit, 0), scaled_iters) if stride < 0
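A short illustration of why the comment above insists on an unsigned min: with 32-bit values the positive distance limit - iv_phi can wrap to a negative signed number, and a signed min would then pick the wrong operand. The concrete values below are chosen for the sketch only:

#include <cassert>
#include <cstdint>

int main() {
  int32_t limit = INT32_MAX;
  int32_t iv_phi = INT32_MIN;
  int32_t scaled_iters = 1000;

  uint32_t diff = (uint32_t)limit - (uint32_t)iv_phi;  // 2^32 - 1, the true distance
  int32_t as_signed = (int32_t)diff;                   // wraps to -1 on two's-complement targets

  assert(as_signed < scaled_iters);                    // a signed min would pick -1: wrong
  uint32_t clamped = diff < (uint32_t)scaled_iters ? diff : (uint32_t)scaled_iters;
  assert(clamped == (uint32_t)scaled_iters);           // the unsigned min picks 1000: right
  return 0;
}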
@ -2577,11 +2577,11 @@ void PhaseMacroExpand::eliminate_opaque_looplimit_macro_nodes() {
      // a CMoveL construct now. At least until here, the type could be computed
      // precisely. CMoveL is not so smart, but we can give it at least the best
      // type we know about n now.
      Node* repl = MaxNode::signed_max(n->in(1), n->in(2), _igvn.type(n), _igvn);
      Node* repl = MinMaxNode::signed_max(n->in(1), n->in(2), _igvn.type(n), _igvn);
      _igvn.replace_node(n, repl);
      success = true;
    } else if (n->Opcode() == Op_MinL) {
      Node* repl = MaxNode::signed_min(n->in(1), n->in(2), _igvn.type(n), _igvn);
      Node* repl = MinMaxNode::signed_min(n->in(1), n->in(2), _igvn.type(n), _igvn);
      _igvn.replace_node(n, repl);
      success = true;
    }
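As the comment in this hunk notes, the MaxL/MinL macro nodes are replaced by a CMoveL construct, i.e. a compare feeding a conditional select. On plain longs the replacement computes the same thing as a ternary; a minimal sketch with invented helper names:

#include <cassert>

long select_max(long a, long b) { return (a > b) ? a : b; } // compare + conditional move
long select_min(long a, long b) { return (a < b) ? a : b; }

int main() {
  assert(select_max(3L, 9L) == 9L);
  assert(select_min(-4L, 2L) == -4L);
  return 0;
}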