8373595: A new ObjectMonitorTable implementation

Fredrik Bredberg 2026-01-16 17:30:01 +01:00
parent b7346c307f
commit d7411cf6f9
38 changed files with 853 additions and 446 deletions

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,7 @@
#include "opto/output.hpp"
#include "opto/subnode.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
@ -221,37 +222,54 @@ void C2_MacroAssembler::fast_lock(Register obj, Register box, Register t1,
if (!UseObjectMonitorTable) {
assert(t1_monitor == t1_mark, "should be the same here");
} else {
const Register t1_hash = t1;
Label monitor_found;
// Save the mark, we might need it to extract the hash.
mov(rscratch2, t1_mark);
// Look for the monitor in the om_cache.
// Load cache address
lea(t3_t, Address(rthread, JavaThread::om_cache_oops_offset()));
const int num_unrolled = 2;
const int num_unrolled = OMCache::CAPACITY;
for (int i = 0; i < num_unrolled; i++) {
ldr(t1, Address(t3_t));
cmp(obj, t1);
ldr(t2, Address(t3_t));
ldr(t1_monitor, Address(t3_t, OMCache::oop_to_monitor_difference()));
cmp(obj, t2);
br(Assembler::EQ, monitor_found);
increment(t3_t, in_bytes(OMCache::oop_to_oop_difference()));
}
Label loop;
// Look for the monitor in the table.
// Search for obj in cache.
bind(loop);
// Get the hash code.
ubfx(t1_hash, rscratch2, markWord::hash_shift, markWord::hash_bits);
// Check for match.
ldr(t1, Address(t3_t));
cmp(obj, t1);
br(Assembler::EQ, monitor_found);
// Get the table and calculate the bucket's address
lea(t3, ExternalAddress(ObjectMonitorTable::current_table_address()));
ldr(t3, Address(t3));
ldr(t2, Address(t3, ObjectMonitorTable::table_capacity_mask_offset()));
ands(t1_hash, t1_hash, t2);
ldr(t3, Address(t3, ObjectMonitorTable::table_buckets_offset()));
// Search until null encountered, guaranteed _null_sentinel at end.
increment(t3_t, in_bytes(OMCache::oop_to_oop_difference()));
cbnz(t1, loop);
// Cache Miss, NE set from cmp above, cbnz does not set flags
b(slow_path);
// Read the monitor from the bucket.
lsl(t1_hash, t1_hash, LogBytesPerWord);
ldr(t1_monitor, Address(t3, t1_hash));
// Check if the monitor in the bucket is special (empty, tombstone or removed).
cmp(t1_monitor, (unsigned char)ObjectMonitorTable::SpecialPointerValues::below_is_special);
br(Assembler::LT, slow_path);
// Check if object matches.
ldr(t3, Address(t1_monitor, ObjectMonitor::object_offset()));
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->try_resolve_weak_handle_in_c2(this, t3, t2, slow_path);
cmp(t3, obj);
br(Assembler::NE, slow_path);
bind(monitor_found);
ldr(t1_monitor, Address(t3_t, OMCache::oop_to_monitor_difference()));
}
const Register t2_owner_addr = t2;
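For orientation, here is a hedged standalone C++ sketch (not HotSpot code; fast_lookup and resolve_object are illustrative names) of the fast path this stub emits — the same shape recurs in the ppc, riscv, s390 and x86 hunks below. Note that the stub probes only the home bucket: any special value or object mismatch falls through to the runtime slow path.

// Standalone sketch (not HotSpot code): the single-bucket probe the stub
// above emits. 'resolve_object' stands in for the ObjectMonitor::object_offset()
// load combined with the GC-specific try_resolve_weak_handle_in_c2 hook.
#include <cstdint>
#include <cstddef>

struct ObjectMonitor;  // opaque here

struct Table {
  size_t capacity_mask;     // capacity - 1; capacity is a power of two
  ObjectMonitor** buckets;  // the payload
};

// Bucket values below 3 are special: 0 = empty, 1 = tombstone, 2 = removed.
constexpr uintptr_t below_is_special = 3;

ObjectMonitor* fast_lookup(Table* t, void* obj, uintptr_t hash,
                           void* (*resolve_object)(ObjectMonitor*)) {
  ObjectMonitor* m = t->buckets[hash & t->capacity_mask];
  if (reinterpret_cast<uintptr_t>(m) < below_is_special) {
    return nullptr;  // empty/tombstone/removed: take the slow path
  }
  if (resolve_object(m) != obj) {
    return nullptr;  // bucket holds a different object: take the slow path
  }
  return m;          // table hit
}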

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -441,6 +441,11 @@ OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Na
return opto_reg;
}
void BarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
// Load the oop from the weak handle.
__ ldr(obj, Address(obj));
}
#undef __
#define __ _masm->

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -135,6 +135,7 @@ public:
OptoReg::Name opto_reg);
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
#endif // COMPILER2
};

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -446,6 +447,30 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
__ bind(done);
}
#ifdef COMPILER2
void ShenandoahBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj,
Register tmp, Label& slow_path) {
assert_different_registers(obj, tmp);
Label done;
// Resolve weak handle using the standard implementation.
BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);
// Check if the reference is null, and if it is, take the fast path.
__ cbz(obj, done);
Address gc_state(rthread, ShenandoahThreadLocalData::gc_state_offset());
__ lea(tmp, gc_state);
__ ldrb(tmp, Address(tmp));
// Check if the heap is under weak-reference/roots processing, in
// which case we need to take the slow path.
__ tbnz(tmp, ShenandoahHeap::WEAK_ROOTS_BITPOS, slow_path);
__ bind(done);
}
#endif
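In plain C++ terms, the Shenandoah variant above reduces to the following hedged standalone sketch (illustrative names; the WEAK_ROOTS bit value is an assumption): a null referent needs no barrier, since the caller's subsequent object comparison fails anyway, while a non-null referent is only trusted when the collector is not processing weak roots.

// Standalone sketch (not HotSpot code) of the Shenandoah decision above.
#include <cstdint>

// Assumption: WEAK_ROOTS models ShenandoahHeap::WEAK_ROOTS; the actual bit
// value is illustrative only.
constexpr uint8_t WEAK_ROOTS = 1u << 4;

bool weak_referent_usable(const void* referent, uint8_t gc_state) {
  if (referent == nullptr) {
    return true;  // null never needs the barrier; caller sees a mismatch
  }
  return (gc_state & WEAK_ROOTS) == 0;  // weak-root processing: slow path
}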
// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. The service is more complex than a
// traditional CAS operation because the CAS operation is intended to

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -86,6 +87,9 @@ public:
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
#ifdef COMPILER2
virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
#endif
void cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
bool acquire, bool release, bool is_cae, Register result);
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1326,6 +1326,23 @@ void ZStoreBarrierStubC2Aarch64::emit_code(MacroAssembler& masm) {
register_stub(this);
}
#undef __
#define __ masm->
void ZBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
// Resolve weak handle using the standard implementation.
BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);
// Check if the oop is bad, in which case we need to take the slow path.
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadBeforeMov);
__ movzw(tmp, barrier_Relocation::unpatched);
__ tst(obj, tmp);
__ br(Assembler::NE, slow_path);
// Oop is okay, so we uncolor it.
__ lsr(obj, obj, ZPointerLoadShift);
}
#undef __
#endif // COMPILER2
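Roughly, the ZGC variant amounts to this hedged standalone sketch (illustrative names; bad_mask and load_shift stand in for the GC-maintained mask patched into the movzw above and for ZPointerLoadShift):

// Standalone sketch (not HotSpot code) of the ZGC colored-pointer check.
#include <cstdint>

void* uncolor_or_null(uintptr_t colored, uintptr_t bad_mask,
                      unsigned load_shift) {
  if ((colored & bad_mask) != 0) {
    return nullptr;  // bad color bits: caller takes the slow path
  }
  // Good pointer: shift out the color bits to get the raw oop.
  return reinterpret_cast<void*>(colored >> load_shift);
}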

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -191,6 +191,7 @@ public:
ZLoadBarrierStubC2* stub) const;
void generate_c2_store_barrier_stub(MacroAssembler* masm,
ZStoreBarrierStubC2* stub) const;
void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
#endif // COMPILER2
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -275,6 +275,11 @@ OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Na
return opto_reg;
}
void BarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
// Load the oop from the weak handle.
__ ld(obj, 0, obj);
}
#undef __
#define __ _masm->

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -81,6 +81,8 @@ public:
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const;
virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj,
Register tmp, Label& slow_path);
#endif // COMPILER2
};

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2025, Red Hat, Inc. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -649,6 +650,33 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
__ block_comment("} try_resolve_jobject_in_native (shenandoahgc)");
}
#ifdef COMPILER2
void ShenandoahBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler *masm, Register obj,
Register tmp, Label &slow_path) {
__ block_comment("try_resolve_weak_handle_in_c2 (shenandoahgc) {");
assert_different_registers(obj, tmp);
Label done;
// Resolve weak handle using the standard implementation.
BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);
// Check if the reference is null, and if it is, take the fast path.
__ cmpdi(CR0, obj, 0);
__ beq(CR0, done);
// Check if the heap is under weak-reference/roots processing, in
// which case we need to take the slow path.
__ lbz(tmp, in_bytes(ShenandoahThreadLocalData::gc_state_offset()), R16_thread);
__ andi_(tmp, tmp, ShenandoahHeap::WEAK_ROOTS);
__ bne(CR0, slow_path);
__ bind(done);
__ block_comment("} try_resolve_weak_handle_in_c2 (shenandoahgc)");
}
#endif
// Special shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. That is, the CAS operation is intended to succeed in
// the following scenarios (success criteria):

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
* Copyright (c) 2012, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -121,6 +122,9 @@ public:
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
Register obj, Register tmp, Label& slowpath);
#ifdef COMPILER2
virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
#endif
};
#endif // CPU_PPC_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_PPC_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -950,6 +950,19 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
__ b(*stub->continuation());
}
void ZBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
// Resolve weak handle using the standard implementation.
BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);
// Check if the oop is bad, in which case we need to take the slow path.
__ ld(tmp, in_bytes(ZThreadLocalData::mark_bad_mask_offset()), R16_thread);
__ and_(tmp, obj, tmp);
__ bne(CR0, slow_path);
// Oop is okay, so we uncolor it.
__ srdi(obj, obj, ZPointerLoadShift);
}
#undef __
#endif // COMPILER2

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -108,6 +108,8 @@ public:
void generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const;
void generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const;
void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
#endif // COMPILER2
void store_barrier_fast(MacroAssembler* masm,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -2756,39 +2756,57 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
addi(owner_addr, mark, in_bytes(ObjectMonitor::owner_offset()) - monitor_tag);
mark = noreg;
} else {
const Register cache_addr = tmp3;
const Register tmp3_bucket = tmp3;
const Register tmp2_hash = tmp2;
Label monitor_found;
Register cache_addr = tmp2;
// Save the mark, we might need it to extract the hash.
mr(tmp2_hash, mark);
// Look for the monitor in the om_cache.
// Load cache address
addi(cache_addr, R16_thread, in_bytes(JavaThread::om_cache_oops_offset()));
const int num_unrolled = 2;
const int num_unrolled = OMCache::CAPACITY;
for (int i = 0; i < num_unrolled; i++) {
ld(R0, 0, cache_addr);
ld(monitor, in_bytes(OMCache::oop_to_monitor_difference()), cache_addr);
cmpd(CR0, R0, obj);
beq(CR0, monitor_found);
addi(cache_addr, cache_addr, in_bytes(OMCache::oop_to_oop_difference()));
}
Label loop;
// Look for the monitor in the table.
// Search for obj in cache.
bind(loop);
// Get the hash code.
srdi(tmp2_hash, tmp2_hash, markWord::hash_shift);
// Check for match.
ld(R0, 0, cache_addr);
cmpd(CR0, R0, obj);
beq(CR0, monitor_found);
// Get the table and calculate the bucket's address
load_const_optimized(tmp3, ObjectMonitorTable::current_table_address(), R0);
ld_ptr(tmp3, 0, tmp3);
ld(tmp1, in_bytes(ObjectMonitorTable::table_capacity_mask_offset()), tmp3);
andr(tmp2_hash, tmp2_hash, tmp1);
ld(tmp3, in_bytes(ObjectMonitorTable::table_buckets_offset()), tmp3);
sldi(tmp2_hash, tmp2_hash, LogBytesPerWord);
add(tmp3_bucket, tmp3, tmp2_hash);
// Search until null encountered, guaranteed _null_sentinel at end.
addi(cache_addr, cache_addr, in_bytes(OMCache::oop_to_oop_difference()));
cmpdi(CR1, R0, 0);
bne(CR1, loop);
// Cache Miss, CR0.NE set from cmp above
b(slow_path);
// Read the monitor from the bucket.
ld_ptr(monitor, 0, tmp3_bucket);
// Check if the monitor in the bucket is special (empty, tombstone or removed).
cmpdi(CR0, monitor, ObjectMonitorTable::SpecialPointerValues::below_is_special);
blt(CR0, slow_path);
// Check if object matches.
ld(tmp3, in_bytes(ObjectMonitor::object_offset()), monitor);
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->try_resolve_weak_handle_in_c2(this, tmp3, tmp2, slow_path);
cmpd(CR0, tmp3, obj);
bne(CR0, slow_path);
bind(monitor_found);
ld(monitor, in_bytes(OMCache::oop_to_monitor_difference()), cache_addr);
// Compute owner address.
addi(owner_addr, monitor, in_bytes(ObjectMonitor::owner_offset()));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -31,6 +31,7 @@
#include "opto/output.hpp"
#include "opto/subnode.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/globalDefinitions.hpp"
#ifdef PRODUCT
@ -123,35 +124,54 @@ void C2_MacroAssembler::fast_lock(Register obj, Register box,
if (!UseObjectMonitorTable) {
assert(tmp1_monitor == tmp1_mark, "should be the same here");
} else {
const Register tmp2_hash = tmp2;
const Register tmp3_bucket = tmp3;
Label monitor_found;
// Save the mark, we might need it to extract the hash.
mv(tmp2_hash, tmp1_mark);
// Look for the monitor in the om_cache.
// Load cache address
la(tmp3_t, Address(xthread, JavaThread::om_cache_oops_offset()));
const int num_unrolled = 2;
const int num_unrolled = OMCache::CAPACITY;
for (int i = 0; i < num_unrolled; i++) {
ld(tmp1, Address(tmp3_t));
beq(obj, tmp1, monitor_found);
ld(t0, Address(tmp3_t));
ld(tmp1_monitor, Address(tmp3_t, OMCache::oop_to_monitor_difference()));
beq(obj, t0, monitor_found);
add(tmp3_t, tmp3_t, in_bytes(OMCache::oop_to_oop_difference()));
}
Label loop;
// Look for the monitor in the table.
// Search for obj in cache.
bind(loop);
// Get the hash code.
srli(tmp2_hash, tmp2_hash, markWord::hash_shift);
// Check for match.
ld(tmp1, Address(tmp3_t));
beq(obj, tmp1, monitor_found);
// Get the table and calculate the bucket's address.
la(tmp3_t, ExternalAddress(ObjectMonitorTable::current_table_address()));
ld(tmp3_t, Address(tmp3_t));
ld(tmp1, Address(tmp3_t, ObjectMonitorTable::table_capacity_mask_offset()));
andr(tmp2_hash, tmp2_hash, tmp1);
ld(tmp3_t, Address(tmp3_t, ObjectMonitorTable::table_buckets_offset()));
slli(tmp2_hash, tmp2_hash, LogBytesPerWord);
add(tmp3_bucket, tmp3_t, tmp2_hash);
// Search until null encountered, guaranteed _null_sentinel at end.
add(tmp3_t, tmp3_t, in_bytes(OMCache::oop_to_oop_difference()));
bnez(tmp1, loop);
// Cache Miss. Take the slowpath.
j(slow_path);
// Read the monitor from the bucket.
ld(tmp1_monitor, Address(tmp3_bucket));
// Check if the monitor in the bucket is special (empty, tombstone or removed).
li(tmp2, ObjectMonitorTable::SpecialPointerValues::below_is_special);
bltu(tmp1_monitor, tmp2, slow_path);
// Check if object matches.
ld(tmp3, Address(tmp1_monitor, ObjectMonitor::object_offset()));
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->try_resolve_weak_handle_in_c2(this, tmp3, tmp2, slow_path);
bne(tmp3, obj, slow_path);
bind(monitor_found);
ld(tmp1_monitor, Address(tmp3_t, OMCache::oop_to_monitor_difference()));
}
const Register tmp2_owner_addr = tmp2;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -369,6 +369,11 @@ OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Na
return opto_reg;
}
void BarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
// Load the oop from the weak handle.
__ ld(obj, Address(obj));
}
#undef __
#define __ _masm->

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -110,6 +110,8 @@ public:
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj,
Register tmp, Label& slow_path);
#endif // COMPILER2
};

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -465,6 +466,29 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
__ bind(done);
}
#ifdef COMPILER2
void ShenandoahBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler *masm, Register obj,
Register tmp, Label& slow_path) {
Label done;
// Resolve weak handle using the standard implementation.
BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);
// Check if the reference is null, and if it is, take the fast path.
__ beqz(obj, done);
assert(obj != tmp, "need tmp");
Address gc_state(xthread, ShenandoahThreadLocalData::gc_state_offset());
__ lbu(tmp, gc_state);
// Check if the heap is under weak-reference/roots processing, in
// which case we need to take the slow path.
__ test_bit(tmp, tmp, ShenandoahHeap::WEAK_ROOTS_BITPOS);
__ bnez(tmp, slow_path);
__ bind(done);
}
#endif
// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. The service is more complex than a
// traditional CAS operation because the CAS operation is intended to

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -91,7 +92,9 @@ public:
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
#ifdef COMPILER2
virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path);
#endif
void cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
Assembler::Aqrl acquire, Assembler::Aqrl release, bool is_cae, Register result);
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -602,6 +602,25 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
}
#ifdef COMPILER2
void ZBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_weak_handle_in_c2 {");
// Resolve weak handle using the standard implementation.
BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);
// Check if the oop is bad, in which case we need to take the slow path.
__ ld(tmp, mark_bad_mask_from_thread(xthread));
__ andr(tmp, obj, tmp);
__ bnez(tmp, slow_path);
// Oop is okay, so we uncolor it.
__ srli(obj, obj, ZPointerLoadShift);
BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_weak_handle_in_c2");
}
#endif
static uint16_t patch_barrier_relocation_value(int format) {
switch (format) {
case ZBarrierRelocationFormatLoadBadMask:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -170,6 +170,10 @@ public:
ZLoadBarrierStubC2* stub) const;
void generate_c2_store_barrier_stub(MacroAssembler* masm,
ZStoreBarrierStubC2* stub) const;
void try_resolve_weak_handle_in_c2(MacroAssembler* masm,
Register obj,
Register tmp,
Label& slow_path);
#endif // COMPILER2
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -206,6 +206,11 @@ OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Na
return opto_reg;
}
void BarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Register tmp, Label& slow_path) {
// Load the oop from the weak handle.
__ z_lg(obj, Address(obj));
}
#undef __
#define __ _masm->

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -65,6 +65,8 @@ public:
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg) const;
virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj,
Register tmp, Label& slow_path);
#endif // COMPILER2
static const int OFFSET_TO_PATCHABLE_DATA_INSTRUCTION = 6 + 6 + 6; // iihf(6) + iilf(6) + lg(6)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* Copyright 2024 IBM Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -6372,45 +6372,58 @@ void MacroAssembler::compiler_fast_lock_object(Register obj, Register box, Regis
if (!UseObjectMonitorTable) {
assert(tmp1_monitor == mark, "should be the same here");
} else {
const Register cache_addr = tmp2;
const Register tmp1_bucket = tmp1;
const Register hash = Z_R0_scratch;
NearLabel monitor_found;
// Save the mark, we might need it to extract the hash.
z_lgr(hash, mark);
// Look for the monitor in the om_cache.
// load cache address
z_la(tmp1, Address(Z_thread, JavaThread::om_cache_oops_offset()));
z_la(cache_addr, Address(Z_thread, JavaThread::om_cache_oops_offset()));
const int num_unrolled = 2;
for (int i = 0; i < num_unrolled; i++) {
z_cg(obj, Address(tmp1));
z_lg(tmp1_monitor, Address(cache_addr, OMCache::oop_to_monitor_difference()));
z_cg(obj, Address(cache_addr));
z_bre(monitor_found);
add2reg(tmp1, in_bytes(OMCache::oop_to_oop_difference()));
add2reg(cache_addr, in_bytes(OMCache::oop_to_oop_difference()));
}
NearLabel loop;
// Search for obj in cache
// Get the hash code.
z_srlg(hash, hash, markWord::hash_shift);
bind(loop);
// Get the table and calculate the bucket's address.
load_const_optimized(tmp2, ObjectMonitorTable::current_table_address());
z_lg(tmp2, Address(tmp2));
z_lg(tmp1, Address(tmp2, ObjectMonitorTable::table_capacity_mask_offset()));
z_ngr(hash, tmp1);
z_lg(tmp1, Address(tmp2, ObjectMonitorTable::table_buckets_offset()));
z_sllg(hash, hash, LogBytesPerWord);
z_agr(tmp1_bucket, hash);
// check for match.
z_cg(obj, Address(tmp1));
z_bre(monitor_found);
// Read the monitor from the bucket.
z_lg(tmp1_monitor, Address(tmp1_bucket));
// search until null encountered, guaranteed _null_sentinel at end.
add2reg(tmp1, in_bytes(OMCache::oop_to_oop_difference()));
z_cghsi(0, tmp1, 0);
z_brne(loop); // if not EQ to 0, go for another loop
// Check if the monitor in the bucket is special (empty, tombstone or removed).
z_cghi(tmp1_monitor, ObjectMonitorTable::SpecialPointerValues::below_is_special);
z_brl(slow_path);
// we reached the end, cache miss
z_ltgr(obj, obj); // set CC to NE
z_bru(slow_path);
// Check if object matches.
z_lg(tmp2, Address(tmp1_monitor, ObjectMonitor::object_offset()));
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->try_resolve_weak_handle_in_c2(this, tmp2, Z_R0_scratch, slow_path);
z_cgr(obj, tmp2);
z_brne(slow_path);
// cache hit
bind(monitor_found);
z_lg(tmp1_monitor, Address(tmp1, OMCache::oop_to_monitor_difference()));
}
NearLabel monitor_locked;
// lock the monitor
// mark contains the tagged ObjectMonitor*.
const Register tagged_monitor = mark;
const Register zero = tmp2;
const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast<int>(markWord::monitor_value));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,7 @@
#include "opto/opcodes.hpp"
#include "opto/subnode.hpp"
#include "runtime/globals.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
@ -217,7 +218,6 @@ inline Assembler::AvxVectorLen C2_MacroAssembler::vector_length_encoding(int vle
// In the case of failure, the node will branch directly to the
// FailureLabel
// obj: object to lock
// box: on-stack box address -- KILLED
// rax: tmp -- KILLED
@ -286,7 +286,7 @@ void C2_MacroAssembler::fast_lock(Register obj, Register box, Register rax_reg,
// After successful lock, push object on lock-stack.
movptr(Address(thread, top), obj);
addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
jmpb(locked);
jmp(locked);
}
{ // Handle inflated monitor.
@ -297,38 +297,50 @@ void C2_MacroAssembler::fast_lock(Register obj, Register box, Register rax_reg,
if (!UseObjectMonitorTable) {
assert(mark == monitor, "should be the same here");
} else {
// Uses ObjectMonitorTable. Look for the monitor in the om_cache.
// Fetch ObjectMonitor* from the cache or take the slow-path.
const Register hash = t;
Label monitor_found;
// Load cache address
lea(t, Address(thread, JavaThread::om_cache_oops_offset()));
// Look for the monitor in the om_cache.
const int num_unrolled = 2;
// Load cache address
lea(rax_reg, Address(thread, JavaThread::om_cache_oops_offset()));
const int num_unrolled = OMCache::CAPACITY;
for (int i = 0; i < num_unrolled; i++) {
cmpptr(obj, Address(t));
movptr(monitor, Address(rax_reg, OMCache::oop_to_monitor_difference()));
cmpptr(obj, Address(rax_reg));
jccb(Assembler::equal, monitor_found);
increment(t, in_bytes(OMCache::oop_to_oop_difference()));
increment(rax_reg, in_bytes(OMCache::oop_to_oop_difference()));
}
Label loop;
// Look for the monitor in the table.
// Search for obj in cache.
bind(loop);
// Get the hash code.
movptr(hash, Address(obj, oopDesc::mark_offset_in_bytes()));
shrq(hash, markWord::hash_shift);
andq(hash, markWord::hash_mask);
// Check for match.
cmpptr(obj, Address(t));
jccb(Assembler::equal, monitor_found);
// Get the table and calculate the bucket's address.
lea(rax_reg, ExternalAddress(ObjectMonitorTable::current_table_address()));
movptr(rax_reg, Address(rax_reg));
andq(hash, Address(rax_reg, ObjectMonitorTable::table_capacity_mask_offset()));
movptr(rax_reg, Address(rax_reg, ObjectMonitorTable::table_buckets_offset()));
// Search until null encountered, guaranteed _null_sentinel at end.
cmpptr(Address(t), 1);
jcc(Assembler::below, slow_path); // 0 check, but with ZF=0 when *t == 0
increment(t, in_bytes(OMCache::oop_to_oop_difference()));
jmpb(loop);
// Read the monitor from the bucket.
movptr(monitor, Address(rax_reg, hash, Address::times_ptr));
// Check if the monitor in the bucket is special (empty, tombstone or removed)
cmpptr(monitor, ObjectMonitorTable::SpecialPointerValues::below_is_special);
jcc(Assembler::below, slow_path);
// Check if object matches.
movptr(rax_reg, Address(monitor, ObjectMonitor::object_offset()));
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->try_resolve_weak_handle_in_c2(this, rax_reg, slow_path);
cmpptr(rax_reg, obj);
jcc(Assembler::notEqual, slow_path);
// Cache hit.
bind(monitor_found);
movptr(monitor, Address(t, OMCache::oop_to_monitor_difference()));
}
const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast<int>(markWord::monitor_value));
const Address recursions_address(monitor, ObjectMonitor::recursions_offset() - monitor_tag);
@ -487,14 +499,14 @@ void C2_MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register t,
cmpl(top, in_bytes(JavaThread::lock_stack_base_offset()));
jcc(Assembler::below, check_done);
cmpptr(obj, Address(thread, top));
jccb(Assembler::notEqual, inflated_check_lock_stack);
jcc(Assembler::notEqual, inflated_check_lock_stack);
stop("Fast Unlock lock on stack");
bind(check_done);
if (UseObjectMonitorTable) {
movptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
}
testptr(mark, markWord::monitor_value);
jccb(Assembler::notZero, inflated);
jcc(Assembler::notZero, inflated);
stop("Fast Unlock not monitor");
#endif
@ -519,7 +531,7 @@ void C2_MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register t,
// Check if recursive.
cmpptr(recursions_address, 0);
jccb(Assembler::notZero, recursive);
jcc(Assembler::notZero, recursive);
// Set owner to null.
// Release to satisfy the JMM
@ -530,11 +542,11 @@ void C2_MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register t,
// Check if the entry_list is empty.
cmpptr(entry_list_address, NULL_WORD);
jccb(Assembler::zero, unlocked); // If so we are done.
jcc(Assembler::zero, unlocked); // If so we are done.
// Check if there is a successor.
cmpptr(succ_address, NULL_WORD);
jccb(Assembler::notZero, unlocked); // If so we are done.
jcc(Assembler::notZero, unlocked); // If so we are done.
// Save the monitor pointer in the current thread, so we can try to
// reacquire the lock in SharedRuntime::monitor_exit_helper().

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -395,6 +395,11 @@ OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Na
extern void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);
void BarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Label& slowpath) {
// Load the oop from the weak handle.
__ movptr(obj, Address(obj));
}
#undef __
#define __ _masm->

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -109,6 +109,8 @@ public:
#ifdef COMPILER2
OptoReg::Name refine_register(const Node* node,
OptoReg::Name opto_reg);
virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Label& slowpath);
#endif // COMPILER2
};

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -629,6 +630,27 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
__ bind(done);
}
#ifdef COMPILER2
void ShenandoahBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Label& slowpath) {
Label done;
// Resolve weak handle using the standard implementation.
BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, slowpath);
// Check if the reference is null, and if it is, take the fast path.
__ testptr(obj, obj);
__ jcc(Assembler::zero, done);
Address gc_state(r15_thread, ShenandoahThreadLocalData::gc_state_offset());
// Check if the heap is under weak-reference/roots processing, in
// which case we need to take the slow path.
__ testb(gc_state, ShenandoahHeap::WEAK_ROOTS);
__ jccb(Assembler::notZero, slowpath);
__ bind(done);
}
#endif // COMPILER2
// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -84,6 +85,9 @@ public:
Address dst, Register val, Register tmp1, Register tmp2, Register tmp3);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
#ifdef COMPILER2
virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Label& slowpath);
#endif // COMPILER2
};
#endif // CPU_X86_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_X86_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1328,6 +1328,19 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
__ jmp(slow_continuation);
}
void ZBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Label& slow_path) {
// Resolve weak handle using the standard implementation.
BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, slow_path);
// Check if the oop is bad, in which case we need to take the slow path.
__ testptr(obj, Address(r15_thread, ZThreadLocalData::mark_bad_mask_offset()));
__ jcc(Assembler::notZero, slow_path);
// Oop is okay, so we uncolor it.
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl);
__ shrq(obj, barrier_Relocation::unpatched);
}
#undef __
#endif // COMPILER2

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -167,6 +167,8 @@ public:
ZLoadBarrierStubC2* stub) const;
void generate_c2_store_barrier_stub(MacroAssembler* masm,
ZStoreBarrierStubC2* stub) const;
virtual void try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj, Label& slow_path);
#endif // COMPILER2
void store_barrier_fast(MacroAssembler* masm,

View File

@ -1,7 +1,7 @@
/*
* Copyright (c) 2022, Red Hat, Inc. All rights reserved.
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -132,7 +132,7 @@ class LockStack {
class OMCache {
friend class VMStructs;
public:
static constexpr int CAPACITY = 8;
static constexpr int CAPACITY = 2;
private:
struct OMCacheEntry {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -218,6 +218,7 @@ class ObjectMonitor : public CHeapObj<mtObjectMonitor> {
static int Knob_SpinLimit;
static ByteSize object_offset() { return byte_offset_of(ObjectMonitor, _object); }
static ByteSize metadata_offset() { return byte_offset_of(ObjectMonitor, _metadata); }
static ByteSize owner_offset() { return byte_offset_of(ObjectMonitor, _owner); }
static ByteSize recursions_offset() { return byte_offset_of(ObjectMonitor, _recursions); }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,272 +37,410 @@
// -----------------------------------------------------------------------------
// ConcurrentHashTable storing links from objects to ObjectMonitors
using ConcurrentTable = ConcurrentHashTable<ObjectMonitorTableConfig, mtObjectMonitor>;
ObjectMonitorTable::Table* volatile ObjectMonitorTable::_curr;
static ConcurrentTable* _table = nullptr;
static volatile size_t _items_count = 0;
static size_t _table_size = 0;
static volatile bool _resize = false;
class ObjectMonitorTable::Table : public CHeapObj<mtObjectMonitor> {
friend class ObjectMonitorTable;
class ObjectMonitorTableConfig : public AllStatic {
public:
using Value = ObjectMonitor*;
static uintx get_hash(Value const& value, bool* is_dead) {
return (uintx)value->hash();
const size_t _capacity_mask; // One less than its power-of-two capacity
Table* volatile _prev; // Set while rehashing
ObjectMonitor* volatile* _buckets; // The payload
char _padding[DEFAULT_CACHE_LINE_SIZE];
volatile size_t _items_count;
static ObjectMonitor* tombstone() {
return (ObjectMonitor*)ObjectMonitorTable::SpecialPointerValues::tombstone;
}
static void* allocate_node(void* context, size_t size, Value const& value) {
ObjectMonitorTable::inc_items_count();
return AllocateHeap(size, mtObjectMonitor);
};
static void free_node(void* context, void* memory, Value const& value) {
ObjectMonitorTable::dec_items_count();
FreeHeap(memory);
static ObjectMonitor* removed_entry() {
return (ObjectMonitor*)ObjectMonitorTable::SpecialPointerValues::removed;
}
// Make sure we leave space for previous versions to relocate too.
bool try_inc_items_count() {
for (;;) {
size_t population = AtomicAccess::load(&_items_count);
if (should_grow(population)) {
return false;
}
if (AtomicAccess::cmpxchg(&_items_count, population, population + 1, memory_order_relaxed) == population) {
return true;
}
}
}
double get_load_factor(size_t count) {
return (double)count / (double)capacity();
}
void inc_items_count() {
AtomicAccess::inc(&_items_count, memory_order_relaxed);
}
void dec_items_count() {
AtomicAccess::dec(&_items_count, memory_order_relaxed);
}
public:
Table(size_t capacity, Table* prev)
: _capacity_mask(capacity - 1),
_prev(prev),
_buckets(NEW_C_HEAP_ARRAY(ObjectMonitor*, capacity, mtObjectMonitor)),
_items_count(0)
{
for (size_t i = 0; i < capacity; ++i) {
_buckets[i] = nullptr;
}
}
~Table() {
FREE_C_HEAP_ARRAY(ObjectMonitor*, _buckets);
}
Table* prev() {
return AtomicAccess::load(&_prev);
}
size_t capacity() {
return _capacity_mask + 1;
}
bool should_grow(size_t population) {
return get_load_factor(population) > GROW_LOAD_FACTOR;
}
bool should_grow() {
return should_grow(AtomicAccess::load(&_items_count));
}
ObjectMonitor* get(oop obj, int hash) {
// Acquire tombstones and relocations in case prev transitioned to null
Table* prev = AtomicAccess::load_acquire(&_prev);
if (prev != nullptr) {
ObjectMonitor* result = prev->get(obj, hash);
if (result != nullptr) {
return result;
}
}
const size_t start_index = size_t(hash) & _capacity_mask;
size_t index = start_index;
for (;;) {
ObjectMonitor* volatile* bucket = _buckets + index;
ObjectMonitor* monitor = AtomicAccess::load(bucket);
if (monitor == tombstone() || monitor == nullptr) {
// Not found
break;
}
if (monitor != removed_entry() && monitor->object_peek() == obj) {
// Found matching monitor.
OrderAccess::acquire();
return monitor;
}
index = (index + 1) & _capacity_mask;
if (index == start_index) {
// Not found - wrap around.
break;
}
}
// Rehashing could have started by now, but if a monitor has been inserted in a
// newer table, it was inserted after the get linearization point.
return nullptr;
}
ObjectMonitor* get_set(oop obj, ObjectMonitor* new_monitor, int hash) {
// Acquire any tombstones and relocations if prev transitioned to null.
Table* prev = AtomicAccess::load_acquire(&_prev);
if (prev != nullptr) {
ObjectMonitor* result = prev->get_set(obj, new_monitor, hash);
if (result != nullptr) {
return result;
}
}
const size_t start_index = size_t(hash) & _capacity_mask;
size_t index = start_index;
for (;;) {
ObjectMonitor* volatile* bucket = _buckets + index;
ObjectMonitor* monitor = AtomicAccess::load(bucket);
if (monitor == nullptr) {
// Empty slot to install the new monitor.
if (try_inc_items_count()) {
// Succeeded in claiming an item.
ObjectMonitor* result = AtomicAccess::cmpxchg(bucket, monitor, new_monitor, memory_order_release);
if (result == monitor) {
// Success - already incremented.
return new_monitor;
}
// Something else was installed in place.
dec_items_count();
monitor = result;
} else {
// Out of allowance; leaving room for rehashing to succeed.
// To avoid concurrent inserts succeeding, place a tombstone here.
ObjectMonitor* result = AtomicAccess::cmpxchg(bucket, monitor, tombstone());
if (result == monitor) {
// Success; nobody will try to insert here again, except reinsert from rehashing.
return nullptr;
}
monitor = result;
}
}
if (monitor == tombstone()) {
// Can't insert into this table.
return nullptr;
}
if (monitor != removed_entry() && monitor->object_peek() == obj) {
// Found matching monitor.
return monitor;
}
index = (index + 1) & _capacity_mask;
if (index == start_index) {
// No slot to install in this table.
return nullptr;
}
}
}
void remove(oop obj, ObjectMonitor* old_monitor, int hash) {
// Acquire any tombstones and relocations if prev transitioned to null.
Table* prev = AtomicAccess::load_acquire(&_prev);
if (prev != nullptr) {
prev->remove(obj, old_monitor, hash);
}
const size_t start_index = size_t(hash) & _capacity_mask;
size_t index = start_index;
for (;;) {
ObjectMonitor* volatile* bucket = _buckets + index;
ObjectMonitor* monitor = AtomicAccess::load(bucket);
if (monitor == nullptr) {
// Monitor does not exist in this table.
return;
}
if (monitor == old_monitor) {
// Found matching entry; remove it.
AtomicAccess::cmpxchg(bucket, monitor, removed_entry());
return;
}
index = (index + 1) & _capacity_mask;
if (index == start_index) {
// Not found
return;
}
}
}
void reinsert(oop obj, ObjectMonitor* new_monitor) {
int hash = obj->mark().hash();
const size_t start_index = size_t(hash) & _capacity_mask;
size_t index = start_index;
for (;;) {
ObjectMonitor* volatile* bucket = _buckets + index;
ObjectMonitor* monitor = AtomicAccess::load(bucket);
if (monitor == nullptr) {
// Empty slot to install the new monitor.
ObjectMonitor* result = AtomicAccess::cmpxchg(bucket, monitor, new_monitor, memory_order_release);
if (result == monitor) {
// Success - unconditionally increment.
inc_items_count();
return;
}
// Another monitor was installed.
monitor = result;
}
if (monitor == tombstone()) {
// A concurrent inserter did not get enough allowance in the table.
// But reinsert always succeeds - we will take the spot.
ObjectMonitor* result = AtomicAccess::cmpxchg(bucket, monitor, new_monitor, memory_order_release);
if (result == monitor) {
// Success - unconditionally increment.
inc_items_count();
return;
}
// Another monitor was installed.
monitor = result;
}
assert(monitor != nullptr, "invariant");
assert(monitor != tombstone(), "invariant");
assert(monitor == removed_entry() || monitor->object_peek() != obj, "invariant");
index = (index + 1) & _capacity_mask;
assert(index != start_index, "should never be full");
}
}
void rebuild() {
Table* prev = _prev;
if (prev == nullptr) {
// Base case for recursion - no previous version.
return;
}
// Finish rebuilding up to prev as target so we can use prev as source.
prev->rebuild();
JavaThread* current = JavaThread::current();
// Relocate entries from prev into this table.
for (size_t index = 0; index <= prev->_capacity_mask; index++) {
if ((index & 128) == 0) {
// Poll for safepoints to improve time to safepoint
ThreadBlockInVM tbivm(current);
}
ObjectMonitor* volatile* bucket = prev->_buckets + index;
ObjectMonitor* monitor = AtomicAccess::load(bucket);
if (monitor == nullptr) {
// Empty slot; put a tombstone there.
ObjectMonitor* result = AtomicAccess::cmpxchg(bucket, monitor, tombstone(), memory_order_relaxed);
if (result == nullptr) {
// Success; move to next entry.
continue;
}
// Concurrent insert; relocate.
monitor = result;
}
if (monitor != tombstone() && monitor != removed_entry()) {
// A real monitor entry.
oop obj = monitor->object_peek();
if (!monitor->is_being_async_deflated() && obj != nullptr) {
// Re-insert still live monitor.
reinsert(obj, monitor);
}
}
}
// Unlink this table, releasing the tombstones and relocations.
AtomicAccess::release_store(&_prev, (Table*)nullptr);
}
};
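Stripped of the CAS operations and safepoint polling, the relocation step of rebuild() behaves like this hedged single-threaded sketch (standalone, illustrative names): tombstone empty slots in the old table so late inserters fail over to the new one, then reinsert the still-live entries.

// Standalone, single-threaded sketch (not HotSpot code) of the relocation
// step in Table::rebuild() above; the real code uses CAS so it can race
// with concurrent inserters, and polls for safepoints while scanning.
#include <cstdint>
#include <cstddef>

enum : uintptr_t { EMPTY = 0, TOMBSTONE = 1, REMOVED = 2 };

struct Entry { bool live; };  // stands in for ObjectMonitor

// 'reinsert' stands in for Table::reinsert on the newer table.
void relocate(Entry** old_buckets, size_t old_capacity,
              void (*reinsert)(Entry*)) {
  for (size_t i = 0; i < old_capacity; i++) {
    uintptr_t bits = reinterpret_cast<uintptr_t>(old_buckets[i]);
    if (bits == EMPTY) {
      // Block late inserts into this slot; they must use the new table.
      old_buckets[i] = reinterpret_cast<Entry*>(TOMBSTONE);
      continue;
    }
    if (bits != TOMBSTONE && bits != REMOVED && old_buckets[i]->live) {
      reinsert(old_buckets[i]);  // move still-live entries to the new table
    }
  }
}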
class Lookup : public StackObj {
oop _obj;
public:
explicit Lookup(oop obj) : _obj(obj) {}
uintx get_hash() const {
uintx hash = _obj->mark().hash();
assert(hash != 0, "should have a hash");
return hash;
}
bool equals(ObjectMonitor** value) {
assert(*value != nullptr, "must be");
return (*value)->object_refers_to(_obj);
}
bool is_dead(ObjectMonitor** value) {
assert(*value != nullptr, "must be");
return false;
}
};
class LookupMonitor : public StackObj {
ObjectMonitor* _monitor;
public:
explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}
uintx get_hash() const {
return _monitor->hash();
}
bool equals(ObjectMonitor** value) {
return (*value) == _monitor;
}
bool is_dead(ObjectMonitor** value) {
assert(*value != nullptr, "must be");
return (*value)->object_is_dead();
}
};
void ObjectMonitorTable::inc_items_count() {
AtomicAccess::inc(&_items_count, memory_order_relaxed);
}
void ObjectMonitorTable::dec_items_count() {
AtomicAccess::dec(&_items_count, memory_order_relaxed);
}
double ObjectMonitorTable::get_load_factor() {
size_t count = AtomicAccess::load(&_items_count);
return (double)count / (double)_table_size;
}
size_t ObjectMonitorTable::table_size(Thread* current) {
return ((size_t)1) << _table->get_size_log2(current);
}
size_t ObjectMonitorTable::max_log_size() {
// TODO[OMTable]: Evaluate the max size.
// TODO[OMTable]: Need to fix init order to use Universe::heap()->max_capacity();
// Using MaxHeapSize directly this early may be wrong, and there
// are definitely rounding errors (alignment).
const size_t max_capacity = MaxHeapSize;
const size_t min_object_size = CollectedHeap::min_dummy_object_size() * HeapWordSize;
const size_t max_objects = max_capacity / MAX2(MinObjAlignmentInBytes, checked_cast<int>(min_object_size));
const size_t log_max_objects = log2i_graceful(max_objects);
return MAX2(MIN2<size_t>(SIZE_BIG_LOG2, log_max_objects), min_log_size());
}
// ~= log(AvgMonitorsPerThreadEstimate default)
size_t ObjectMonitorTable::min_log_size() {
return 10;
}
template<typename V>
size_t ObjectMonitorTable::clamp_log_size(V log_size) {
return MAX2(MIN2(log_size, checked_cast<V>(max_log_size())), checked_cast<V>(min_log_size()));
}
size_t ObjectMonitorTable::initial_log_size() {
const size_t estimate = log2i(MAX2(os::processor_count(), 1)) + log2i(MAX2(AvgMonitorsPerThreadEstimate, size_t(1)));
return clamp_log_size(estimate);
}
size_t ObjectMonitorTable::grow_hint() {
return ConcurrentTable::DEFAULT_GROW_HINT;
}
void ObjectMonitorTable::create() {
_table = new ConcurrentTable(initial_log_size(), max_log_size(), grow_hint());
_items_count = 0;
_table_size = table_size(Thread::current());
_resize = false;
}
void ObjectMonitorTable::verify_monitor_get_result(oop obj, ObjectMonitor* monitor) {
#ifdef ASSERT
if (SafepointSynchronize::is_at_safepoint()) {
bool has_monitor = obj->mark().has_monitor();
assert(has_monitor == (monitor != nullptr),
"Inconsistency between markWord and ObjectMonitorTable has_monitor: %s monitor: " PTR_FORMAT,
BOOL_TO_STR(has_monitor), p2i(monitor));
}
#endif
_curr = new Table(128, nullptr);
}
ObjectMonitor* ObjectMonitorTable::monitor_get(Thread* current, oop obj) {
ObjectMonitor* result = nullptr;
Lookup lookup_f(obj);
auto found_f = [&](ObjectMonitor** found) {
assert((*found)->object_peek() == obj, "must be");
result = *found;
};
_table->get(current, lookup_f, found_f);
verify_monitor_get_result(obj, result);
const int hash = obj->mark().hash();
Table* curr = AtomicAccess::load_acquire(&_curr);
return curr->get(obj, hash);
}
// Returns a new table to try inserting into.
ObjectMonitorTable::Table* ObjectMonitorTable::grow_table(Table* curr) {
Table* new_table = AtomicAccess::load(&_curr);
if (new_table != curr) {
// Table changed; no need to try further
return new_table;
}
new_table = new Table(curr->capacity() << 1, curr);
Table* result = AtomicAccess::cmpxchg(&_curr, curr, new_table, memory_order_acq_rel);
if (result == curr) {
// Successfully started rehashing.
log_info(monitorinflation)("Growing object monitor table");
ObjectSynchronizer::request_deflate_idle_monitors();
return new_table;
}
// Somebody else started rehashing; restart in new table.
delete new_table;
return result;
}
void ObjectMonitorTable::try_notify_grow() {
if (!_table->is_max_size_reached() && !AtomicAccess::load(&_resize)) {
AtomicAccess::store(&_resize, true);
if (Service_lock->try_lock()) {
Service_lock->notify();
Service_lock->unlock();
}
}
}
bool ObjectMonitorTable::should_grow() {
return get_load_factor() > GROW_LOAD_FACTOR && !_table->is_max_size_reached();
}
bool ObjectMonitorTable::should_resize() {
return should_grow() || should_shrink() || AtomicAccess::load(&_resize);
}
template <typename Task, typename... Args>
bool ObjectMonitorTable::run_task(JavaThread* current, Task& task, const char* task_name, Args&... args) {
if (task.prepare(current)) {
log_trace(monitortable)("Started to %s", task_name);
TraceTime timer(task_name, TRACETIME_LOG(Debug, monitortable, perf));
while (task.do_task(current, args...)) {
task.pause(current);
{
ThreadBlockInVM tbivm(current);
}
task.cont(current);
}
task.done(current);
return true;
}
return false;
}
bool ObjectMonitorTable::grow(JavaThread* current) {
ConcurrentTable::GrowTask grow_task(_table);
if (run_task(current, grow_task, "Grow")) {
_table_size = table_size(current);
log_info(monitortable)("Grown to size: %zu", _table_size);
return true;
}
return false;
}
bool ObjectMonitorTable::clean(JavaThread* current) {
ConcurrentTable::BulkDeleteTask clean_task(_table);
auto is_dead = [&](ObjectMonitor** monitor) {
return (*monitor)->object_is_dead();
};
auto do_nothing = [&](ObjectMonitor** monitor) {};
NativeHeapTrimmer::SuspendMark sm("ObjectMonitorTable");
return run_task(current, clean_task, "Clean", is_dead, do_nothing);
}
bool ObjectMonitorTable::resize(JavaThread* current) {
LogTarget(Info, monitortable) lt;
bool success = false;
if (should_grow()) {
lt.print("Start growing with load factor %f", get_load_factor());
success = grow(current);
} else {
if (!_table->is_max_size_reached() && AtomicAccess::load(&_resize)) {
lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
}
lt.print("Start cleaning with load factor %f", get_load_factor());
success = clean(current);
}
AtomicAccess::store(&_resize, false);
return success;
}
ObjectMonitor* ObjectMonitorTable::monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
// Enter the monitor into the concurrent hashtable.
ObjectMonitor* result = monitor;
Lookup lookup_f(obj);
auto found_f = [&](ObjectMonitor** found) {
assert((*found)->object_peek() == obj, "must be");
result = *found;
};
bool grow;
_table->insert_get(current, lookup_f, monitor, found_f, &grow);
verify_monitor_get_result(obj, result);
if (grow) {
try_notify_grow();
}
return result;
}
const int hash = obj->mark().hash();
Table* curr = AtomicAccess::load_acquire(&_curr);
bool ObjectMonitorTable::remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
LookupMonitor lookup_f(monitor);
return _table->remove(current, lookup_f);
}
bool ObjectMonitorTable::contains_monitor(Thread* current, ObjectMonitor* monitor) {
LookupMonitor lookup_f(monitor);
bool result = false;
auto found_f = [&](ObjectMonitor** found) {
result = true;
};
_table->get(current, lookup_f, found_f);
return result;
}
void ObjectMonitorTable::print_on(outputStream* st) {
auto printer = [&] (ObjectMonitor** entry) {
ObjectMonitor* om = *entry;
oop obj = om->object_peek();
st->print("monitor=" PTR_FORMAT ", ", p2i(om));
st->print("object=" PTR_FORMAT, p2i(obj));
assert(obj == nullptr || obj->mark().hash() == om->hash(), "hash must match");
st->cr();
return true;
};
if (SafepointSynchronize::is_at_safepoint()) {
_table->do_safepoint_scan(printer);
} else {
_table->do_scan(Thread::current(), printer);
  }
}
void ObjectMonitorTable::remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
oop obj = monitor->object_peek();
if (obj == nullptr) {
// Defer removal until subsequent rebuilding.
return;
}
const int hash = obj->mark().hash();
Table* curr = AtomicAccess::load_acquire(&_curr);
curr->remove(obj, monitor, hash);
}
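// Note: when the object has already died there is no mark word to hash with,
// so the stale entry stays in the table; it is dropped when the table is next
// rebuilt (presumably by rebuild() only carrying live entries over into the
// fresh table).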
// Before handshake; rehash and unlink tables.
void ObjectMonitorTable::rebuild(GrowableArray<Table*>* delete_list) {
Table* new_table;
{
Table* curr = AtomicAccess::load_acquire(&_curr);
new_table = new Table(curr->capacity(), curr);
Table* result = AtomicAccess::cmpxchg(&_curr, curr, new_table, memory_order_release);
  if (result != curr) {
    // Somebody else concurrently started rehashing; adopt their table. Delete
    // our speculative table, mirroring the failure path in grow_table().
    delete new_table;
    new_table = result;
  }
}
for (Table* curr = new_table->prev(); curr != nullptr; curr = curr->prev()) {
delete_list->append(curr);
}
// Rebuild with the new table as target.
new_table->rebuild();
}
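// Note: the split between rebuild() (called before the deflation handshake,
// unlinking the old tables) and destroy() (called after it) is what makes the
// deferred delete safe: any reader still probing an unlinked table is
// guaranteed to have left it once the handshake completes.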
// After handshake; destroy old tables
void ObjectMonitorTable::destroy(GrowableArray<Table*>* delete_list) {
for (ObjectMonitorTable::Table* table: *delete_list) {
delete table;
}
}
address ObjectMonitorTable::current_table_address() {
return (address)(&_curr);
}
ByteSize ObjectMonitorTable::table_capacity_mask_offset() {
return byte_offset_of(Table, _capacity_mask);
}
ByteSize ObjectMonitorTable::table_buckets_offset() {
return byte_offset_of(Table, _buckets);
}
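// A minimal C++ sketch of the probe compiled code can build from the three
// accessors above (field names and probe shape are assumptions for
// illustration, not the emitted code):
//
//   Table* t = *reinterpret_cast<Table* volatile*>(current_table_address());
//   size_t idx = hash & t->capacity_mask;            // table_capacity_mask_offset()
//   ObjectMonitor* m = t->buckets[idx];              // table_buckets_offset()
//   if (reinterpret_cast<uintptr_t>(m) < SpecialPointerValues::below_is_special) {
//     // empty, tombstone or removed: take the runtime slow path.
//   }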

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,9 @@
#include "memory/allStatic.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/sizes.hpp"
template <typename T> class GrowableArray;
class JavaThread;
class ObjectMonitor;
class ObjectMonitorTableConfig;
@ -36,42 +38,35 @@ class outputStream;
class Thread;
class ObjectMonitorTable : AllStatic {
friend class ObjectMonitorTableConfig;
static constexpr double GROW_LOAD_FACTOR = 0.125;
private:
static void inc_items_count();
static void dec_items_count();
static double get_load_factor();
static size_t table_size(Thread* current);
static size_t max_log_size();
static size_t min_log_size();
public:
class Table;
template <typename V>
static size_t clamp_log_size(V log_size);
static size_t initial_log_size();
static size_t grow_hint();
private:
static Table* volatile _curr;
static Table* grow_table(Table* curr);
public:
typedef enum {
empty = 0,
tombstone = 1,
removed = 2,
below_is_special = (removed + 1)
} SpecialPointerValues;
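// A hedged reading of the sentinels above, inferred from the names: empty
// marks a never-used bucket, tombstone a deleted entry that probing must step
// over, and removed an entry migrated to the successor table during a rehash.
// Keeping all three below below_is_special lets a single unsigned compare
// reject every special value at once.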
public:
static void create();
static void verify_monitor_get_result(oop obj, ObjectMonitor* monitor);
static ObjectMonitor* monitor_get(Thread* current, oop obj);
static void try_notify_grow();
static bool should_shrink() { return false; } // Not implemented
static constexpr double GROW_LOAD_FACTOR = 0.75;
static bool should_grow();
static bool should_resize();
template <typename Task, typename... Args>
static bool run_task(JavaThread* current, Task& task, const char* task_name, Args&... args);
static bool grow(JavaThread* current);
static bool clean(JavaThread* current);
static bool resize(JavaThread* current);
static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj);
static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor);
static bool contains_monitor(Thread* current, ObjectMonitor* monitor);
static void print_on(outputStream* st);
static void rebuild(GrowableArray<Table*>* delete_list);
static void destroy(GrowableArray<Table*>* delete_list);
static void remove_monitor_entry(Thread* current, ObjectMonitor* monitor);
static void monitor_reinsert(Table* from, ObjectMonitor* monitor, oop obj);
// Compiler support
static address current_table_address();
static ByteSize table_capacity_mask_offset();
static ByteSize table_buckets_offset();
};
#endif // SHARE_RUNTIME_OBJECTMONITORTABLE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -85,7 +85,6 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
bool cldg_cleanup_work = false;
bool jvmti_tagmap_work = false;
bool oopmap_cache_work = false;
bool object_monitor_table_work = false;
{
// Need state transition ThreadBlockInVM so that this thread
// will be handled by safepoint correctly when this thread is
@ -112,8 +111,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
(oop_handles_to_release = JavaThread::has_oop_handles_to_release()) |
(cldg_cleanup_work = ClassLoaderDataGraph::should_clean_metaspaces_and_reset()) |
(jvmti_tagmap_work = JvmtiTagMap::has_object_free_events_and_reset()) |
(oopmap_cache_work = OopMapCache::has_cleanup_work()) |
(object_monitor_table_work = ObjectSynchronizer::needs_resize())
(oopmap_cache_work = OopMapCache::has_cleanup_work())
) == 0) {
// Wait until notified that there is some work to do or timer expires.
// Some cleanup requests don't notify the ServiceThread so work needs to be done at periodic intervals.
@ -171,10 +169,6 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
if (oopmap_cache_work) {
OopMapCache::cleanup();
}
if (object_monitor_table_work) {
ObjectSynchronizer::resize_table(jt);
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1195,13 +1195,10 @@ size_t ObjectSynchronizer::deflate_idle_monitors() {
GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
unlinked_count = _in_use_list.unlink_deflated(deflated_count, &delete_list, &safepointer);
#ifdef ASSERT
GrowableArray<ObjectMonitorTable::Table*> table_delete_list;
if (UseObjectMonitorTable) {
for (ObjectMonitor* monitor : delete_list) {
assert(!ObjectSynchronizer::contains_monitor(current, monitor), "Should have been removed");
}
ObjectMonitorTable::rebuild(&table_delete_list);
}
#endif
log.before_handshake(unlinked_count);
@ -1222,6 +1219,9 @@ size_t ObjectSynchronizer::deflate_idle_monitors() {
// Delete the unlinked ObjectMonitors.
deleted_count = delete_monitors(&delete_list, &safepointer);
if (UseObjectMonitorTable) {
ObjectMonitorTable::destroy(&table_delete_list);
}
assert(unlinked_count == deleted_count, "must be");
}
@ -1549,11 +1549,11 @@ ObjectMonitor* ObjectSynchronizer::add_monitor(JavaThread* current, ObjectMonito
return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
}
bool ObjectSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
void ObjectSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
assert(UseObjectMonitorTable, "must be");
assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");
return ObjectMonitorTable::remove_monitor_entry(current, monitor);
ObjectMonitorTable::remove_monitor_entry(current, monitor);
}
void ObjectSynchronizer::deflate_mark_word(oop obj) {
@ -1575,20 +1575,6 @@ void ObjectSynchronizer::create_om_table() {
ObjectMonitorTable::create();
}
bool ObjectSynchronizer::needs_resize() {
if (!UseObjectMonitorTable) {
return false;
}
return ObjectMonitorTable::should_resize();
}
bool ObjectSynchronizer::resize_table(JavaThread* current) {
if (!UseObjectMonitorTable) {
return true;
}
return ObjectMonitorTable::resize(current);
}
class ObjectSynchronizer::LockStackInflateContendedLocks : private OopClosure {
private:
oop _contended_oops[LockStack::CAPACITY];
@ -2296,10 +2282,7 @@ ObjectMonitor* ObjectSynchronizer::inflate_and_enter(oop object, BasicLock* lock
void ObjectSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
if (obj != nullptr) {
deflate_mark_word(obj);
}
bool removed = remove_monitor(current, monitor, obj);
if (obj != nullptr) {
assert(removed, "Should have removed the entry if obj was alive");
remove_monitor(current, monitor, obj);
}
}
@ -2308,11 +2291,6 @@ ObjectMonitor* ObjectSynchronizer::get_monitor_from_table(Thread* current, oop o
return ObjectMonitorTable::monitor_get(current, obj);
}
bool ObjectSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) {
assert(UseObjectMonitorTable, "must be");
return ObjectMonitorTable::contains_monitor(current, monitor);
}
ObjectMonitor* ObjectSynchronizer::read_monitor(markWord mark) {
return mark.monitor();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,6 +30,7 @@
#include "runtime/basicLock.hpp"
#include "runtime/handles.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/objectMonitorTable.hpp"
#include "utilities/hashTable.hpp"
template <typename T> class GrowableArray;
@ -213,7 +214,7 @@ public:
static ObjectMonitor* get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause);
static ObjectMonitor* add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj);
static bool remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj);
static void remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj);
static void deflate_mark_word(oop object);