8367982: Unify ObjectSynchronizer and LightweightSynchronizer

Reviewed-by: pchilanomate, coleenp
This commit is contained in:
Fredrik Bredberg 2025-11-06 12:16:19 +00:00
parent 093e128771
commit 3930b1d4dd
80 changed files with 1551 additions and 1717 deletions

View File

@ -16262,7 +16262,7 @@ instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
// ============================================================================
// inlined locking and unlocking
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
@ -16271,13 +16271,13 @@ instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp
format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}
ins_encode %{
__ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
%}
ins_pipe(pipe_serial);
%}
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
match(Set cr (FastUnlock object box));
effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
@ -16286,7 +16286,7 @@ instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNo
format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2, $tmp3" %}
ins_encode %{
__ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
__ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
%}
ins_pipe(pipe_serial);

View File

@ -70,7 +70,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register basic_lo
null_check_offset = offset();
lightweight_lock(basic_lock, obj, hdr, temp, rscratch2, slow_case);
fast_lock(basic_lock, obj, hdr, temp, rscratch2, slow_case);
return null_check_offset;
}
@ -83,7 +83,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register basic
ldr(obj, Address(basic_lock, BasicObjectLock::obj_offset()));
verify_oop(obj);
lightweight_unlock(obj, hdr, temp, rscratch2, slow_case);
fast_unlock(obj, hdr, temp, rscratch2, slow_case);
}

View File

@ -147,8 +147,8 @@ address C2_MacroAssembler::arrays_hashcode(Register ary, Register cnt, Register
return pc();
}
void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register t1,
Register t2, Register t3) {
void C2_MacroAssembler::fast_lock(Register obj, Register box, Register t1,
Register t2, Register t3) {
assert_different_registers(obj, box, t1, t2, t3, rscratch2);
// Handle inflated monitor.
@ -173,7 +173,7 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist
const Register t1_mark = t1;
const Register t3_t = t3;
{ // Lightweight locking
{ // Fast locking
// Push lock to the lock stack and finish successfully. MUST branch to with flag == EQ
Label push;
@ -303,8 +303,8 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist
// C2 uses the value of Flags (NE vs EQ) to determine the continuation.
}
void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Register t1,
Register t2, Register t3) {
void C2_MacroAssembler::fast_unlock(Register obj, Register box, Register t1,
Register t2, Register t3) {
assert_different_registers(obj, box, t1, t2, t3);
// Handle inflated monitor.
@ -318,7 +318,7 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Regi
const Register t2_top = t2;
const Register t3_t = t3;
{ // Lightweight unlock
{ // Fast unlock
Label push_and_slow_path;
@ -2859,4 +2859,4 @@ void C2_MacroAssembler::vector_expand_sve(FloatRegister dst, FloatRegister src,
sve_sub(dst, size, 1);
// dst = 00 87 00 65 00 43 00 21
sve_tbl(dst, size, src, dst);
}
}

View File

@ -51,9 +51,9 @@
FloatRegister vmul3, FloatRegister vpow, FloatRegister vpowm,
BasicType eltype);
// Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file.
void fast_lock_lightweight(Register object, Register box, Register t1, Register t2, Register t3);
void fast_unlock_lightweight(Register object, Register box, Register t1, Register t2, Register t3);
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
void fast_lock(Register object, Register box, Register t1, Register t2, Register t3);
void fast_unlock(Register object, Register box, Register t1, Register t2, Register t3);
void string_compare(Register str1, Register str2,
Register cnt1, Register cnt2, Register result,

View File

@ -709,7 +709,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
Label slow_case, done;
lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
fast_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
b(done);
bind(slow_case);
@ -741,7 +741,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
const Register swap_reg = r0;
const Register header_reg = c_rarg2; // Will contain the old oopMark
const Register obj_reg = c_rarg3; // Will contain the oop
const Register tmp_reg = c_rarg4; // Temporary used by lightweight_unlock
const Register tmp_reg = c_rarg4; // Temporary used by fast_unlock
save_bcp(); // Save in case of exception
@ -752,7 +752,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
Label slow_case, done;
lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
fast_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
b(done);
bind(slow_case);

View File

@ -6934,12 +6934,12 @@ void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
}
}
// Implements lightweight-locking.
// Implements fast-locking.
//
// - obj: the object to be locked
// - t1, t2, t3: temporary registers, will be destroyed
// - slow: branched to if locking fails, absolute offset may be larger than 32KB (imm14 encoding).
void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);
Label push;
@ -6993,12 +6993,12 @@ void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Registe
strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
}
// Implements lightweight-unlocking.
// Implements fast-unlocking.
//
// - obj: the object to be unlocked
// - t1, t2, t3: temporary registers
// - slow: branched to if unlocking fails, absolute offset may be larger than 32KB (imm14 encoding).
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
void MacroAssembler::fast_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
// cmpxchg clobbers rscratch1.
assert_different_registers(obj, t1, t2, t3, rscratch1);
@ -7044,7 +7044,7 @@ void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2,
// Check header not unlocked (0b01).
Label not_unlocked;
tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
stop("lightweight_unlock already unlocked");
stop("fast_unlock already unlocked");
bind(not_unlocked);
#endif

View File

@ -1721,8 +1721,8 @@ public:
// Code for java.lang.Thread::onSpinWait() intrinsic.
void spin_wait();
void lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow);
void lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow);
void fast_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow);
void fast_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow);
private:
// Check the current thread doesn't need a cross modify fence.

View File

@ -1707,7 +1707,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
const Register obj_reg = r19; // Will contain the oop
const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
const Register old_hdr = r13; // value of old header at unlock time
const Register lock_tmp = r14; // Temporary used by lightweight_lock/unlock
const Register lock_tmp = r14; // Temporary used by fast_lock/unlock
const Register tmp = lr;
Label slow_path_lock;
@ -1724,7 +1724,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Load the oop from the handle
__ ldr(obj_reg, Address(oop_handle_reg, 0));
__ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
__ fast_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
// Slow path will re-enter here
__ bind(lock_done);
@ -1833,7 +1833,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
save_native_result(masm, ret_type, stack_slots);
}
__ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
__ fast_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
// slow path re-enters here
__ bind(unlock_done);

View File

@ -195,7 +195,7 @@ enum Ampere_CPU_Model {
// Aarch64 supports fast class initialization checks
static bool supports_fast_class_init_checks() { return true; }
constexpr static bool supports_stack_watermark_barrier() { return true; }
constexpr static bool supports_recursive_lightweight_locking() { return true; }
constexpr static bool supports_recursive_fast_locking() { return true; }
constexpr static bool supports_secondary_supers_table() { return true; }

View File

@ -201,7 +201,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register basic_lo
Register t2 = hdr; // blow
Register t3 = Rtemp; // blow
lightweight_lock(obj, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
fast_lock(obj, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
// Success: fall through
return null_check_offset;
}
@ -218,7 +218,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register basic
Register t2 = hdr; // blow
Register t3 = Rtemp; // blow
lightweight_unlock(obj, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
fast_unlock(obj, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
// Success: fall through
}

View File

@ -90,8 +90,8 @@ void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratc
b(done, ne);
}
lightweight_lock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
1 /* savemask (save t1) */, done);
MacroAssembler::fast_lock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
1 /* savemask (save t1) */, done);
cmp(Roop, Roop); // Success: set Z
bind(done);
@ -107,8 +107,8 @@ void C2_MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscra
Label done;
lightweight_unlock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
1 /* savemask (save t1) */, done);
MacroAssembler::fast_unlock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
1 /* savemask (save t1) */, done);
cmp(Roop, Roop); // Success: Set Z
// Fall through

View File

@ -904,7 +904,7 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
b(slow_case, ne);
}
lightweight_lock(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
fast_lock(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
b(done);
bind(slow_case);
@ -945,8 +945,8 @@ void InterpreterMacroAssembler::unlock_object(Register Rlock) {
cmpoop(Rtemp, Robj);
b(slow_case, ne);
lightweight_unlock(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
1 /* savemask (save t1) */, slow_case);
fast_unlock(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
1 /* savemask (save t1) */, slow_case);
b(done);
bind(slow_case);

View File

@ -1750,14 +1750,14 @@ void MacroAssembler::read_polling_page(Register dest, relocInfo::relocType rtype
POISON_REG(mask, 1, R2, poison) \
POISON_REG(mask, 2, R3, poison)
// Attempt to lightweight-lock an object
// Attempt to fast-lock an object
// Registers:
// - obj: the object to be locked
// - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
// Result:
// - Success: fallthrough
// - Error: break to slow, Z cleared.
void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
void MacroAssembler::fast_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
assert_different_registers(obj, t1, t2, t3);
#ifdef ASSERT
@ -1807,14 +1807,14 @@ void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Re
// Success: fall through
}
// Attempt to lightweight-unlock an object
// Attempt to fast-unlock an object
// Registers:
// - obj: the object to be unlocked
// - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
// Result:
// - Success: fallthrough
// - Error: break to slow, Z cleared.
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
void MacroAssembler::fast_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
assert_different_registers(obj, t1, t2, t3);
#ifdef ASSERT

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1010,23 +1010,23 @@ public:
void cas_for_lock_acquire(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
void cas_for_lock_release(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
// Attempt to lightweight-lock an object
// Attempt to fast-lock an object
// Registers:
// - obj: the object to be locked
// - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
// Result:
// - Success: fallthrough
// - Error: break to slow, Z cleared.
void lightweight_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
void fast_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
// Attempt to lightweight-unlock an object
// Attempt to fast-unlock an object
// Registers:
// - obj: the object to be unlocked
// - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
// Result:
// - Success: fallthrough
// - Error: break to slow, Z cleared.
void lightweight_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
void fast_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
#ifndef PRODUCT
// Preserves flags and all registers.

View File

@ -1139,8 +1139,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ mov(sync_handle, R1);
log_trace(fastlock)("SharedRuntime lock fast");
__ lightweight_lock(sync_obj /* object */, basic_lock /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
0x7 /* savemask */, slow_lock);
__ fast_lock(sync_obj /* object */, basic_lock /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
0x7 /* savemask */, slow_lock);
// Fall through to lock_done
__ bind(lock_done);
}
@ -1195,8 +1195,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
Label slow_unlock, unlock_done;
if (method->is_synchronized()) {
log_trace(fastlock)("SharedRuntime unlock fast");
__ lightweight_unlock(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
7 /* savemask */, slow_unlock);
__ fast_unlock(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
7 /* savemask */, slow_unlock);
// Fall through
__ bind(unlock_done);

View File

@ -82,7 +82,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
// Save object being locked into the BasicObjectLock...
std(Roop, in_bytes(BasicObjectLock::obj_offset()), Rbox);
lightweight_lock(Rbox, Roop, Rmark, Rscratch, slow_int);
fast_lock(Rbox, Roop, Rmark, Rscratch, slow_int);
b(done);
bind(slow_int);
@ -104,7 +104,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
ld(Roop, in_bytes(BasicObjectLock::obj_offset()), Rbox);
verify_oop(Roop, FILE_AND_LINE);
lightweight_unlock(Roop, Rmark, slow_int);
fast_unlock(Roop, Rmark, slow_int);
b(done);
bind(slow_int);
b(slow_case); // far

View File

@ -36,14 +36,14 @@
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
void C2_MacroAssembler::fast_lock_lightweight(ConditionRegister flag, Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3) {
compiler_fast_lock_lightweight_object(flag, obj, box, tmp1, tmp2, tmp3);
void C2_MacroAssembler::fast_lock(ConditionRegister flag, Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3) {
compiler_fast_lock_object(flag, obj, box, tmp1, tmp2, tmp3);
}
void C2_MacroAssembler::fast_unlock_lightweight(ConditionRegister flag, Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3) {
compiler_fast_unlock_lightweight_object(flag, obj, box, tmp1, tmp2, tmp3);
void C2_MacroAssembler::fast_unlock(ConditionRegister flag, Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3) {
compiler_fast_unlock_object(flag, obj, box, tmp1, tmp2, tmp3);
}
void C2_MacroAssembler::load_narrow_klass_compact_c2(Register dst, Register obj, int disp) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,11 +28,11 @@
// C2_MacroAssembler contains high-level macros for C2
public:
// Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file.
void fast_lock_lightweight(ConditionRegister flag, Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3);
void fast_unlock_lightweight(ConditionRegister flag, Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3);
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
void fast_lock(ConditionRegister flag, Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3);
void fast_unlock(ConditionRegister flag, Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3);
void load_narrow_klass_compact_c2(Register dst, Register obj, int disp);

View File

@ -958,7 +958,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
assert_different_registers(header, tmp);
lightweight_lock(monitor, object, header, tmp, slow_case);
fast_lock(monitor, object, header, tmp, slow_case);
b(done);
bind(slow_case);
@ -987,7 +987,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
// The object address from the monitor is in object.
ld(object, in_bytes(BasicObjectLock::obj_offset()), monitor);
lightweight_unlock(object, header, slow_case);
fast_unlock(object, header, slow_case);
b(free_slot);

View File

@ -2671,8 +2671,8 @@ address MacroAssembler::emit_trampoline_stub(int destination_toc_offset,
}
// "The box" is the space on the stack where we copy the object mark.
void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister flag, Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3) {
void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3) {
assert_different_registers(obj, box, tmp1, tmp2, tmp3);
assert(UseObjectMonitorTable || tmp3 == noreg, "tmp3 not needed");
assert(flag == CR0, "bad condition register");
@ -2699,7 +2699,7 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla
Register mark = tmp1;
{ // Lightweight locking
{ // Fast locking
// Push lock to the lock stack and finish successfully. MUST reach to with flag == EQ
Label push;
@ -2847,8 +2847,8 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla
// C2 uses the value of flag (NE vs EQ) to determine the continuation.
}
void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister flag, Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3) {
void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3) {
assert_different_registers(obj, tmp1, tmp2, tmp3);
assert(flag == CR0, "bad condition register");
@ -2863,7 +2863,7 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister f
const Register top = tmp2;
const Register t = tmp3;
{ // Lightweight unlock
{ // Fast unlock
Label push_and_slow;
// Check if obj is top of lock-stack.
@ -2904,7 +2904,7 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister f
Label not_unlocked;
andi_(t, mark, markWord::unlocked_value);
beq(CR0, not_unlocked);
stop("lightweight_unlock already unlocked");
stop("fast_unlock already unlocked");
bind(not_unlocked);
#endif
@ -4588,11 +4588,11 @@ void MacroAssembler::atomically_flip_locked_state(bool is_unlock, Register obj,
}
}
// Implements lightweight-locking.
// Implements fast-locking.
//
// - obj: the object to be locked
// - t1, t2: temporary register
void MacroAssembler::lightweight_lock(Register box, Register obj, Register t1, Register t2, Label& slow) {
void MacroAssembler::fast_lock(Register box, Register obj, Register t1, Register t2, Label& slow) {
assert_different_registers(box, obj, t1, t2, R0);
Label push;
@ -4644,11 +4644,11 @@ void MacroAssembler::lightweight_lock(Register box, Register obj, Register t1, R
stw(top, in_bytes(JavaThread::lock_stack_top_offset()), R16_thread);
}
// Implements lightweight-unlocking.
// Implements fast-unlocking.
//
// - obj: the object to be unlocked
// - t1: temporary register
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Label& slow) {
void MacroAssembler::fast_unlock(Register obj, Register t1, Label& slow) {
assert_different_registers(obj, t1);
#ifdef ASSERT
@ -4706,7 +4706,7 @@ void MacroAssembler::lightweight_unlock(Register obj, Register t1, Label& slow)
Label not_unlocked;
andi_(t, mark, markWord::unlocked_value);
beq(CR0, not_unlocked);
stop("lightweight_unlock already unlocked");
stop("fast_unlock already unlocked");
bind(not_unlocked);
#endif

View File

@ -698,8 +698,8 @@ class MacroAssembler: public Assembler {
void push_cont_fastpath();
void pop_cont_fastpath();
void atomically_flip_locked_state(bool is_unlock, Register obj, Register tmp, Label& failed, int semantics);
void lightweight_lock(Register box, Register obj, Register t1, Register t2, Label& slow);
void lightweight_unlock(Register obj, Register t1, Label& slow);
void fast_lock(Register box, Register obj, Register t1, Register t2, Label& slow);
void fast_unlock(Register obj, Register t1, Label& slow);
// allocation (for C1)
void tlab_allocate(
@ -713,11 +713,11 @@ class MacroAssembler: public Assembler {
enum { trampoline_stub_size = 6 * 4 };
address emit_trampoline_stub(int destination_toc_offset, int insts_call_instruction_offset, Register Rtoc = noreg);
void compiler_fast_lock_lightweight_object(ConditionRegister flag, Register oop, Register box,
Register tmp1, Register tmp2, Register tmp3);
void compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
Register tmp1, Register tmp2, Register tmp3);
void compiler_fast_unlock_lightweight_object(ConditionRegister flag, Register oop, Register box,
Register tmp1, Register tmp2, Register tmp3);
void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
Register tmp1, Register tmp2, Register tmp3);
// Check if safepoint requested and if so branch
void safepoint_poll(Label& slow_path, Register temp, bool at_return, bool in_nmethod);

View File

@ -11551,15 +11551,15 @@ instruct partialSubtypeCheckConstSuper(rarg3RegP sub, rarg2RegP super_reg, immP
// inlined locking and unlocking
instruct cmpFastLockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{
instruct cmpFastLock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{
predicate(!UseObjectMonitorTable);
match(Set crx (FastLock oop box));
effect(TEMP tmp1, TEMP tmp2);
format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2" %}
ins_encode %{
__ fast_lock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register,
$tmp1$$Register, $tmp2$$Register, noreg /*tmp3*/);
__ fast_lock($crx$$CondRegister, $oop$$Register, $box$$Register,
$tmp1$$Register, $tmp2$$Register, noreg /*tmp3*/);
// If locking was successful, crx should indicate 'EQ'.
// The compiler generates a branch to the runtime call to
// _complete_monitor_locking_Java for the case where crx is 'NE'.
@ -11574,8 +11574,8 @@ instruct cmpFastLockMonitorTable(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iR
format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2, $tmp3" %}
ins_encode %{
__ fast_lock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
__ fast_lock($crx$$CondRegister, $oop$$Register, $box$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
// If locking was successful, crx should indicate 'EQ'.
// The compiler generates a branch to the runtime call to
// _complete_monitor_locking_Java for the case where crx is 'NE'.
@ -11583,14 +11583,14 @@ instruct cmpFastLockMonitorTable(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iR
ins_pipe(pipe_class_compare);
%}
instruct cmpFastUnlockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
instruct cmpFastUnlock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
match(Set crx (FastUnlock oop box));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
format %{ "FASTUNLOCK $oop, $box, $tmp1, $tmp2" %}
ins_encode %{
__ fast_unlock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
__ fast_unlock($crx$$CondRegister, $oop$$Register, $box$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
// If unlocking was successful, crx should indicate 'EQ'.
// The compiler generates a branch to the runtime call to
// _complete_monitor_unlocking_Java for the case where crx is 'NE'.

View File

@ -2381,7 +2381,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Try fastpath for locking.
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
Register r_temp_3_or_noreg = UseObjectMonitorTable ? r_temp_3 : noreg;
__ compiler_fast_lock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
__ compiler_fast_lock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
__ beq(CR0, locked);
// None of the above fast optimizations worked so we have to get into the
@ -2600,7 +2600,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ addi(r_box, R1_SP, lock_offset);
// Try fastpath for unlocking.
__ compiler_fast_unlock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
__ compiler_fast_unlock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
__ beq(CR0, done);
// Save and restore any potential method result value around the unlocking operation.

View File

@ -62,7 +62,7 @@ public:
// PPC64 supports fast class initialization checks
static bool supports_fast_class_init_checks() { return true; }
constexpr static bool supports_stack_watermark_barrier() { return true; }
constexpr static bool supports_recursive_lightweight_locking() { return true; }
constexpr static bool supports_recursive_fast_locking() { return true; }
constexpr static bool supports_secondary_supers_table() { return true; }
static bool supports_float16() { return PowerArchitecturePPC64 >= 9; }

View File

@ -59,7 +59,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register basic_lo
null_check_offset = offset();
lightweight_lock(basic_lock, obj, hdr, temp, t1, slow_case);
fast_lock(basic_lock, obj, hdr, temp, t1, slow_case);
return null_check_offset;
}
@ -71,7 +71,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register basic
ld(obj, Address(basic_lock, BasicObjectLock::obj_offset()));
verify_oop(obj);
lightweight_unlock(obj, hdr, temp, t1, slow_case);
fast_unlock(obj, hdr, temp, t1, slow_case);
}
// Defines obj, preserves var_size_in_bytes

View File

@ -43,8 +43,8 @@
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
void C2_MacroAssembler::fast_lock(Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
// Flag register, zero for success; non-zero for failure.
Register flag = t1;
@ -74,7 +74,7 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box,
const Register tmp1_mark = tmp1;
const Register tmp3_t = tmp3;
{ // Lightweight locking
{ // Fast locking
// Push lock to the lock stack and finish successfully. MUST branch to with flag == 0
Label push;
@ -205,8 +205,8 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box,
// C2 uses the value of flag (0 vs !0) to determine the continuation.
}
void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3) {
void C2_MacroAssembler::fast_unlock(Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3) {
// Flag register, zero for success; non-zero for failure.
Register flag = t1;
@ -225,7 +225,7 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box,
const Register tmp2_top = tmp2;
const Register tmp3_t = tmp3;
{ // Lightweight unlock
{ // Fast unlock
Label push_and_slow_path;
// Check if obj is top of lock-stack.

View File

@ -49,11 +49,11 @@
const int STUB_THRESHOLD, Label *STUB, Label *DONE);
public:
// Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file.
void fast_lock_lightweight(Register object, Register box,
Register tmp1, Register tmp2, Register tmp3, Register tmp4);
void fast_unlock_lightweight(Register object, Register box,
Register tmp1, Register tmp2, Register tmp3);
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
void fast_lock(Register object, Register box,
Register tmp1, Register tmp2, Register tmp3, Register tmp4);
void fast_unlock(Register object, Register box,
Register tmp1, Register tmp2, Register tmp3);
void string_compare(Register str1, Register str2,
Register cnt1, Register cnt2, Register result,

View File

@ -751,7 +751,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
ld(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
Label done, slow_case;
lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
fast_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
j(done);
bind(slow_case);
@ -782,7 +782,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
const Register swap_reg = x10;
const Register header_reg = c_rarg2; // Will contain the old oopMark
const Register obj_reg = c_rarg3; // Will contain the oop
const Register tmp_reg = c_rarg4; // Temporary used by lightweight_unlock
const Register tmp_reg = c_rarg4; // Temporary used by fast_unlock
save_bcp(); // Save in case of exception
@ -793,7 +793,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
sd(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
Label done, slow_case;
lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
fast_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
j(done);
bind(slow_case);

View File

@ -6435,12 +6435,12 @@ void MacroAssembler::test_bit(Register Rd, Register Rs, uint32_t bit_pos) {
}
}
// Implements lightweight-locking.
// Implements fast-locking.
//
// - obj: the object to be locked
// - tmp1, tmp2, tmp3: temporary registers, will be destroyed
// - slow: branched to if locking fails
void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) {
void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) {
assert_different_registers(basic_lock, obj, tmp1, tmp2, tmp3, t0);
Label push;
@ -6499,7 +6499,7 @@ void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Registe
// - obj: the object to be unlocked
// - tmp1, tmp2, tmp3: temporary registers
// - slow: branched to if unlocking fails
void MacroAssembler::lightweight_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) {
void MacroAssembler::fast_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) {
assert_different_registers(obj, tmp1, tmp2, tmp3, t0);
#ifdef ASSERT
@ -6546,7 +6546,7 @@ void MacroAssembler::lightweight_unlock(Register obj, Register tmp1, Register tm
Label not_unlocked;
test_bit(t, mark, exact_log2(markWord::unlocked_value));
beqz(t, not_unlocked);
stop("lightweight_unlock already unlocked");
stop("fast_unlock already unlocked");
bind(not_unlocked);
#endif

View File

@ -1639,8 +1639,8 @@ private:
void store_conditional(Register dst, Register new_val, Register addr, Assembler::operand_size size, Assembler::Aqrl release);
public:
void lightweight_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
void lightweight_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
void fast_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
void fast_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
public:
enum {

View File

@ -11039,36 +11039,36 @@ instruct tlsLoadP(javaThread_RegP dst)
// inlined locking and unlocking
// using t1 as the 'flag' register to bridge the BoolNode producers and consumers
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box,
iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box,
iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
%{
match(Set cr (FastLock object box));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);
ins_cost(10 * DEFAULT_COST);
format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLockLightweight" %}
format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLock" %}
ins_encode %{
__ fast_lock_lightweight($object$$Register, $box$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
__ fast_lock($object$$Register, $box$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
%}
ins_pipe(pipe_serial);
%}
// using t1 as the 'flag' register to bridge the BoolNode producers and consumers
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box,
iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box,
iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
match(Set cr (FastUnlock object box));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
ins_cost(10 * DEFAULT_COST);
format %{ "fastunlock $object,$box\t! kills $tmp1,$tmp2,$tmp3 #@cmpFastUnlockLightweight" %}
format %{ "fastunlock $object,$box\t! kills $tmp1,$tmp2,$tmp3 #@cmpFastUnlock" %}
ins_encode %{
__ fast_unlock_lightweight($object$$Register, $box$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
__ fast_unlock($object$$Register, $box$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
%}
ins_pipe(pipe_serial);

View File

@ -1642,7 +1642,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
const Register obj_reg = x9; // Will contain the oop
const Register lock_reg = x30; // Address of compiler lock object (BasicLock)
const Register old_hdr = x30; // value of old header at unlock time
const Register lock_tmp = x31; // Temporary used by lightweight_lock/unlock
const Register lock_tmp = x31; // Temporary used by fast_lock/unlock
const Register tmp = ra;
Label slow_path_lock;
@ -1659,7 +1659,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Load the oop from the handle
__ ld(obj_reg, Address(oop_handle_reg, 0));
__ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
__ fast_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
// Slow path will re-enter here
__ bind(lock_done);
@ -1754,7 +1754,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
save_native_result(masm, ret_type, stack_slots);
}
__ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
__ fast_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
// slow path re-enters here
__ bind(unlock_done);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2023, Rivos Inc. All rights reserved.
@ -498,7 +498,7 @@ private:
constexpr static bool supports_stack_watermark_barrier() { return true; }
constexpr static bool supports_recursive_lightweight_locking() { return true; }
constexpr static bool supports_recursive_fast_locking() { return true; }
constexpr static bool supports_secondary_supers_table() { return true; }

View File

@ -67,7 +67,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
// Save object being locked into the BasicObjectLock...
z_stg(Roop, Address(Rbox, BasicObjectLock::obj_offset()));
lightweight_lock(Rbox, Roop, Rmark, tmp, slow_case);
fast_lock(Rbox, Roop, Rmark, tmp, slow_case);
}
void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {
@ -77,7 +77,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
z_lg(Roop, Address(Rbox, BasicObjectLock::obj_offset()));
verify_oop(Roop, FILE_AND_LINE);
lightweight_unlock(Roop, Rmark, Z_R1_scratch, slow_case);
fast_unlock(Roop, Rmark, Z_R1_scratch, slow_case);
}
void C1_MacroAssembler::try_allocate(

View File

@ -32,13 +32,13 @@
#define BLOCK_COMMENT(str) block_comment(str)
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register temp1, Register temp2) {
compiler_fast_lock_lightweight_object(obj, box, temp1, temp2);
void C2_MacroAssembler::fast_lock(Register obj, Register box, Register temp1, Register temp2) {
compiler_fast_lock_object(obj, box, temp1, temp2);
}
void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Register temp1, Register temp2) {
compiler_fast_unlock_lightweight_object(obj, box, temp1, temp2);
void C2_MacroAssembler::fast_unlock(Register obj, Register box, Register temp1, Register temp2) {
compiler_fast_unlock_object(obj, box, temp1, temp2);
}
void C2_MacroAssembler::load_narrow_klass_compact_c2(Register dst, Address src) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -29,9 +29,9 @@
// C2_MacroAssembler contains high-level macros for C2
public:
// Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in s390.ad file.
void fast_lock_lightweight(Register obj, Register box, Register temp1, Register temp2);
void fast_unlock_lightweight(Register obj, Register box, Register temp1, Register temp2);
// Code used by cmpFastLock and cmpFastUnlock mach instructions in s390.ad file.
void fast_lock(Register obj, Register box, Register temp1, Register temp2);
void fast_unlock(Register obj, Register box, Register temp1, Register temp2);
void load_narrow_klass_compact_c2(Register dst, Address src);

View File

@ -1019,7 +1019,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
NearLabel done, slow_case;
lightweight_lock(monitor, object, header, tmp, slow_case);
fast_lock(monitor, object, header, tmp, slow_case);
z_bru(done);
bind(slow_case);
@ -1054,7 +1054,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
clear_mem(obj_entry, sizeof(oop));
lightweight_unlock(object, header, current_header, slow_case);
fast_unlock(object, header, current_header, slow_case);
z_bru(done);
// The lock has been converted into a heavy lock and hence

View File

@ -6138,11 +6138,11 @@ void MacroAssembler::zap_from_to(Register low, Register high, Register val, Regi
}
#endif // !PRODUCT
// Implements lightweight-locking.
// Implements fast-locking.
// - obj: the object to be locked, contents preserved.
// - temp1, temp2: temporary registers, contents destroyed.
// Note: make sure Z_R1 is not manipulated here when C2 compiler is in play
void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register temp1, Register temp2, Label& slow) {
void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register temp1, Register temp2, Label& slow) {
assert_different_registers(basic_lock, obj, temp1, temp2);
@ -6203,11 +6203,11 @@ void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Registe
z_alsi(in_bytes(ls_top_offset), Z_thread, oopSize);
}
// Implements lightweight-unlocking.
// Implements fast-unlocking.
// - obj: the object to be unlocked
// - temp1, temp2: temporary registers, will be destroyed
// - Z_R1_scratch: will be killed in case of Interpreter & C1 Compiler
void MacroAssembler::lightweight_unlock(Register obj, Register temp1, Register temp2, Label& slow) {
void MacroAssembler::fast_unlock(Register obj, Register temp1, Register temp2, Label& slow) {
assert_different_registers(obj, temp1, temp2);
@ -6264,7 +6264,7 @@ void MacroAssembler::lightweight_unlock(Register obj, Register temp1, Register t
NearLabel not_unlocked;
z_tmll(mark, markWord::unlocked_value);
z_braz(not_unlocked);
stop("lightweight_unlock already unlocked");
stop("fast_unlock already unlocked");
bind(not_unlocked);
#endif // ASSERT
@ -6289,7 +6289,7 @@ void MacroAssembler::lightweight_unlock(Register obj, Register temp1, Register t
bind(unlocked);
}
void MacroAssembler::compiler_fast_lock_lightweight_object(Register obj, Register box, Register tmp1, Register tmp2) {
void MacroAssembler::compiler_fast_lock_object(Register obj, Register box, Register tmp1, Register tmp2) {
assert_different_registers(obj, box, tmp1, tmp2, Z_R0_scratch);
// Handle inflated monitor.
@ -6314,8 +6314,8 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(Register obj, Registe
const int mark_offset = oopDesc::mark_offset_in_bytes();
const ByteSize ls_top_offset = JavaThread::lock_stack_top_offset();
BLOCK_COMMENT("compiler_fast_lightweight_locking {");
{ // lightweight locking
BLOCK_COMMENT("compiler_fast_locking {");
{ // Fast locking
// Push lock to the lock stack and finish successfully. MUST reach to with flag == EQ
NearLabel push;
@ -6362,9 +6362,9 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(Register obj, Registe
z_cgr(obj, obj); // set the CC to EQ, as it could be changed by alsi
z_bru(locked);
}
BLOCK_COMMENT("} compiler_fast_lightweight_locking");
BLOCK_COMMENT("} compiler_fast_locking");
BLOCK_COMMENT("handle_inflated_monitor_lightweight_locking {");
BLOCK_COMMENT("handle_inflated_monitor_locking {");
{ // Handle inflated monitor.
bind(inflated);
@ -6441,7 +6441,7 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(Register obj, Registe
// set the CC now
z_cgr(obj, obj);
}
BLOCK_COMMENT("} handle_inflated_monitor_lightweight_locking");
BLOCK_COMMENT("} handle_inflated_monitor_locking");
bind(locked);
@ -6464,7 +6464,7 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(Register obj, Registe
// C2 uses the value of flag (NE vs EQ) to determine the continuation.
}
void MacroAssembler::compiler_fast_unlock_lightweight_object(Register obj, Register box, Register tmp1, Register tmp2) {
void MacroAssembler::compiler_fast_unlock_object(Register obj, Register box, Register tmp1, Register tmp2) {
assert_different_registers(obj, box, tmp1, tmp2);
// Handle inflated monitor.
@ -6479,8 +6479,8 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(Register obj, Regis
const int mark_offset = oopDesc::mark_offset_in_bytes();
const ByteSize ls_top_offset = JavaThread::lock_stack_top_offset();
BLOCK_COMMENT("compiler_fast_lightweight_unlock {");
{ // Lightweight Unlock
BLOCK_COMMENT("compiler_fast_unlock {");
{ // Fast Unlock
NearLabel push_and_slow_path;
// Check if obj is top of lock-stack.
@ -6525,7 +6525,7 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(Register obj, Regis
NearLabel not_unlocked;
z_tmll(mark, markWord::unlocked_value);
z_braz(not_unlocked);
stop("lightweight_unlock already unlocked");
stop("fast_unlock already unlocked");
bind(not_unlocked);
#endif // ASSERT
@ -6546,7 +6546,7 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(Register obj, Regis
z_ltgr(obj, obj); // object is not null here
z_bru(slow_path);
}
BLOCK_COMMENT("} compiler_fast_lightweight_unlock");
BLOCK_COMMENT("} compiler_fast_unlock");
{ // Handle inflated monitor.

View File

@ -790,10 +790,10 @@ class MacroAssembler: public Assembler {
// Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
void increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg);
void lightweight_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Label& slow);
void lightweight_unlock(Register obj, Register tmp1, Register tmp2, Label& slow);
void compiler_fast_lock_lightweight_object(Register obj, Register box, Register tmp1, Register tmp2);
void compiler_fast_unlock_lightweight_object(Register obj, Register box, Register tmp1, Register tmp2);
void fast_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Label& slow);
void fast_unlock(Register obj, Register tmp1, Register tmp2, Label& slow);
void compiler_fast_lock_object(Register obj, Register box, Register tmp1, Register tmp2);
void compiler_fast_unlock_object(Register obj, Register box, Register tmp1, Register tmp2);
void resolve_jobject(Register value, Register tmp1, Register tmp2);
void resolve_global_jobject(Register value, Register tmp1, Register tmp2);

View File

@ -10123,14 +10123,14 @@ instruct partialSubtypeCheckConstSuper(rarg2RegP sub, rarg1RegP super, immP supe
// ============================================================================
// inlined locking and unlocking
instruct cmpFastLockLightweight(flagsReg pcc, iRegP_N2P oop, iRegP_N2P box, iRegP tmp1, iRegP tmp2) %{
instruct cmpFastLock(flagsReg pcc, iRegP_N2P oop, iRegP_N2P box, iRegP tmp1, iRegP tmp2) %{
match(Set pcc (FastLock oop box));
effect(TEMP tmp1, TEMP tmp2);
ins_cost(100);
// TODO: s390 port size(VARIABLE_SIZE);
format %{ "FASTLOCK $oop, $box; KILL Z_ARG4, Z_ARG5" %}
ins_encode %{
__ fast_lock_lightweight($oop$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register);
__ fast_lock($oop$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register);
// If locking was successful, cc should indicate 'EQ'.
// The compiler generates a branch to the runtime call to
// _complete_monitor_locking_Java for the case where cc is 'NE'.
@ -10138,14 +10138,14 @@ instruct cmpFastLockLightweight(flagsReg pcc, iRegP_N2P oop, iRegP_N2P box, iReg
ins_pipe(pipe_class_dummy);
%}
instruct cmpFastUnlockLightweight(flagsReg pcc, iRegP_N2P oop, iRegP_N2P box, iRegP tmp1, iRegP tmp2) %{
instruct cmpFastUnlock(flagsReg pcc, iRegP_N2P oop, iRegP_N2P box, iRegP tmp1, iRegP tmp2) %{
match(Set pcc (FastUnlock oop box));
effect(TEMP tmp1, TEMP tmp2);
ins_cost(100);
// TODO: s390 port size(FIXED_SIZE);
format %{ "FASTUNLOCK $oop, $box; KILL Z_ARG4, Z_ARG5" %}
ins_encode %{
__ fast_unlock_lightweight($oop$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register);
__ fast_unlock($oop$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register);
// If unlocking was successful, cc should indicate 'EQ'.
// The compiler generates a branch to the runtime call to
// _complete_monitor_unlocking_Java for the case where cc is 'NE'.

View File

@ -1765,7 +1765,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Try fastpath for locking.
// Fast_lock kills r_temp_1, r_temp_2.
__ compiler_fast_lock_lightweight_object(r_oop, r_box, r_tmp1, r_tmp2);
__ compiler_fast_lock_object(r_oop, r_box, r_tmp1, r_tmp2);
__ z_bre(done);
//-------------------------------------------------------------------------
@ -1961,7 +1961,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Try fastpath for unlocking.
// Fast_unlock kills r_tmp1, r_tmp2.
__ compiler_fast_unlock_lightweight_object(r_oop, r_box, r_tmp1, r_tmp2);
__ compiler_fast_unlock_object(r_oop, r_box, r_tmp1, r_tmp2);
__ z_bre(done);
// Slow path for unlocking.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -425,7 +425,7 @@ class VM_Version: public Abstract_VM_Version {
constexpr static bool supports_secondary_supers_table() { return true; }
constexpr static bool supports_recursive_lightweight_locking() { return true; }
constexpr static bool supports_recursive_fast_locking() { return true; }
// CPU feature query functions
static const char* get_model_string() { return _model_string; }

View File

@ -53,7 +53,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register basic_lo
null_check_offset = offset();
lightweight_lock(basic_lock, obj, hdr, tmp, slow_case);
fast_lock(basic_lock, obj, hdr, tmp, slow_case);
return null_check_offset;
}
@ -66,7 +66,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register basic
movptr(obj, Address(basic_lock, BasicObjectLock::obj_offset()));
verify_oop(obj);
lightweight_unlock(obj, rax, hdr, slow_case);
fast_unlock(obj, rax, hdr, slow_case);
}

View File

@ -58,11 +58,11 @@ void C2EntryBarrierStub::emit(C2_MacroAssembler& masm) {
__ jmp(continuation(), false /* maybe_short */);
}
int C2FastUnlockLightweightStub::max_size() const {
int C2FastUnlockStub::max_size() const {
return 128;
}
void C2FastUnlockLightweightStub::emit(C2_MacroAssembler& masm) {
void C2FastUnlockStub::emit(C2_MacroAssembler& masm) {
assert(_t == rax, "must be");
{ // Restore lock-stack and handle the unlock in runtime.

View File

@ -222,8 +222,8 @@ inline Assembler::AvxVectorLen C2_MacroAssembler::vector_length_encoding(int vle
// box: on-stack box address -- KILLED
// rax: tmp -- KILLED
// t : tmp -- KILLED
void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register rax_reg,
Register t, Register thread) {
void C2_MacroAssembler::fast_lock(Register obj, Register box, Register rax_reg,
Register t, Register thread) {
assert(rax_reg == rax, "Used for CAS");
assert_different_registers(obj, box, rax_reg, t, thread);
@ -247,7 +247,7 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist
const Register mark = t;
{ // Lightweight Lock
{ // Fast Lock
Label push;
@ -415,7 +415,7 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist
// A perfectly viable alternative is to elide the owner check except when
// Xcheck:jni is enabled.
void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax, Register t, Register thread) {
void C2_MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register t, Register thread) {
assert(reg_rax == rax, "Used for CAS");
assert_different_registers(obj, reg_rax, t);
@ -430,16 +430,16 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax,
const Register box = reg_rax;
Label dummy;
C2FastUnlockLightweightStub* stub = nullptr;
C2FastUnlockStub* stub = nullptr;
if (!Compile::current()->output()->in_scratch_emit_size()) {
stub = new (Compile::current()->comp_arena()) C2FastUnlockLightweightStub(obj, mark, reg_rax, thread);
stub = new (Compile::current()->comp_arena()) C2FastUnlockStub(obj, mark, reg_rax, thread);
Compile::current()->output()->add_stub(stub);
}
Label& push_and_slow_path = stub == nullptr ? dummy : stub->push_and_slow_path();
{ // Lightweight Unlock
{ // Fast Unlock
// Load top.
movl(top, Address(thread, JavaThread::lock_stack_top_offset()));

View File

@ -35,9 +35,9 @@ public:
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
// See full description in c2_MacroAssembler_x86.cpp.
void fast_lock_lightweight(Register obj, Register box, Register rax_reg,
Register t, Register thread);
void fast_unlock_lightweight(Register obj, Register reg_rax, Register t, Register thread);
void fast_lock(Register obj, Register box, Register rax_reg,
Register t, Register thread);
void fast_unlock(Register obj, Register reg_rax, Register t, Register thread);
void verify_int_in_range(uint idx, const TypeInt* t, Register val);
void verify_long_in_range(uint idx, const TypeLong* t, Register val, Register tmp);

View File

@ -1107,7 +1107,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
// Load object pointer into obj_reg
movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
lightweight_lock(lock_reg, obj_reg, swap_reg, tmp_reg, slow_case);
fast_lock(lock_reg, obj_reg, swap_reg, tmp_reg, slow_case);
jmp(done);
bind(slow_case);
@ -1149,7 +1149,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// Free entry
movptr(Address(lock_reg, BasicObjectLock::obj_offset()), NULL_WORD);
lightweight_unlock(obj_reg, swap_reg, header_reg, slow_case);
fast_unlock(obj_reg, swap_reg, header_reg, slow_case);
jmp(done);
bind(slow_case);

View File

@ -9653,13 +9653,13 @@ void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigne
bind(L_stack_ok);
}
// Implements lightweight-locking.
// Implements fast-locking.
//
// obj: the object to be locked
// reg_rax: rax
// thread: the thread which attempts to lock obj
// tmp: a temporary register
void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) {
void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) {
Register thread = r15_thread;
assert(reg_rax == rax, "");
@ -9715,13 +9715,13 @@ void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Registe
movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
}
// Implements lightweight-unlocking.
// Implements fast-unlocking.
//
// obj: the object to be unlocked
// reg_rax: rax
// thread: the thread
// tmp: a temporary register
void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {
void MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {
Register thread = r15_thread;
assert(reg_rax == rax, "");
@ -9753,7 +9753,7 @@ void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register
Label not_unlocked;
testptr(reg_rax, markWord::unlocked_value);
jcc(Assembler::zero, not_unlocked);
stop("lightweight_unlock already unlocked");
stop("fast_unlock already unlocked");
bind(not_unlocked);
#endif

View File

@ -2054,8 +2054,8 @@ public:
void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow);
void lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow);
void fast_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow);
void fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow);
void save_legacy_gprs();
void restore_legacy_gprs();

View File

@ -2141,7 +2141,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Load the oop from the handle
__ movptr(obj_reg, Address(oop_handle_reg, 0));
__ lightweight_lock(lock_reg, obj_reg, swap_reg, rscratch1, slow_path_lock);
__ fast_lock(lock_reg, obj_reg, swap_reg, rscratch1, slow_path_lock);
// Slow path will re-enter here
__ bind(lock_done);
@ -2266,7 +2266,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
save_native_result(masm, ret_type, stack_slots);
}
__ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
__ fast_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
// slow path re-enters here
__ bind(unlock_done);

View File

@ -995,7 +995,7 @@ public:
return true;
}
constexpr static bool supports_recursive_lightweight_locking() {
constexpr static bool supports_recursive_fast_locking() {
return true;
}

View File

@ -16925,24 +16925,24 @@ instruct jmpConUCF2_short(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
// ============================================================================
// inlined locking and unlocking
instruct cmpFastLockLightweight(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI rax_reg, rRegP tmp) %{
instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI rax_reg, rRegP tmp) %{
match(Set cr (FastLock object box));
effect(TEMP rax_reg, TEMP tmp, USE_KILL box);
ins_cost(300);
format %{ "fastlock $object,$box\t! kills $box,$rax_reg,$tmp" %}
ins_encode %{
__ fast_lock_lightweight($object$$Register, $box$$Register, $rax_reg$$Register, $tmp$$Register, r15_thread);
__ fast_lock($object$$Register, $box$$Register, $rax_reg$$Register, $tmp$$Register, r15_thread);
%}
ins_pipe(pipe_slow);
%}
instruct cmpFastUnlockLightweight(rFlagsReg cr, rRegP object, rax_RegP rax_reg, rRegP tmp) %{
instruct cmpFastUnlock(rFlagsReg cr, rRegP object, rax_RegP rax_reg, rRegP tmp) %{
match(Set cr (FastUnlock object rax_reg));
effect(TEMP tmp, USE_KILL rax_reg);
ins_cost(300);
format %{ "fastunlock $object,$rax_reg\t! kills $rax_reg,$tmp" %}
ins_encode %{
__ fast_unlock_lightweight($object$$Register, $rax_reg$$Register, $tmp$$Register, r15_thread);
__ fast_unlock($object$$Register, $rax_reg$$Register, $tmp$$Register, r15_thread);
%}
ins_pipe(pipe_slow);
%}

View File

@ -70,7 +70,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/align.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/copy.hpp"

View File

@ -211,7 +211,7 @@ class markWord {
}
ObjectMonitor* monitor() const {
assert(has_monitor(), "check");
assert(!UseObjectMonitorTable, "Lightweight locking with OM table does not use markWord for monitors");
assert(!UseObjectMonitorTable, "Locking with OM table does not use markWord for monitors");
// Use xor instead of &~ to provide one extra tag-bit check.
return (ObjectMonitor*) (value() ^ monitor_value);
}
@ -237,7 +237,7 @@ class markWord {
return from_pointer(lock);
}
static markWord encode(ObjectMonitor* monitor) {
assert(!UseObjectMonitorTable, "Lightweight locking with OM table does not use markWord for monitors");
assert(!UseObjectMonitorTable, "Locking with OM table does not use markWord for monitors");
uintptr_t tmp = (uintptr_t) monitor;
return markWord(tmp | monitor_value);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -97,7 +97,7 @@ public:
void emit(C2_MacroAssembler& masm);
};
class C2FastUnlockLightweightStub : public C2CodeStub {
class C2FastUnlockStub : public C2CodeStub {
private:
Register _obj;
Register _mark;
@ -107,8 +107,8 @@ private:
Label _push_and_slow_path;
Label _unlocked_continuation;
public:
C2FastUnlockLightweightStub(Register obj, Register mark, Register t, Register thread) : C2CodeStub(),
_obj(obj), _mark(mark), _t(t), _thread(thread) {}
C2FastUnlockStub(Register obj, Register mark, Register t, Register thread) : C2CodeStub(),
_obj(obj), _mark(mark), _t(t), _thread(thread) {}
int max_size() const;
void emit(C2_MacroAssembler& masm);
Label& slow_path() { return _slow_path; }

View File

@ -55,7 +55,7 @@
#include "runtime/osThread.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vframe.inline.hpp"

View File

@ -87,7 +87,6 @@
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/lockStack.hpp"
#include "runtime/os.hpp"
#include "runtime/stackFrameStream.inline.hpp"
@ -1974,8 +1973,8 @@ WB_ENTRY(jint, WB_getLockStackCapacity(JNIEnv* env))
return (jint) LockStack::CAPACITY;
WB_END
WB_ENTRY(jboolean, WB_supportsRecursiveLightweightLocking(JNIEnv* env))
return (jboolean) VM_Version::supports_recursive_lightweight_locking();
WB_ENTRY(jboolean, WB_supportsRecursiveFastLocking(JNIEnv* env))
return (jboolean) VM_Version::supports_recursive_fast_locking();
WB_END
WB_ENTRY(jboolean, WB_DeflateIdleMonitors(JNIEnv* env, jobject wb))
@ -2996,7 +2995,7 @@ static JNINativeMethod methods[] = {
{CC"isUbsanEnabled", CC"()Z", (void*)&WB_IsUbsanEnabled },
{CC"getInUseMonitorCount", CC"()J", (void*)&WB_getInUseMonitorCount },
{CC"getLockStackCapacity", CC"()I", (void*)&WB_getLockStackCapacity },
{CC"supportsRecursiveLightweightLocking", CC"()Z", (void*)&WB_supportsRecursiveLightweightLocking },
{CC"supportsRecursiveFastLocking", CC"()Z", (void*)&WB_supportsRecursiveFastLocking },
{CC"forceSafepoint", CC"()V", (void*)&WB_ForceSafepoint },
{CC"forceClassLoaderStatsSafepoint", CC"()V", (void*)&WB_ForceClassLoaderStatsSafepoint },
{CC"getConstantPool0", CC"(Ljava/lang/Class;)J", (void*)&WB_GetConstantPool },

View File

@ -191,8 +191,8 @@ class Abstract_VM_Version: AllStatic {
// Does platform support stack watermark barriers for concurrent stack processing?
constexpr static bool supports_stack_watermark_barrier() { return false; }
// Is recursive lightweight locking implemented for this platform?
constexpr static bool supports_recursive_lightweight_locking() { return false; }
// Is recursive fast locking implemented for this platform?
constexpr static bool supports_recursive_fast_locking() { return false; }
// Does platform support secondary supers table lookup?
constexpr static bool supports_secondary_supers_table() { return false; }

View File

@ -74,7 +74,6 @@
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/keepStackGCProcessed.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
@ -85,7 +84,7 @@
#include "runtime/stackValue.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/vframe.hpp"
@ -1680,8 +1679,8 @@ bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInf
}
ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
if (deoptee_thread->lock_stack().contains(obj())) {
LightweightSynchronizer::inflate_fast_locked_object(obj(), ObjectSynchronizer::InflateCause::inflate_cause_vm_internal,
deoptee_thread, thread);
ObjectSynchronizer::inflate_fast_locked_object(obj(), ObjectSynchronizer::InflateCause::inflate_cause_vm_internal,
deoptee_thread, thread);
}
assert(mon_info->owner()->is_locked(), "object must be locked now");
assert(obj->mark().has_monitor(), "must be");

View File

@ -1954,14 +1954,14 @@ const int ObjectAlignmentInBytes = 8;
"fence. Add cleanliness checks.") \
\
product(bool, UseObjectMonitorTable, false, DIAGNOSTIC, \
"With Lightweight Locking mode, use a table to record inflated " \
"monitors rather than the first word of the object.") \
"Use a table to record inflated monitors rather than the first " \
"word of the object.") \
\
product(int, LightweightFastLockingSpins, 13, DIAGNOSTIC, \
"Specifies the number of times lightweight fast locking will " \
"attempt to CAS the markWord before inflating. Between each " \
"CAS it will spin for exponentially more time, resulting in " \
"a total number of spins on the order of O(2^value)") \
product(int, FastLockingSpins, 13, DIAGNOSTIC, \
"Specifies the number of times fast locking will attempt to " \
"CAS the markWord before inflating. Between each CAS it will " \
"spin for exponentially more time, resulting in a total number " \
"of spins on the order of O(2^value)") \
range(1, 30) \
\
product(uint, TrimNativeHeapInterval, 0, \

View File

@ -1409,7 +1409,7 @@ void JavaThread::oops_do_no_frames(OopClosure* f, NMethodClosure* cf) {
entry = entry->parent();
}
// Due to lightweight locking
// Due to fast locking
lock_stack().oops_do(f);
}

File diff suppressed because it is too large Load Diff

View File

@ -1,80 +0,0 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_RUNTIME_LIGHTWEIGHTSYNCHRONIZER_HPP
#define SHARE_RUNTIME_LIGHTWEIGHTSYNCHRONIZER_HPP
#include "memory/allStatic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/synchronizer.hpp"
class ObjectMonitorTable;
class LightweightSynchronizer : AllStatic {
private:
static ObjectMonitor* get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted);
static ObjectMonitor* get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause);
static ObjectMonitor* add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj);
static bool remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj);
static void deflate_mark_word(oop object);
static void ensure_lock_stack_space(JavaThread* current);
class CacheSetter;
class LockStackInflateContendedLocks;
class VerifyThreadState;
public:
static void initialize();
static bool needs_resize();
static bool resize_table(JavaThread* current);
private:
static inline bool fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current);
static bool fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation);
public:
static void enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread);
static void enter(Handle obj, BasicLock* lock, JavaThread* current);
static void exit(oop object, BasicLock* lock, JavaThread* current);
static ObjectMonitor* inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current);
static ObjectMonitor* inflate_locked_or_imse(oop object, ObjectSynchronizer::InflateCause cause, TRAPS);
static ObjectMonitor* inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current);
static ObjectMonitor* inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current);
static void deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor);
static ObjectMonitor* get_monitor_from_table(Thread* current, oop obj);
static bool contains_monitor(Thread* current, ObjectMonitor* monitor);
static bool quick_enter(oop obj, BasicLock* Lock, JavaThread* current);
};
#endif // SHARE_RUNTIME_LIGHTWEIGHTSYNCHRONIZER_HPP

View File

@ -35,7 +35,7 @@
#include "runtime/safepoint.hpp"
#include "runtime/stackWatermark.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
@ -82,7 +82,7 @@ void LockStack::verify(const char* msg) const {
int top = to_index(_top);
for (int i = 0; i < top; i++) {
assert(_base[i] != nullptr, "no zapped before top");
if (VM_Version::supports_recursive_lightweight_locking()) {
if (VM_Version::supports_recursive_fast_locking()) {
oop o = _base[i];
for (; i < top - 1; i++) {
// Consecutive entries may be the same

View File

@ -1,7 +1,7 @@
/*
* Copyright (c) 2022, Red Hat, Inc. All rights reserved.
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,11 +31,11 @@
#include "memory/iterator.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stackWatermark.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
@ -87,7 +87,7 @@ inline bool LockStack::is_empty() const {
}
inline bool LockStack::is_recursive(oop o) const {
if (!VM_Version::supports_recursive_lightweight_locking()) {
if (!VM_Version::supports_recursive_fast_locking()) {
return false;
}
verify("pre-is_recursive");
@ -119,7 +119,7 @@ inline bool LockStack::is_recursive(oop o) const {
}
inline bool LockStack::try_recursive_enter(oop o) {
if (!VM_Version::supports_recursive_lightweight_locking()) {
if (!VM_Version::supports_recursive_fast_locking()) {
return false;
}
verify("pre-try_recursive_enter");
@ -145,7 +145,7 @@ inline bool LockStack::try_recursive_enter(oop o) {
}
inline bool LockStack::try_recursive_exit(oop o) {
if (!VM_Version::supports_recursive_lightweight_locking()) {
if (!VM_Version::supports_recursive_fast_locking()) {
return false;
}
verify("pre-try_recursive_exit");
@ -254,7 +254,7 @@ inline void OMCache::set_monitor(ObjectMonitor *monitor) {
oop obj = monitor->object_peek();
assert(obj != nullptr, "must be alive");
assert(monitor == LightweightSynchronizer::get_monitor_from_table(JavaThread::current(), obj), "must exist in table");
assert(monitor == ObjectSynchronizer::get_monitor_from_table(JavaThread::current(), obj), "must exist in table");
OMCacheEntry to_insert = {obj, monitor};

View File

@ -43,7 +43,6 @@
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/orderAccess.hpp"
@ -51,6 +50,7 @@
#include "runtime/safefetch.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "services/threadService.hpp"
#include "utilities/debug.hpp"
@ -415,7 +415,7 @@ bool ObjectMonitor::try_lock_with_contention_mark(JavaThread* locking_thread, Ob
}
void ObjectMonitor::enter_for_with_contention_mark(JavaThread* locking_thread, ObjectMonitorContentionMark& contention_mark) {
// Used by LightweightSynchronizer::inflate_and_enter in deoptimization path to enter for another thread.
// Used by ObjectSynchronizer::inflate_and_enter in deoptimization path to enter for another thread.
// The monitor is private to or already owned by locking_thread which must be suspended.
// So this code may only contend with deflation.
assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
@ -856,7 +856,7 @@ bool ObjectMonitor::deflate_monitor(Thread* current) {
}
if (UseObjectMonitorTable) {
LightweightSynchronizer::deflate_monitor(current, obj, this);
ObjectSynchronizer::deflate_monitor(current, obj, this);
} else if (obj != nullptr) {
// Install the old mark word if nobody else has already done it.
install_displaced_markword_in_object(obj);

View File

@ -160,10 +160,10 @@ class ObjectMonitor : public CHeapObj<mtObjectMonitor> {
// Because of frequent access, the metadata field is at offset zero (0).
// Enforced by the assert() in metadata_addr().
// * Lightweight locking with UseObjectMonitorTable:
// * Locking with UseObjectMonitorTable:
// Contains the _object's hashCode.
// * * Lightweight locking without UseObjectMonitorTable:
// Contains the displaced object header word - mark
// * Locking without UseObjectMonitorTable:
// Contains the displaced object header word - mark
volatile uintptr_t _metadata; // metadata
WeakHandle _object; // backward object pointer
// Separate _metadata and _owner on different cache lines since both can

View File

@ -74,22 +74,22 @@ inline volatile uintptr_t* ObjectMonitor::metadata_addr() {
}
inline markWord ObjectMonitor::header() const {
assert(!UseObjectMonitorTable, "Lightweight locking with OM table does not use header");
assert(!UseObjectMonitorTable, "Locking with OM table does not use header");
return markWord(metadata());
}
inline void ObjectMonitor::set_header(markWord hdr) {
assert(!UseObjectMonitorTable, "Lightweight locking with OM table does not use header");
assert(!UseObjectMonitorTable, "Locking with OM table does not use header");
set_metadata(hdr.value());
}
inline intptr_t ObjectMonitor::hash() const {
assert(UseObjectMonitorTable, "Only used by lightweight locking with OM table");
assert(UseObjectMonitorTable, "Only used when locking with OM table");
return metadata();
}
inline void ObjectMonitor::set_hash(intptr_t hash) {
assert(UseObjectMonitorTable, "Only used by lightweight locking with OM table");
assert(UseObjectMonitorTable, "Only used when locking with OM table");
set_metadata(hash);
}

View File

@ -36,10 +36,10 @@
#include "prims/resolvedMethodTable.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/synchronizer.hpp"
#include "services/finalizerService.hpp"
#include "services/gcNotifier.hpp"
#include "services/lowMemoryDetector.hpp"
@ -113,7 +113,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
(cldg_cleanup_work = ClassLoaderDataGraph::should_clean_metaspaces_and_reset()) |
(jvmti_tagmap_work = JvmtiTagMap::has_object_free_events_and_reset()) |
(oopmap_cache_work = OopMapCache::has_cleanup_work()) |
(object_monitor_table_work = LightweightSynchronizer::needs_resize())
(object_monitor_table_work = ObjectSynchronizer::needs_resize())
) == 0) {
// Wait until notified that there is some work to do or timer expires.
// Some cleanup requests don't notify the ServiceThread so work needs to be done at periodic intervals.
@ -173,7 +173,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
}
if (object_monitor_table_work) {
LightweightSynchronizer::resize_table(jt);
ObjectSynchronizer::resize_table(jt);
}
}
}

View File

@ -72,7 +72,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
@ -2029,7 +2029,7 @@ void SharedRuntime::monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThrea
ExceptionMark em(current);
// Check if C2_MacroAssembler::fast_unlock() or
// C2_MacroAssembler::fast_unlock_lightweight() unlocked an inflated
// C2_MacroAssembler::fast_unlock() unlocked an inflated
// monitor before going slow path. Since there is no safepoint
// polling when calling into the VM, we can be sure that the monitor
// hasn't been deallocated.

File diff suppressed because it is too large Load Diff

View File

@ -94,8 +94,8 @@ public:
// deoptimization at monitor exit. Hence, it does not take a Handle argument.
// This is the "slow path" version of monitor enter and exit.
static inline void enter(Handle obj, BasicLock* lock, JavaThread* current);
static inline void exit(oop obj, BasicLock* lock, JavaThread* current);
static void enter(Handle obj, BasicLock* lock, JavaThread* current);
static void exit(oop obj, BasicLock* lock, JavaThread* current);
// Used to enter a monitor for another thread. This requires that the
// locking_thread is suspended, and that entering on a potential
@ -115,7 +115,7 @@ public:
static void notifyall(Handle obj, TRAPS);
static bool quick_notify(oopDesc* obj, JavaThread* current, bool All);
static inline bool quick_enter(oop obj, BasicLock* Lock, JavaThread* current);
static bool quick_enter(oop obj, BasicLock* Lock, JavaThread* current);
// Special internal-use-only method for use by JVM infrastructure
// that needs to wait() on a java-level object but that can't risk
@ -125,9 +125,9 @@ public:
public:
static const char* inflate_cause_name(const InflateCause cause);
inline static ObjectMonitor* read_monitor(markWord mark);
inline static ObjectMonitor* read_monitor(Thread* current, oop obj);
inline static ObjectMonitor* read_monitor(Thread* current, oop obj, markWord mark);
static ObjectMonitor* read_monitor(markWord mark);
static ObjectMonitor* read_monitor(Thread* current, oop obj);
static ObjectMonitor* read_monitor(Thread* current, oop obj, markWord mark);
// Returns the identity hash value for an oop
// NOTE: It may cause monitor inflation
@ -195,7 +195,6 @@ public:
private:
friend class SynchronizerTest;
friend class LightweightSynchronizer;
static MonitorList _in_use_list;
static volatile bool _is_async_deflation_requested;
@ -209,6 +208,44 @@ public:
static u_char* get_gvars_stw_random_addr();
static void handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread);
static ObjectMonitor* get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted);
static ObjectMonitor* get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause);
static ObjectMonitor* add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj);
static bool remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj);
static void deflate_mark_word(oop object);
static void ensure_lock_stack_space(JavaThread* current);
class CacheSetter;
class LockStackInflateContendedLocks;
class VerifyThreadState;
static void create_om_table();
public:
static bool needs_resize();
static bool resize_table(JavaThread* current);
private:
static inline bool fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current);
static bool fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation);
public:
static ObjectMonitor* inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current);
static ObjectMonitor* inflate_locked_or_imse(oop object, ObjectSynchronizer::InflateCause cause, TRAPS);
static ObjectMonitor* inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current);
static ObjectMonitor* inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current);
static void deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor);
static ObjectMonitor* get_monitor_from_table(Thread* current, oop obj);
static bool contains_monitor(Thread* current, ObjectMonitor* monitor);
static bool quick_enter_internal(oop obj, BasicLock* Lock, JavaThread* current);
};
// ObjectLocker enforces balanced locking and can never throw an

View File

@ -1,71 +0,0 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_RUNTIME_SYNCHRONIZER_INLINE_HPP
#define SHARE_RUNTIME_SYNCHRONIZER_INLINE_HPP
#include "runtime/synchronizer.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/safepointVerifiers.hpp"
inline ObjectMonitor* ObjectSynchronizer::read_monitor(markWord mark) {
return mark.monitor();
}
inline ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj) {
return ObjectSynchronizer::read_monitor(current, obj, obj->mark());
}
inline ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj, markWord mark) {
if (!UseObjectMonitorTable) {
return read_monitor(mark);
} else {
return LightweightSynchronizer::get_monitor_from_table(current, obj);
}
}
inline void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
assert(current == Thread::current(), "must be");
LightweightSynchronizer::enter(obj, lock, current);
}
inline bool ObjectSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
assert(current->thread_state() == _thread_in_Java, "invariant");
NoSafepointVerifier nsv;
if (obj == nullptr) return false; // Need to throw NPE
if (obj->klass()->is_value_based()) {
return false;
}
return LightweightSynchronizer::quick_enter(obj, lock, current);
}
inline void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
LightweightSynchronizer::exit(object, lock, current);
}
#endif // SHARE_RUNTIME_SYNCHRONIZER_INLINE_HPP

View File

@ -48,7 +48,7 @@
#include "runtime/signature.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vframeArray.hpp"

View File

@ -48,7 +48,7 @@
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"

View File

@ -63,7 +63,7 @@ public:
} while (false)
TEST_VM_F(LockStackTest, is_recursive) {
if (!VM_Version::supports_recursive_lightweight_locking()) {
if (!VM_Version::supports_recursive_fast_locking()) {
return;
}
@ -130,7 +130,7 @@ TEST_VM_F(LockStackTest, is_recursive) {
}
TEST_VM_F(LockStackTest, try_recursive_enter) {
if (!VM_Version::supports_recursive_lightweight_locking()) {
if (!VM_Version::supports_recursive_fast_locking()) {
return;
}
@ -197,7 +197,7 @@ TEST_VM_F(LockStackTest, try_recursive_enter) {
}
TEST_VM_F(LockStackTest, contains) {
const bool test_recursive = VM_Version::supports_recursive_lightweight_locking();
const bool test_recursive = VM_Version::supports_recursive_fast_locking();
JavaThread* THREAD = JavaThread::current();
// the thread should be in vm to use locks
@ -259,7 +259,7 @@ TEST_VM_F(LockStackTest, contains) {
}
TEST_VM_F(LockStackTest, remove) {
const bool test_recursive = VM_Version::supports_recursive_lightweight_locking();
const bool test_recursive = VM_Version::supports_recursive_fast_locking();
JavaThread* THREAD = JavaThread::current();
// the thread should be in vm to use locks

View File

@ -202,7 +202,7 @@ public class TestRecursiveLocking {
assertNotInflated();
} else {
// Second time we want to lock A, the lock stack
// looks like this [A, B]. Lightweight locking
// looks like this [A, B]. Fast locking
// doesn't allow interleaving ([A, B, A]), instead
// it inflates A and removes it from the lock
// stack. Which leaves us with only [B] on the
@ -220,11 +220,10 @@ public class TestRecursiveLocking {
counter++;
// Legacy tolerates endless recursions. While testing
// lightweight we don't go deeper than the size of the
// lock stack, which in this test case will be filled
// with a number of B-elements. See comment in runA()
// above for more info.
// Legacy tolerates endless recursions. While testing we
// don't go deeper than the size of the lock stack, which
// in this test case will be filled with a number of
// B-elements. See comment in runA() above for more info.
assertNotInflated();
if (depth == 1) {

View File

@ -24,7 +24,7 @@
/*
* @test TestLockStackCapacity
* @summary Tests the interaction between recursive lightweight locking and
* @summary Tests the interaction between recursive fast locking and
* when the lock stack capacity is exceeded.
* @requires vm.flagless
* @library /testlibrary /test/lib
@ -93,8 +93,8 @@ public class TestLockStackCapacity {
}
public static void main(String... args) throws Exception {
if (!WB.supportsRecursiveLightweightLocking()) {
throw new SkippedException("Test only valid if lightweight locking supports recursion");
if (!WB.supportsRecursiveFastLocking()) {
throw new SkippedException("Test only valid if fast locking supports recursion");
}
SynchronizedObject.runTest();

View File

@ -97,7 +97,7 @@
* -Xlog:monitorinflation=trace:file=monitorinflation.log
*
* @bug 8341819
* @comment Regression test for re-locking racing with deflation with lightweight locking.
* @comment Regression test for re-locking racing with deflation with fast locking.
* @run driver EATests
* -XX:+UnlockDiagnosticVMOptions
* -Xms256m -Xmx256m
@ -237,7 +237,7 @@ class EATestsTarget {
// Relocking test cases
new EARelockingSimpleTarget() .run();
new EARelockingWithManyLightweightLocksTarget() .run();
new EARelockingWithManyFastLocksTarget() .run();
new EARelockingSimpleWithAccessInOtherThreadTarget() .run();
new EARelockingSimpleWithAccessInOtherThread_02_DynamicCall_Target() .run();
new EARelockingRecursiveTarget() .run();
@ -363,7 +363,7 @@ public class EATests extends TestScaffold {
// Relocking test cases
new EARelockingSimple() .run(this);
new EARelockingWithManyLightweightLocks() .run(this);
new EARelockingWithManyFastLocks() .run(this);
new EARelockingSimpleWithAccessInOtherThread() .run(this);
new EARelockingSimpleWithAccessInOtherThread_02_DynamicCall() .run(this);
new EARelockingRecursive() .run(this);
@ -1750,12 +1750,11 @@ class EARelockingSimpleTarget extends EATestCaseBaseTarget {
/**
* Like {@link EARelockingSimple}. The difference is that there are many
* lightweight locked objects when the relocking is done. With
* lightweight the lock stack of the thread will be full because of
* this.
* fast locked objects when the relocking is done, which means that the
* lock stack of the thread will be full because of this.
*/
class EARelockingWithManyLightweightLocks extends EATestCaseBaseDebugger {
class EARelockingWithManyFastLocks extends EATestCaseBaseDebugger {
public void runTestCase() throws Exception {
BreakpointEvent bpe = resumeTo(TARGET_TESTCASE_BASE_NAME, "dontinline_brkpt", "()V");
@ -1765,7 +1764,7 @@ class EARelockingWithManyLightweightLocks extends EATestCaseBaseDebugger {
}
}
class EARelockingWithManyLightweightLocksTarget extends EATestCaseBaseTarget {
class EARelockingWithManyFastLocksTarget extends EATestCaseBaseTarget {
static class Lock {
}
@ -2260,7 +2259,7 @@ class EARelockingArgEscapeLWLockedInCalleeFrame_2Target extends EATestCaseBaseTa
/**
* Similar to {@link EARelockingArgEscapeLWLockedInCalleeFrame_2Target}. It does
* not use recursive locking and exposed a bug in the lightweight-locking implementation.
* not use recursive locking and exposed a bug in the fast-locking implementation.
*/
class EARelockingArgEscapeLWLockedInCalleeFrameNoRecursive extends EATestCaseBaseDebugger {

View File

@ -124,7 +124,7 @@ public class WhiteBox {
public native int getLockStackCapacity();
public native boolean supportsRecursiveLightweightLocking();
public native boolean supportsRecursiveFastLocking();
public native void forceSafepoint();