8364570: Remove LockingMode related code from riscv64

Reviewed-by: fyang, fjiang
Fredrik Bredberg 2025-08-13 08:47:08 +00:00
parent 72e22b4de5
commit e77cdd93ea
9 changed files with 55 additions and 576 deletions

src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp

@@ -339,11 +339,7 @@ int LIR_Assembler::emit_unwind_handler() {
if (method()->is_synchronized()) {
monitor_address(0, FrameMap::r10_opr);
stub = new MonitorExitStub(FrameMap::r10_opr, true, 0);
if (LockingMode == LM_MONITOR) {
__ j(*stub->entry());
} else {
__ unlock_object(x15, x14, x10, x16, *stub->entry());
}
__ unlock_object(x15, x14, x10, x16, *stub->entry());
__ bind(*stub->continuation());
}
@@ -1497,13 +1493,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register hdr = op->hdr_opr()->as_register();
Register lock = op->lock_opr()->as_register();
Register temp = op->scratch_opr()->as_register();
if (LockingMode == LM_MONITOR) {
if (op->info() != nullptr) {
add_debug_info_for_null_check_here(op->info());
__ null_check(obj, -1);
}
__ j(*op->stub()->entry());
} else if (op->code() == lir_lock) {
if (op->code() == lir_lock) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
// add debug info for NullPointerException only if one is possible
int null_check_offset = __ lock_object(hdr, obj, lock, temp, *op->stub()->entry());

src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp

@@ -49,8 +49,6 @@ void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result,
}
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
const int aligned_mask = BytesPerWord - 1;
const int hdr_offset = oopDesc::mark_offset_in_bytes();
assert_different_registers(hdr, obj, disp_hdr, temp, t0, t1);
int null_check_offset = -1;
@@ -61,97 +59,19 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
null_check_offset = offset();
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_lock(disp_hdr, obj, hdr, temp, t1, slow_case);
} else if (LockingMode == LM_LEGACY) {
if (DiagnoseSyncOnValueBasedClasses != 0) {
load_klass(hdr, obj);
lbu(hdr, Address(hdr, Klass::misc_flags_offset()));
test_bit(temp, hdr, exact_log2(KlassFlags::_misc_is_value_based_class));
bnez(temp, slow_case, /* is_far */ true);
}
Label done;
// Load object header
ld(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
ori(hdr, hdr, markWord::unlocked_value);
// save unlocked object header into the displaced header location on the stack
sd(hdr, Address(disp_hdr, 0));
// test if object header is still the same (i.e. unlocked), and if so, store the
// displaced header address in the object header - if it is not the same, get the
// object header instead
la(temp, Address(obj, hdr_offset));
// if the object header was the same, we're done
cmpxchgptr(hdr, disp_hdr, temp, t1, done, /*fallthrough*/nullptr);
// if the object header was not the same, it is now in the hdr register
// => test if it is a stack pointer into the same stack (recursive locking), i.e.:
//
// 1) (hdr & aligned_mask) == 0
// 2) sp <= hdr
// 3) hdr <= sp + page_size
//
// these 3 tests can be done by evaluating the following expression:
//
// (hdr - sp) & (aligned_mask - page_size)
//
// assuming both the stack pointer and page_size have their least
// significant 2 bits cleared and page_size is a power of 2
sub(hdr, hdr, sp);
mv(temp, aligned_mask - (int)os::vm_page_size());
andr(hdr, hdr, temp);
// for recursive locking, the result is zero => save it in the displaced header
// location (null in the displaced hdr location indicates recursive locking)
sd(hdr, Address(disp_hdr, 0));
// otherwise we don't care about the result and handle locking via runtime call
bnez(hdr, slow_case, /* is_far */ true);
// done
bind(done);
inc_held_monitor_count(t0);
}
lightweight_lock(disp_hdr, obj, hdr, temp, t1, slow_case);
return null_check_offset;
}
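For reference, the legacy recursion test removed above folds three checks (word alignment, sp <= hdr, hdr inside the current stack page) into one and-mask. A minimal standalone sketch, plain C++ rather than HotSpot code, assuming 64-bit pointers and a 4 KiB page:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t aligned_mask = sizeof(void*) - 1;  // 7 on a 64-bit VM
  const uintptr_t page_size    = 4096;               // assumed os::vm_page_size()
  uintptr_t sp  = 0x7ffff000;                        // pretend (page-aligned) stack pointer
  uintptr_t hdr = sp + 0x40;                         // mark word pointing into the same page

  // (hdr - sp) & (aligned_mask - page_size) == 0 holds exactly when
  //   1) (hdr & aligned_mask) == 0   (hdr is word aligned, sp being aligned)
  //   2) sp <= hdr                   (no unsigned underflow in the subtraction)
  //   3) hdr < sp + page_size        (high bits of the difference are zero)
  assert(((hdr - sp) & (aligned_mask - page_size)) == 0);  // recursive stack lock
  hdr = sp - 8;                                      // below sp: not this thread's frame
  assert(((hdr - sp) & (aligned_mask - page_size)) != 0);  // must take the slow path
  return 0;
}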
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
const int aligned_mask = BytesPerWord - 1;
const int hdr_offset = oopDesc::mark_offset_in_bytes();
assert_different_registers(hdr, obj, disp_hdr, temp, t0, t1);
Label done;
if (LockingMode != LM_LIGHTWEIGHT) {
// load displaced header
ld(hdr, Address(disp_hdr, 0));
// if the loaded hdr is null we had recursive locking
// if we had recursive locking, we are done
beqz(hdr, done);
}
// load object
ld(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
verify_oop(obj);
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_unlock(obj, hdr, temp, t1, slow_case);
} else if (LockingMode == LM_LEGACY) {
// test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object - if the object header is not pointing to
// the displaced header, get the object header instead
// if the object header was not pointing to the displaced header,
// we do unlocking via runtime call
if (hdr_offset) {
la(temp, Address(obj, hdr_offset));
cmpxchgptr(disp_hdr, hdr, temp, t1, done, &slow_case);
} else {
cmpxchgptr(disp_hdr, hdr, obj, t1, done, &slow_case);
}
// done
bind(done);
dec_held_monitor_count(t0);
}
lightweight_unlock(obj, hdr, temp, t1, slow_case);
}
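The legacy unlock path deleted above is the mirror image: a null displaced header marks a recursive exit, and otherwise the saved mark word is swapped back only if the object still points at our BasicLock. A standalone plain-C++ sketch; legacy_unlock and its parameters are illustrative, not HotSpot names:

#include <atomic>
#include <cstdint>

// Returns true on fast-path success; false means the unlock must go to the
// runtime because the lock was inflated or taken over in the meantime.
bool legacy_unlock(std::atomic<uintptr_t>& mark, uintptr_t* basic_lock) {
  uintptr_t displaced = *basic_lock;     // ld(hdr, Address(disp_hdr, 0))
  if (displaced == 0) {
    return true;                         // null displaced header: recursive exit
  }
  uintptr_t expected = (uintptr_t)basic_lock;
  // Restore the saved header only if the mark word still holds the address
  // of our BasicLock (the cmpxchgptr above); otherwise take the slow path.
  return mark.compare_exchange_strong(expected, displaced,
                                      std::memory_order_acq_rel);
}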
// Defines obj, preserves var_size_in_bytes

src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp

@@ -43,240 +43,11 @@
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg,
Register tmp1Reg, Register tmp2Reg, Register tmp3Reg, Register tmp4Reg) {
// Use cr register to indicate the fast_lock result: zero for success; non-zero for failure.
Register flag = t1;
Register oop = objectReg;
Register box = boxReg;
Register disp_hdr = tmp1Reg;
Register tmp = tmp2Reg;
Label object_has_monitor;
// Finish fast lock successfully. MUST be reached with flag == 0
Label locked;
// Finish fast lock unsuccessfully. slow_path MUST be reached with flag != 0
Label slow_path;
assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
assert_different_registers(oop, box, tmp, disp_hdr, flag, tmp3Reg, t0);
mv(flag, 1);
// Load markWord from object into displaced_header.
ld(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
if (DiagnoseSyncOnValueBasedClasses != 0) {
load_klass(tmp, oop);
lbu(tmp, Address(tmp, Klass::misc_flags_offset()));
test_bit(tmp, tmp, exact_log2(KlassFlags::_misc_is_value_based_class));
bnez(tmp, slow_path);
}
// Check for existing monitor
test_bit(tmp, disp_hdr, exact_log2(markWord::monitor_value));
bnez(tmp, object_has_monitor);
if (LockingMode == LM_MONITOR) {
j(slow_path);
} else {
assert(LockingMode == LM_LEGACY, "must be");
// Set tmp to be (markWord of object | UNLOCK_VALUE).
ori(tmp, disp_hdr, markWord::unlocked_value);
// Initialize the box. (Must happen before we update the object mark!)
sd(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
// Compare object markWord with an unlocked value (tmp) and if
// equal exchange the stack address of our box with object markWord.
// On failure disp_hdr contains the possibly locked markWord.
cmpxchg(/*memory address*/oop, /*expected value*/tmp, /*new value*/box, Assembler::int64,
Assembler::aq, Assembler::rl, /*result*/disp_hdr);
beq(disp_hdr, tmp, locked);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
// If the compare-and-exchange succeeded, then we found an unlocked
// object and have now locked it; execution continues at label locked.
// We did not see an unlocked object so try the fast recursive case.
// Check if the owner is self by comparing the value in the
// markWord of object (disp_hdr) with the stack pointer.
sub(disp_hdr, disp_hdr, sp);
mv(tmp, (intptr_t) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
// If (mark & lock_mask) == 0 and mark - sp < page_size, we are stack-locking and goto label
// locked, hence we can store 0 as the displaced header in the box, which indicates that it
// is a recursive lock.
andr(tmp/*==0?*/, disp_hdr, tmp);
sd(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
beqz(tmp, locked);
j(slow_path);
}
// Handle existing monitor.
bind(object_has_monitor);
// Try to CAS owner (no owner => current thread's _monitor_owner_id).
add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value));
Register tid = tmp4Reg;
ld(tid, Address(xthread, JavaThread::monitor_owner_id_offset()));
cmpxchg(/*memory address*/tmp, /*expected value*/zr, /*new value*/tid, Assembler::int64,
Assembler::aq, Assembler::rl, /*result*/tmp3Reg); // cas succeeds if tmp3Reg == zr(expected)
// Store a non-null value into the box to avoid looking like a re-entrant
// lock. The fast-path monitor unlock code checks for
// markWord::monitor_value so use markWord::unused_mark which has the
// relevant bit set, and also matches ObjectSynchronizer::slow_enter.
mv(tmp, (address)markWord::unused_mark().value());
sd(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
beqz(tmp3Reg, locked); // CAS success means locking succeeded
bne(tmp3Reg, tid, slow_path); // Check for recursive locking
// Recursive lock case
increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1, tmp2Reg, tmp3Reg);
bind(locked);
mv(flag, zr);
if (LockingMode == LM_LEGACY) {
inc_held_monitor_count(t0);
}
#ifdef ASSERT
// Check that locked label is reached with flag == 0.
Label flag_correct;
beqz(flag, flag_correct);
stop("Fast Lock Flag != 0");
#endif
bind(slow_path);
#ifdef ASSERT
// Check that slow_path label is reached with flag != 0.
bnez(flag, flag_correct);
stop("Fast Lock Flag == 0");
bind(flag_correct);
#endif
// C2 uses the value of flag (0 vs !0) to determine the continuation.
}
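The inflated-monitor path that survives this change CASes the owner field from null to the current thread's monitor_owner_id and treats an owner already equal to self as a recursive enter. A schematic plain-C++ sketch; Monitor, fast_enter, and self_id are illustrative stand-ins, not HotSpot types:

#include <atomic>
#include <cstdint>

struct Monitor {
  std::atomic<uint64_t> owner{0};  // 0 means unowned
  uint64_t recursions = 0;
};

// Returns true on fast-path success, false when the slow path must run.
bool fast_enter(Monitor& m, uint64_t self_id) {
  uint64_t expected = 0;
  if (m.owner.compare_exchange_strong(expected, self_id,
                                      std::memory_order_acq_rel)) {
    return true;                   // CAS from null succeeded: monitor is ours
  }
  if (expected == self_id) {       // owner is already us: recursive enter
    m.recursions++;                // increment(... recursions_offset() ...)
    return true;
  }
  return false;                    // contended: branch to slow_path
}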
void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg,
Register tmp1Reg, Register tmp2Reg) {
// Use cr register to indicate the fast_unlock result: zero for success; non-zero for failure.
Register flag = t1;
Register oop = objectReg;
Register box = boxReg;
Register disp_hdr = tmp1Reg;
Register owner_addr = tmp1Reg;
Register tmp = tmp2Reg;
Label object_has_monitor;
// Finish fast unlock successfully. MUST be reached with flag == 0
Label unlocked;
// Finish fast unlock unsuccessfully. slow_path MUST be reached with flag != 0
Label slow_path;
assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
assert_different_registers(oop, box, tmp, disp_hdr, flag, t0);
mv(flag, 1);
if (LockingMode == LM_LEGACY) {
// Find the lock address and load the displaced header from the stack.
ld(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
// If the displaced header is 0, we have a recursive unlock.
beqz(disp_hdr, unlocked);
}
// Handle existing monitor.
ld(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
test_bit(t0, tmp, exact_log2(markWord::monitor_value));
bnez(t0, object_has_monitor);
if (LockingMode == LM_MONITOR) {
j(slow_path);
} else {
assert(LockingMode == LM_LEGACY, "must be");
// Check if it is still a lightweight lock; this is true if we
// see the stack address of the BasicLock in the markWord of the
// object.
cmpxchg(/*memory address*/oop, /*expected value*/box, /*new value*/disp_hdr, Assembler::int64,
Assembler::relaxed, Assembler::rl, /*result*/tmp);
beq(box, tmp, unlocked); // box == tmp if cas succeeds
j(slow_path);
}
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
// Handle existing monitor.
bind(object_has_monitor);
subi(tmp, tmp, (int)markWord::monitor_value); // monitor
ld(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
Label notRecursive;
beqz(disp_hdr, notRecursive); // Will be 0 if not recursive.
// Recursive lock
subi(disp_hdr, disp_hdr, 1);
sd(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
j(unlocked);
bind(notRecursive);
// Compute owner address.
la(owner_addr, Address(tmp, ObjectMonitor::owner_offset()));
// Set owner to null.
// Release to satisfy the JMM
membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
sd(zr, Address(owner_addr));
// We need a full fence after clearing owner to avoid stranding.
// StoreLoad achieves this.
membar(StoreLoad);
// Check if the entry_list is empty.
ld(t0, Address(tmp, ObjectMonitor::entry_list_offset()));
beqz(t0, unlocked); // If so we are done.
// Check if there is a successor.
ld(t0, Address(tmp, ObjectMonitor::succ_offset()));
bnez(t0, unlocked); // If so we are done.
// Save the monitor pointer in the current thread, so we can try to
// reacquire the lock in SharedRuntime::monitor_exit_helper().
sd(tmp, Address(xthread, JavaThread::unlocked_inflated_monitor_offset()));
mv(flag, 1);
j(slow_path);
bind(unlocked);
mv(flag, zr);
if (LockingMode == LM_LEGACY) {
dec_held_monitor_count(t0);
}
#ifdef ASSERT
// Check that unlocked label is reached with flag == 0.
Label flag_correct;
beqz(flag, flag_correct);
stop("Fast Lock Flag != 0");
#endif
bind(slow_path);
#ifdef ASSERT
// Check that slow_path label is reached with flag != 0.
bnez(flag, flag_correct);
stop("Fast Lock Flag == 0");
bind(flag_correct);
#endif
// C2 uses the value of flag (0 vs !0) to determine the continuation.
}
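The exit protocol above releases the owner, issues a full StoreLoad fence, and only then inspects entry_list and succ, so a thread that enqueued itself between the release and the check cannot be stranded without a wakeup. A schematic plain-C++ sketch with illustrative names:

#include <atomic>
#include <cstdint>

struct ExitMonitor {
  std::atomic<uint64_t> owner{0};
  uint64_t recursions = 0;
  std::atomic<void*> entry_list{nullptr};  // threads queued to enter
  std::atomic<void*> succ{nullptr};        // designated successor, if any
};

// Returns true when the unlock is complete; false means the slow path must
// re-acquire the monitor and wake a waiter (the monitor_exit_helper case).
bool fast_exit(ExitMonitor& m) {
  if (m.recursions != 0) {
    m.recursions--;                        // recursive exit: just count down
    return true;
  }
  m.owner.store(0, std::memory_order_release);          // set owner to null
  std::atomic_thread_fence(std::memory_order_seq_cst);  // the StoreLoad fence
  if (m.entry_list.load(std::memory_order_acquire) == nullptr) {
    return true;                           // nobody is waiting
  }
  if (m.succ.load(std::memory_order_acquire) != nullptr) {
    return true;                           // a successor will take the lock
  }
  return false;                            // possible stranding: slow path
}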
void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box,
Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
// Flag register, zero for success; non-zero for failure.
Register flag = t1;
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
assert_different_registers(obj, box, tmp1, tmp2, tmp3, tmp4, flag, t0);
mv(flag, 1);
@@ -439,7 +210,6 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box,
// Flag register, zero for success; non-zero for failure.
Register flag = t1;
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
assert_different_registers(obj, box, tmp1, tmp2, tmp3, flag, t0);
mv(flag, 1);

src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -49,11 +49,6 @@
const int STUB_THRESHOLD, Label *STUB, Label *DONE);
public:
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
void fast_lock(Register object, Register box,
Register tmp1, Register tmp2, Register tmp3, Register tmp4);
void fast_unlock(Register object, Register box, Register tmp1, Register tmp2);
// Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file.
void fast_lock_lightweight(Register object, Register box,
Register tmp1, Register tmp2, Register tmp3, Register tmp4);

src/hotspot/cpu/riscv/interp_masm_riscv.cpp

@@ -733,84 +733,26 @@ void InterpreterMacroAssembler::leave_jfr_critical_section() {
void InterpreterMacroAssembler::lock_object(Register lock_reg)
{
assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
if (LockingMode == LM_MONITOR) {
call_VM_preemptable(noreg,
CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
lock_reg);
} else {
Label count, done;
const Register swap_reg = x10;
const Register tmp = c_rarg2;
const Register obj_reg = c_rarg3; // Will contain the oop
const Register tmp2 = c_rarg4;
const Register tmp3 = c_rarg5;
const Register tmp = c_rarg2;
const Register obj_reg = c_rarg3; // Will contain the oop
const Register tmp2 = c_rarg4;
const Register tmp3 = c_rarg5;
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
const int mark_offset = lock_offset +
BasicLock::displaced_header_offset_in_bytes();
// Load object pointer into obj_reg (c_rarg3)
ld(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
Label slow_case;
Label done, slow_case;
lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
j(done);
// Load object pointer into obj_reg c_rarg3
ld(obj_reg, Address(lock_reg, obj_offset));
bind(slow_case);
// Call the runtime routine for slow case
call_VM_preemptable(noreg,
CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
lock_reg);
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
j(done);
} else if (LockingMode == LM_LEGACY) {
if (DiagnoseSyncOnValueBasedClasses != 0) {
load_klass(tmp, obj_reg);
lbu(tmp, Address(tmp, Klass::misc_flags_offset()));
test_bit(tmp, tmp, exact_log2(KlassFlags::_misc_is_value_based_class));
bnez(tmp, slow_case);
}
// Load (object->mark() | 1) into swap_reg
ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
ori(swap_reg, t0, 1);
// Save (object->mark() | 1) into BasicLock's displaced header
sd(swap_reg, Address(lock_reg, mark_offset));
assert(lock_offset == 0,
"displached header must be first word in BasicObjectLock");
cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, tmp, count, /*fallthrough*/nullptr);
// Test if the oopMark is an obvious stack pointer, i.e.,
// 1) (mark & 7) == 0, and
// 2) sp <= mark < mark + os::pagesize()
//
// These 3 tests can be done by evaluating the following
// expression: ((mark - sp) & (7 - os::vm_page_size())),
// assuming both stack pointer and pagesize have their
// least significant 3 bits clear.
// NOTE: the oopMark is in swap_reg x10 as the result of cmpxchg
sub(swap_reg, swap_reg, sp);
mv(t0, (int64_t)(7 - (int)os::vm_page_size()));
andr(swap_reg, swap_reg, t0);
// Save the test result, for recursive case, the result is zero
sd(swap_reg, Address(lock_reg, mark_offset));
bnez(swap_reg, slow_case);
bind(count);
inc_held_monitor_count(t0);
j(done);
}
bind(slow_case);
// Call the runtime routine for slow case
call_VM_preemptable(noreg,
CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
lock_reg);
bind(done);
}
bind(done);
}
@@ -829,58 +771,30 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
{
assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
if (LockingMode == LM_MONITOR) {
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
} else {
Label count, done;
const Register swap_reg = x10;
const Register header_reg = c_rarg2; // Will contain the old oopMark
const Register obj_reg = c_rarg3; // Will contain the oop
const Register tmp_reg = c_rarg4; // Temporary used by lightweight_unlock
const Register swap_reg = x10;
const Register header_reg = c_rarg2; // Will contain the old oopMark
const Register obj_reg = c_rarg3; // Will contain the oop
const Register tmp_reg = c_rarg4; // Temporary used by lightweight_unlock
save_bcp(); // Save in case of exception
save_bcp(); // Save in case of exception
// Load oop into obj_reg (c_rarg3)
ld(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
if (LockingMode != LM_LIGHTWEIGHT) {
// Convert from BasicObjectLock structure to object and BasicLock
// structure. Store the BasicLock address into x10
la(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
}
// Free entry
sd(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
// Load oop into obj_reg(c_rarg3)
ld(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
Label done, slow_case;
lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
j(done);
// Free entry
sd(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
bind(slow_case);
// Call the runtime routine for slow case.
sd(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
Label slow_case;
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
j(done);
} else if (LockingMode == LM_LEGACY) {
// Load the old header from BasicLock structure
ld(header_reg, Address(swap_reg,
BasicLock::displaced_header_offset_in_bytes()));
// Test for recursion
beqz(header_reg, count);
// Atomic swap back the old header
cmpxchg_obj_header(swap_reg, header_reg, obj_reg, tmp_reg, count, &slow_case);
bind(count);
dec_held_monitor_count(t0);
j(done);
}
bind(slow_case);
// Call the runtime routine for slow case.
sd(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
bind(done);
restore_bcp();
}
bind(done);
restore_bcp();
}

src/hotspot/cpu/riscv/macroAssembler_riscv.cpp

@@ -6421,7 +6421,6 @@ void MacroAssembler::test_bit(Register Rd, Register Rs, uint32_t bit_pos) {
// - tmp1, tmp2, tmp3: temporary registers, will be destroyed
// - slow: branched to if locking fails
void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(basic_lock, obj, tmp1, tmp2, tmp3, t0);
Label push;
@@ -6481,7 +6480,6 @@ void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Registe
// - tmp1, tmp2, tmp3: temporary registers
// - slow: branched to if unlocking fails
void MacroAssembler::lightweight_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, tmp1, tmp2, tmp3, t0);
#ifdef ASSERT

src/hotspot/cpu/riscv/riscv.ad

@@ -1,5 +1,5 @@
//
// Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
// Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -11021,45 +11021,9 @@ instruct tlsLoadP(javaThread_RegP dst)
// inlined locking and unlocking
// using t1 as the 'flag' register to bridge the BoolNode producers and consumers
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box,
iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
%{
predicate(LockingMode != LM_LIGHTWEIGHT);
match(Set cr (FastLock object box));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);
ins_cost(10 * DEFAULT_COST);
format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLock" %}
ins_encode %{
__ fast_lock($object$$Register, $box$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
%}
ins_pipe(pipe_serial);
%}
// using t1 as the 'flag' register to bridge the BoolNode producers and consumers
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2)
%{
predicate(LockingMode != LM_LIGHTWEIGHT);
match(Set cr (FastUnlock object box));
effect(TEMP tmp1, TEMP tmp2);
ins_cost(10 * DEFAULT_COST);
format %{ "fastunlock $object,$box\t! kills $tmp1, $tmp2, #@cmpFastUnlock" %}
ins_encode %{
__ fast_unlock($object$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register);
%}
ins_pipe(pipe_serial);
%}
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box,
iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
%{
predicate(LockingMode == LM_LIGHTWEIGHT);
match(Set cr (FastLock object box));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);
@@ -11074,10 +11038,10 @@ instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box,
ins_pipe(pipe_serial);
%}
// using t1 as the 'flag' register to bridge the BoolNode producers and consumers
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box,
iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
predicate(LockingMode == LM_LIGHTWEIGHT);
match(Set cr (FastUnlock object box));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);

src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp

@@ -1637,7 +1637,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// We use the same pc/oopMap repeatedly when we call out.
Label native_return;
if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
if (method->is_object_wait0()) {
// For convenience we use the pc we want to resume to in case of preemption on Object.wait.
__ set_last_Java_frame(sp, noreg, native_return, t0);
} else {
@@ -1679,8 +1679,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
Label lock_done;
if (method->is_synchronized()) {
Label count;
const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
// Get the handle (the 2nd argument)
@@ -1693,42 +1691,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Load the oop from the handle
__ ld(obj_reg, Address(oop_handle_reg, 0));
if (LockingMode == LM_MONITOR) {
__ j(slow_path_lock);
} else if (LockingMode == LM_LEGACY) {
// Load (object->mark() | 1) into swap_reg % x10
__ ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ ori(swap_reg, t0, 1);
// Save (object->mark() | 1) into BasicLock's displaced header
__ sd(swap_reg, Address(lock_reg, mark_word_offset));
// src -> dest if dest == x10 else x10 <- dest
__ cmpxchg_obj_header(x10, lock_reg, obj_reg, lock_tmp, count, /*fallthrough*/nullptr);
// Test if the oopMark is an obvious stack pointer, i.e.,
// 1) (mark & 3) == 0, and
// 2) sp <= mark < mark + os::pagesize()
// These 3 tests can be done by evaluating the following
// expression: ((mark - sp) & (3 - os::vm_page_size())),
// assuming both stack pointer and pagesize have their
// least significant 2 bits clear.
// NOTE: the oopMark is in swap_reg % x10 as the result of cmpxchg
__ sub(swap_reg, swap_reg, sp);
__ mv(t0, 3 - (int)os::vm_page_size());
__ andr(swap_reg, swap_reg, t0);
// Save the test result, for recursive case, the result is zero
__ sd(swap_reg, Address(lock_reg, mark_word_offset));
__ bnez(swap_reg, slow_path_lock);
__ bind(count);
__ inc_held_monitor_count(t0);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
}
__ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
// Slow path will re-enter here
__ bind(lock_done);
@@ -1789,7 +1752,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
__ sw(t0, Address(t1));
if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
if (method->is_object_wait0()) {
// Check preemption for Object.wait()
__ ld(t1, Address(xthread, JavaThread::preempt_alternate_return_offset()));
__ beqz(t1, native_return);
@@ -1818,48 +1781,18 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Get locked oop from the handle we passed to jni
__ ld(obj_reg, Address(oop_handle_reg, 0));
Label done, not_recursive;
if (LockingMode == LM_LEGACY) {
// Simple recursive lock?
__ ld(t0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
__ bnez(t0, not_recursive);
__ dec_held_monitor_count(t0);
__ j(done);
}
__ bind(not_recursive);
// Must save x10 if it is live now because cmpxchg must use it
if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
save_native_result(masm, ret_type, stack_slots);
}
if (LockingMode == LM_MONITOR) {
__ j(slow_path_unlock);
} else if (LockingMode == LM_LEGACY) {
// get address of the stack lock
__ la(x10, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
// get old displaced header
__ ld(old_hdr, Address(x10, 0));
// Atomic swap old header if oop still contains the stack lock
Label count;
__ cmpxchg_obj_header(x10, old_hdr, obj_reg, lock_tmp, count, &slow_path_unlock);
__ bind(count);
__ dec_held_monitor_count(t0);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "");
__ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
}
__ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
// slow path re-enters here
__ bind(unlock_done);
if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
restore_native_result(masm, ret_type, stack_slots);
}
__ bind(done);
}
Label dtrace_method_exit, dtrace_method_exit_done;

src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp

@@ -1253,22 +1253,17 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ mv(t0, _thread_in_Java);
__ sw(t0, Address(xthread, JavaThread::thread_state_offset()));
if (LockingMode != LM_LEGACY) {
// Check preemption for Object.wait()
Label not_preempted;
__ ld(t1, Address(xthread, JavaThread::preempt_alternate_return_offset()));
__ beqz(t1, not_preempted);
__ sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset()));
__ jr(t1);
__ bind(native_return);
__ restore_after_resume(true /* is_native */);
// reload result_handler
__ ld(result_handler, Address(fp, frame::interpreter_frame_result_handler_offset * wordSize));
__ bind(not_preempted);
} else {
// any pc will do so just use this one for LM_LEGACY to keep code together.
__ bind(native_return);
}
// Check preemption for Object.wait()
Label not_preempted;
__ ld(t1, Address(xthread, JavaThread::preempt_alternate_return_offset()));
__ beqz(t1, not_preempted);
__ sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset()));
__ jr(t1);
__ bind(native_return);
__ restore_after_resume(true /* is_native */);
// reload result_handler
__ ld(result_handler, Address(fp, frame::interpreter_frame_result_handler_offset * wordSize));
__ bind(not_preempted);
// reset_last_Java_frame
__ reset_last_Java_frame(true);