diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index 1e506edb634..44d7bf1e0fa 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -16262,7 +16262,7 @@ instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
 // ============================================================================
 // inlined locking and unlocking
 
-instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
+instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
 %{
   match(Set cr (FastLock object box));
   effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
@@ -16271,13 +16271,13 @@ instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp
   format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}
 
   ins_encode %{
-    __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
+    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
   %}
 
   ins_pipe(pipe_serial);
 %}
 
-instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
+instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
 %{
   match(Set cr (FastUnlock object box));
   effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
@@ -16286,7 +16286,7 @@ instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNo
   format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2, $tmp3" %}
 
   ins_encode %{
-    __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
+    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
   %}
 
   ins_pipe(pipe_serial);
diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
index 31c36e749c5..e934632715c 100644
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
@@ -70,7 +70,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register basic_lo
 
   null_check_offset = offset();
 
-  lightweight_lock(basic_lock, obj, hdr, temp, rscratch2, slow_case);
+  fast_lock(basic_lock, obj, hdr, temp, rscratch2, slow_case);
 
   return null_check_offset;
 }
@@ -83,7 +83,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register basic
   ldr(obj, Address(basic_lock, BasicObjectLock::obj_offset()));
   verify_oop(obj);
 
-  lightweight_unlock(obj, hdr, temp, rscratch2, slow_case);
+  fast_unlock(obj, hdr, temp, rscratch2, slow_case);
 }
diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
index 5f71222ed88..ebb4a897906 100644
--- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
@@ -147,8 +147,8 @@ address C2_MacroAssembler::arrays_hashcode(Register ary, Register cnt, Register
   return pc();
 }
 
-void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register t1,
-                                              Register t2, Register t3) {
+void C2_MacroAssembler::fast_lock(Register obj, Register box, Register t1,
+                                  Register t2, Register t3) {
   assert_different_registers(obj, box, t1, t2, t3, rscratch2);
 
   // Handle inflated monitor.
@@ -173,7 +173,7 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist
   const Register t1_mark = t1;
   const Register t3_t = t3;
 
-  { // Lightweight locking
+  { // Fast locking
 
     // Push lock to the lock stack and finish successfully. MUST branch to with flag == EQ
     Label push;
@@ -303,8 +303,8 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist
   // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
 }
 
-void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Register t1,
-                                                Register t2, Register t3) {
+void C2_MacroAssembler::fast_unlock(Register obj, Register box, Register t1,
+                                    Register t2, Register t3) {
   assert_different_registers(obj, box, t1, t2, t3);
 
   // Handle inflated monitor.
@@ -318,7 +318,7 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Regi
   const Register t2_top = t2;
   const Register t3_t = t3;
 
-  { // Lightweight unlock
+  { // Fast unlock
 
     Label push_and_slow_path;
@@ -2859,4 +2859,4 @@ void C2_MacroAssembler::vector_expand_sve(FloatRegister dst, FloatRegister src,
   sve_sub(dst, size, 1); // dst = 00 87 00 65 00 43 00 21
   sve_tbl(dst, size, src, dst);
-}
\ No newline at end of file
+}
diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
index 09850a60c64..ccd091938a3 100644
--- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
@@ -51,9 +51,9 @@
                        FloatRegister vmul3, FloatRegister vpow, FloatRegister vpowm,
                        BasicType eltype);
 
-  // Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file.
-  void fast_lock_lightweight(Register object, Register box, Register t1, Register t2, Register t3);
-  void fast_unlock_lightweight(Register object, Register box, Register t1, Register t2, Register t3);
+  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
+  void fast_lock(Register object, Register box, Register t1, Register t2, Register t3);
+  void fast_unlock(Register object, Register box, Register t1, Register t2, Register t3);
 
   void string_compare(Register str1, Register str2,
                       Register cnt1, Register cnt2, Register result,
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
index cf4d5a63496..957c2aee1c1 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
@@ -709,7 +709,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
   ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
 
   Label slow_case, done;
-  lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
+  fast_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
   b(done);
 
   bind(slow_case);
@@ -741,7 +741,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
   const Register swap_reg   = r0;
   const Register header_reg = c_rarg2;  // Will contain the old oopMark
   const Register obj_reg    = c_rarg3;  // Will contain the oop
-  const Register tmp_reg    = c_rarg4;  // Temporary used by lightweight_unlock
+  const Register tmp_reg    = c_rarg4;  // Temporary used by fast_unlock
 
   save_bcp(); // Save in case of exception
 
@@ -752,7 +752,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
   str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
 
   Label slow_case, done;
-  lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
+  fast_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
   b(done);
 
   bind(slow_case);
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index c83e6e12fa1..ceedb4f1063 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -6934,12 +6934,12 @@ void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
   }
 }
 
-// Implements lightweight-locking.
+// Implements fast-locking.
 //
 //  - obj: the object to be locked
 //  - t1, t2, t3: temporary registers, will be destroyed
 //  - slow: branched to if locking fails, absolute offset may larger than 32KB (imm14 encoding).
-void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
+void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
   assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);
 
   Label push;
@@ -6993,12 +6993,12 @@ void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Registe
   strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
 }
 
-// Implements lightweight-unlocking.
+// Implements fast-unlocking.
 //
 //  - obj: the object to be unlocked
 //  - t1, t2, t3: temporary registers
 //  - slow: branched to if unlocking fails, absolute offset may larger than 32KB (imm14 encoding).
-void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
+void MacroAssembler::fast_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
   // cmpxchg clobbers rscratch1.
   assert_different_registers(obj, t1, t2, t3, rscratch1);
 
@@ -7044,7 +7044,7 @@ void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2,
   // Check header not unlocked (0b01).
   Label not_unlocked;
   tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
-  stop("lightweight_unlock already unlocked");
+  stop("fast_unlock already unlocked");
   bind(not_unlocked);
 #endif
 
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index 4468eaa40c5..4baa07d7d49 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -1721,8 +1721,8 @@ public:
   // Code for java.lang.Thread::onSpinWait() intrinsic.
   void spin_wait();
 
-  void lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow);
-  void lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow);
+  void fast_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow);
+  void fast_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow);
 
 private:
   // Check the current thread doesn't need a cross modify fence.
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
index 39609cbe0ac..89ae6bc10e0 100644
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -1707,7 +1707,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   const Register obj_reg  = r19;  // Will contain the oop
   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
   const Register old_hdr  = r13;  // value of old header at unlock time
-  const Register lock_tmp = r14;  // Temporary used by lightweight_lock/unlock
+  const Register lock_tmp = r14;  // Temporary used by fast_lock/unlock
   const Register tmp      = lr;
 
   Label slow_path_lock;
@@ -1724,7 +1724,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // Load the oop from the handle
     __ ldr(obj_reg, Address(oop_handle_reg, 0));
 
-    __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
+    __ fast_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
 
     // Slow path will re-enter here
     __ bind(lock_done);
@@ -1833,7 +1833,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
       save_native_result(masm, ret_type, stack_slots);
     }
 
-    __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
+    __ fast_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
 
     // slow path re-enters here
     __ bind(unlock_done);
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
index 5a8642a285a..3f7ba683efc 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
@@ -195,7 +195,7 @@ enum Ampere_CPU_Model {
   // Aarch64 supports fast class initialization checks
   static bool supports_fast_class_init_checks() { return true; }
   constexpr static bool supports_stack_watermark_barrier() { return true; }
-  constexpr static bool supports_recursive_lightweight_locking() { return true; }
+  constexpr static bool supports_recursive_fast_locking() { return true; }
 
   constexpr static bool supports_secondary_supers_table() { return true; }
 
diff --git a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
index ca7711353d2..ad6c56186df 100644
--- a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
@@ -201,7 +201,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register basic_lo
   Register t2 = hdr; // blow
   Register t3 = Rtemp; // blow
 
-  lightweight_lock(obj, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
+  fast_lock(obj, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
   // Success: fall through
 
   return null_check_offset;
 }
@@ -218,7 +218,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register basic
   Register t2 = hdr; // blow
   Register t3 = Rtemp; // blow
 
-  lightweight_unlock(obj, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
+  fast_unlock(obj, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
   // Success: fall through
 }
diff --git a/src/hotspot/cpu/arm/c2_MacroAssembler_arm.cpp b/src/hotspot/cpu/arm/c2_MacroAssembler_arm.cpp
index 2d26b4f9a50..83e9cc672f2 100644
--- a/src/hotspot/cpu/arm/c2_MacroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/c2_MacroAssembler_arm.cpp
@@ -90,8 +90,8 @@ void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratc
     b(done, ne);
   }
 
-  lightweight_lock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
-                   1 /* savemask (save t1) */, done);
+  MacroAssembler::fast_lock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
+                            1 /* savemask (save t1) */, done);
 
   cmp(Roop, Roop); // Success: set Z
   bind(done);
@@ -107,8 +107,8 @@ void C2_MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscra
 
   Label done;
 
-  lightweight_unlock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
-                     1 /* savemask (save t1) */, done);
+  MacroAssembler::fast_unlock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
+                              1 /* savemask (save t1) */, done);
 
   cmp(Roop, Roop); // Success: Set Z
   // Fall through
diff --git a/src/hotspot/cpu/arm/interp_masm_arm.cpp b/src/hotspot/cpu/arm/interp_masm_arm.cpp
index 3f9130309e9..720413c9c5b 100644
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp
@@ -904,7 +904,7 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
       b(slow_case, ne);
     }
 
-    lightweight_lock(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
+    fast_lock(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
     b(done);
 
     bind(slow_case);
@@ -945,8 +945,8 @@ void InterpreterMacroAssembler::unlock_object(Register Rlock) {
     cmpoop(Rtemp, Robj);
     b(slow_case, ne);
 
-    lightweight_unlock(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
-                       1 /* savemask (save t1) */, slow_case);
+    fast_unlock(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
+                1 /* savemask (save t1) */, slow_case);
     b(done);
 
     bind(slow_case);
diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.cpp b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
index 12462e1843c..935c9544620 100644
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
@@ -1750,14 +1750,14 @@ void MacroAssembler::read_polling_page(Register dest, relocInfo::relocType rtype
   POISON_REG(mask, 1, R2, poison) \
   POISON_REG(mask, 2, R3, poison)
 
-// Attempt to lightweight-lock an object
+// Attempt to fast-lock an object
 // Registers:
 //  - obj: the object to be locked
 //  - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
 // Result:
 //  - Success: fallthrough
 //  - Error:   break to slow, Z cleared.
-void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
+void MacroAssembler::fast_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
   assert_different_registers(obj, t1, t2, t3);
 
 #ifdef ASSERT
@@ -1807,14 +1807,14 @@ void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Re
   // Success: fall through
 }
 
-// Attempt to lightweight-unlock an object
+// Attempt to fast-unlock an object
 // Registers:
 //  - obj: the object to be unlocked
 //  - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
 // Result:
 //  - Success: fallthrough
 //  - Error:   break to slow, Z cleared.
-void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
+void MacroAssembler::fast_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
   assert_different_registers(obj, t1, t2, t3);
 
 #ifdef ASSERT
diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.hpp b/src/hotspot/cpu/arm/macroAssembler_arm.hpp
index d60b38e42db..8e80c5bcc6e 100644
--- a/src/hotspot/cpu/arm/macroAssembler_arm.hpp
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1010,23 +1010,23 @@ public:
   void cas_for_lock_acquire(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
   void cas_for_lock_release(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
 
-  // Attempt to lightweight-lock an object
+  // Attempt to fast-lock an object
   // Registers:
   //  - obj: the object to be locked
   //  - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
   // Result:
   //  - Success: fallthrough
   //  - Error:   break to slow, Z cleared.
-  void lightweight_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
+  void fast_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
 
-  // Attempt to lightweight-unlock an object
+  // Attempt to fast-unlock an object
   // Registers:
   //  - obj: the object to be unlocked
   //  - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
   // Result:
   //  - Success: fallthrough
   //  - Error:   break to slow, Z cleared.
-  void lightweight_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
+  void fast_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
 
 #ifndef PRODUCT
   // Preserves flags and all registers.
diff --git a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
index 99d8773368d..76e38d29478 100644
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
@@ -1139,8 +1139,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ mov(sync_handle, R1);
 
     log_trace(fastlock)("SharedRuntime lock fast");
-    __ lightweight_lock(sync_obj /* object */, basic_lock /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
-                        0x7 /* savemask */, slow_lock);
+    __ fast_lock(sync_obj /* object */, basic_lock /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
+                 0x7 /* savemask */, slow_lock);
     // Fall through to lock_done
 
     __ bind(lock_done);
@@ -1195,8 +1195,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   Label slow_unlock, unlock_done;
   if (method->is_synchronized()) {
     log_trace(fastlock)("SharedRuntime unlock fast");
-    __ lightweight_unlock(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
-                          7 /* savemask */, slow_unlock);
+    __ fast_unlock(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
+                   7 /* savemask */, slow_unlock);
     // Fall through
 
     __ bind(unlock_done);
diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
index 04af473c99b..798451446e5 100644
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
@@ -82,7 +82,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
   // Save object being locked into the BasicObjectLock...
   std(Roop, in_bytes(BasicObjectLock::obj_offset()), Rbox);
 
-  lightweight_lock(Rbox, Roop, Rmark, Rscratch, slow_int);
+  fast_lock(Rbox, Roop, Rmark, Rscratch, slow_int);
   b(done);
 
   bind(slow_int);
@@ -104,7 +104,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
   ld(Roop, in_bytes(BasicObjectLock::obj_offset()), Rbox);
   verify_oop(Roop, FILE_AND_LINE);
 
-  lightweight_unlock(Roop, Rmark, slow_int);
+  fast_unlock(Roop, Rmark, slow_int);
   b(done);
   bind(slow_int);
   b(slow_case); // far
diff --git a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp
index eab3df03fde..edf348fdc50 100644
--- a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp
@@ -36,14 +36,14 @@
 
 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
 
-void C2_MacroAssembler::fast_lock_lightweight(ConditionRegister flag, Register obj, Register box,
-                                              Register tmp1, Register tmp2, Register tmp3) {
-  compiler_fast_lock_lightweight_object(flag, obj, box, tmp1, tmp2, tmp3);
+void C2_MacroAssembler::fast_lock(ConditionRegister flag, Register obj, Register box,
+                                  Register tmp1, Register tmp2, Register tmp3) {
+  compiler_fast_lock_object(flag, obj, box, tmp1, tmp2, tmp3);
 }
 
-void C2_MacroAssembler::fast_unlock_lightweight(ConditionRegister flag, Register obj, Register box,
-                                                Register tmp1, Register tmp2, Register tmp3) {
-  compiler_fast_unlock_lightweight_object(flag, obj, box, tmp1, tmp2, tmp3);
+void C2_MacroAssembler::fast_unlock(ConditionRegister flag, Register obj, Register box,
+                                    Register tmp1, Register tmp2, Register tmp3) {
+  compiler_fast_unlock_object(flag, obj, box, tmp1, tmp2, tmp3);
 }
 
 void C2_MacroAssembler::load_narrow_klass_compact_c2(Register dst, Register obj, int disp) {
diff --git a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp
index 16b6d1935ba..5a114294c1f 100644
--- a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,11 +28,11 @@
 // C2_MacroAssembler contains high-level macros for C2
 
 public:
-  // Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file.
-  void fast_lock_lightweight(ConditionRegister flag, Register obj, Register box,
-                             Register tmp1, Register tmp2, Register tmp3);
-  void fast_unlock_lightweight(ConditionRegister flag, Register obj, Register box,
-                               Register tmp1, Register tmp2, Register tmp3);
+  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
+  void fast_lock(ConditionRegister flag, Register obj, Register box,
+                 Register tmp1, Register tmp2, Register tmp3);
+  void fast_unlock(ConditionRegister flag, Register obj, Register box,
+                   Register tmp1, Register tmp2, Register tmp3);
 
   void load_narrow_klass_compact_c2(Register dst, Register obj, int disp);
diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
index 0d32ea8003e..fc865be015e 100644
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
@@ -958,7 +958,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
 
     assert_different_registers(header, tmp);
 
-    lightweight_lock(monitor, object, header, tmp, slow_case);
+    fast_lock(monitor, object, header, tmp, slow_case);
     b(done);
 
     bind(slow_case);
@@ -987,7 +987,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
     // The object address from the monitor is in object.
    ld(object, in_bytes(BasicObjectLock::obj_offset()), monitor);
 
-    lightweight_unlock(object, header, slow_case);
+    fast_unlock(object, header, slow_case);
 
     b(free_slot);
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
index 5a7e2172b29..5649ead2ea8 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
@@ -2671,8 +2671,8 @@ address MacroAssembler::emit_trampoline_stub(int destination_toc_offset,
 }
 
 // "The box" is the space on the stack where we copy the object mark.
-void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister flag, Register obj, Register box,
-                                                           Register tmp1, Register tmp2, Register tmp3) {
+void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register obj, Register box,
+                                               Register tmp1, Register tmp2, Register tmp3) {
   assert_different_registers(obj, box, tmp1, tmp2, tmp3);
   assert(UseObjectMonitorTable || tmp3 == noreg, "tmp3 not needed");
   assert(flag == CR0, "bad condition register");
@@ -2699,7 +2699,7 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla
 
   Register mark = tmp1;
 
-  { // Lightweight locking
+  { // Fast locking
 
     // Push lock to the lock stack and finish successfully. MUST reach to with flag == EQ
     Label push;
@@ -2847,8 +2847,8 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla
   // C2 uses the value of flag (NE vs EQ) to determine the continuation.
 }
 
-void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister flag, Register obj, Register box,
-                                                             Register tmp1, Register tmp2, Register tmp3) {
+void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register obj, Register box,
+                                                 Register tmp1, Register tmp2, Register tmp3) {
   assert_different_registers(obj, tmp1, tmp2, tmp3);
   assert(flag == CR0, "bad condition register");
 
@@ -2863,7 +2863,7 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister f
   const Register top = tmp2;
   const Register t   = tmp3;
 
-  { // Lightweight unlock
+  { // Fast unlock
     Label push_and_slow;
 
     // Check if obj is top of lock-stack.
@@ -2904,7 +2904,7 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister f
     Label not_unlocked;
     andi_(t, mark, markWord::unlocked_value);
     beq(CR0, not_unlocked);
-    stop("lightweight_unlock already unlocked");
+    stop("fast_unlock already unlocked");
     bind(not_unlocked);
 #endif
 
@@ -4588,11 +4588,11 @@ void MacroAssembler::atomically_flip_locked_state(bool is_unlock, Register obj,
   }
 }
 
-// Implements lightweight-locking.
+// Implements fast-locking.
 //
 //  - obj: the object to be locked
 //  - t1, t2: temporary register
-void MacroAssembler::lightweight_lock(Register box, Register obj, Register t1, Register t2, Label& slow) {
+void MacroAssembler::fast_lock(Register box, Register obj, Register t1, Register t2, Label& slow) {
   assert_different_registers(box, obj, t1, t2, R0);
 
   Label push;
@@ -4644,11 +4644,11 @@ void MacroAssembler::lightweight_lock(Register box, Register obj, Register t1, R
   stw(top, in_bytes(JavaThread::lock_stack_top_offset()), R16_thread);
 }
 
-// Implements lightweight-unlocking.
+// Implements fast-unlocking.
 //
 //  - obj: the object to be unlocked
 //  - t1: temporary register
-void MacroAssembler::lightweight_unlock(Register obj, Register t1, Label& slow) {
+void MacroAssembler::fast_unlock(Register obj, Register t1, Label& slow) {
   assert_different_registers(obj, t1);
 
 #ifdef ASSERT
@@ -4706,7 +4706,7 @@ void MacroAssembler::lightweight_unlock(Register obj, Register t1, Label& slow)
   Label not_unlocked;
   andi_(t, mark, markWord::unlocked_value);
   beq(CR0, not_unlocked);
-  stop("lightweight_unlock already unlocked");
+  stop("fast_unlock already unlocked");
   bind(not_unlocked);
 #endif
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
index 61e6a173823..875602cae58 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
@@ -698,8 +698,8 @@ class MacroAssembler: public Assembler {
   void push_cont_fastpath();
   void pop_cont_fastpath();
   void atomically_flip_locked_state(bool is_unlock, Register obj, Register tmp, Label& failed, int semantics);
-  void lightweight_lock(Register box, Register obj, Register t1, Register t2, Label& slow);
-  void lightweight_unlock(Register obj, Register t1, Label& slow);
+  void fast_lock(Register box, Register obj, Register t1, Register t2, Label& slow);
+  void fast_unlock(Register obj, Register t1, Label& slow);
 
   // allocation (for C1)
   void tlab_allocate(
@@ -713,11 +713,11 @@ class MacroAssembler: public Assembler {
   enum { trampoline_stub_size = 6 * 4 };
   address emit_trampoline_stub(int destination_toc_offset, int insts_call_instruction_offset, Register Rtoc = noreg);
 
-  void compiler_fast_lock_lightweight_object(ConditionRegister flag, Register oop, Register box,
-                                             Register tmp1, Register tmp2, Register tmp3);
+  void compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
+                                 Register tmp1, Register tmp2, Register tmp3);
 
-  void compiler_fast_unlock_lightweight_object(ConditionRegister flag, Register oop, Register box,
-                                               Register tmp1, Register tmp2, Register tmp3);
+  void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
+                                   Register tmp1, Register tmp2, Register tmp3);
 
   // Check if safepoint requested and if so branch
   void safepoint_poll(Label& slow_path, Register temp, bool at_return, bool in_nmethod);
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index 5488dbdb8c0..5c44fc19704 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -11551,15 +11551,15 @@ instruct partialSubtypeCheckConstSuper(rarg3RegP sub, rarg2RegP super_reg, immP
 
 // inlined locking and unlocking
 
-instruct cmpFastLockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{
+instruct cmpFastLock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{
   predicate(!UseObjectMonitorTable);
   match(Set crx (FastLock oop box));
   effect(TEMP tmp1, TEMP tmp2);
 
   format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2" %}
   ins_encode %{
-    __ fast_lock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register,
-                             $tmp1$$Register, $tmp2$$Register, noreg /*tmp3*/);
+    __ fast_lock($crx$$CondRegister, $oop$$Register, $box$$Register,
+                 $tmp1$$Register, $tmp2$$Register, noreg /*tmp3*/);
     // If locking was successful, crx should indicate 'EQ'.
     // The compiler generates a branch to the runtime call to
     // _complete_monitor_locking_Java for the case where crx is 'NE'.
@@ -11574,8 +11574,8 @@ instruct cmpFastLockMonitorTable(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iR
 
   format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2, $tmp3" %}
   ins_encode %{
-    __ fast_lock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register,
-                             $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
+    __ fast_lock($crx$$CondRegister, $oop$$Register, $box$$Register,
+                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
     // If locking was successful, crx should indicate 'EQ'.
     // The compiler generates a branch to the runtime call to
     // _complete_monitor_locking_Java for the case where crx is 'NE'.
@@ -11583,14 +11583,14 @@ instruct cmpFastLockMonitorTable(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iR
   ins_pipe(pipe_class_compare);
 %}
 
-instruct cmpFastUnlockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
+instruct cmpFastUnlock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
   match(Set crx (FastUnlock oop box));
   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
 
   format %{ "FASTUNLOCK $oop, $box, $tmp1, $tmp2" %}
   ins_encode %{
-    __ fast_unlock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register,
-                               $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
+    __ fast_unlock($crx$$CondRegister, $oop$$Register, $box$$Register,
+                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
     // If unlocking was successful, crx should indicate 'EQ'.
     // The compiler generates a branch to the runtime call to
     // _complete_monitor_unlocking_Java for the case where crx is 'NE'.
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index 4be3a0aee8b..4e427ace404 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -2381,7 +2381,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
     // Try fastpath for locking.
     // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
     Register r_temp_3_or_noreg = UseObjectMonitorTable ? r_temp_3 : noreg;
-    __ compiler_fast_lock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
+    __ compiler_fast_lock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
     __ beq(CR0, locked);
 
     // None of the above fast optimizations worked so we have to get into the
@@ -2600,7 +2600,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
     __ addi(r_box, R1_SP, lock_offset);
 
     // Try fastpath for unlocking.
-    __ compiler_fast_unlock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
+    __ compiler_fast_unlock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
     __ beq(CR0, done);
 
     // Save and restore any potential method result value around the unlocking operation.
diff --git a/src/hotspot/cpu/ppc/vm_version_ppc.hpp b/src/hotspot/cpu/ppc/vm_version_ppc.hpp
index 9588edc4bf8..11dce83bed0 100644
--- a/src/hotspot/cpu/ppc/vm_version_ppc.hpp
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.hpp
@@ -62,7 +62,7 @@ public:
   // PPC64 supports fast class initialization checks
   static bool supports_fast_class_init_checks() { return true; }
   constexpr static bool supports_stack_watermark_barrier() { return true; }
-  constexpr static bool supports_recursive_lightweight_locking() { return true; }
+  constexpr static bool supports_recursive_fast_locking() { return true; }
   constexpr static bool supports_secondary_supers_table() { return true; }
 
   static bool supports_float16() { return PowerArchitecturePPC64 >= 9; }
diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
index 8e989de2665..aeb077ba0a0 100644
--- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
@@ -59,7 +59,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register basic_lo
 
   null_check_offset = offset();
 
-  lightweight_lock(basic_lock, obj, hdr, temp, t1, slow_case);
+  fast_lock(basic_lock, obj, hdr, temp, t1, slow_case);
 
   return null_check_offset;
 }
@@ -71,7 +71,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register basic
   ld(obj, Address(basic_lock, BasicObjectLock::obj_offset()));
   verify_oop(obj);
 
-  lightweight_unlock(obj, hdr, temp, t1, slow_case);
+  fast_unlock(obj, hdr, temp, t1, slow_case);
 }
 
 // Defines obj, preserves var_size_in_bytes
diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
index 154b62db47f..abbd7eedbba 100644
--- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
@@ -43,8 +43,8 @@
 
 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
 
-void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box,
-                                              Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
+void C2_MacroAssembler::fast_lock(Register obj, Register box,
+                                  Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
   // Flag register, zero for success; non-zero for failure.
   Register flag = t1;
 
@@ -74,7 +74,7 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box,
   const Register tmp1_mark = tmp1;
   const Register tmp3_t = tmp3;
 
-  { // Lightweight locking
+  { // Fast locking
 
    // Push lock to the lock stack and finish successfully. MUST branch to with flag == 0
    Label push;
@@ -205,8 +205,8 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box,
   // C2 uses the value of flag (0 vs !0) to determine the continuation.
 }
 
-void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box,
-                                                Register tmp1, Register tmp2, Register tmp3) {
+void C2_MacroAssembler::fast_unlock(Register obj, Register box,
+                                    Register tmp1, Register tmp2, Register tmp3) {
   // Flag register, zero for success; non-zero for failure.
   Register flag = t1;
 
@@ -225,7 +225,7 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box,
   const Register tmp2_top = tmp2;
   const Register tmp3_t = tmp3;
 
-  { // Lightweight unlock
+  { // Fast unlock
 
     Label push_and_slow_path;
 
     // Check if obj is top of lock-stack.
diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp
index 2d5339dc153..f08e5e27c87 100644
--- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp
@@ -49,11 +49,11 @@
                        const int STUB_THRESHOLD, Label *STUB, Label *DONE);
 
  public:
-  // Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file.
-  void fast_lock_lightweight(Register object, Register box,
-                             Register tmp1, Register tmp2, Register tmp3, Register tmp4);
-  void fast_unlock_lightweight(Register object, Register box,
-                               Register tmp1, Register tmp2, Register tmp3);
+  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
+  void fast_lock(Register object, Register box,
+                 Register tmp1, Register tmp2, Register tmp3, Register tmp4);
+  void fast_unlock(Register object, Register box,
+                   Register tmp1, Register tmp2, Register tmp3);
 
   void string_compare(Register str1, Register str2,
                       Register cnt1, Register cnt2, Register result,
diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
index 5af8ea1da37..189c7c93d07 100644
--- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
+++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
@@ -751,7 +751,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
   ld(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
 
   Label done, slow_case;
-  lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
+  fast_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
   j(done);
 
   bind(slow_case);
@@ -782,7 +782,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
   const Register swap_reg   = x10;
   const Register header_reg = c_rarg2;  // Will contain the old oopMark
   const Register obj_reg    = c_rarg3;  // Will contain the oop
-  const Register tmp_reg    = c_rarg4;  // Temporary used by lightweight_unlock
+  const Register tmp_reg    = c_rarg4;  // Temporary used by fast_unlock
 
   save_bcp(); // Save in case of exception
 
@@ -793,7 +793,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
   sd(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
 
   Label done, slow_case;
-  lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
+  fast_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
   j(done);
 
   bind(slow_case);
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
index 54304ec648d..7a8496ae42b 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -6435,12 +6435,12 @@ void MacroAssembler::test_bit(Register Rd, Register Rs, uint32_t bit_pos) {
   }
 }
 
-// Implements lightweight-locking.
+// Implements fast-locking.
 //
 //  - obj: the object to be locked
 //  - tmp1, tmp2, tmp3: temporary registers, will be destroyed
 //  - slow: branched to if locking fails
-void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) {
+void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) {
   assert_different_registers(basic_lock, obj, tmp1, tmp2, tmp3, t0);
 
   Label push;
@@ -6499,7 +6499,7 @@ void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Registe
 //  - obj: the object to be unlocked
 //  - tmp1, tmp2, tmp3: temporary registers
 //  - slow: branched to if unlocking fails
-void MacroAssembler::lightweight_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) {
+void MacroAssembler::fast_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) {
   assert_different_registers(obj, tmp1, tmp2, tmp3, t0);
 
 #ifdef ASSERT
@@ -6546,7 +6546,7 @@ void MacroAssembler::lightweight_unlock(Register obj, Register tmp1, Register tm
   Label not_unlocked;
   test_bit(t, mark, exact_log2(markWord::unlocked_value));
   beqz(t, not_unlocked);
-  stop("lightweight_unlock already unlocked");
+  stop("fast_unlock already unlocked");
   bind(not_unlocked);
 #endif
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
index e0e610ff49a..1908b9a9605 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
@@ -1639,8 +1639,8 @@ private:
   void store_conditional(Register dst, Register new_val, Register addr, Assembler::operand_size size, Assembler::Aqrl release);
 
 public:
-  void lightweight_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
-  void lightweight_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
+  void fast_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
+  void fast_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
 
 public:
   enum {
diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad
index 34177701900..e23adcf2488 100644
--- a/src/hotspot/cpu/riscv/riscv.ad
+++ b/src/hotspot/cpu/riscv/riscv.ad
@@ -11039,36 +11039,36 @@ instruct tlsLoadP(javaThread_RegP dst)
 // inlined locking and unlocking
 
 // using t1 as the 'flag' register to bridge the BoolNode producers and consumers
-instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box,
-                                iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
+instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box,
+                     iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
 %{
   match(Set cr (FastLock object box));
   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);
 
   ins_cost(10 * DEFAULT_COST);
-  format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLockLightweight" %}
+  format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLock" %}
 
   ins_encode %{
-    __ fast_lock_lightweight($object$$Register, $box$$Register,
-                             $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
+    __ fast_lock($object$$Register, $box$$Register,
+                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
   %}
 
   ins_pipe(pipe_serial);
 %}
 
 // using t1 as the 'flag' register to bridge the BoolNode producers and consumers
-instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box,
-                                  iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
+instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box,
+                       iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
 %{
   match(Set cr (FastUnlock object box));
   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
 
   ins_cost(10 * DEFAULT_COST);
-  format %{ "fastunlock $object,$box\t! kills $tmp1,$tmp2,$tmp3 #@cmpFastUnlockLightweight" %}
+  format %{ "fastunlock $object,$box\t! kills $tmp1,$tmp2,$tmp3 #@cmpFastUnlock" %}
 
   ins_encode %{
-    __ fast_unlock_lightweight($object$$Register, $box$$Register,
-                               $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
+    __ fast_unlock($object$$Register, $box$$Register,
+                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
   %}
 
   ins_pipe(pipe_serial);
diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
index e64fc2ffc80..eeb6fad1b59 100644
--- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -1642,7 +1642,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   const Register obj_reg  = x9;   // Will contain the oop
   const Register lock_reg = x30;  // Address of compiler lock object (BasicLock)
   const Register old_hdr  = x30;  // value of old header at unlock time
-  const Register lock_tmp = x31;  // Temporary used by lightweight_lock/unlock
+  const Register lock_tmp = x31;  // Temporary used by fast_lock/unlock
   const Register tmp      = ra;
 
   Label slow_path_lock;
@@ -1659,7 +1659,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // Load the oop from the handle
     __ ld(obj_reg, Address(oop_handle_reg, 0));
 
-    __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
+    __ fast_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
 
     // Slow path will re-enter here
     __ bind(lock_done);
@@ -1754,7 +1754,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
      save_native_result(masm, ret_type, stack_slots);
    }
 
-    __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
+    __ fast_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
 
    // slow path re-enters here
    __ bind(unlock_done);
diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.hpp b/src/hotspot/cpu/riscv/vm_version_riscv.hpp
index f74992cbc37..16f2e5d8f5b 100644
--- a/src/hotspot/cpu/riscv/vm_version_riscv.hpp
+++ b/src/hotspot/cpu/riscv/vm_version_riscv.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2023, Rivos Inc. All rights reserved.
@@ -498,7 +498,7 @@ private:
 
   constexpr static bool supports_stack_watermark_barrier() { return true; }
 
-  constexpr static bool supports_recursive_lightweight_locking() { return true; }
+  constexpr static bool supports_recursive_fast_locking() { return true; }
 
   constexpr static bool supports_secondary_supers_table() { return true; }
 
diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
index 91d52def08d..993c1a1b552 100644
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
@@ -67,7 +67,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
   // Save object being locked into the BasicObjectLock...
   z_stg(Roop, Address(Rbox, BasicObjectLock::obj_offset()));
 
-  lightweight_lock(Rbox, Roop, Rmark, tmp, slow_case);
+  fast_lock(Rbox, Roop, Rmark, tmp, slow_case);
 }
 
 void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {
@@ -77,7 +77,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
   z_lg(Roop, Address(Rbox, BasicObjectLock::obj_offset()));
   verify_oop(Roop, FILE_AND_LINE);
 
-  lightweight_unlock(Roop, Rmark, Z_R1_scratch, slow_case);
+  fast_unlock(Roop, Rmark, Z_R1_scratch, slow_case);
 }
 
 void C1_MacroAssembler::try_allocate(
diff --git a/src/hotspot/cpu/s390/c2_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c2_MacroAssembler_s390.cpp
index 485efec6b9b..957c89af3fc 100644
--- a/src/hotspot/cpu/s390/c2_MacroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c2_MacroAssembler_s390.cpp
@@ -32,13 +32,13 @@
 #define BLOCK_COMMENT(str) block_comment(str)
 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
 
-void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register temp1, Register temp2) {
-  compiler_fast_lock_lightweight_object(obj, box, temp1, temp2);
+void C2_MacroAssembler::fast_lock(Register obj, Register box, Register temp1, Register temp2) {
+  compiler_fast_lock_object(obj, box, temp1, temp2);
 }
 
-void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Register temp1, Register temp2) {
-  compiler_fast_unlock_lightweight_object(obj, box, temp1, temp2);
+void C2_MacroAssembler::fast_unlock(Register obj, Register box, Register temp1, Register temp2) {
+  compiler_fast_unlock_object(obj, box, temp1, temp2);
 }
 
 void C2_MacroAssembler::load_narrow_klass_compact_c2(Register dst, Address src) {
diff --git a/src/hotspot/cpu/s390/c2_MacroAssembler_s390.hpp b/src/hotspot/cpu/s390/c2_MacroAssembler_s390.hpp
index abf0db8e520..632cc31d492 100644
--- a/src/hotspot/cpu/s390/c2_MacroAssembler_s390.hpp
+++ b/src/hotspot/cpu/s390/c2_MacroAssembler_s390.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, 2024 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -29,9 +29,9 @@
 // C2_MacroAssembler contains high-level macros for C2
 
 public:
-  // Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in s390.ad file.
-  void fast_lock_lightweight(Register obj, Register box, Register temp1, Register temp2);
-  void fast_unlock_lightweight(Register obj, Register box, Register temp1, Register temp2);
+  // Code used by cmpFastLock and cmpFastUnlock mach instructions in s390.ad file.
+  void fast_lock(Register obj, Register box, Register temp1, Register temp2);
+  void fast_unlock(Register obj, Register box, Register temp1, Register temp2);
 
   void load_narrow_klass_compact_c2(Register dst, Address src);
diff --git a/src/hotspot/cpu/s390/interp_masm_s390.cpp b/src/hotspot/cpu/s390/interp_masm_s390.cpp
index f62051c628e..a80ca26239b 100644
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp
@@ -1019,7 +1019,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
 
     NearLabel done, slow_case;
 
-    lightweight_lock(monitor, object, header, tmp, slow_case);
+    fast_lock(monitor, object, header, tmp, slow_case);
     z_bru(done);
 
     bind(slow_case);
@@ -1054,7 +1054,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
 
     clear_mem(obj_entry, sizeof(oop));
 
-    lightweight_unlock(object, header, current_header, slow_case);
+    fast_unlock(object, header, current_header, slow_case);
     z_bru(done);
 
     // The lock has been converted into a heavy lock and hence
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
index b047ff00044..f35e18c7398 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
@@ -6138,11 +6138,11 @@ void MacroAssembler::zap_from_to(Register low, Register high, Register val, Regi
 }
 #endif // !PRODUCT
 
-// Implements lightweight-locking.
+// Implements fast-locking.
 //  - obj: the object to be locked, contents preserved.
 //  - temp1, temp2: temporary registers, contents destroyed.
 //  Note: make sure Z_R1 is not manipulated here when C2 compiler is in play
-void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register temp1, Register temp2, Label& slow) {
+void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register temp1, Register temp2, Label& slow) {
 
   assert_different_registers(basic_lock, obj, temp1, temp2);
 
@@ -6203,11 +6203,11 @@ void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Registe
   z_alsi(in_bytes(ls_top_offset), Z_thread, oopSize);
 }
 
-// Implements lightweight-unlocking.
+// Implements fast-unlocking.
 // - obj: the object to be unlocked
 // - temp1, temp2: temporary registers, will be destroyed
 // - Z_R1_scratch: will be killed in case of Interpreter & C1 Compiler
-void MacroAssembler::lightweight_unlock(Register obj, Register temp1, Register temp2, Label& slow) {
+void MacroAssembler::fast_unlock(Register obj, Register temp1, Register temp2, Label& slow) {
 
   assert_different_registers(obj, temp1, temp2);
 
@@ -6264,7 +6264,7 @@ void MacroAssembler::lightweight_unlock(Register obj, Register temp1, Register t
   NearLabel not_unlocked;
   z_tmll(mark, markWord::unlocked_value);
   z_braz(not_unlocked);
-  stop("lightweight_unlock already unlocked");
+  stop("fast_unlock already unlocked");
   bind(not_unlocked);
 #endif // ASSERT
 
@@ -6289,7 +6289,7 @@ void MacroAssembler::lightweight_unlock(Register obj, Register temp1, Register t
   bind(unlocked);
 }
 
-void MacroAssembler::compiler_fast_lock_lightweight_object(Register obj, Register box, Register tmp1, Register tmp2) {
+void MacroAssembler::compiler_fast_lock_object(Register obj, Register box, Register tmp1, Register tmp2) {
   assert_different_registers(obj, box, tmp1, tmp2, Z_R0_scratch);
 
   // Handle inflated monitor.
@@ -6314,8 +6314,8 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(Register obj, Registe
   const int mark_offset = oopDesc::mark_offset_in_bytes();
   const ByteSize ls_top_offset = JavaThread::lock_stack_top_offset();
 
-  BLOCK_COMMENT("compiler_fast_lightweight_locking {");
-  { // lightweight locking
+  BLOCK_COMMENT("compiler_fast_locking {");
+  { // Fast locking
 
     // Push lock to the lock stack and finish successfully. MUST reach to with flag == EQ
     NearLabel push;
@@ -6362,9 +6362,9 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(Register obj, Registe
     z_cgr(obj, obj); // set the CC to EQ, as it could be changed by alsi
     z_bru(locked);
   }
-  BLOCK_COMMENT("} compiler_fast_lightweight_locking");
+  BLOCK_COMMENT("} compiler_fast_locking");
 
-  BLOCK_COMMENT("handle_inflated_monitor_lightweight_locking {");
+  BLOCK_COMMENT("handle_inflated_monitor_locking {");
   { // Handle inflated monitor.
     bind(inflated);
 
@@ -6441,7 +6441,7 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(Register obj, Registe
     // set the CC now
     z_cgr(obj, obj);
   }
-  BLOCK_COMMENT("} handle_inflated_monitor_lightweight_locking");
+  BLOCK_COMMENT("} handle_inflated_monitor_locking");
 
   bind(locked);
 
@@ -6464,7 +6464,7 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(Register obj, Registe
   // C2 uses the value of flag (NE vs EQ) to determine the continuation.
 }
 
-void MacroAssembler::compiler_fast_unlock_lightweight_object(Register obj, Register box, Register tmp1, Register tmp2) {
+void MacroAssembler::compiler_fast_unlock_object(Register obj, Register box, Register tmp1, Register tmp2) {
   assert_different_registers(obj, box, tmp1, tmp2);
 
   // Handle inflated monitor.
@@ -6479,8 +6479,8 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(Register obj, Regis
   const int mark_offset = oopDesc::mark_offset_in_bytes();
   const ByteSize ls_top_offset = JavaThread::lock_stack_top_offset();
 
-  BLOCK_COMMENT("compiler_fast_lightweight_unlock {");
-  { // Lightweight Unlock
+  BLOCK_COMMENT("compiler_fast_unlock {");
+  { // Fast Unlock
     NearLabel push_and_slow_path;
 
     // Check if obj is top of lock-stack.
@@ -6525,7 +6525,7 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(Register obj, Regis
     NearLabel not_unlocked;
     z_tmll(mark, markWord::unlocked_value);
     z_braz(not_unlocked);
-    stop("lightweight_unlock already unlocked");
+    stop("fast_unlock already unlocked");
     bind(not_unlocked);
 #endif // ASSERT
 
@@ -6546,7 +6546,7 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(Register obj, Regis
     z_ltgr(obj, obj); // object is not null here
     z_bru(slow_path);
   }
-  BLOCK_COMMENT("} compiler_fast_lightweight_unlock");
+  BLOCK_COMMENT("} compiler_fast_unlock");
 
   { // Handle inflated monitor.
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.hpp b/src/hotspot/cpu/s390/macroAssembler_s390.hpp
index 1c4da4d26eb..da24ae80d45 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp
@@ -790,10 +790,10 @@ class MacroAssembler: public Assembler {
   // Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
   void increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg);
 
-  void lightweight_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Label& slow);
-  void lightweight_unlock(Register obj, Register tmp1, Register tmp2, Label& slow);
-  void compiler_fast_lock_lightweight_object(Register obj, Register box, Register tmp1, Register tmp2);
-  void compiler_fast_unlock_lightweight_object(Register obj, Register box, Register tmp1, Register tmp2);
+  void fast_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Label& slow);
+  void fast_unlock(Register obj, Register tmp1, Register tmp2, Label& slow);
+  void compiler_fast_lock_object(Register obj, Register box, Register tmp1, Register tmp2);
+  void compiler_fast_unlock_object(Register obj, Register box, Register tmp1, Register tmp2);
 
   void resolve_jobject(Register value, Register tmp1, Register tmp2);
   void resolve_global_jobject(Register value, Register tmp1, Register tmp2);
diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad
index 0c4939d8432..2f12aa4c03c 100644
--- a/src/hotspot/cpu/s390/s390.ad
+++ b/src/hotspot/cpu/s390/s390.ad
@@ -10123,14 +10123,14 @@ instruct partialSubtypeCheckConstSuper(rarg2RegP sub, rarg1RegP super, immP supe
 // ============================================================================
 // inlined locking and unlocking
 
-instruct cmpFastLockLightweight(flagsReg pcc, iRegP_N2P oop, iRegP_N2P box, iRegP tmp1, iRegP tmp2) %{
+instruct cmpFastLock(flagsReg pcc, iRegP_N2P oop, iRegP_N2P box, iRegP tmp1, iRegP tmp2) %{
   match(Set pcc (FastLock oop box));
   effect(TEMP tmp1, TEMP tmp2);
   ins_cost(100);
   // TODO: s390 port size(VARIABLE_SIZE);
   format %{ "FASTLOCK $oop, $box; KILL Z_ARG4, Z_ARG5" %}
   ins_encode %{
-    __ fast_lock_lightweight($oop$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register);
+    __ fast_lock($oop$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register);
     // If locking was successful, cc should indicate 'EQ'.
     // The compiler generates a branch to the runtime call to
     // _complete_monitor_locking_Java for the case where cc is 'NE'.
@@ -10138,14 +10138,14 @@ instruct cmpFastLockLightweight(flagsReg pcc, iRegP_N2P oop, iRegP_N2P box, iReg
   ins_pipe(pipe_class_dummy);
 %}
 
-instruct cmpFastUnlockLightweight(flagsReg pcc, iRegP_N2P oop, iRegP_N2P box, iRegP tmp1, iRegP tmp2) %{
+instruct cmpFastUnlock(flagsReg pcc, iRegP_N2P oop, iRegP_N2P box, iRegP tmp1, iRegP tmp2) %{
   match(Set pcc (FastUnlock oop box));
   effect(TEMP tmp1, TEMP tmp2);
   ins_cost(100);
   // TODO: s390 port size(FIXED_SIZE);
   format %{ "FASTUNLOCK $oop, $box; KILL Z_ARG4, Z_ARG5" %}
   ins_encode %{
-    __ fast_unlock_lightweight($oop$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register);
+    __ fast_unlock($oop$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register);
     // If unlocking was successful, cc should indicate 'EQ'.
     // The compiler generates a branch to the runtime call to
     // _complete_monitor_unlocking_Java for the case where cc is 'NE'.
diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
index 3a6e1bff8f4..5b6f7dcd984 100644
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
@@ -1765,7 +1765,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
     // Try fastpath for locking.
     // Fast_lock kills r_temp_1, r_temp_2.
- __ compiler_fast_lock_lightweight_object(r_oop, r_box, r_tmp1, r_tmp2); + __ compiler_fast_lock_object(r_oop, r_box, r_tmp1, r_tmp2); __ z_bre(done); //------------------------------------------------------------------------- @@ -1961,7 +1961,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, // Try fastpath for unlocking. // Fast_unlock kills r_tmp1, r_tmp2. - __ compiler_fast_unlock_lightweight_object(r_oop, r_box, r_tmp1, r_tmp2); + __ compiler_fast_unlock_object(r_oop, r_box, r_tmp1, r_tmp2); __ z_bre(done); // Slow path for unlocking. diff --git a/src/hotspot/cpu/s390/vm_version_s390.hpp b/src/hotspot/cpu/s390/vm_version_s390.hpp index 04005ff2cf9..591d30c3a1c 100644 --- a/src/hotspot/cpu/s390/vm_version_s390.hpp +++ b/src/hotspot/cpu/s390/vm_version_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2024 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -425,7 +425,7 @@ class VM_Version: public Abstract_VM_Version { constexpr static bool supports_secondary_supers_table() { return true; } - constexpr static bool supports_recursive_lightweight_locking() { return true; } + constexpr static bool supports_recursive_fast_locking() { return true; } // CPU feature query functions static const char* get_model_string() { return _model_string; } diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp index c3d45f9d15d..88e2e6c8ba9 100644 --- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp @@ -53,7 +53,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register basic_lo null_check_offset = offset(); - lightweight_lock(basic_lock, obj, hdr, tmp, slow_case); + fast_lock(basic_lock, obj, hdr, tmp, slow_case); return null_check_offset; } @@ -66,7 +66,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register basic movptr(obj, Address(basic_lock, BasicObjectLock::obj_offset())); verify_oop(obj); - lightweight_unlock(obj, rax, hdr, slow_case); + fast_unlock(obj, rax, hdr, slow_case); } diff --git a/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp b/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp index b4f8e9d9514..f73110f8660 100644 --- a/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp +++ b/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp @@ -58,11 +58,11 @@ void C2EntryBarrierStub::emit(C2_MacroAssembler& masm) { __ jmp(continuation(), false /* maybe_short */); } -int C2FastUnlockLightweightStub::max_size() const { +int C2FastUnlockStub::max_size() const { return 128; } -void C2FastUnlockLightweightStub::emit(C2_MacroAssembler& masm) { +void C2FastUnlockStub::emit(C2_MacroAssembler& masm) { assert(_t == rax, "must be"); { // Restore lock-stack and handle the unlock in runtime. 
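Taken together, the x86 changes that follow mirror the aarch64 and s390 ones: C1's lock_object, the interpreter, and the native wrapper all funnel into the renamed MacroAssembler::fast_lock, which implements the lock-stack protocol (CAS the mark word from unlocked to fast-locked, then record ownership by pushing the oop onto the locking thread's lock stack). As a rough orientation aid, here is a minimal standalone C++ model of that fast path; the types, tag values, and capacity are simplified stand-ins rather than HotSpot's real markWord/LockStack layout, and a false return simply means "take the slow path", which may inflate an ObjectMonitor.

// fast_lock_model.cpp - standalone sketch of the lock-stack fast path.
#include <atomic>
#include <cstdint>

namespace model {

constexpr uintptr_t tag_mask        = 0b11;
constexpr uintptr_t unlocked_value  = 0b01;  // stand-in for markWord::unlocked_value
constexpr uintptr_t fast_locked_tag = 0b00;  // stand-in for the fast-locked tag

struct Object { std::atomic<uintptr_t> mark{unlocked_value}; };

struct LockStack {
  static const int CAPACITY = 8;             // illustrative; see LockStack::CAPACITY
  Object* elems[CAPACITY];
  int top = 0;
  bool is_full() const { return top == CAPACITY; }
  void push(Object* o) { elems[top++] = o; }
};

// Returns true on fast-path success; false means "call into the runtime".
bool fast_lock(Object* obj, LockStack& ls) {
  if (ls.is_full()) return false;                        // no room: slow path
  uintptr_t mark = obj->mark.load(std::memory_order_relaxed);
  if ((mark & tag_mask) != unlocked_value) return false; // locked or inflated
  uintptr_t locked = (mark & ~tag_mask) | fast_locked_tag;
  if (!obj->mark.compare_exchange_strong(mark, locked)) return false;
  ls.push(obj);  // ownership is recorded by presence on the lock stack
  return true;
}

} // namespace model

int main() {
  model::Object o;
  model::LockStack ls;
  return model::fast_lock(&o, ls) ? 0 : 1;  // exits 0: fast path succeeded
}

Unlocking reverses the two steps: pop the oop off the lock stack and CAS the mark word back to unlocked, which is what the fast_unlock variants in this patch emit inline.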
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp index 8386e57c389..51b2eff2cfb 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp @@ -222,8 +222,8 @@ inline Assembler::AvxVectorLen C2_MacroAssembler::vector_length_encoding(int vle // box: on-stack box address -- KILLED // rax: tmp -- KILLED // t : tmp -- KILLED -void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register rax_reg, - Register t, Register thread) { +void C2_MacroAssembler::fast_lock(Register obj, Register box, Register rax_reg, + Register t, Register thread) { assert(rax_reg == rax, "Used for CAS"); assert_different_registers(obj, box, rax_reg, t, thread); @@ -247,7 +247,7 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist const Register mark = t; - { // Lightweight Lock + { // Fast Lock Label push; @@ -415,7 +415,7 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist // A perfectly viable alternative is to elide the owner check except when // Xcheck:jni is enabled. -void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax, Register t, Register thread) { +void C2_MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register t, Register thread) { assert(reg_rax == rax, "Used for CAS"); assert_different_registers(obj, reg_rax, t); @@ -430,16 +430,16 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax, const Register box = reg_rax; Label dummy; - C2FastUnlockLightweightStub* stub = nullptr; + C2FastUnlockStub* stub = nullptr; if (!Compile::current()->output()->in_scratch_emit_size()) { - stub = new (Compile::current()->comp_arena()) C2FastUnlockLightweightStub(obj, mark, reg_rax, thread); + stub = new (Compile::current()->comp_arena()) C2FastUnlockStub(obj, mark, reg_rax, thread); Compile::current()->output()->add_stub(stub); } Label& push_and_slow_path = stub == nullptr ? dummy : stub->push_and_slow_path(); - { // Lightweight Unlock + { // Fast Unlock // Load top. movl(top, Address(thread, JavaThread::lock_stack_top_offset())); diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp index aaee25f440a..cd5f0ceb900 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp @@ -35,9 +35,9 @@ public: // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file. // See full description in c2_MacroAssembler_x86.cpp. 
- void fast_lock_lightweight(Register obj, Register box, Register rax_reg, - Register t, Register thread); - void fast_unlock_lightweight(Register obj, Register reg_rax, Register t, Register thread); + void fast_lock(Register obj, Register box, Register rax_reg, + Register t, Register thread); + void fast_unlock(Register obj, Register reg_rax, Register t, Register thread); void verify_int_in_range(uint idx, const TypeInt* t, Register val); void verify_long_in_range(uint idx, const TypeLong* t, Register val, Register tmp); diff --git a/src/hotspot/cpu/x86/interp_masm_x86.cpp b/src/hotspot/cpu/x86/interp_masm_x86.cpp index 110dfab5808..36959ddfe1d 100644 --- a/src/hotspot/cpu/x86/interp_masm_x86.cpp +++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp @@ -1107,7 +1107,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) { // Load object pointer into obj_reg movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); - lightweight_lock(lock_reg, obj_reg, swap_reg, tmp_reg, slow_case); + fast_lock(lock_reg, obj_reg, swap_reg, tmp_reg, slow_case); jmp(done); bind(slow_case); @@ -1149,7 +1149,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) { // Free entry movptr(Address(lock_reg, BasicObjectLock::obj_offset()), NULL_WORD); - lightweight_unlock(obj_reg, swap_reg, header_reg, slow_case); + fast_unlock(obj_reg, swap_reg, header_reg, slow_case); jmp(done); bind(slow_case); diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp index 4f19b30b832..44f1a35d443 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -9653,13 +9653,13 @@ void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigne bind(L_stack_ok); } -// Implements lightweight-locking. +// Implements fast-locking. // // obj: the object to be locked // reg_rax: rax // thread: the thread which attempts to lock obj // tmp: a temporary register -void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) { +void MacroAssembler::fast_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) { Register thread = r15_thread; assert(reg_rax == rax, ""); @@ -9715,13 +9715,13 @@ void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Registe movl(Address(thread, JavaThread::lock_stack_top_offset()), top); } -// Implements lightweight-unlocking. +// Implements fast-unlocking. 
// // obj: the object to be unlocked // reg_rax: rax // thread: the thread // tmp: a temporary register -void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) { +void MacroAssembler::fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) { Register thread = r15_thread; assert(reg_rax == rax, ""); @@ -9753,7 +9753,7 @@ void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register Label not_unlocked; testptr(reg_rax, markWord::unlocked_value); jcc(Assembler::zero, not_unlocked); - stop("lightweight_unlock already unlocked"); + stop("fast_unlock already unlocked"); bind(not_unlocked); #endif diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp index ed1343d9c8c..4cecaa55345 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp @@ -2054,8 +2054,8 @@ public: void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg); - void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow); - void lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow); + void fast_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow); + void fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow); void save_legacy_gprs(); void restore_legacy_gprs(); diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp index e702b587edd..5a4a5b1809e 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp @@ -2141,7 +2141,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, // Load the oop from the handle __ movptr(obj_reg, Address(oop_handle_reg, 0)); - __ lightweight_lock(lock_reg, obj_reg, swap_reg, rscratch1, slow_path_lock); + __ fast_lock(lock_reg, obj_reg, swap_reg, rscratch1, slow_path_lock); // Slow path will re-enter here __ bind(lock_done); @@ -2266,7 +2266,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, save_native_result(masm, ret_type, stack_slots); } - __ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock); + __ fast_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock); // slow path re-enters here __ bind(unlock_done); diff --git a/src/hotspot/cpu/x86/vm_version_x86.hpp b/src/hotspot/cpu/x86/vm_version_x86.hpp index aa9a527e0b7..cc93ee3564e 100644 --- a/src/hotspot/cpu/x86/vm_version_x86.hpp +++ b/src/hotspot/cpu/x86/vm_version_x86.hpp @@ -995,7 +995,7 @@ public: return true; } - constexpr static bool supports_recursive_lightweight_locking() { + constexpr static bool supports_recursive_fast_locking() { return true; } diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad index ca94b03a841..783ed038858 100644 --- a/src/hotspot/cpu/x86/x86.ad +++ b/src/hotspot/cpu/x86/x86.ad @@ -16925,24 +16925,24 @@ instruct jmpConUCF2_short(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{ // ============================================================================ // inlined locking and unlocking -instruct cmpFastLockLightweight(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI rax_reg, rRegP tmp) %{ +instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI rax_reg, rRegP tmp) %{ match(Set cr (FastLock object box)); effect(TEMP rax_reg, TEMP tmp, USE_KILL box); ins_cost(300); format %{ "fastlock $object,$box\t! 
kills $box,$rax_reg,$tmp" %} ins_encode %{ - __ fast_lock_lightweight($object$$Register, $box$$Register, $rax_reg$$Register, $tmp$$Register, r15_thread); + __ fast_lock($object$$Register, $box$$Register, $rax_reg$$Register, $tmp$$Register, r15_thread); %} ins_pipe(pipe_slow); %} -instruct cmpFastUnlockLightweight(rFlagsReg cr, rRegP object, rax_RegP rax_reg, rRegP tmp) %{ +instruct cmpFastUnlock(rFlagsReg cr, rRegP object, rax_RegP rax_reg, rRegP tmp) %{ match(Set cr (FastUnlock object rax_reg)); effect(TEMP tmp, USE_KILL rax_reg); ins_cost(300); format %{ "fastunlock $object,$rax_reg\t! kills $rax_reg,$tmp" %} ins_encode %{ - __ fast_unlock_lightweight($object$$Register, $rax_reg$$Register, $tmp$$Register, r15_thread); + __ fast_unlock($object$$Register, $rax_reg$$Register, $tmp$$Register, r15_thread); %} ins_pipe(pipe_slow); %} diff --git a/src/hotspot/share/interpreter/interpreterRuntime.cpp b/src/hotspot/share/interpreter/interpreterRuntime.cpp index 5f4cc6c450d..b985d2af6ff 100644 --- a/src/hotspot/share/interpreter/interpreterRuntime.cpp +++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp @@ -70,7 +70,7 @@ #include "runtime/sharedRuntime.hpp" #include "runtime/stackWatermarkSet.hpp" #include "runtime/stubRoutines.hpp" -#include "runtime/synchronizer.inline.hpp" +#include "runtime/synchronizer.hpp" #include "utilities/align.hpp" #include "utilities/checkedCast.hpp" #include "utilities/copy.hpp" diff --git a/src/hotspot/share/oops/markWord.hpp b/src/hotspot/share/oops/markWord.hpp index aa9cdd65ba9..6bcbcfe97a0 100644 --- a/src/hotspot/share/oops/markWord.hpp +++ b/src/hotspot/share/oops/markWord.hpp @@ -211,7 +211,7 @@ class markWord { } ObjectMonitor* monitor() const { assert(has_monitor(), "check"); - assert(!UseObjectMonitorTable, "Lightweight locking with OM table does not use markWord for monitors"); + assert(!UseObjectMonitorTable, "Locking with OM table does not use markWord for monitors"); // Use xor instead of &~ to provide one extra tag-bit check. return (ObjectMonitor*) (value() ^ monitor_value); } @@ -237,7 +237,7 @@ class markWord { return from_pointer(lock); } static markWord encode(ObjectMonitor* monitor) { - assert(!UseObjectMonitorTable, "Lightweight locking with OM table does not use markWord for monitors"); + assert(!UseObjectMonitorTable, "Locking with OM table does not use markWord for monitors"); uintptr_t tmp = (uintptr_t) monitor; return markWord(tmp | monitor_value); } diff --git a/src/hotspot/share/opto/c2_CodeStubs.hpp b/src/hotspot/share/opto/c2_CodeStubs.hpp index e778cfcde47..5664ee03e5c 100644 --- a/src/hotspot/share/opto/c2_CodeStubs.hpp +++ b/src/hotspot/share/opto/c2_CodeStubs.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -97,7 +97,7 @@ public: void emit(C2_MacroAssembler& masm); }; -class C2FastUnlockLightweightStub : public C2CodeStub { +class C2FastUnlockStub : public C2CodeStub { private: Register _obj; Register _mark; @@ -107,8 +107,8 @@ private: Label _push_and_slow_path; Label _unlocked_continuation; public: - C2FastUnlockLightweightStub(Register obj, Register mark, Register t, Register thread) : C2CodeStub(), - _obj(obj), _mark(mark), _t(t), _thread(thread) {} + C2FastUnlockStub(Register obj, Register mark, Register t, Register thread) : C2CodeStub(), + _obj(obj), _mark(mark), _t(t), _thread(thread) {} int max_size() const; void emit(C2_MacroAssembler& masm); Label& slow_path() { return _slow_path; } diff --git a/src/hotspot/share/prims/jvmtiEnvBase.cpp b/src/hotspot/share/prims/jvmtiEnvBase.cpp index 7450b079d18..2b17eddbe94 100644 --- a/src/hotspot/share/prims/jvmtiEnvBase.cpp +++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp @@ -55,7 +55,7 @@ #include "runtime/osThread.hpp" #include "runtime/signature.hpp" #include "runtime/stackWatermarkSet.inline.hpp" -#include "runtime/synchronizer.inline.hpp" +#include "runtime/synchronizer.hpp" #include "runtime/threads.hpp" #include "runtime/threadSMR.inline.hpp" #include "runtime/vframe.inline.hpp" diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp index 92f5356235a..f5d1edd28f4 100644 --- a/src/hotspot/share/prims/whitebox.cpp +++ b/src/hotspot/share/prims/whitebox.cpp @@ -87,7 +87,6 @@ #include "runtime/javaCalls.hpp" #include "runtime/javaThread.inline.hpp" #include "runtime/jniHandles.inline.hpp" -#include "runtime/lightweightSynchronizer.hpp" #include "runtime/lockStack.hpp" #include "runtime/os.hpp" #include "runtime/stackFrameStream.inline.hpp" @@ -1974,8 +1973,8 @@ WB_ENTRY(jint, WB_getLockStackCapacity(JNIEnv* env)) return (jint) LockStack::CAPACITY; WB_END -WB_ENTRY(jboolean, WB_supportsRecursiveLightweightLocking(JNIEnv* env)) - return (jboolean) VM_Version::supports_recursive_lightweight_locking(); +WB_ENTRY(jboolean, WB_supportsRecursiveFastLocking(JNIEnv* env)) + return (jboolean) VM_Version::supports_recursive_fast_locking(); WB_END WB_ENTRY(jboolean, WB_DeflateIdleMonitors(JNIEnv* env, jobject wb)) @@ -2996,7 +2995,7 @@ static JNINativeMethod methods[] = { {CC"isUbsanEnabled", CC"()Z", (void*)&WB_IsUbsanEnabled }, {CC"getInUseMonitorCount", CC"()J", (void*)&WB_getInUseMonitorCount }, {CC"getLockStackCapacity", CC"()I", (void*)&WB_getLockStackCapacity }, - {CC"supportsRecursiveLightweightLocking", CC"()Z", (void*)&WB_supportsRecursiveLightweightLocking }, + {CC"supportsRecursiveFastLocking", CC"()Z", (void*)&WB_supportsRecursiveFastLocking }, {CC"forceSafepoint", CC"()V", (void*)&WB_ForceSafepoint }, {CC"forceClassLoaderStatsSafepoint", CC"()V", (void*)&WB_ForceClassLoaderStatsSafepoint }, {CC"getConstantPool0", CC"(Ljava/lang/Class;)J", (void*)&WB_GetConstantPool }, diff --git a/src/hotspot/share/runtime/abstract_vm_version.hpp b/src/hotspot/share/runtime/abstract_vm_version.hpp index b9c52b27182..5a6b41506c7 100644 --- a/src/hotspot/share/runtime/abstract_vm_version.hpp +++ b/src/hotspot/share/runtime/abstract_vm_version.hpp @@ -191,8 +191,8 @@ class Abstract_VM_Version: AllStatic { // Does platform support stack watermark barriers for concurrent stack processing? constexpr static bool supports_stack_watermark_barrier() { return false; } - // Is recursive lightweight locking implemented for this platform? 
-  constexpr static bool supports_recursive_lightweight_locking() { return false; }
+  // Is recursive fast locking implemented for this platform?
+  constexpr static bool supports_recursive_fast_locking() { return false; }
 
   // Does platform support secondary supers table lookup?
   constexpr static bool supports_secondary_supers_table() { return false; }
diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp
index 35ccc92f90b..e2029a26d37 100644
--- a/src/hotspot/share/runtime/deoptimization.cpp
+++ b/src/hotspot/share/runtime/deoptimization.cpp
@@ -74,7 +74,6 @@
 #include "runtime/javaThread.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/keepStackGCProcessed.hpp"
-#include "runtime/lightweightSynchronizer.hpp"
 #include "runtime/lockStack.inline.hpp"
 #include "runtime/objectMonitor.inline.hpp"
 #include "runtime/osThread.hpp"
@@ -85,7 +84,7 @@
 #include "runtime/stackValue.hpp"
 #include "runtime/stackWatermarkSet.hpp"
 #include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.inline.hpp"
+#include "runtime/synchronizer.hpp"
 #include "runtime/threadSMR.hpp"
 #include "runtime/threadWXSetters.inline.hpp"
 #include "runtime/vframe.hpp"
@@ -1680,8 +1679,8 @@ bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorIn
         if (deoptee_thread->lock_stack().contains(obj())) {
-          LightweightSynchronizer::inflate_fast_locked_object(obj(), ObjectSynchronizer::InflateCause::inflate_cause_vm_internal,
-                                                              deoptee_thread, thread);
+          ObjectSynchronizer::inflate_fast_locked_object(obj(), ObjectSynchronizer::InflateCause::inflate_cause_vm_internal,
+                                                         deoptee_thread, thread);
         }
         assert(mon_info->owner()->is_locked(), "object must be locked now");
         assert(obj->mark().has_monitor(), "must be");
diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp
index 238517197b2..d002edd48cd 100644
--- a/src/hotspot/share/runtime/globals.hpp
+++ b/src/hotspot/share/runtime/globals.hpp
@@ -1954,14 +1954,14 @@ const int ObjectAlignmentInBytes = 8;
           "fence. Add cleanliness checks.")                                \
                                                                            \
   product(bool, UseObjectMonitorTable, false, DIAGNOSTIC,                  \
-          "With Lightweight Locking mode, use a table to record inflated " \
-          "monitors rather than the first word of the object.")            \
+          "Use a table to record inflated monitors rather than the first " \
+          "word of the object.")                                           \
                                                                            \
-  product(int, LightweightFastLockingSpins, 13, DIAGNOSTIC,                \
-          "Specifies the number of times lightweight fast locking will "   \
-          "attempt to CAS the markWord before inflating. Between each "    \
-          "CAS it will spin for exponentially more time, resulting in "    \
-          "a total number of spins on the order of O(2^value)")            \
+  product(int, FastLockingSpins, 13, DIAGNOSTIC,                           \
+          "Specifies the number of times fast locking will attempt to "    \
+          "CAS the markWord before inflating. 
Between each CAS it will " \ + "spin for exponentially more time, resulting in a total number " \ + "of spins on the order of O(2^value)") \ range(1, 30) \ \ product(uint, TrimNativeHeapInterval, 0, \ diff --git a/src/hotspot/share/runtime/javaThread.cpp b/src/hotspot/share/runtime/javaThread.cpp index 838f8e4c581..f52be5d740e 100644 --- a/src/hotspot/share/runtime/javaThread.cpp +++ b/src/hotspot/share/runtime/javaThread.cpp @@ -1409,7 +1409,7 @@ void JavaThread::oops_do_no_frames(OopClosure* f, NMethodClosure* cf) { entry = entry->parent(); } - // Due to lightweight locking + // Due to fast locking lock_stack().oops_do(f); } diff --git a/src/hotspot/share/runtime/lightweightSynchronizer.cpp b/src/hotspot/share/runtime/lightweightSynchronizer.cpp deleted file mode 100644 index ebb14490365..00000000000 --- a/src/hotspot/share/runtime/lightweightSynchronizer.cpp +++ /dev/null @@ -1,1231 +0,0 @@ -/* - * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- *
- */
-
-#include "classfile/vmSymbols.hpp"
-#include "jfrfiles/jfrEventClasses.hpp"
-#include "logging/log.hpp"
-#include "memory/allStatic.hpp"
-#include "memory/resourceArea.hpp"
-#include "nmt/memTag.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/atomicAccess.hpp"
-#include "runtime/basicLock.inline.hpp"
-#include "runtime/globals_extension.hpp"
-#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/javaThread.inline.hpp"
-#include "runtime/lightweightSynchronizer.hpp"
-#include "runtime/lockStack.inline.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/objectMonitor.inline.hpp"
-#include "runtime/os.hpp"
-#include "runtime/safepointMechanism.inline.hpp"
-#include "runtime/safepointVerifiers.hpp"
-#include "runtime/synchronizer.inline.hpp"
-#include "runtime/timerTrace.hpp"
-#include "runtime/trimNativeHeap.hpp"
-#include "utilities/concurrentHashTable.inline.hpp"
-#include "utilities/concurrentHashTableTasks.inline.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-// ConcurrentHashTable storing links from objects to ObjectMonitors
-class ObjectMonitorTable : AllStatic {
-  struct Config {
-    using Value = ObjectMonitor*;
-    static uintx get_hash(Value const& value, bool* is_dead) {
-      return (uintx)value->hash();
-    }
-    static void* allocate_node(void* context, size_t size, Value const& value) {
-      ObjectMonitorTable::inc_items_count();
-      return AllocateHeap(size, mtObjectMonitor);
-    };
-    static void free_node(void* context, void* memory, Value const& value) {
-      ObjectMonitorTable::dec_items_count();
-      FreeHeap(memory);
-    }
-  };
-  using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;
-
-  static ConcurrentTable* _table;
-  static volatile size_t _items_count;
-  static size_t _table_size;
-  static volatile bool _resize;
-
-  class Lookup : public StackObj {
-    oop _obj;
-
-   public:
-    explicit Lookup(oop obj) : _obj(obj) {}
-
-    uintx get_hash() const {
-      uintx hash = _obj->mark().hash();
-      assert(hash != 0, "should have a hash");
-      return hash;
-    }
-
-    bool equals(ObjectMonitor** value) {
-      assert(*value != nullptr, "must be");
-      return (*value)->object_refers_to(_obj);
-    }
-
-    bool is_dead(ObjectMonitor** value) {
-      assert(*value != nullptr, "must be");
-      return false;
-    }
-  };
-
-  class LookupMonitor : public StackObj {
-    ObjectMonitor* _monitor;
-
-   public:
-    explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}
-
-    uintx get_hash() const {
-      return _monitor->hash();
-    }
-
-    bool equals(ObjectMonitor** value) {
-      return (*value) == _monitor;
-    }
-
-    bool is_dead(ObjectMonitor** value) {
-      assert(*value != nullptr, "must be");
-      return (*value)->object_is_dead();
-    }
-  };
-
-  static void inc_items_count() {
-    AtomicAccess::inc(&_items_count, memory_order_relaxed);
-  }
-
-  static void dec_items_count() {
-    AtomicAccess::dec(&_items_count, memory_order_relaxed);
-  }
-
-  static double get_load_factor() {
-    size_t count = AtomicAccess::load(&_items_count);
-    return (double)count / (double)_table_size;
-  }
-
-  static size_t table_size(Thread* current = Thread::current()) {
-    return ((size_t)1) << _table->get_size_log2(current);
-  }
-
-  static size_t max_log_size() {
-    // TODO[OMTable]: Evaluate the max size.
-    // TODO[OMTable]: Need to fix init order to use Universe::heap()->max_capacity();
-    //                Using MaxHeapSize directly this early may be wrong, and there
-    //                are definitely rounding errors (alignment).
-    const size_t max_capacity = MaxHeapSize;
-    const size_t min_object_size = CollectedHeap::min_dummy_object_size() * HeapWordSize;
-    const size_t max_objects = max_capacity / MAX2(MinObjAlignmentInBytes, checked_cast<int>(min_object_size));
-    const size_t log_max_objects = log2i_graceful(max_objects);
-
-    return MAX2(MIN2(SIZE_BIG_LOG2, log_max_objects), min_log_size());
-  }
-
-  static size_t min_log_size() {
-    // ~= log(AvgMonitorsPerThreadEstimate default)
-    return 10;
-  }
-
-  template<typename V>
-  static size_t clamp_log_size(V log_size) {
-    return MAX2(MIN2(log_size, checked_cast<V>(max_log_size())), checked_cast<V>(min_log_size()));
-  }
-
-  static size_t initial_log_size() {
-    const size_t estimate = log2i(MAX2(os::processor_count(), 1)) + log2i(MAX2(AvgMonitorsPerThreadEstimate, size_t(1)));
-    return clamp_log_size(estimate);
-  }
-
-  static size_t grow_hint() {
-    return ConcurrentTable::DEFAULT_GROW_HINT;
-  }
-
- public:
-  static void create() {
-    _table = new ConcurrentTable(initial_log_size(), max_log_size(), grow_hint());
-    _items_count = 0;
-    _table_size = table_size();
-    _resize = false;
-  }
-
-  static void verify_monitor_get_result(oop obj, ObjectMonitor* monitor) {
-#ifdef ASSERT
-    if (SafepointSynchronize::is_at_safepoint()) {
-      bool has_monitor = obj->mark().has_monitor();
-      assert(has_monitor == (monitor != nullptr),
-             "Inconsistency between markWord and ObjectMonitorTable has_monitor: %s monitor: " PTR_FORMAT,
-             BOOL_TO_STR(has_monitor), p2i(monitor));
-    }
-#endif
-  }
-
-  static ObjectMonitor* monitor_get(Thread* current, oop obj) {
-    ObjectMonitor* result = nullptr;
-    Lookup lookup_f(obj);
-    auto found_f = [&](ObjectMonitor** found) {
-      assert((*found)->object_peek() == obj, "must be");
-      result = *found;
-    };
-    _table->get(current, lookup_f, found_f);
-    verify_monitor_get_result(obj, result);
-    return result;
-  }
-
-  static void try_notify_grow() {
-    if (!_table->is_max_size_reached() && !AtomicAccess::load(&_resize)) {
-      AtomicAccess::store(&_resize, true);
-      if (Service_lock->try_lock()) {
-        Service_lock->notify();
-        Service_lock->unlock();
-      }
-    }
-  }
-
-  static bool should_shrink() {
-    // Not implemented;
-    return false;
-  }
-
-  static constexpr double GROW_LOAD_FACTOR = 0.75;
-
-  static bool should_grow() {
-    return get_load_factor() > GROW_LOAD_FACTOR && !_table->is_max_size_reached();
-  }
-
-  static bool should_resize() {
-    return should_grow() || should_shrink() || AtomicAccess::load(&_resize);
-  }
-
-  template<typename Task, typename... Args>
-  static bool run_task(JavaThread* current, Task& task, const char* task_name, Args&... 
args) { - if (task.prepare(current)) { - log_trace(monitortable)("Started to %s", task_name); - TraceTime timer(task_name, TRACETIME_LOG(Debug, monitortable, perf)); - while (task.do_task(current, args...)) { - task.pause(current); - { - ThreadBlockInVM tbivm(current); - } - task.cont(current); - } - task.done(current); - return true; - } - return false; - } - - static bool grow(JavaThread* current) { - ConcurrentTable::GrowTask grow_task(_table); - if (run_task(current, grow_task, "Grow")) { - _table_size = table_size(current); - log_info(monitortable)("Grown to size: %zu", _table_size); - return true; - } - return false; - } - - static bool clean(JavaThread* current) { - ConcurrentTable::BulkDeleteTask clean_task(_table); - auto is_dead = [&](ObjectMonitor** monitor) { - return (*monitor)->object_is_dead(); - }; - auto do_nothing = [&](ObjectMonitor** monitor) {}; - NativeHeapTrimmer::SuspendMark sm("ObjectMonitorTable"); - return run_task(current, clean_task, "Clean", is_dead, do_nothing); - } - - static bool resize(JavaThread* current) { - LogTarget(Info, monitortable) lt; - bool success = false; - - if (should_grow()) { - lt.print("Start growing with load factor %f", get_load_factor()); - success = grow(current); - } else { - if (!_table->is_max_size_reached() && AtomicAccess::load(&_resize)) { - lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor()); - } - lt.print("Start cleaning with load factor %f", get_load_factor()); - success = clean(current); - } - - AtomicAccess::store(&_resize, false); - - return success; - } - - static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) { - // Enter the monitor into the concurrent hashtable. - ObjectMonitor* result = monitor; - Lookup lookup_f(obj); - auto found_f = [&](ObjectMonitor** found) { - assert((*found)->object_peek() == obj, "must be"); - result = *found; - }; - bool grow; - _table->insert_get(current, lookup_f, monitor, found_f, &grow); - verify_monitor_get_result(obj, result); - if (grow) { - try_notify_grow(); - } - return result; - } - - static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) { - LookupMonitor lookup_f(monitor); - return _table->remove(current, lookup_f); - } - - static bool contains_monitor(Thread* current, ObjectMonitor* monitor) { - LookupMonitor lookup_f(monitor); - bool result = false; - auto found_f = [&](ObjectMonitor** found) { - result = true; - }; - _table->get(current, lookup_f, found_f); - return result; - } - - static void print_on(outputStream* st) { - auto printer = [&] (ObjectMonitor** entry) { - ObjectMonitor* om = *entry; - oop obj = om->object_peek(); - st->print("monitor=" PTR_FORMAT ", ", p2i(om)); - st->print("object=" PTR_FORMAT, p2i(obj)); - assert(obj->mark().hash() == om->hash(), "hash must match"); - st->cr(); - return true; - }; - if (SafepointSynchronize::is_at_safepoint()) { - _table->do_safepoint_scan(printer); - } else { - _table->do_scan(Thread::current(), printer); - } - } -}; - -ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr; -volatile size_t ObjectMonitorTable::_items_count = 0; -size_t ObjectMonitorTable::_table_size = 0; -volatile bool ObjectMonitorTable::_resize = false; - -ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) { - ObjectMonitor* monitor = get_monitor_from_table(current, object); - if (monitor != nullptr) { - *inserted = false; - return monitor; - } - - ObjectMonitor* alloced_monitor = new 
ObjectMonitor(object); - alloced_monitor->set_anonymous_owner(); - - // Try insert monitor - monitor = add_monitor(current, alloced_monitor, object); - - *inserted = alloced_monitor == monitor; - if (!*inserted) { - delete alloced_monitor; - } - - return monitor; -} - -static void log_inflate(Thread* current, oop object, ObjectSynchronizer::InflateCause cause) { - if (log_is_enabled(Trace, monitorinflation)) { - ResourceMark rm(current); - log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark=" - INTPTR_FORMAT ", type='%s' cause=%s", p2i(object), - object->mark().value(), object->klass()->external_name(), - ObjectSynchronizer::inflate_cause_name(cause)); - } -} - -static void post_monitor_inflate_event(EventJavaMonitorInflate* event, - const oop obj, - ObjectSynchronizer::InflateCause cause) { - assert(event != nullptr, "invariant"); - const Klass* monitor_klass = obj->klass(); - if (ObjectMonitor::is_jfr_excluded(monitor_klass)) { - return; - } - event->set_monitorClass(monitor_klass); - event->set_address((uintptr_t)(void*)obj); - event->set_cause((u1)cause); - event->commit(); -} - -ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause) { - assert(UseObjectMonitorTable, "must be"); - - EventJavaMonitorInflate event; - - bool inserted; - ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted); - - if (inserted) { - log_inflate(current, object, cause); - if (event.should_commit()) { - post_monitor_inflate_event(&event, object, cause); - } - - // The monitor has an anonymous owner so it is safe from async deflation. - ObjectSynchronizer::_in_use_list.add(monitor); - } - - return monitor; -} - -// Add the hashcode to the monitor to match the object and put it in the hashtable. 
-ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) { - assert(UseObjectMonitorTable, "must be"); - assert(obj == monitor->object(), "must be"); - - intptr_t hash = obj->mark().hash(); - assert(hash != 0, "must be set when claiming the object monitor"); - monitor->set_hash(hash); - - return ObjectMonitorTable::monitor_put_get(current, monitor, obj); -} - -bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) { - assert(UseObjectMonitorTable, "must be"); - assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead"); - - return ObjectMonitorTable::remove_monitor_entry(current, monitor); -} - -void LightweightSynchronizer::deflate_mark_word(oop obj) { - assert(UseObjectMonitorTable, "must be"); - - markWord mark = obj->mark_acquire(); - assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash"); - - while (mark.has_monitor()) { - const markWord new_mark = mark.clear_lock_bits().set_unlocked(); - mark = obj->cas_set_mark(new_mark, mark); - } -} - -void LightweightSynchronizer::initialize() { - if (!UseObjectMonitorTable) { - return; - } - ObjectMonitorTable::create(); -} - -bool LightweightSynchronizer::needs_resize() { - if (!UseObjectMonitorTable) { - return false; - } - return ObjectMonitorTable::should_resize(); -} - -bool LightweightSynchronizer::resize_table(JavaThread* current) { - if (!UseObjectMonitorTable) { - return true; - } - return ObjectMonitorTable::resize(current); -} - -class LightweightSynchronizer::LockStackInflateContendedLocks : private OopClosure { - private: - oop _contended_oops[LockStack::CAPACITY]; - int _length; - - void do_oop(oop* o) final { - oop obj = *o; - if (obj->mark_acquire().has_monitor()) { - if (_length > 0 && _contended_oops[_length - 1] == obj) { - // Recursive - return; - } - _contended_oops[_length++] = obj; - } - } - - void do_oop(narrowOop* o) final { - ShouldNotReachHere(); - } - - public: - LockStackInflateContendedLocks() : - _contended_oops(), - _length(0) {}; - - void inflate(JavaThread* current) { - assert(current == JavaThread::current(), "must be"); - current->lock_stack().oops_do(this); - for (int i = 0; i < _length; i++) { - LightweightSynchronizer:: - inflate_fast_locked_object(_contended_oops[i], ObjectSynchronizer::inflate_cause_vm_internal, current, current); - } - } -}; - -void LightweightSynchronizer::ensure_lock_stack_space(JavaThread* current) { - assert(current == JavaThread::current(), "must be"); - LockStack& lock_stack = current->lock_stack(); - - // Make room on lock_stack - if (lock_stack.is_full()) { - // Inflate contended objects - LockStackInflateContendedLocks().inflate(current); - if (lock_stack.is_full()) { - // Inflate the oldest object - inflate_fast_locked_object(lock_stack.bottom(), ObjectSynchronizer::inflate_cause_vm_internal, current, current); - } - } -} - -class LightweightSynchronizer::CacheSetter : StackObj { - JavaThread* const _thread; - BasicLock* const _lock; - ObjectMonitor* _monitor; - - NONCOPYABLE(CacheSetter); - - public: - CacheSetter(JavaThread* thread, BasicLock* lock) : - _thread(thread), - _lock(lock), - _monitor(nullptr) {} - - ~CacheSetter() { - // Only use the cache if using the table. - if (UseObjectMonitorTable) { - if (_monitor != nullptr) { - // If the monitor is already in the BasicLock cache then it is most - // likely in the thread cache, do not set it again to avoid reordering. 
- if (_monitor != _lock->object_monitor_cache()) { - _thread->om_set_monitor_cache(_monitor); - _lock->set_object_monitor_cache(_monitor); - } - } else { - _lock->clear_object_monitor_cache(); - } - } - } - - void set_monitor(ObjectMonitor* monitor) { - assert(_monitor == nullptr, "only set once"); - _monitor = monitor; - } - -}; - -// Reads first from the BasicLock cache then from the OMCache in the current thread. -// C2 fast-path may have put the monitor in the cache in the BasicLock. -inline static ObjectMonitor* read_caches(JavaThread* current, BasicLock* lock, oop object) { - ObjectMonitor* monitor = lock->object_monitor_cache(); - if (monitor == nullptr) { - monitor = current->om_get_from_monitor_cache(object); - } - return monitor; -} - -class LightweightSynchronizer::VerifyThreadState { - bool _no_safepoint; - - public: - VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) { - assert(current == Thread::current(), "must be"); - assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently"); - if (_no_safepoint) { - DEBUG_ONLY(JavaThread::current()->inc_no_safepoint_count();) - } - } - ~VerifyThreadState() { - if (_no_safepoint){ - DEBUG_ONLY(JavaThread::current()->dec_no_safepoint_count();) - } - } -}; - -inline bool LightweightSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) { - markWord mark = obj->mark(); - while (mark.is_unlocked()) { - ensure_lock_stack_space(current); - assert(!lock_stack.is_full(), "must have made room on the lock stack"); - assert(!lock_stack.contains(obj), "thread must not already hold the lock"); - // Try to swing into 'fast-locked' state. - markWord locked_mark = mark.set_fast_locked(); - markWord old_mark = mark; - mark = obj->cas_set_mark(locked_mark, old_mark); - if (old_mark == mark) { - // Successfully fast-locked, push object to lock-stack and return. - lock_stack.push(obj); - return true; - } - } - return false; -} - -bool LightweightSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) { - assert(UseObjectMonitorTable, "must be"); - // Will spin with exponential backoff with an accumulative O(2^spin_limit) spins. - const int log_spin_limit = os::is_MP() ? LightweightFastLockingSpins : 1; - const int log_min_safepoint_check_interval = 10; - - markWord mark = obj->mark(); - const auto should_spin = [&]() { - if (!mark.has_monitor()) { - // Spin while not inflated. - return true; - } else if (observed_deflation) { - // Spin while monitor is being deflated. - ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark); - return monitor == nullptr || monitor->is_being_async_deflated(); - } - // Else stop spinning. - return false; - }; - // Always attempt to lock once even when safepoint synchronizing. - bool should_process = false; - for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) { - // Spin with exponential backoff. - const int total_spin_count = 1 << i; - const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count); - const int outer_spin_count = total_spin_count / inner_spin_count; - for (int outer = 0; outer < outer_spin_count; outer++) { - should_process = SafepointMechanism::should_process(current); - if (should_process) { - // Stop spinning for safepoint. 
- break; - } - for (int inner = 1; inner < inner_spin_count; inner++) { - SpinPause(); - } - } - - if (fast_lock_try_enter(obj, lock_stack, current)) return true; - } - return false; -} - -void LightweightSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) { - assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "must be cleared"); - JavaThread* current = JavaThread::current(); - VerifyThreadState vts(locking_thread, current); - - if (obj->klass()->is_value_based()) { - ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread); - } - - LockStack& lock_stack = locking_thread->lock_stack(); - - ObjectMonitor* monitor = nullptr; - if (lock_stack.contains(obj())) { - monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current); - bool entered = monitor->enter_for(locking_thread); - assert(entered, "recursive ObjectMonitor::enter_for must succeed"); - } else { - do { - // It is assumed that enter_for must enter on an object without contention. - monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current); - // But there may still be a race with deflation. - } while (monitor == nullptr); - } - - assert(monitor != nullptr, "LightweightSynchronizer::enter_for must succeed"); - assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "unused. already cleared"); -} - -void LightweightSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) { - assert(current == JavaThread::current(), "must be"); - - if (obj->klass()->is_value_based()) { - ObjectSynchronizer::handle_sync_on_value_based_class(obj, current); - } - - CacheSetter cache_setter(current, lock); - - // Used when deflation is observed. Progress here requires progress - // from the deflator. After observing that the deflator is not - // making progress (after two yields), switch to sleeping. - SpinYield spin_yield(0, 2); - bool observed_deflation = false; - - LockStack& lock_stack = current->lock_stack(); - - if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) { - // Recursively fast locked - return; - } - - if (lock_stack.contains(obj())) { - ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current); - bool entered = monitor->enter(current); - assert(entered, "recursive ObjectMonitor::enter must succeed"); - cache_setter.set_monitor(monitor); - return; - } - - while (true) { - // Fast-locking does not use the 'lock' argument. - // Fast-lock spinning to avoid inflating for short critical sections. - // The goal is to only inflate when the extra cost of using ObjectMonitors - // is worth it. - // If deflation has been observed we also spin while deflation is ongoing. - if (fast_lock_try_enter(obj(), lock_stack, current)) { - return; - } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) { - return; - } - - if (observed_deflation) { - spin_yield.wait(); - } - - ObjectMonitor* monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, current, current); - if (monitor != nullptr) { - cache_setter.set_monitor(monitor); - return; - } - - // If inflate_and_enter returns nullptr it is because a deflated monitor - // was encountered. Fallback to fast locking. The deflater is responsible - // for clearing out the monitor and transitioning the markWord back to - // fast locking. 
- observed_deflation = true; - } -} - -void LightweightSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) { - assert(current == Thread::current(), "must be"); - - markWord mark = object->mark(); - assert(!mark.is_unlocked(), "must be"); - - LockStack& lock_stack = current->lock_stack(); - if (mark.is_fast_locked()) { - if (lock_stack.try_recursive_exit(object)) { - // This is a recursive exit which succeeded - return; - } - if (lock_stack.is_recursive(object)) { - // Must inflate recursive locks if try_recursive_exit fails - // This happens for un-structured unlocks, could potentially - // fix try_recursive_exit to handle these. - inflate_fast_locked_object(object, ObjectSynchronizer::inflate_cause_vm_internal, current, current); - } - } - - while (mark.is_fast_locked()) { - markWord unlocked_mark = mark.set_unlocked(); - markWord old_mark = mark; - mark = object->cas_set_mark(unlocked_mark, old_mark); - if (old_mark == mark) { - // CAS successful, remove from lock_stack - size_t recursion = lock_stack.remove(object) - 1; - assert(recursion == 0, "Should not have unlocked here"); - return; - } - } - - assert(mark.has_monitor(), "must be"); - // The monitor exists - ObjectMonitor* monitor; - if (UseObjectMonitorTable) { - monitor = read_caches(current, lock, object); - if (monitor == nullptr) { - monitor = get_monitor_from_table(current, object); - } - } else { - monitor = ObjectSynchronizer::read_monitor(mark); - } - if (monitor->has_anonymous_owner()) { - assert(current->lock_stack().contains(object), "current must have object on its lock stack"); - monitor->set_owner_from_anonymous(current); - monitor->set_recursions(current->lock_stack().remove(object) - 1); - } - - monitor->exit(current); -} - -// LightweightSynchronizer::inflate_locked_or_imse is used to get an -// inflated ObjectMonitor* from contexts which require that, such as -// notify/wait and jni_exit. Lightweight locking keeps the invariant that it -// only inflates if it is already locked by the current thread or the current -// thread is in the process of entering. To maintain this invariant we need to -// throw a java.lang.IllegalMonitorStateException before inflating if the -// current thread is not the owner. -ObjectMonitor* LightweightSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) { - JavaThread* current = THREAD; - - for (;;) { - markWord mark = obj->mark_acquire(); - if (mark.is_unlocked()) { - // No lock, IMSE. - THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(), - "current thread is not owner", nullptr); - } - - if (mark.is_fast_locked()) { - if (!current->lock_stack().contains(obj)) { - // Fast locked by other thread, IMSE. - THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(), - "current thread is not owner", nullptr); - } else { - // Current thread owns the lock, must inflate - return inflate_fast_locked_object(obj, cause, current, current); - } - } - - assert(mark.has_monitor(), "must be"); - ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark); - if (monitor != nullptr) { - if (monitor->has_anonymous_owner()) { - LockStack& lock_stack = current->lock_stack(); - if (lock_stack.contains(obj)) { - // Current thread owns the lock but someone else inflated it. - // Fix owner and pop lock stack. - monitor->set_owner_from_anonymous(current); - monitor->set_recursions(lock_stack.remove(obj) - 1); - } else { - // Fast locked (and inflated) by other thread, or deflation in progress, IMSE. 
- THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(), - "current thread is not owner", nullptr); - } - } - return monitor; - } - } -} - -ObjectMonitor* LightweightSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current) { - - // The JavaThread* locking parameter requires that the locking_thread == JavaThread::current, - // or is suspended throughout the call by some other mechanism. - // Even with lightweight locking the thread might be nullptr when called from a non - // JavaThread. (As may still be the case from FastHashCode). However it is only - // important for the correctness of the lightweight locking algorithm that the thread - // is set when called from ObjectSynchronizer::enter from the owning thread, - // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit. - EventJavaMonitorInflate event; - - for (;;) { - const markWord mark = object->mark_acquire(); - - // The mark can be in one of the following states: - // * inflated - Just return if using stack-locking. - // If using fast-locking and the ObjectMonitor owner - // is anonymous and the locking_thread owns the - // object lock, then we make the locking_thread - // the ObjectMonitor owner and remove the lock from - // the locking_thread's lock stack. - // * fast-locked - Coerce it to inflated from fast-locked. - // * unlocked - Aggressively inflate the object. - - // CASE: inflated - if (mark.has_monitor()) { - ObjectMonitor* inf = mark.monitor(); - markWord dmw = inf->header(); - assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value()); - if (inf->has_anonymous_owner() && - locking_thread != nullptr && locking_thread->lock_stack().contains(object)) { - inf->set_owner_from_anonymous(locking_thread); - size_t removed = locking_thread->lock_stack().remove(object); - inf->set_recursions(removed - 1); - } - return inf; - } - - // CASE: fast-locked - // Could be fast-locked either by the locking_thread or by some other thread. - // - // Note that we allocate the ObjectMonitor speculatively, _before_ - // attempting to set the object's mark to the new ObjectMonitor. If - // the locking_thread owns the monitor, then we set the ObjectMonitor's - // owner to the locking_thread. Otherwise, we set the ObjectMonitor's owner - // to anonymous. If we lose the race to set the object's mark to the - // new ObjectMonitor, then we just delete it and loop around again. - // - if (mark.is_fast_locked()) { - ObjectMonitor* monitor = new ObjectMonitor(object); - monitor->set_header(mark.set_unlocked()); - bool own = locking_thread != nullptr && locking_thread->lock_stack().contains(object); - if (own) { - // Owned by locking_thread. - monitor->set_owner(locking_thread); - } else { - // Owned by somebody else. - monitor->set_anonymous_owner(); - } - markWord monitor_mark = markWord::encode(monitor); - markWord old_mark = object->cas_set_mark(monitor_mark, mark); - if (old_mark == mark) { - // Success! Return inflated monitor. 
- if (own) { - size_t removed = locking_thread->lock_stack().remove(object); - monitor->set_recursions(removed - 1); - } - // Once the ObjectMonitor is configured and object is associated - // with the ObjectMonitor, it is safe to allow async deflation: - ObjectSynchronizer::_in_use_list.add(monitor); - - log_inflate(current, object, cause); - if (event.should_commit()) { - post_monitor_inflate_event(&event, object, cause); - } - return monitor; - } else { - delete monitor; - continue; // Interference -- just retry - } - } - - // CASE: unlocked - // TODO-FIXME: for entry we currently inflate and then try to CAS _owner. - // If we know we're inflating for entry it's better to inflate by swinging a - // pre-locked ObjectMonitor pointer into the object header. A successful - // CAS inflates the object *and* confers ownership to the inflating thread. - // In the current implementation we use a 2-step mechanism where we CAS() - // to inflate and then CAS() again to try to swing _owner from null to current. - // An inflateTry() method that we could call from enter() would be useful. - - assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value()); - ObjectMonitor* m = new ObjectMonitor(object); - // prepare m for installation - set monitor to initial state - m->set_header(mark); - - if (object->cas_set_mark(markWord::encode(m), mark) != mark) { - delete m; - m = nullptr; - continue; - // interference - the markword changed - just retry. - // The state-transitions are one-way, so there's no chance of - // live-lock -- "Inflated" is an absorbing state. - } - - // Once the ObjectMonitor is configured and object is associated - // with the ObjectMonitor, it is safe to allow async deflation: - ObjectSynchronizer::_in_use_list.add(m); - - log_inflate(current, object, cause); - if (event.should_commit()) { - post_monitor_inflate_event(&event, object, cause); - } - return m; - } -} - -ObjectMonitor* LightweightSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) { - VerifyThreadState vts(locking_thread, current); - assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack"); - - ObjectMonitor* monitor; - - if (!UseObjectMonitorTable) { - return inflate_into_object_header(object, cause, locking_thread, current); - } - - // Inflating requires a hash code - ObjectSynchronizer::FastHashCode(current, object); - - markWord mark = object->mark_acquire(); - assert(!mark.is_unlocked(), "Cannot be unlocked"); - - for (;;) { - // Fetch the monitor from the table - monitor = get_or_insert_monitor(object, current, cause); - - // ObjectMonitors are always inserted as anonymously owned, this thread is - // the current holder of the monitor. So unless the entry is stale and - // contains a deflating monitor it must be anonymously owned. - if (monitor->has_anonymous_owner()) { - // The monitor must be anonymously owned if it was added - assert(monitor == get_monitor_from_table(current, object), "The monitor must be found"); - // New fresh monitor - break; - } - - // If the monitor was not anonymously owned then we got a deflating monitor - // from the table. We need to let the deflator make progress and remove this - // entry before we are allowed to add a new one. 
- os::naked_yield(); - assert(monitor->is_being_async_deflated(), "Should be the reason"); - } - - // Set the mark word; loop to handle concurrent updates to other parts of the mark word - while (mark.is_fast_locked()) { - mark = object->cas_set_mark(mark.set_has_monitor(), mark); - } - - // Indicate that the monitor now has a known owner - monitor->set_owner_from_anonymous(locking_thread); - - // Remove the entry from the thread's lock stack - monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1); - - if (locking_thread == current) { - // Only change the thread local state of the current thread. - locking_thread->om_set_monitor_cache(monitor); - } - - return monitor; -} - -ObjectMonitor* LightweightSynchronizer::inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) { - VerifyThreadState vts(locking_thread, current); - - // Note: In some paths (deoptimization) the 'current' thread inflates and - // enters the lock on behalf of the 'locking_thread' thread. - - ObjectMonitor* monitor = nullptr; - - if (!UseObjectMonitorTable) { - // Do the old inflate and enter. - monitor = inflate_into_object_header(object, cause, locking_thread, current); - - bool entered; - if (locking_thread == current) { - entered = monitor->enter(locking_thread); - } else { - entered = monitor->enter_for(locking_thread); - } - - // enter returns false for deflation found. - return entered ? monitor : nullptr; - } - - NoSafepointVerifier nsv; - - // Try to get the monitor from the thread-local cache. - // There's no need to use the cache if we are locking - // on behalf of another thread. - if (current == locking_thread) { - monitor = read_caches(current, lock, object); - } - - // Get or create the monitor - if (monitor == nullptr) { - // Lightweight monitors require that hash codes are installed first - ObjectSynchronizer::FastHashCode(locking_thread, object); - monitor = get_or_insert_monitor(object, current, cause); - } - - if (monitor->try_enter(locking_thread)) { - return monitor; - } - - // Holds is_being_async_deflated() stable throughout this function. - ObjectMonitorContentionMark contention_mark(monitor); - - /// First handle the case where the monitor from the table is deflated - if (monitor->is_being_async_deflated()) { - // The MonitorDeflation thread is deflating the monitor. The locking thread - // must spin until further progress has been made. - - // Clear the BasicLock cache as it may contain this monitor. - lock->clear_object_monitor_cache(); - - const markWord mark = object->mark_acquire(); - - if (mark.has_monitor()) { - // Waiting on the deflation thread to remove the deflated monitor from the table. - os::naked_yield(); - - } else if (mark.is_fast_locked()) { - // Some other thread managed to fast-lock the lock, or this is a - // recursive lock from the same thread; yield for the deflation - // thread to remove the deflated monitor from the table. - os::naked_yield(); - - } else { - assert(mark.is_unlocked(), "Implied"); - // Retry immediately - } - - // Retry - return nullptr; - } - - for (;;) { - const markWord mark = object->mark_acquire(); - // The mark can be in one of the following states: - // * inflated - If the ObjectMonitor owner is anonymous - // and the locking_thread owns the object - // lock, then we make the locking_thread - // the ObjectMonitor owner and remove the - // lock from the locking_thread's lock stack. - // * fast-locked - Coerce it to inflated from fast-locked. 
- // * neutral - Inflate the object. Successful CAS is locked - - // CASE: inflated - if (mark.has_monitor()) { - LockStack& lock_stack = locking_thread->lock_stack(); - if (monitor->has_anonymous_owner() && lock_stack.contains(object)) { - // The lock is fast-locked by the locking thread, - // convert it to a held monitor with a known owner. - monitor->set_owner_from_anonymous(locking_thread); - monitor->set_recursions(lock_stack.remove(object) - 1); - } - - break; // Success - } - - // CASE: fast-locked - // Could be fast-locked either by locking_thread or by some other thread. - // - if (mark.is_fast_locked()) { - markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark); - if (old_mark != mark) { - // CAS failed - continue; - } - - // Success! Return inflated monitor. - LockStack& lock_stack = locking_thread->lock_stack(); - if (lock_stack.contains(object)) { - // The lock is fast-locked by the locking thread, - // convert it to a held monitor with a known owner. - monitor->set_owner_from_anonymous(locking_thread); - monitor->set_recursions(lock_stack.remove(object) - 1); - } - - break; // Success - } - - // CASE: neutral (unlocked) - - // Catch if the object's header is not neutral (not locked and - // not marked is what we care about here). - assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value()); - markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark); - if (old_mark != mark) { - // CAS failed - continue; - } - - // Transitioned from unlocked to monitor means locking_thread owns the lock. - monitor->set_owner_from_anonymous(locking_thread); - - return monitor; - } - - if (current == locking_thread) { - // One round of spinning - if (monitor->spin_enter(locking_thread)) { - return monitor; - } - - // Monitor is contended, take the time before entering to fix the lock stack. - LockStackInflateContendedLocks().inflate(current); - } - - // enter can block for safepoints; clear the unhandled object oop - PauseNoSafepointVerifier pnsv(&nsv); - object = nullptr; - - if (current == locking_thread) { - monitor->enter_with_contention_mark(locking_thread, contention_mark); - } else { - monitor->enter_for_with_contention_mark(locking_thread, contention_mark); - } - - return monitor; -} - -void LightweightSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) { - if (obj != nullptr) { - deflate_mark_word(obj); - } - bool removed = remove_monitor(current, monitor, obj); - if (obj != nullptr) { - assert(removed, "Should have removed the entry if obj was alive"); - } -} - -ObjectMonitor* LightweightSynchronizer::get_monitor_from_table(Thread* current, oop obj) { - assert(UseObjectMonitorTable, "must be"); - return ObjectMonitorTable::monitor_get(current, obj); -} - -bool LightweightSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) { - assert(UseObjectMonitorTable, "must be"); - return ObjectMonitorTable::contains_monitor(current, monitor); -} - -bool LightweightSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) { - assert(current->thread_state() == _thread_in_Java, "must be"); - assert(obj != nullptr, "must be"); - NoSafepointVerifier nsv; - - LockStack& lock_stack = current->lock_stack(); - if (lock_stack.is_full()) { - // Always go into runtime if the lock stack is full. - return false; - } - - const markWord mark = obj->mark(); - -#ifndef _LP64 - // Only for 32bit which has limited support for fast locking outside the runtime. 
- if (lock_stack.try_recursive_enter(obj)) { - // Recursive lock successful. - return true; - } - - if (mark.is_unlocked()) { - markWord locked_mark = mark.set_fast_locked(); - if (obj->cas_set_mark(locked_mark, mark) == mark) { - // Successfully fast-locked, push object to lock-stack and return. - lock_stack.push(obj); - return true; - } - } -#endif - - if (mark.has_monitor()) { - ObjectMonitor* monitor; - if (UseObjectMonitorTable) { - monitor = read_caches(current, lock, obj); - } else { - monitor = ObjectSynchronizer::read_monitor(mark); - } - - if (monitor == nullptr) { - // Take the slow-path on a cache miss. - return false; - } - - if (UseObjectMonitorTable) { - // Set the monitor regardless of success. - // Either we successfully lock on the monitor, or we retry with the - // monitor in the slow path. If the monitor gets deflated, it will be - // cleared, either by the CacheSetter if we fast lock in enter or in - // inflate_and_enter when we see that the monitor is deflated. - lock->set_object_monitor_cache(monitor); - } - - if (monitor->spin_enter(current)) { - return true; - } - } - - // Slow-path. - return false; -} diff --git a/src/hotspot/share/runtime/lightweightSynchronizer.hpp b/src/hotspot/share/runtime/lightweightSynchronizer.hpp deleted file mode 100644 index b10e639a67c..00000000000 --- a/src/hotspot/share/runtime/lightweightSynchronizer.hpp +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#ifndef SHARE_RUNTIME_LIGHTWEIGHTSYNCHRONIZER_HPP -#define SHARE_RUNTIME_LIGHTWEIGHTSYNCHRONIZER_HPP - -#include "memory/allStatic.hpp" -#include "runtime/javaThread.hpp" -#include "runtime/objectMonitor.hpp" -#include "runtime/synchronizer.hpp" - -class ObjectMonitorTable; - -class LightweightSynchronizer : AllStatic { - private: - static ObjectMonitor* get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted); - static ObjectMonitor* get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause); - - static ObjectMonitor* add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj); - static bool remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj); - - static void deflate_mark_word(oop object); - - static void ensure_lock_stack_space(JavaThread* current); - - class CacheSetter; - class LockStackInflateContendedLocks; - class VerifyThreadState; - - public: - static void initialize(); - - static bool needs_resize(); - static bool resize_table(JavaThread* current); - - private: - static inline bool fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current); - static bool fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation); - - public: - static void enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread); - static void enter(Handle obj, BasicLock* lock, JavaThread* current); - static void exit(oop object, BasicLock* lock, JavaThread* current); - - static ObjectMonitor* inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current); - static ObjectMonitor* inflate_locked_or_imse(oop object, ObjectSynchronizer::InflateCause cause, TRAPS); - static ObjectMonitor* inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current); - static ObjectMonitor* inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current); - - static void deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor); - - static ObjectMonitor* get_monitor_from_table(Thread* current, oop obj); - - static bool contains_monitor(Thread* current, ObjectMonitor* monitor); - - static bool quick_enter(oop obj, BasicLock* Lock, JavaThread* current); -}; - -#endif // SHARE_RUNTIME_LIGHTWEIGHTSYNCHRONIZER_HPP diff --git a/src/hotspot/share/runtime/lockStack.cpp b/src/hotspot/share/runtime/lockStack.cpp index 95f72393d91..a88a84eb9f8 100644 --- a/src/hotspot/share/runtime/lockStack.cpp +++ b/src/hotspot/share/runtime/lockStack.cpp @@ -35,7 +35,7 @@ #include "runtime/safepoint.hpp" #include "runtime/stackWatermark.hpp" #include "runtime/stackWatermarkSet.inline.hpp" -#include "runtime/synchronizer.inline.hpp" +#include "runtime/synchronizer.hpp" #include "runtime/thread.hpp" #include "utilities/copy.hpp" #include "utilities/debug.hpp" @@ -82,7 +82,7 @@ void LockStack::verify(const char* msg) const { int top = to_index(_top); for (int i = 0; i < top; i++) { assert(_base[i] != nullptr, "no zapped before top"); - if (VM_Version::supports_recursive_lightweight_locking()) { + if (VM_Version::supports_recursive_fast_locking()) { oop o = _base[i]; for (; i < top - 1; i++) { // Consecutive entries may be the same diff --git a/src/hotspot/share/runtime/lockStack.inline.hpp b/src/hotspot/share/runtime/lockStack.inline.hpp index 0516a85356d..27eb07fcec8 100644 --- 
a/src/hotspot/share/runtime/lockStack.inline.hpp +++ b/src/hotspot/share/runtime/lockStack.inline.hpp @@ -1,7 +1,7 @@ /* * Copyright (c) 2022, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. - * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,11 +31,11 @@ #include "memory/iterator.hpp" #include "runtime/javaThread.hpp" -#include "runtime/lightweightSynchronizer.hpp" #include "runtime/objectMonitor.inline.hpp" #include "runtime/safepoint.hpp" #include "runtime/stackWatermark.hpp" #include "runtime/stackWatermarkSet.inline.hpp" +#include "runtime/synchronizer.hpp" #include "utilities/align.hpp" #include "utilities/globalDefinitions.hpp" @@ -87,7 +87,7 @@ inline bool LockStack::is_empty() const { } inline bool LockStack::is_recursive(oop o) const { - if (!VM_Version::supports_recursive_lightweight_locking()) { + if (!VM_Version::supports_recursive_fast_locking()) { return false; } verify("pre-is_recursive"); @@ -119,7 +119,7 @@ inline bool LockStack::is_recursive(oop o) const { } inline bool LockStack::try_recursive_enter(oop o) { - if (!VM_Version::supports_recursive_lightweight_locking()) { + if (!VM_Version::supports_recursive_fast_locking()) { return false; } verify("pre-try_recursive_enter"); @@ -145,7 +145,7 @@ inline bool LockStack::try_recursive_enter(oop o) { } inline bool LockStack::try_recursive_exit(oop o) { - if (!VM_Version::supports_recursive_lightweight_locking()) { + if (!VM_Version::supports_recursive_fast_locking()) { return false; } verify("pre-try_recursive_exit"); @@ -254,7 +254,7 @@ inline void OMCache::set_monitor(ObjectMonitor *monitor) { oop obj = monitor->object_peek(); assert(obj != nullptr, "must be alive"); - assert(monitor == LightweightSynchronizer::get_monitor_from_table(JavaThread::current(), obj), "must exist in table"); + assert(monitor == ObjectSynchronizer::get_monitor_from_table(JavaThread::current(), obj), "must exist in table"); OMCacheEntry to_insert = {obj, monitor}; diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp index 6a99568ba44..ee7629ec6f5 100644 --- a/src/hotspot/share/runtime/objectMonitor.cpp +++ b/src/hotspot/share/runtime/objectMonitor.cpp @@ -43,7 +43,6 @@ #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaThread.inline.hpp" -#include "runtime/lightweightSynchronizer.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.inline.hpp" #include "runtime/orderAccess.hpp" @@ -51,6 +50,7 @@ #include "runtime/safefetch.hpp" #include "runtime/safepointMechanism.inline.hpp" #include "runtime/sharedRuntime.hpp" +#include "runtime/synchronizer.hpp" #include "runtime/threads.hpp" #include "services/threadService.hpp" #include "utilities/debug.hpp" @@ -415,7 +415,7 @@ bool ObjectMonitor::try_lock_with_contention_mark(JavaThread* locking_thread, Ob } void ObjectMonitor::enter_for_with_contention_mark(JavaThread* locking_thread, ObjectMonitorContentionMark& contention_mark) { - // Used by LightweightSynchronizer::inflate_and_enter in deoptimization path to enter for another thread. + // Used by ObjectSynchronizer::inflate_and_enter in deoptimization path to enter for another thread. 
// The monitor is private to or already owned by locking_thread which must be suspended. // So this code may only contend with deflation. assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be"); @@ -856,7 +856,7 @@ bool ObjectMonitor::deflate_monitor(Thread* current) { } if (UseObjectMonitorTable) { - LightweightSynchronizer::deflate_monitor(current, obj, this); + ObjectSynchronizer::deflate_monitor(current, obj, this); } else if (obj != nullptr) { // Install the old mark word if nobody else has already done it. install_displaced_markword_in_object(obj); diff --git a/src/hotspot/share/runtime/objectMonitor.hpp b/src/hotspot/share/runtime/objectMonitor.hpp index 2da41786309..53b64f1e8a5 100644 --- a/src/hotspot/share/runtime/objectMonitor.hpp +++ b/src/hotspot/share/runtime/objectMonitor.hpp @@ -160,10 +160,10 @@ class ObjectMonitor : public CHeapObj<mtObjectMonitor> { // Because of frequent access, the metadata field is at offset zero (0). // Enforced by the assert() in metadata_addr(). - // * Lightweight locking with UseObjectMonitorTable: + // * Locking with UseObjectMonitorTable: // Contains the _object's hashCode. - // * Lightweight locking without UseObjectMonitorTable: - // Contains the displaced object header word - mark + // * Locking without UseObjectMonitorTable: + // Contains the displaced object header word - mark volatile uintptr_t _metadata; // metadata WeakHandle _object; // backward object pointer // Separate _metadata and _owner on different cache lines since both can diff --git a/src/hotspot/share/runtime/objectMonitor.inline.hpp b/src/hotspot/share/runtime/objectMonitor.inline.hpp index eeb451235dc..efdc33cd441 100644 --- a/src/hotspot/share/runtime/objectMonitor.inline.hpp +++ b/src/hotspot/share/runtime/objectMonitor.inline.hpp @@ -74,22 +74,22 @@ inline volatile uintptr_t* ObjectMonitor::metadata_addr() { } inline markWord ObjectMonitor::header() const { - assert(!UseObjectMonitorTable, "Lightweight locking with OM table does not use header"); + assert(!UseObjectMonitorTable, "Locking with OM table does not use header"); return markWord(metadata()); } inline void ObjectMonitor::set_header(markWord hdr) { - assert(!UseObjectMonitorTable, "Lightweight locking with OM table does not use header"); + assert(!UseObjectMonitorTable, "Locking with OM table does not use header"); set_metadata(hdr.value()); } inline intptr_t ObjectMonitor::hash() const { - assert(UseObjectMonitorTable, "Only used by lightweight locking with OM table"); + assert(UseObjectMonitorTable, "Only used when locking with OM table"); return metadata(); } inline void ObjectMonitor::set_hash(intptr_t hash) { - assert(UseObjectMonitorTable, "Only used by lightweight locking with OM table"); + assert(UseObjectMonitorTable, "Only used when locking with OM table"); set_metadata(hash); } diff --git a/src/hotspot/share/runtime/serviceThread.cpp b/src/hotspot/share/runtime/serviceThread.cpp index 03168842e36..27958885a7f 100644 --- a/src/hotspot/share/runtime/serviceThread.cpp +++ b/src/hotspot/share/runtime/serviceThread.cpp @@ -36,10 +36,10 @@ #include "prims/resolvedMethodTable.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" -#include "runtime/lightweightSynchronizer.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" #include "runtime/serviceThread.hpp" +#include "runtime/synchronizer.hpp" #include "services/finalizerService.hpp" #include "services/gcNotifier.hpp" #include "services/lowMemoryDetector.hpp" @@ -113,7 +113,7 @@
void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) { (cldg_cleanup_work = ClassLoaderDataGraph::should_clean_metaspaces_and_reset()) | (jvmti_tagmap_work = JvmtiTagMap::has_object_free_events_and_reset()) | (oopmap_cache_work = OopMapCache::has_cleanup_work()) | - (object_monitor_table_work = LightweightSynchronizer::needs_resize()) + (object_monitor_table_work = ObjectSynchronizer::needs_resize()) ) == 0) { // Wait until notified that there is some work to do or timer expires. // Some cleanup requests don't notify the ServiceThread so work needs to be done at periodic intervals. @@ -173,7 +173,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) { } if (object_monitor_table_work) { - LightweightSynchronizer::resize_table(jt); + ObjectSynchronizer::resize_table(jt); } } } diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp index a980838ed76..afa4558c7ae 100644 --- a/src/hotspot/share/runtime/sharedRuntime.cpp +++ b/src/hotspot/share/runtime/sharedRuntime.cpp @@ -72,7 +72,7 @@ #include "runtime/sharedRuntime.hpp" #include "runtime/stackWatermarkSet.hpp" #include "runtime/stubRoutines.hpp" -#include "runtime/synchronizer.inline.hpp" +#include "runtime/synchronizer.hpp" #include "runtime/timerTrace.hpp" #include "runtime/vframe.inline.hpp" #include "runtime/vframeArray.hpp" @@ -2029,7 +2029,6 @@ void SharedRuntime::monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThrea ExceptionMark em(current); - // Check if C2_MacroAssembler::fast_unlock() or - // C2_MacroAssembler::fast_unlock_lightweight() unlocked an inflated + // Check if C2_MacroAssembler::fast_unlock() unlocked an inflated // monitor before going slow path. Since there is no safepoint // polling when calling into the VM, we can be sure that the monitor // hasn't been deallocated. diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp index 36e38b4dd35..fe95320c574 100644 --- a/src/hotspot/share/runtime/synchronizer.cpp +++ b/src/hotspot/share/runtime/synchronizer.cpp @@ -41,7 +41,6 @@ #include "runtime/handshake.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaThread.hpp" -#include "runtime/lightweightSynchronizer.hpp" #include "runtime/lockStack.inline.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.inline.hpp" @@ -51,13 +50,16 @@ #include "runtime/safepointVerifiers.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" -#include "runtime/synchronizer.inline.hpp" +#include "runtime/synchronizer.hpp" #include "runtime/threads.hpp" #include "runtime/timer.hpp" +#include "runtime/timerTrace.hpp" #include "runtime/trimNativeHeap.hpp" #include "runtime/vframe.hpp" #include "runtime/vmThread.hpp" #include "utilities/align.hpp" +#include "utilities/concurrentHashTable.inline.hpp" +#include "utilities/concurrentHashTableTasks.inline.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" #include "utilities/globalCounter.inline.hpp" @@ -281,7 +283,7 @@ void ObjectSynchronizer::initialize() { // Start the timer for deflations, so it does not trigger immediately.
_last_async_deflation_time_ns = os::javaTimeNanos(); - LightweightSynchronizer::initialize(); + ObjectSynchronizer::create_om_table(); } MonitorList ObjectSynchronizer::_in_use_list; @@ -421,17 +423,6 @@ void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread } } -// ----------------------------------------------------------------------------- -// Monitor Enter/Exit - -void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) { - // When called with locking_thread != Thread::current() some mechanism must synchronize - // the locking_thread with respect to the current thread. Currently only used when - // deoptimizing and re-locking locks. See Deoptimization::relock_objects - assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be"); - return LightweightSynchronizer::enter_for(obj, lock, locking_thread); -} - // ----------------------------------------------------------------------------- // JNI locks on java objects // NOTE: must use heavy weight monitor to handle jni monitor enter @@ -451,7 +442,7 @@ void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) { // we have lost the race to async deflation and we simply try again. while (true) { BasicLock lock; - if (LightweightSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) { + if (ObjectSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) { break; } } @@ -463,7 +454,7 @@ void ObjectSynchronizer::jni_exit(oop obj, TRAPS) { JavaThread* current = THREAD; ObjectMonitor* monitor; - monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK); + monitor = ObjectSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK); // If this thread has locked the object, exit the monitor. We // intentionally do not use CHECK on check_owner because we must exit the // monitor even if an exception was already pending. @@ -526,7 +517,7 @@ int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) { } ObjectMonitor* monitor; - monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0); + monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0); DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis); monitor->wait(millis, true, THREAD); // Not CHECK as we need following code @@ -543,7 +534,7 @@ void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) { assert(millis >= 0, "timeout value is negative"); ObjectMonitor* monitor; - monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK); + monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK); monitor->wait(millis, false, THREAD); } @@ -556,7 +547,7 @@ void ObjectSynchronizer::notify(Handle obj, TRAPS) { // Not inflated so there can't be any waiters to notify. 
return; } - ObjectMonitor* monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK); + ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK); monitor->notify(CHECK); } @@ -570,7 +561,7 @@ void ObjectSynchronizer::notifyall(Handle obj, TRAPS) { return; } - ObjectMonitor* monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK); + ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK); monitor->notifyAll(CHECK); } @@ -647,40 +638,15 @@ static intptr_t get_next_hash(Thread* current, oop obj) { return value; } -static intptr_t install_hash_code(Thread* current, oop obj) { - assert(UseObjectMonitorTable, "must be"); - - markWord mark = obj->mark_acquire(); - for (;;) { - intptr_t hash = mark.hash(); - if (hash != 0) { - return hash; - } - - hash = get_next_hash(current, obj); - const markWord old_mark = mark; - const markWord new_mark = old_mark.copy_set_hash(hash); - - mark = obj->cas_set_mark(new_mark, old_mark); - if (old_mark == mark) { - return hash; - } - } -} - intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) { - if (UseObjectMonitorTable) { - // Since the monitor isn't in the object header, the hash can simply be - // installed in the object header. - return install_hash_code(current, obj); - } - while (true) { ObjectMonitor* monitor = nullptr; markWord temp, test; intptr_t hash; markWord mark = obj->mark_acquire(); - if (mark.is_unlocked() || mark.is_fast_locked()) { + // If UseObjectMonitorTable is set the hash can simply be installed in the + // object header, since the monitor isn't in the object header. + if (UseObjectMonitorTable || !mark.has_monitor()) { hash = mark.hash(); if (hash != 0) { // if it has a hash, just return it return hash; @@ -699,7 +665,8 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) { // installed the hash just before our attempt or inflation has // occurred or... so we fall thru to inflate the monitor for // stability and then install the hash. 
- } else if (mark.has_monitor()) { + } else { + assert(!mark.is_unlocked() && !mark.is_fast_locked(), "invariant"); monitor = mark.monitor(); temp = monitor->header(); assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value()); @@ -1230,7 +1197,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors() { #ifdef ASSERT if (UseObjectMonitorTable) { for (ObjectMonitor* monitor : delete_list) { - assert(!LightweightSynchronizer::contains_monitor(current, monitor), "Should have been removed"); + assert(!ObjectSynchronizer::contains_monitor(current, monitor), "Should have been removed"); } } #endif @@ -1502,3 +1469,1219 @@ void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_ out->flush(); } + +// ----------------------------------------------------------------------------- +// ConcurrentHashTable storing links from objects to ObjectMonitors +class ObjectMonitorTable : AllStatic { + struct Config { + using Value = ObjectMonitor*; + static uintx get_hash(Value const& value, bool* is_dead) { + return (uintx)value->hash(); + } + static void* allocate_node(void* context, size_t size, Value const& value) { + ObjectMonitorTable::inc_items_count(); + return AllocateHeap(size, mtObjectMonitor); + }; + static void free_node(void* context, void* memory, Value const& value) { + ObjectMonitorTable::dec_items_count(); + FreeHeap(memory); + } + }; + using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>; + + static ConcurrentTable* _table; + static volatile size_t _items_count; + static size_t _table_size; + static volatile bool _resize; + + class Lookup : public StackObj { + oop _obj; + + public: + explicit Lookup(oop obj) : _obj(obj) {} + + uintx get_hash() const { + uintx hash = _obj->mark().hash(); + assert(hash != 0, "should have a hash"); + return hash; + } + + bool equals(ObjectMonitor** value) { + assert(*value != nullptr, "must be"); + return (*value)->object_refers_to(_obj); + } + + bool is_dead(ObjectMonitor** value) { + assert(*value != nullptr, "must be"); + return false; + } + }; + + class LookupMonitor : public StackObj { + ObjectMonitor* _monitor; + + public: + explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {} + + uintx get_hash() const { + return _monitor->hash(); + } + + bool equals(ObjectMonitor** value) { + return (*value) == _monitor; + } + + bool is_dead(ObjectMonitor** value) { + assert(*value != nullptr, "must be"); + return (*value)->object_is_dead(); + } + }; + + static void inc_items_count() { + AtomicAccess::inc(&_items_count, memory_order_relaxed); + } + + static void dec_items_count() { + AtomicAccess::dec(&_items_count, memory_order_relaxed); + } + + static double get_load_factor() { + size_t count = AtomicAccess::load(&_items_count); + return (double)count / (double)_table_size; + } + + static size_t table_size(Thread* current = Thread::current()) { + return ((size_t)1) << _table->get_size_log2(current); + } + + static size_t max_log_size() { + // TODO[OMTable]: Evaluate the max size. + // TODO[OMTable]: Need to fix init order to use Universe::heap()->max_capacity(); + // Using MaxHeapSize directly this early may be wrong, and there + // are definitely rounding errors (alignment).
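For scale, a worked example of the sizing below (illustrative numbers, not part of the patch): with MaxHeapSize = 4 GiB and a 16-byte minimum object size, max_objects = 2^32 / 16 = 2^28, so log_max_objects = 28 and max_log_size() comes out as MAX2(MIN2(SIZE_BIG_LOG2, 28), min_log_size()). Likewise, initial_log_size() on an 8-processor machine with the default AvgMonitorsPerThreadEstimate of 1024 gives log2(8) + log2(1024) = 3 + 10 = 13, i.e. an initial table of 2^13 = 8192 buckets.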
+ const size_t max_capacity = MaxHeapSize; + const size_t min_object_size = CollectedHeap::min_dummy_object_size() * HeapWordSize; + const size_t max_objects = max_capacity / MAX2(MinObjAlignmentInBytes, checked_cast<int>(min_object_size)); + const size_t log_max_objects = log2i_graceful(max_objects); + + return MAX2(MIN2(SIZE_BIG_LOG2, log_max_objects), min_log_size()); + } + + static size_t min_log_size() { + // ~= log(AvgMonitorsPerThreadEstimate default) + return 10; + } + + template<typename V> + static size_t clamp_log_size(V log_size) { + return MAX2(MIN2(log_size, checked_cast<V>(max_log_size())), checked_cast<V>(min_log_size())); + } + + static size_t initial_log_size() { + const size_t estimate = log2i(MAX2(os::processor_count(), 1)) + log2i(MAX2(AvgMonitorsPerThreadEstimate, size_t(1))); + return clamp_log_size(estimate); + } + + static size_t grow_hint() { + return ConcurrentTable::DEFAULT_GROW_HINT; + } + + public: + static void create() { + _table = new ConcurrentTable(initial_log_size(), max_log_size(), grow_hint()); + _items_count = 0; + _table_size = table_size(); + _resize = false; + } + + static void verify_monitor_get_result(oop obj, ObjectMonitor* monitor) { +#ifdef ASSERT + if (SafepointSynchronize::is_at_safepoint()) { + bool has_monitor = obj->mark().has_monitor(); + assert(has_monitor == (monitor != nullptr), + "Inconsistency between markWord and ObjectMonitorTable has_monitor: %s monitor: " PTR_FORMAT, + BOOL_TO_STR(has_monitor), p2i(monitor)); + } +#endif + } + + static ObjectMonitor* monitor_get(Thread* current, oop obj) { + ObjectMonitor* result = nullptr; + Lookup lookup_f(obj); + auto found_f = [&](ObjectMonitor** found) { + assert((*found)->object_peek() == obj, "must be"); + result = *found; + }; + _table->get(current, lookup_f, found_f); + verify_monitor_get_result(obj, result); + return result; + } + + static void try_notify_grow() { + if (!_table->is_max_size_reached() && !AtomicAccess::load(&_resize)) { + AtomicAccess::store(&_resize, true); + if (Service_lock->try_lock()) { + Service_lock->notify(); + Service_lock->unlock(); + } + } + } + + static bool should_shrink() { + // Not implemented. + return false; + } + + static constexpr double GROW_LOAD_FACTOR = 0.75; + + static bool should_grow() { + return get_load_factor() > GROW_LOAD_FACTOR && !_table->is_max_size_reached(); + } + + static bool should_resize() { + return should_grow() || should_shrink() || AtomicAccess::load(&_resize); + } + + template<typename Task, typename... Args> + static bool run_task(JavaThread* current, Task& task, const char* task_name, Args&...
args) { + if (task.prepare(current)) { + log_trace(monitortable)("Started to %s", task_name); + TraceTime timer(task_name, TRACETIME_LOG(Debug, monitortable, perf)); + while (task.do_task(current, args...)) { + task.pause(current); + { + ThreadBlockInVM tbivm(current); + } + task.cont(current); + } + task.done(current); + return true; + } + return false; + } + + static bool grow(JavaThread* current) { + ConcurrentTable::GrowTask grow_task(_table); + if (run_task(current, grow_task, "Grow")) { + _table_size = table_size(current); + log_info(monitortable)("Grown to size: %zu", _table_size); + return true; + } + return false; + } + + static bool clean(JavaThread* current) { + ConcurrentTable::BulkDeleteTask clean_task(_table); + auto is_dead = [&](ObjectMonitor** monitor) { + return (*monitor)->object_is_dead(); + }; + auto do_nothing = [&](ObjectMonitor** monitor) {}; + NativeHeapTrimmer::SuspendMark sm("ObjectMonitorTable"); + return run_task(current, clean_task, "Clean", is_dead, do_nothing); + } + + static bool resize(JavaThread* current) { + LogTarget(Info, monitortable) lt; + bool success = false; + + if (should_grow()) { + lt.print("Start growing with load factor %f", get_load_factor()); + success = grow(current); + } else { + if (!_table->is_max_size_reached() && AtomicAccess::load(&_resize)) { + lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor()); + } + lt.print("Start cleaning with load factor %f", get_load_factor()); + success = clean(current); + } + + AtomicAccess::store(&_resize, false); + + return success; + } + + static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) { + // Enter the monitor into the concurrent hashtable. + ObjectMonitor* result = monitor; + Lookup lookup_f(obj); + auto found_f = [&](ObjectMonitor** found) { + assert((*found)->object_peek() == obj, "must be"); + result = *found; + }; + bool grow; + _table->insert_get(current, lookup_f, monitor, found_f, &grow); + verify_monitor_get_result(obj, result); + if (grow) { + try_notify_grow(); + } + return result; + } + + static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) { + LookupMonitor lookup_f(monitor); + return _table->remove(current, lookup_f); + } + + static bool contains_monitor(Thread* current, ObjectMonitor* monitor) { + LookupMonitor lookup_f(monitor); + bool result = false; + auto found_f = [&](ObjectMonitor** found) { + result = true; + }; + _table->get(current, lookup_f, found_f); + return result; + } + + static void print_on(outputStream* st) { + auto printer = [&] (ObjectMonitor** entry) { + ObjectMonitor* om = *entry; + oop obj = om->object_peek(); + st->print("monitor=" PTR_FORMAT ", ", p2i(om)); + st->print("object=" PTR_FORMAT, p2i(obj)); + assert(obj->mark().hash() == om->hash(), "hash must match"); + st->cr(); + return true; + }; + if (SafepointSynchronize::is_at_safepoint()) { + _table->do_safepoint_scan(printer); + } else { + _table->do_scan(Thread::current(), printer); + } + } +}; + +ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr; +volatile size_t ObjectMonitorTable::_items_count = 0; +size_t ObjectMonitorTable::_table_size = 0; +volatile bool ObjectMonitorTable::_resize = false; + +ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) { + ObjectMonitor* monitor = get_monitor_from_table(current, object); + if (monitor != nullptr) { + *inserted = false; + return monitor; + } + + ObjectMonitor* alloced_monitor = new 
ObjectMonitor(object); + alloced_monitor->set_anonymous_owner(); + + // Try insert monitor + monitor = add_monitor(current, alloced_monitor, object); + + *inserted = alloced_monitor == monitor; + if (!*inserted) { + delete alloced_monitor; + } + + return monitor; +} + +static void log_inflate(Thread* current, oop object, ObjectSynchronizer::InflateCause cause) { + if (log_is_enabled(Trace, monitorinflation)) { + ResourceMark rm(current); + log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark=" + INTPTR_FORMAT ", type='%s' cause=%s", p2i(object), + object->mark().value(), object->klass()->external_name(), + ObjectSynchronizer::inflate_cause_name(cause)); + } +} + +static void post_monitor_inflate_event(EventJavaMonitorInflate* event, + const oop obj, + ObjectSynchronizer::InflateCause cause) { + assert(event != nullptr, "invariant"); + const Klass* monitor_klass = obj->klass(); + if (ObjectMonitor::is_jfr_excluded(monitor_klass)) { + return; + } + event->set_monitorClass(monitor_klass); + event->set_address((uintptr_t)(void*)obj); + event->set_cause((u1)cause); + event->commit(); +} + +ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause) { + assert(UseObjectMonitorTable, "must be"); + + EventJavaMonitorInflate event; + + bool inserted; + ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted); + + if (inserted) { + log_inflate(current, object, cause); + if (event.should_commit()) { + post_monitor_inflate_event(&event, object, cause); + } + + // The monitor has an anonymous owner so it is safe from async deflation. + ObjectSynchronizer::_in_use_list.add(monitor); + } + + return monitor; +} + +// Add the hashcode to the monitor to match the object and put it in the hashtable. 
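(Note: the hash installed below doubles as the hashtable key. ObjectMonitorTable::Config::get_hash() reads ObjectMonitor::hash(), Lookup::get_hash() reads the same value from the object's mark word, and LookupMonitor::get_hash() reads the monitor's stored copy, so an object and its monitor always map to the same bucket.)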
+ObjectMonitor* ObjectSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) { + assert(UseObjectMonitorTable, "must be"); + assert(obj == monitor->object(), "must be"); + + intptr_t hash = obj->mark().hash(); + assert(hash != 0, "must be set when claiming the object monitor"); + monitor->set_hash(hash); + + return ObjectMonitorTable::monitor_put_get(current, monitor, obj); +} + +bool ObjectSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) { + assert(UseObjectMonitorTable, "must be"); + assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead"); + + return ObjectMonitorTable::remove_monitor_entry(current, monitor); +} + +void ObjectSynchronizer::deflate_mark_word(oop obj) { + assert(UseObjectMonitorTable, "must be"); + + markWord mark = obj->mark_acquire(); + assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash"); + + while (mark.has_monitor()) { + const markWord new_mark = mark.clear_lock_bits().set_unlocked(); + mark = obj->cas_set_mark(new_mark, mark); + } +} + +void ObjectSynchronizer::create_om_table() { + if (!UseObjectMonitorTable) { + return; + } + ObjectMonitorTable::create(); +} + +bool ObjectSynchronizer::needs_resize() { + if (!UseObjectMonitorTable) { + return false; + } + return ObjectMonitorTable::should_resize(); +} + +bool ObjectSynchronizer::resize_table(JavaThread* current) { + if (!UseObjectMonitorTable) { + return true; + } + return ObjectMonitorTable::resize(current); +} + +class ObjectSynchronizer::LockStackInflateContendedLocks : private OopClosure { + private: + oop _contended_oops[LockStack::CAPACITY]; + int _length; + + void do_oop(oop* o) final { + oop obj = *o; + if (obj->mark_acquire().has_monitor()) { + if (_length > 0 && _contended_oops[_length - 1] == obj) { + // Recursive + return; + } + _contended_oops[_length++] = obj; + } + } + + void do_oop(narrowOop* o) final { + ShouldNotReachHere(); + } + + public: + LockStackInflateContendedLocks() : + _contended_oops(), + _length(0) {}; + + void inflate(JavaThread* current) { + assert(current == JavaThread::current(), "must be"); + current->lock_stack().oops_do(this); + for (int i = 0; i < _length; i++) { + ObjectSynchronizer:: + inflate_fast_locked_object(_contended_oops[i], ObjectSynchronizer::inflate_cause_vm_internal, current, current); + } + } +}; + +void ObjectSynchronizer::ensure_lock_stack_space(JavaThread* current) { + assert(current == JavaThread::current(), "must be"); + LockStack& lock_stack = current->lock_stack(); + + // Make room on lock_stack + if (lock_stack.is_full()) { + // Inflate contended objects + LockStackInflateContendedLocks().inflate(current); + if (lock_stack.is_full()) { + // Inflate the oldest object + inflate_fast_locked_object(lock_stack.bottom(), ObjectSynchronizer::inflate_cause_vm_internal, current, current); + } + } +} + +class ObjectSynchronizer::CacheSetter : StackObj { + JavaThread* const _thread; + BasicLock* const _lock; + ObjectMonitor* _monitor; + + NONCOPYABLE(CacheSetter); + + public: + CacheSetter(JavaThread* thread, BasicLock* lock) : + _thread(thread), + _lock(lock), + _monitor(nullptr) {} + + ~CacheSetter() { + // Only use the cache if using the table. + if (UseObjectMonitorTable) { + if (_monitor != nullptr) { + // If the monitor is already in the BasicLock cache then it is most + // likely in the thread cache, do not set it again to avoid reordering. 
+ if (_monitor != _lock->object_monitor_cache()) { + _thread->om_set_monitor_cache(_monitor); + _lock->set_object_monitor_cache(_monitor); + } + } else { + _lock->clear_object_monitor_cache(); + } + } + } + + void set_monitor(ObjectMonitor* monitor) { + assert(_monitor == nullptr, "only set once"); + _monitor = monitor; + } + +}; + +// Reads first from the BasicLock cache then from the OMCache in the current thread. +// C2 fast-path may have put the monitor in the cache in the BasicLock. +inline static ObjectMonitor* read_caches(JavaThread* current, BasicLock* lock, oop object) { + ObjectMonitor* monitor = lock->object_monitor_cache(); + if (monitor == nullptr) { + monitor = current->om_get_from_monitor_cache(object); + } + return monitor; +} + +class ObjectSynchronizer::VerifyThreadState { + bool _no_safepoint; + + public: + VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) { + assert(current == Thread::current(), "must be"); + assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently"); + if (_no_safepoint) { + DEBUG_ONLY(JavaThread::current()->inc_no_safepoint_count();) + } + } + ~VerifyThreadState() { + if (_no_safepoint){ + DEBUG_ONLY(JavaThread::current()->dec_no_safepoint_count();) + } + } +}; + +inline bool ObjectSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) { + markWord mark = obj->mark(); + while (mark.is_unlocked()) { + ensure_lock_stack_space(current); + assert(!lock_stack.is_full(), "must have made room on the lock stack"); + assert(!lock_stack.contains(obj), "thread must not already hold the lock"); + // Try to swing into 'fast-locked' state. + markWord locked_mark = mark.set_fast_locked(); + markWord old_mark = mark; + mark = obj->cas_set_mark(locked_mark, old_mark); + if (old_mark == mark) { + // Successfully fast-locked, push object to lock-stack and return. + lock_stack.push(obj); + return true; + } + } + return false; +} + +bool ObjectSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) { + assert(UseObjectMonitorTable, "must be"); + // Will spin with exponential backoff with an accumulative O(2^spin_limit) spins. + const int log_spin_limit = os::is_MP() ? FastLockingSpins : 1; + const int log_min_safepoint_check_interval = 10; + + markWord mark = obj->mark(); + const auto should_spin = [&]() { + if (!mark.has_monitor()) { + // Spin while not inflated. + return true; + } else if (observed_deflation) { + // Spin while monitor is being deflated. + ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark); + return monitor == nullptr || monitor->is_being_async_deflated(); + } + // Else stop spinning. + return false; + }; + // Always attempt to lock once even when safepoint synchronizing. + bool should_process = false; + for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) { + // Spin with exponential backoff. + const int total_spin_count = 1 << i; + const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count); + const int outer_spin_count = total_spin_count / inner_spin_count; + for (int outer = 0; outer < outer_spin_count; outer++) { + should_process = SafepointMechanism::should_process(current); + if (should_process) { + // Stop spinning for safepoint. 
+ break; + } + for (int inner = 1; inner < inner_spin_count; inner++) { + SpinPause(); + } + } + + if (fast_lock_try_enter(obj, lock_stack, current)) return true; + } + return false; +} + +void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) { + // When called with locking_thread != Thread::current() some mechanism must synchronize + // the locking_thread with respect to the current thread. Currently only used when + // deoptimizing and re-locking locks. See Deoptimization::relock_objects + assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be"); + + assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "must be cleared"); + JavaThread* current = JavaThread::current(); + VerifyThreadState vts(locking_thread, current); + + if (obj->klass()->is_value_based()) { + ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread); + } + + LockStack& lock_stack = locking_thread->lock_stack(); + + ObjectMonitor* monitor = nullptr; + if (lock_stack.contains(obj())) { + monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current); + bool entered = monitor->enter_for(locking_thread); + assert(entered, "recursive ObjectMonitor::enter_for must succeed"); + } else { + do { + // It is assumed that enter_for must enter on an object without contention. + monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current); + // But there may still be a race with deflation. + } while (monitor == nullptr); + } + + assert(monitor != nullptr, "ObjectSynchronizer::enter_for must succeed"); + assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "unused. already cleared"); +} + +void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) { + assert(current == JavaThread::current(), "must be"); + + if (obj->klass()->is_value_based()) { + ObjectSynchronizer::handle_sync_on_value_based_class(obj, current); + } + + CacheSetter cache_setter(current, lock); + + // Used when deflation is observed. Progress here requires progress + // from the deflator. After observing that the deflator is not + // making progress (after two yields), switch to sleeping. + SpinYield spin_yield(0, 2); + bool observed_deflation = false; + + LockStack& lock_stack = current->lock_stack(); + + if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) { + // Recursively fast locked + return; + } + + if (lock_stack.contains(obj())) { + ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current); + bool entered = monitor->enter(current); + assert(entered, "recursive ObjectMonitor::enter must succeed"); + cache_setter.set_monitor(monitor); + return; + } + + while (true) { + // Fast-locking does not use the 'lock' argument. + // Fast-lock spinning to avoid inflating for short critical sections. + // The goal is to only inflate when the extra cost of using ObjectMonitors + // is worth it. + // If deflation has been observed we also spin while deflation is ongoing. 
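(For scale, assuming a log_spin_limit of 13 — the real value comes from FastLockingSpins — fast_lock_spin_enter above performs at most 2^0 + 2^1 + ... + 2^12 = 8191 spin-pause iterations in total, re-attempts fast_lock_try_enter() after every backoff round, and polls SafepointMechanism::should_process() at least once every 2^10 = 1024 spins.)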
+ if (fast_lock_try_enter(obj(), lock_stack, current)) { + return; + } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) { + return; + } + + if (observed_deflation) { + spin_yield.wait(); + } + + ObjectMonitor* monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, current, current); + if (monitor != nullptr) { + cache_setter.set_monitor(monitor); + return; + } + + // If inflate_and_enter returns nullptr it is because a deflated monitor + // was encountered. Fallback to fast locking. The deflater is responsible + // for clearing out the monitor and transitioning the markWord back to + // fast locking. + observed_deflation = true; + } +} + +void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) { + assert(current == Thread::current(), "must be"); + + markWord mark = object->mark(); + assert(!mark.is_unlocked(), "must be"); + + LockStack& lock_stack = current->lock_stack(); + if (mark.is_fast_locked()) { + if (lock_stack.try_recursive_exit(object)) { + // This is a recursive exit which succeeded + return; + } + if (lock_stack.is_recursive(object)) { + // Must inflate recursive locks if try_recursive_exit fails + // This happens for un-structured unlocks, could potentially + // fix try_recursive_exit to handle these. + inflate_fast_locked_object(object, ObjectSynchronizer::inflate_cause_vm_internal, current, current); + } + } + + while (mark.is_fast_locked()) { + markWord unlocked_mark = mark.set_unlocked(); + markWord old_mark = mark; + mark = object->cas_set_mark(unlocked_mark, old_mark); + if (old_mark == mark) { + // CAS successful, remove from lock_stack + size_t recursion = lock_stack.remove(object) - 1; + assert(recursion == 0, "Should not have unlocked here"); + return; + } + } + + assert(mark.has_monitor(), "must be"); + // The monitor exists + ObjectMonitor* monitor; + if (UseObjectMonitorTable) { + monitor = read_caches(current, lock, object); + if (monitor == nullptr) { + monitor = get_monitor_from_table(current, object); + } + } else { + monitor = ObjectSynchronizer::read_monitor(mark); + } + if (monitor->has_anonymous_owner()) { + assert(current->lock_stack().contains(object), "current must have object on its lock stack"); + monitor->set_owner_from_anonymous(current); + monitor->set_recursions(current->lock_stack().remove(object) - 1); + } + + monitor->exit(current); +} + +// ObjectSynchronizer::inflate_locked_or_imse is used to get an +// inflated ObjectMonitor* from contexts which require that, such as +// notify/wait and jni_exit. Fast locking keeps the invariant that it +// only inflates if it is already locked by the current thread or the current +// thread is in the process of entering. To maintain this invariant we need to +// throw a java.lang.IllegalMonitorStateException before inflating if the +// current thread is not the owner. +ObjectMonitor* ObjectSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) { + JavaThread* current = THREAD; + + for (;;) { + markWord mark = obj->mark_acquire(); + if (mark.is_unlocked()) { + // No lock, IMSE. + THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(), + "current thread is not owner", nullptr); + } + + if (mark.is_fast_locked()) { + if (!current->lock_stack().contains(obj)) { + // Fast locked by other thread, IMSE. 
+ THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(), + "current thread is not owner", nullptr); + } else { + // Current thread owns the lock, must inflate + return inflate_fast_locked_object(obj, cause, current, current); + } + } + + assert(mark.has_monitor(), "must be"); + ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark); + if (monitor != nullptr) { + if (monitor->has_anonymous_owner()) { + LockStack& lock_stack = current->lock_stack(); + if (lock_stack.contains(obj)) { + // Current thread owns the lock but someone else inflated it. + // Fix owner and pop lock stack. + monitor->set_owner_from_anonymous(current); + monitor->set_recursions(lock_stack.remove(obj) - 1); + } else { + // Fast locked (and inflated) by other thread, or deflation in progress, IMSE. + THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(), + "current thread is not owner", nullptr); + } + } + return monitor; + } + } +} + +ObjectMonitor* ObjectSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current) { + + // The JavaThread* locking parameter requires that the locking_thread == JavaThread::current, + // or is suspended throughout the call by some other mechanism. + // Even with fast locking the thread might be nullptr when called from a non + // JavaThread. (As may still be the case from FastHashCode). However it is only + // important for the correctness of the fast locking algorithm that the thread + // is set when called from ObjectSynchronizer::enter from the owning thread, + // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit. + EventJavaMonitorInflate event; + + for (;;) { + const markWord mark = object->mark_acquire(); + + // The mark can be in one of the following states: + // * inflated - Just return if using stack-locking. + // If using fast-locking and the ObjectMonitor owner + // is anonymous and the locking_thread owns the + // object lock, then we make the locking_thread + // the ObjectMonitor owner and remove the lock from + // the locking_thread's lock stack. + // * fast-locked - Coerce it to inflated from fast-locked. + // * unlocked - Aggressively inflate the object. + + // CASE: inflated + if (mark.has_monitor()) { + ObjectMonitor* inf = mark.monitor(); + markWord dmw = inf->header(); + assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value()); + if (inf->has_anonymous_owner() && + locking_thread != nullptr && locking_thread->lock_stack().contains(object)) { + inf->set_owner_from_anonymous(locking_thread); + size_t removed = locking_thread->lock_stack().remove(object); + inf->set_recursions(removed - 1); + } + return inf; + } + + // CASE: fast-locked + // Could be fast-locked either by the locking_thread or by some other thread. + // + // Note that we allocate the ObjectMonitor speculatively, _before_ + // attempting to set the object's mark to the new ObjectMonitor. If + // the locking_thread owns the monitor, then we set the ObjectMonitor's + // owner to the locking_thread. Otherwise, we set the ObjectMonitor's owner + // to anonymous. If we lose the race to set the object's mark to the + // new ObjectMonitor, then we just delete it and loop around again. + // + if (mark.is_fast_locked()) { + ObjectMonitor* monitor = new ObjectMonitor(object); + monitor->set_header(mark.set_unlocked()); + bool own = locking_thread != nullptr && locking_thread->lock_stack().contains(object); + if (own) { + // Owned by locking_thread. 
+ monitor->set_owner(locking_thread); + } else { + // Owned by somebody else. + monitor->set_anonymous_owner(); + } + markWord monitor_mark = markWord::encode(monitor); + markWord old_mark = object->cas_set_mark(monitor_mark, mark); + if (old_mark == mark) { + // Success! Return inflated monitor. + if (own) { + size_t removed = locking_thread->lock_stack().remove(object); + monitor->set_recursions(removed - 1); + } + // Once the ObjectMonitor is configured and object is associated + // with the ObjectMonitor, it is safe to allow async deflation: + ObjectSynchronizer::_in_use_list.add(monitor); + + log_inflate(current, object, cause); + if (event.should_commit()) { + post_monitor_inflate_event(&event, object, cause); + } + return monitor; + } else { + delete monitor; + continue; // Interference -- just retry + } + } + + // CASE: unlocked + // TODO-FIXME: for entry we currently inflate and then try to CAS _owner. + // If we know we're inflating for entry it's better to inflate by swinging a + // pre-locked ObjectMonitor pointer into the object header. A successful + // CAS inflates the object *and* confers ownership to the inflating thread. + // In the current implementation we use a 2-step mechanism where we CAS() + // to inflate and then CAS() again to try to swing _owner from null to current. + // An inflateTry() method that we could call from enter() would be useful. + + assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value()); + ObjectMonitor* m = new ObjectMonitor(object); + // prepare m for installation - set monitor to initial state + m->set_header(mark); + + if (object->cas_set_mark(markWord::encode(m), mark) != mark) { + delete m; + m = nullptr; + continue; + // interference - the markword changed - just retry. + // The state-transitions are one-way, so there's no chance of + // live-lock -- "Inflated" is an absorbing state. + } + + // Once the ObjectMonitor is configured and object is associated + // with the ObjectMonitor, it is safe to allow async deflation: + ObjectSynchronizer::_in_use_list.add(m); + + log_inflate(current, object, cause); + if (event.should_commit()) { + post_monitor_inflate_event(&event, object, cause); + } + return m; + } +} + +ObjectMonitor* ObjectSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) { + VerifyThreadState vts(locking_thread, current); + assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack"); + + ObjectMonitor* monitor; + + if (!UseObjectMonitorTable) { + return inflate_into_object_header(object, cause, locking_thread, current); + } + + // Inflating requires a hash code + ObjectSynchronizer::FastHashCode(current, object); + + markWord mark = object->mark_acquire(); + assert(!mark.is_unlocked(), "Cannot be unlocked"); + + for (;;) { + // Fetch the monitor from the table + monitor = get_or_insert_monitor(object, current, cause); + + // ObjectMonitors are always inserted as anonymously owned, this thread is + // the current holder of the monitor. So unless the entry is stale and + // contains a deflating monitor it must be anonymously owned. + if (monitor->has_anonymous_owner()) { + // The monitor must be anonymously owned if it was added + assert(monitor == get_monitor_from_table(current, object), "The monitor must be found"); + // New fresh monitor + break; + } + + // If the monitor was not anonymously owned then we got a deflating monitor + // from the table. 
We need to let the deflator make progress and remove this + // entry before we are allowed to add a new one. + os::naked_yield(); + assert(monitor->is_being_async_deflated(), "Should be the reason"); + } + + // Set the mark word; loop to handle concurrent updates to other parts of the mark word + while (mark.is_fast_locked()) { + mark = object->cas_set_mark(mark.set_has_monitor(), mark); + } + + // Indicate that the monitor now has a known owner + monitor->set_owner_from_anonymous(locking_thread); + + // Remove the entry from the thread's lock stack + monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1); + + if (locking_thread == current) { + // Only change the thread local state of the current thread. + locking_thread->om_set_monitor_cache(monitor); + } + + return monitor; +} + +ObjectMonitor* ObjectSynchronizer::inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) { + VerifyThreadState vts(locking_thread, current); + + // Note: In some paths (deoptimization) the 'current' thread inflates and + // enters the lock on behalf of the 'locking_thread' thread. + + ObjectMonitor* monitor = nullptr; + + if (!UseObjectMonitorTable) { + // Do the old inflate and enter. + monitor = inflate_into_object_header(object, cause, locking_thread, current); + + bool entered; + if (locking_thread == current) { + entered = monitor->enter(locking_thread); + } else { + entered = monitor->enter_for(locking_thread); + } + + // enter returns false for deflation found. + return entered ? monitor : nullptr; + } + + NoSafepointVerifier nsv; + + // Try to get the monitor from the thread-local cache. + // There's no need to use the cache if we are locking + // on behalf of another thread. + if (current == locking_thread) { + monitor = read_caches(current, lock, object); + } + + // Get or create the monitor + if (monitor == nullptr) { + // Lightweight monitors require that hash codes are installed first + ObjectSynchronizer::FastHashCode(locking_thread, object); + monitor = get_or_insert_monitor(object, current, cause); + } + + if (monitor->try_enter(locking_thread)) { + return monitor; + } + + // Holds is_being_async_deflated() stable throughout this function. + ObjectMonitorContentionMark contention_mark(monitor); + + /// First handle the case where the monitor from the table is deflated + if (monitor->is_being_async_deflated()) { + // The MonitorDeflation thread is deflating the monitor. The locking thread + // must spin until further progress has been made. + + // Clear the BasicLock cache as it may contain this monitor. + lock->clear_object_monitor_cache(); + + const markWord mark = object->mark_acquire(); + + if (mark.has_monitor()) { + // Waiting on the deflation thread to remove the deflated monitor from the table. + os::naked_yield(); + + } else if (mark.is_fast_locked()) { + // Some other thread managed to fast-lock the lock, or this is a + // recursive lock from the same thread; yield for the deflation + // thread to remove the deflated monitor from the table. 
+  // First handle the case where the monitor from the table is deflated
+  if (monitor->is_being_async_deflated()) {
+    // The MonitorDeflation thread is deflating the monitor. The locking thread
+    // must spin until further progress has been made.
+
+    // Clear the BasicLock cache as it may contain this monitor.
+    lock->clear_object_monitor_cache();
+
+    const markWord mark = object->mark_acquire();
+
+    if (mark.has_monitor()) {
+      // Waiting on the deflation thread to remove the deflated monitor from the table.
+      os::naked_yield();
+
+    } else if (mark.is_fast_locked()) {
+      // Some other thread managed to fast-lock the lock, or this is a
+      // recursive lock from the same thread; yield for the deflation
+      // thread to remove the deflated monitor from the table.
+      os::naked_yield();
+
+    } else {
+      assert(mark.is_unlocked(), "Implied");
+      // Retry immediately
+    }
+
+    // Retry
+    return nullptr;
+  }
+
+  for (;;) {
+    const markWord mark = object->mark_acquire();
+    // The mark can be in one of the following states:
+    // *  inflated    - If the ObjectMonitor owner is anonymous
+    //                  and the locking_thread owns the object
+    //                  lock, then we make the locking_thread
+    //                  the ObjectMonitor owner and remove the
+    //                  lock from the locking_thread's lock stack.
+    // *  fast-locked - Coerce it to inflated from fast-locked.
+    // *  neutral     - Inflate the object. A successful CAS means
+    //                  that locking_thread owns the lock.
+
+    // CASE: inflated
+    if (mark.has_monitor()) {
+      LockStack& lock_stack = locking_thread->lock_stack();
+      if (monitor->has_anonymous_owner() && lock_stack.contains(object)) {
+        // The lock is fast-locked by the locking thread,
+        // convert it to a held monitor with a known owner.
+        monitor->set_owner_from_anonymous(locking_thread);
+        monitor->set_recursions(lock_stack.remove(object) - 1);
+      }
+
+      break; // Success
+    }
+
+    // CASE: fast-locked
+    // Could be fast-locked either by locking_thread or by some other thread.
+    //
+    if (mark.is_fast_locked()) {
+      markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
+      if (old_mark != mark) {
+        // CAS failed
+        continue;
+      }
+
+      // Success! Return inflated monitor.
+      LockStack& lock_stack = locking_thread->lock_stack();
+      if (lock_stack.contains(object)) {
+        // The lock is fast-locked by the locking thread,
+        // convert it to a held monitor with a known owner.
+        monitor->set_owner_from_anonymous(locking_thread);
+        monitor->set_recursions(lock_stack.remove(object) - 1);
+      }
+
+      break; // Success
+    }
+
+    // CASE: neutral (unlocked)
+
+    // Catch if the object's header is not neutral (not locked and
+    // not marked is what we care about here).
+    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
+    markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
+    if (old_mark != mark) {
+      // CAS failed
+      continue;
+    }
+
+    // Transitioned from unlocked to monitor means locking_thread owns the lock.
+    monitor->set_owner_from_anonymous(locking_thread);
+
+    return monitor;
+  }
+
+  if (current == locking_thread) {
+    // One round of spinning
+    if (monitor->spin_enter(locking_thread)) {
+      return monitor;
+    }
+
+    // Monitor is contended, take the time before entering to fix the lock stack.
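+    // Inflating the contended locks on this thread's lock stack frees up
+    // lock-stack slots while we are blocked entering the monitor.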
+    LockStackInflateContendedLocks().inflate(current);
+  }
+
+  // enter can block for safepoints; clear the unhandled object oop
+  PauseNoSafepointVerifier pnsv(&nsv);
+  object = nullptr;
+
+  if (current == locking_thread) {
+    monitor->enter_with_contention_mark(locking_thread, contention_mark);
+  } else {
+    monitor->enter_for_with_contention_mark(locking_thread, contention_mark);
+  }
+
+  return monitor;
+}
+
+void ObjectSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
+  if (obj != nullptr) {
+    deflate_mark_word(obj);
+  }
+  bool removed = remove_monitor(current, monitor, obj);
+  if (obj != nullptr) {
+    assert(removed, "Should have removed the entry if obj was alive");
+  }
+}
+
+ObjectMonitor* ObjectSynchronizer::get_monitor_from_table(Thread* current, oop obj) {
+  assert(UseObjectMonitorTable, "must be");
+  return ObjectMonitorTable::monitor_get(current, obj);
+}
+
+bool ObjectSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) {
+  assert(UseObjectMonitorTable, "must be");
+  return ObjectMonitorTable::contains_monitor(current, monitor);
+}
+
+ObjectMonitor* ObjectSynchronizer::read_monitor(markWord mark) {
+  return mark.monitor();
+}
+
+ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj) {
+  return ObjectSynchronizer::read_monitor(current, obj, obj->mark());
+}
+
+ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj, markWord mark) {
+  if (!UseObjectMonitorTable) {
+    return read_monitor(mark);
+  } else {
+    return ObjectSynchronizer::get_monitor_from_table(current, obj);
+  }
+}
+
+bool ObjectSynchronizer::quick_enter_internal(oop obj, BasicLock* lock, JavaThread* current) {
+  assert(current->thread_state() == _thread_in_Java, "must be");
+  assert(obj != nullptr, "must be");
+  NoSafepointVerifier nsv;
+
+  LockStack& lock_stack = current->lock_stack();
+  if (lock_stack.is_full()) {
+    // Always go into runtime if the lock stack is full.
+    return false;
+  }
+
+  const markWord mark = obj->mark();
+
+#ifndef _LP64
+  // Only for 32-bit, which has limited support for fast locking outside the runtime.
+  if (lock_stack.try_recursive_enter(obj)) {
+    // Recursive lock successful.
+    return true;
+  }
+
+  if (mark.is_unlocked()) {
+    markWord locked_mark = mark.set_fast_locked();
+    if (obj->cas_set_mark(locked_mark, mark) == mark) {
+      // Successfully fast-locked, push object to lock-stack and return.
+      lock_stack.push(obj);
+      return true;
+    }
+  }
+#endif
+
+  if (mark.has_monitor()) {
+    ObjectMonitor* monitor;
+    if (UseObjectMonitorTable) {
+      monitor = read_caches(current, lock, obj);
+    } else {
+      monitor = ObjectSynchronizer::read_monitor(mark);
+    }
+
+    if (monitor == nullptr) {
+      // Take the slow-path on a cache miss.
+      return false;
+    }
+
+    if (UseObjectMonitorTable) {
+      // Set the monitor regardless of success.
+      // Either we successfully lock on the monitor, or we retry with the
+      // monitor in the slow path. If the monitor gets deflated, it will be
+      // cleared, either by the CacheSetter if we fast lock in enter or in
+      // inflate_and_enter when we see that the monitor is deflated.
+      lock->set_object_monitor_cache(monitor);
+    }
+
+    if (monitor->spin_enter(current)) {
+      return true;
+    }
+  }
+
+  // Slow-path.
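+  // Reached on a failed spin or an unhandled header state; the caller
+  // retries through the full runtime enter path.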
+  return false;
+}
+
+bool ObjectSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
+  assert(current->thread_state() == _thread_in_Java, "invariant");
+  NoSafepointVerifier nsv;
+  if (obj == nullptr) return false; // Need to throw NPE
+
+  if (obj->klass()->is_value_based()) {
+    return false;
+  }
+
+  return ObjectSynchronizer::quick_enter_internal(obj, lock, current);
+}
diff --git a/src/hotspot/share/runtime/synchronizer.hpp b/src/hotspot/share/runtime/synchronizer.hpp
index 2337569176e..a10e44b3092 100644
--- a/src/hotspot/share/runtime/synchronizer.hpp
+++ b/src/hotspot/share/runtime/synchronizer.hpp
@@ -94,8 +94,8 @@ public:
   // deoptimization at monitor exit. Hence, it does not take a Handle argument.
 
   // This is the "slow path" version of monitor enter and exit.
-  static inline void enter(Handle obj, BasicLock* lock, JavaThread* current);
-  static inline void exit(oop obj, BasicLock* lock, JavaThread* current);
+  static void enter(Handle obj, BasicLock* lock, JavaThread* current);
+  static void exit(oop obj, BasicLock* lock, JavaThread* current);
 
   // Used to enter a monitor for another thread. This requires that the
   // locking_thread is suspended, and that entering on a potential
@@ -115,7 +115,7 @@ public:
   static void notifyall(Handle obj, TRAPS);
 
   static bool quick_notify(oopDesc* obj, JavaThread* current, bool All);
-  static inline bool quick_enter(oop obj, BasicLock* Lock, JavaThread* current);
+  static bool quick_enter(oop obj, BasicLock* Lock, JavaThread* current);
 
   // Special internal-use-only method for use by JVM infrastructure
   // that needs to wait() on a java-level object but that can't risk
@@ -125,9 +125,9 @@ public:
   static const char* inflate_cause_name(const InflateCause cause);
 
-  inline static ObjectMonitor* read_monitor(markWord mark);
-  inline static ObjectMonitor* read_monitor(Thread* current, oop obj);
-  inline static ObjectMonitor* read_monitor(Thread* current, oop obj, markWord mark);
+  static ObjectMonitor* read_monitor(markWord mark);
+  static ObjectMonitor* read_monitor(Thread* current, oop obj);
+  static ObjectMonitor* read_monitor(Thread* current, oop obj, markWord mark);
 
   // Returns the identity hash value for an oop
   // NOTE: It may cause monitor inflation
@@ -195,7 +195,6 @@ public:
 
 private:
   friend class SynchronizerTest;
-  friend class LightweightSynchronizer;
 
   static MonitorList _in_use_list;
   static volatile bool _is_async_deflation_requested;
@@ -209,6 +208,44 @@ public:
   static u_char* get_gvars_stw_random_addr();
 
   static void handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread);
+
+  static ObjectMonitor* get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted);
+  static ObjectMonitor* get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause);
+
+  static ObjectMonitor* add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj);
+  static bool remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj);
+
+  static void deflate_mark_word(oop object);
+
+  static void ensure_lock_stack_space(JavaThread* current);
+
+  class CacheSetter;
+  class LockStackInflateContendedLocks;
+  class VerifyThreadState;
+
+  static void create_om_table();
+
+ public:
+  static bool needs_resize();
+  static bool resize_table(JavaThread* current);
+
+ private:
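+  // Fast-locking fast paths, used before falling back to monitor inflation.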
+  static inline bool fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current);
+  static bool fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation);
+
+ public:
+  static ObjectMonitor* inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current);
+  static ObjectMonitor* inflate_locked_or_imse(oop object, ObjectSynchronizer::InflateCause cause, TRAPS);
+  static ObjectMonitor* inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current);
+  static ObjectMonitor* inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current);
+
+  static void deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor);
+
+  static ObjectMonitor* get_monitor_from_table(Thread* current, oop obj);
+
+  static bool contains_monitor(Thread* current, ObjectMonitor* monitor);
+
+  static bool quick_enter_internal(oop obj, BasicLock* Lock, JavaThread* current);
 };
 
 // ObjectLocker enforces balanced locking and can never throw an
diff --git a/src/hotspot/share/runtime/synchronizer.inline.hpp b/src/hotspot/share/runtime/synchronizer.inline.hpp
deleted file mode 100644
index 7bcbd91eda7..00000000000
--- a/src/hotspot/share/runtime/synchronizer.inline.hpp
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_RUNTIME_SYNCHRONIZER_INLINE_HPP
-#define SHARE_RUNTIME_SYNCHRONIZER_INLINE_HPP
-
-#include "runtime/synchronizer.hpp"
-
-#include "runtime/lightweightSynchronizer.hpp"
-#include "runtime/safepointVerifiers.hpp"
-
-inline ObjectMonitor* ObjectSynchronizer::read_monitor(markWord mark) {
-  return mark.monitor();
-}
-
-inline ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj) {
-  return ObjectSynchronizer::read_monitor(current, obj, obj->mark());
-}
-
-inline ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj, markWord mark) {
-  if (!UseObjectMonitorTable) {
-    return read_monitor(mark);
-  } else {
-    return LightweightSynchronizer::get_monitor_from_table(current, obj);
-  }
-}
-
-inline void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
-  assert(current == Thread::current(), "must be");
-
-  LightweightSynchronizer::enter(obj, lock, current);
-}
-
-inline bool ObjectSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
-  assert(current->thread_state() == _thread_in_Java, "invariant");
-  NoSafepointVerifier nsv;
-  if (obj == nullptr) return false; // Need to throw NPE
-
-  if (obj->klass()->is_value_based()) {
-    return false;
-  }
-
-  return LightweightSynchronizer::quick_enter(obj, lock, current);
-}
-
-inline void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
-  LightweightSynchronizer::exit(object, lock, current);
-}
-
-#endif // SHARE_RUNTIME_SYNCHRONIZER_INLINE_HPP
diff --git a/src/hotspot/share/runtime/vframe.cpp b/src/hotspot/share/runtime/vframe.cpp
index 74d42b7dc9d..604ff1f751a 100644
--- a/src/hotspot/share/runtime/vframe.cpp
+++ b/src/hotspot/share/runtime/vframe.cpp
@@ -48,7 +48,7 @@
 #include "runtime/signature.hpp"
 #include "runtime/stackFrameStream.inline.hpp"
 #include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.inline.hpp"
+#include "runtime/synchronizer.hpp"
 #include "runtime/vframe.inline.hpp"
 #include "runtime/vframe_hp.hpp"
 #include "runtime/vframeArray.hpp"
diff --git a/src/hotspot/share/services/threadService.cpp b/src/hotspot/share/services/threadService.cpp
index 547ca4e51d5..48f4eb16cf1 100644
--- a/src/hotspot/share/services/threadService.cpp
+++ b/src/hotspot/share/services/threadService.cpp
@@ -48,7 +48,7 @@
 #include "runtime/javaThread.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/objectMonitor.inline.hpp"
-#include "runtime/synchronizer.inline.hpp"
+#include "runtime/synchronizer.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threads.hpp"
 #include "runtime/threadSMR.inline.hpp"
diff --git a/test/hotspot/gtest/runtime/test_lockStack.cpp b/test/hotspot/gtest/runtime/test_lockStack.cpp
index 6755541adb0..c61b6db6023 100644
--- a/test/hotspot/gtest/runtime/test_lockStack.cpp
+++ b/test/hotspot/gtest/runtime/test_lockStack.cpp
@@ -63,7 +63,7 @@ public:
   } while (false)
 
 TEST_VM_F(LockStackTest, is_recursive) {
-  if (!VM_Version::supports_recursive_lightweight_locking()) {
+  if (!VM_Version::supports_recursive_fast_locking()) {
     return;
   }
@@ -130,7 +130,7 @@ TEST_VM_F(LockStackTest, is_recursive) {
 }
 
 TEST_VM_F(LockStackTest, try_recursive_enter) {
-  if (!VM_Version::supports_recursive_lightweight_locking()) {
+  if (!VM_Version::supports_recursive_fast_locking()) {
     return;
   }
@@ -197,7 +197,7 @@ TEST_VM_F(LockStackTest, try_recursive_enter) {
 }
 
 TEST_VM_F(LockStackTest, contains) {
-  const bool test_recursive = VM_Version::supports_recursive_lightweight_locking();
+  const bool test_recursive = VM_Version::supports_recursive_fast_locking();
 
   JavaThread* THREAD = JavaThread::current();
   // the thread should be in vm to use locks
@@ -259,7 +259,7 @@ TEST_VM_F(LockStackTest, contains) {
 }
 
 TEST_VM_F(LockStackTest, remove) {
-  const bool test_recursive = VM_Version::supports_recursive_lightweight_locking();
+  const bool test_recursive = VM_Version::supports_recursive_fast_locking();
 
   JavaThread* THREAD = JavaThread::current();
   // the thread should be in vm to use locks
diff --git a/test/hotspot/jtreg/runtime/Monitor/TestRecursiveLocking.java b/test/hotspot/jtreg/runtime/Monitor/TestRecursiveLocking.java
index bea7a41ae2b..9782824ef0f 100644
--- a/test/hotspot/jtreg/runtime/Monitor/TestRecursiveLocking.java
+++ b/test/hotspot/jtreg/runtime/Monitor/TestRecursiveLocking.java
@@ -202,7 +202,7 @@ public class TestRecursiveLocking {
                 assertNotInflated();
             } else {
                 // Second time we want to lock A, the lock stack
-                // looks like this [A, B]. Lightweight locking
+                // looks like this [A, B]. Fast locking
                 // doesn't allow interleaving ([A, B, A]), instead
                 // it inflates A and removes it from the lock
                 // stack. Which leaves us with only [B] on the
@@ -220,11 +220,10 @@ public class TestRecursiveLocking {
 
             counter++;
 
-            // Legacy tolerates endless recursions. While testing
-            // lightweight we don't go deeper than the size of the
-            // lock stack, which in this test case will be filled
-            // with a number of B-elements. See comment in runA()
-            // above for more info.
+            // Legacy tolerates endless recursions. While testing we
+            // don't go deeper than the size of the lock stack, which
+            // in this test case will be filled with a number of
+            // B-elements. See comment in runA() above for more info.
             assertNotInflated();
 
             if (depth == 1) {
diff --git a/test/hotspot/jtreg/runtime/lockStack/TestLockStackCapacity.java b/test/hotspot/jtreg/runtime/lockStack/TestLockStackCapacity.java
index f3f1f9c91a6..37568fd3434 100644
--- a/test/hotspot/jtreg/runtime/lockStack/TestLockStackCapacity.java
+++ b/test/hotspot/jtreg/runtime/lockStack/TestLockStackCapacity.java
@@ -24,7 +24,7 @@
 
 /*
  * @test TestLockStackCapacity
- * @summary Tests the interaction between recursive lightweight locking and
+ * @summary Tests the interaction between recursive fast locking and
  *          when the lock stack capacity is exceeded.
 * @requires vm.flagless
 * @library /testlibrary /test/lib
@@ -93,8 +93,8 @@ public class TestLockStackCapacity {
     }
 
     public static void main(String... args) throws Exception {
-        if (!WB.supportsRecursiveLightweightLocking()) {
-            throw new SkippedException("Test only valid if lightweight locking supports recursion");
+        if (!WB.supportsRecursiveFastLocking()) {
+            throw new SkippedException("Test only valid if fast locking supports recursion");
         }
 
         SynchronizedObject.runTest();
diff --git a/test/jdk/com/sun/jdi/EATests.java b/test/jdk/com/sun/jdi/EATests.java
index 321855b4969..cb51e91021b 100644
--- a/test/jdk/com/sun/jdi/EATests.java
+++ b/test/jdk/com/sun/jdi/EATests.java
@@ -97,7 +97,7 @@
 *                 -Xlog:monitorinflation=trace:file=monitorinflation.log
 *
 * @bug 8341819
- * @comment Regression test for re-locking racing with deflation with lightweight locking.
+ * @comment Regression test for re-locking racing with deflation with fast locking.
 * @run driver EATests
 *                 -XX:+UnlockDiagnosticVMOptions
 *                 -Xms256m -Xmx256m
@@ -237,7 +237,7 @@ class EATestsTarget {
 
         // Relocking test cases
         new EARelockingSimpleTarget()                                           .run();
-        new EARelockingWithManyLightweightLocksTarget()                         .run();
+        new EARelockingWithManyFastLocksTarget()                                .run();
         new EARelockingSimpleWithAccessInOtherThreadTarget()                    .run();
         new EARelockingSimpleWithAccessInOtherThread_02_DynamicCall_Target()    .run();
         new EARelockingRecursiveTarget()                                        .run();
@@ -363,7 +363,7 @@ public class EATests extends TestScaffold {
 
         // Relocking test cases
         new EARelockingSimple()                                                 .run(this);
-        new EARelockingWithManyLightweightLocks()                               .run(this);
+        new EARelockingWithManyFastLocks()                                      .run(this);
         new EARelockingSimpleWithAccessInOtherThread()                          .run(this);
         new EARelockingSimpleWithAccessInOtherThread_02_DynamicCall()           .run(this);
         new EARelockingRecursive()                                              .run(this);
@@ -1750,12 +1750,11 @@ class EARelockingSimpleTarget extends EATestCaseBaseTarget {
 
 /**
  * Like {@link EARelockingSimple}. The difference is that there are many
- * lightweight locked objects when the relocking is done. With
- * lightweight the lock stack of the thread will be full because of
- * this.
+ * fast-locked objects when the relocking is done, which means that the
+ * lock stack of the thread will be full because of this.
 */
-class EARelockingWithManyLightweightLocks extends EATestCaseBaseDebugger {
+class EARelockingWithManyFastLocks extends EATestCaseBaseDebugger {
 
     public void runTestCase() throws Exception {
         BreakpointEvent bpe = resumeTo(TARGET_TESTCASE_BASE_NAME, "dontinline_brkpt", "()V");
@@ -1765,7 +1764,7 @@ class EARelockingWithManyLightweightLocks extends EATestCaseBaseDebugger {
     }
 }
 
-class EARelockingWithManyLightweightLocksTarget extends EATestCaseBaseTarget {
+class EARelockingWithManyFastLocksTarget extends EATestCaseBaseTarget {
 
     static class Lock {
     }
@@ -2260,7 +2259,7 @@ class EARelockingArgEscapeLWLockedInCalleeFrame_2Target extends EATestCaseBaseTa
 
 /**
  * Similar to {@link EARelockingArgEscapeLWLockedInCalleeFrame_2Target}. It does
- * not use recursive locking and exposed a bug in the lightweight-locking implementation.
+ * not use recursive locking and exposed a bug in the fast-locking implementation.
 */
 class EARelockingArgEscapeLWLockedInCalleeFrameNoRecursive extends EATestCaseBaseDebugger {
 
diff --git a/test/lib/jdk/test/whitebox/WhiteBox.java b/test/lib/jdk/test/whitebox/WhiteBox.java
index e989b0aca88..558feeec78f 100644
--- a/test/lib/jdk/test/whitebox/WhiteBox.java
+++ b/test/lib/jdk/test/whitebox/WhiteBox.java
@@ -124,7 +124,7 @@ public class WhiteBox {
 
   public native int getLockStackCapacity();
 
-  public native boolean supportsRecursiveLightweightLocking();
+  public native boolean supportsRecursiveFastLocking();
 
   public native void forceSafepoint();