8365190: Remove LockingMode related code from share

Reviewed-by: aboldtch, dholmes, ayang, coleenp, lmesnik, rcastanedalo
Fredrik Bredberg 2025-09-08 10:28:18 +00:00
parent 5e423e034f
commit a272696813
50 changed files with 141 additions and 1270 deletions

@@ -331,26 +331,9 @@ int ZeroInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
monitor = nullptr;
if (method->is_synchronized()) {
monitor = (BasicObjectLock*) istate->stack_base();
oop lockee = monitor->obj();
bool success = false;
if (LockingMode == LM_LEGACY) {
markWord disp = lockee->mark().set_unlocked();
monitor->lock()->set_displaced_header(disp);
success = true;
if (lockee->cas_set_mark(markWord::from_pointer(monitor), disp) != disp) {
// Is it simple recursive case?
if (thread->is_lock_owned((address) disp.clear_lock_bits().to_pointer())) {
monitor->lock()->set_displaced_header(markWord::from_pointer(nullptr));
} else {
success = false;
}
}
}
if (!success) {
CALL_VM_NOCHECK(InterpreterRuntime::monitorenter(thread, monitor));
if (HAS_PENDING_EXCEPTION)
goto unwind_and_return;
}
CALL_VM_NOCHECK(InterpreterRuntime::monitorenter(thread, monitor));
if (HAS_PENDING_EXCEPTION)
goto unwind_and_return;
}
// Get the signature handler
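
With the LockingMode branches gone, a synchronized native entry always calls into InterpreterRuntime::monitorenter; the fast path now lives in the runtime's lightweight locking. For orientation, here is a minimal standalone model of that scheme's core idea, a per-thread lock stack plus a CAS on the mark word's low bits. All names (ToyObject, fast_lock, ...) are illustrative, not HotSpot code, and real unlocking also uses a CAS and falls back to an inflated monitor on contention:

    // Toy model of lightweight locking: illustrative only, not HotSpot code.
    // Low mark bits: 0b01 = unlocked, 0b00 = fast-locked (owner recorded on a
    // per-thread lock stack). Contended cases would inflate an ObjectMonitor.
    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct ToyObject {
      std::atomic<uintptr_t> mark{0b01};   // starts unlocked
    };

    thread_local std::vector<ToyObject*> lock_stack;   // models LockStack

    bool fast_lock(ToyObject* obj) {
      uintptr_t unlocked = 0b01;
      if (obj->mark.compare_exchange_strong(unlocked, 0b00)) {
        lock_stack.push_back(obj);         // ownership = presence on lock stack
        return true;
      }
      return false;                        // slow path: runtime inflates a monitor
    }

    void fast_unlock(ToyObject* obj) {
      assert(!lock_stack.empty() && lock_stack.back() == obj);
      lock_stack.pop_back();
      obj->mark.store(0b01);               // the real code CASes back to unlocked
    }

    int main() {
      ToyObject o;
      assert(fast_lock(&o));
      fast_unlock(&o);
      return 0;
    }
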
@@ -481,24 +464,7 @@ int ZeroInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
// Unlock if necessary
if (monitor) {
bool success = false;
if (LockingMode == LM_LEGACY) {
BasicLock* lock = monitor->lock();
oop rcvr = monitor->obj();
monitor->set_obj(nullptr);
success = true;
markWord header = lock->displaced_header();
if (header.to_pointer() != nullptr) { // Check for recursive lock
markWord old_header = markWord::encode(lock);
if (rcvr->cas_set_mark(header, old_header) != old_header) {
monitor->set_obj(rcvr);
success = false;
}
}
}
if (!success) {
InterpreterRuntime::monitorexit(monitor);
}
InterpreterRuntime::monitorexit(monitor);
}
unwind_and_return:

@@ -635,7 +635,7 @@ void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, L
// setup registers
LIR_Opr hdr = lock;
lock = new_hdr;
CodeStub* slow_path = new MonitorExitStub(lock, LockingMode != LM_MONITOR, monitor_no);
CodeStub* slow_path = new MonitorExitStub(lock, true, monitor_no);
__ load_stack_address_monitor(monitor_no, lock);
__ unlock_object(hdr, object, lock, scratch, slow_path);
}

@@ -778,9 +778,6 @@ JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj,
_monitorenter_slowcase_cnt++;
}
#endif
if (LockingMode == LM_MONITOR) {
lock->set_obj(obj);
}
assert(obj == lock->obj(), "must match");
SharedRuntime::monitor_enter_helper(obj, lock->lock(), current);
JRT_END

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@
inline void G1BarrierSet::enqueue_preloaded(oop pre_val) {
// Nulls should have been already filtered.
assert(oopDesc::is_oop(pre_val, true), "Error");
assert(oopDesc::is_oop(pre_val), "Error");
G1SATBMarkQueueSet& queue_set = G1BarrierSet::satb_mark_queue_set();
if (!queue_set.is_active()) return;

@@ -47,7 +47,7 @@ void G1BarrierSetRuntime::write_ref_array_post_entry(HeapWord* dst, size_t lengt
JRT_LEAF(void, G1BarrierSetRuntime::write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread))
assert(thread == JavaThread::current(), "pre-condition");
assert(orig != nullptr, "should be optimized out");
assert(oopDesc::is_oop(orig, true /* ignore mark word */), "Error");
assert(oopDesc::is_oop(orig), "Error");
// store the original value that was in the field reference
SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread);
G1BarrierSet::satb_mark_queue_set().enqueue_known_active(queue, orig);

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -350,7 +350,7 @@ inline HeapWord* G1HeapRegion::oops_on_memregion_iterate_in_unparsable(MemRegion
assert(bitmap->is_marked(cur), "inv");
oop obj = cast_to_oop(cur);
assert(oopDesc::is_oop(obj, true), "Not an oop at " PTR_FORMAT, p2i(cur));
assert(oopDesc::is_oop(obj), "Not an oop at " PTR_FORMAT, p2i(cur));
cur += obj->size();
bool is_precise;
@@ -418,7 +418,7 @@ inline HeapWord* G1HeapRegion::oops_on_memregion_iterate(MemRegion mr, Closure*
// All objects >= pb are parsable. So we can just take object sizes directly.
while (true) {
oop obj = cast_to_oop(cur);
assert(oopDesc::is_oop(obj, true), "Not an oop at " PTR_FORMAT, p2i(cur));
assert(oopDesc::is_oop(obj), "Not an oop at " PTR_FORMAT, p2i(cur));
bool is_precise = false;

@@ -89,7 +89,7 @@ static inline bool requires_marking(const void* entry, G1CollectedHeap* g1h) {
return false;
}
assert(oopDesc::is_oop(cast_to_oop(entry), true /* ignore mark word */),
assert(oopDesc::is_oop(cast_to_oop(entry)),
"Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry));
return true;

@@ -162,7 +162,7 @@ void CardTableBarrierSet::flush_deferred_card_mark_barrier(JavaThread* thread) {
DEBUG_ONLY(oop old_obj = cast_to_oop(deferred.start());)
assert(!_card_table->is_in_young(old_obj),
"Else should have been filtered in on_slowpath_allocation_exit()");
assert(oopDesc::is_oop(old_obj, true), "Not an oop");
assert(oopDesc::is_oop(old_obj), "Not an oop");
assert(deferred.word_size() == old_obj->size(),
"Mismatch: multiple objects?");
}

@@ -312,11 +312,6 @@ void ShenandoahHeap::increase_object_age(oop obj, uint additional_age) {
// For all these reasons, we take the conservative approach and not attempt
// to increase the age when the header is displaced.
markWord w = obj->mark();
// The mark-word has been copied from the original object. It can not be
// inflating, because inflation can not be interrupted by a safepoint,
// and after a safepoint, a Java thread would first have to successfully
// evacuate the object before it could inflate the monitor.
assert(!w.is_being_inflated() || LockingMode == LM_LIGHTWEIGHT, "must not inflate monitor before evacuation of object succeeds");
// It is possible that we have copied the object after another thread has
// already successfully completed evacuation. While harmless (we would never
// publish our copy), don't even attempt to modify the age when that
@@ -334,7 +329,6 @@ uint ShenandoahHeap::get_object_age(oop obj) {
markWord w = obj->mark();
assert(!w.is_marked(), "must not be forwarded");
if (UseObjectMonitorTable) {
assert(LockingMode == LM_LIGHTWEIGHT, "Must use LW locking, too");
assert(w.age() <= markWord::max_age, "Impossible!");
return w.age();
}

@@ -624,26 +624,7 @@ void BytecodeInterpreter::run(interpreterState istate) {
// The initial monitor is ours for the taking.
BasicObjectLock* mon = &istate->monitor_base()[-1];
mon->set_obj(rcvr);
bool success = false;
if (LockingMode == LM_LEGACY) {
// Traditional fast locking.
markWord displaced = rcvr->mark().set_unlocked();
mon->lock()->set_displaced_header(displaced);
success = true;
if (rcvr->cas_set_mark(markWord::from_pointer(mon), displaced) != displaced) {
// Is it simple recursive case?
if (THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
mon->lock()->set_displaced_header(markWord::from_pointer(nullptr));
} else {
success = false;
}
}
}
if (!success) {
CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
}
CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
}
THREAD->set_do_not_unlock_if_synchronized(false);
@@ -725,26 +706,7 @@ void BytecodeInterpreter::run(interpreterState istate) {
BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
assert(entry->obj() == nullptr, "Frame manager didn't allocate the monitor");
entry->set_obj(lockee);
bool success = false;
if (LockingMode == LM_LEGACY) {
// Traditional fast locking.
markWord displaced = lockee->mark().set_unlocked();
entry->lock()->set_displaced_header(displaced);
success = true;
if (lockee->cas_set_mark(markWord::from_pointer(entry), displaced) != displaced) {
// Is it simple recursive case?
if (THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
entry->lock()->set_displaced_header(markWord::from_pointer(nullptr));
} else {
success = false;
}
}
}
if (!success) {
CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
}
CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
UPDATE_PC_AND_TOS(1, -1);
goto run;
}
@@ -1657,26 +1619,7 @@ run:
}
if (entry != nullptr) {
entry->set_obj(lockee);
bool success = false;
if (LockingMode == LM_LEGACY) {
// Traditional fast locking.
markWord displaced = lockee->mark().set_unlocked();
entry->lock()->set_displaced_header(displaced);
success = true;
if (lockee->cas_set_mark(markWord::from_pointer(entry), displaced) != displaced) {
// Is it simple recursive case?
if (THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) {
entry->lock()->set_displaced_header(markWord::from_pointer(nullptr));
} else {
success = false;
}
}
}
if (!success) {
CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
}
CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
} else {
istate->set_msg(more_monitors);
@@ -1694,25 +1637,7 @@ run:
while (most_recent != limit ) {
if ((most_recent)->obj() == lockee) {
BasicLock* lock = most_recent->lock();
bool success = false;
if (LockingMode == LM_LEGACY) {
// If it isn't recursive we either must swap old header or call the runtime
most_recent->set_obj(nullptr);
success = true;
markWord header = lock->displaced_header();
if (header.to_pointer() != nullptr) {
markWord old_header = markWord::encode(lock);
if (lockee->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
most_recent->set_obj(lockee);
success = false;
}
}
}
if (!success) {
InterpreterRuntime::monitorexit(most_recent);
}
InterpreterRuntime::monitorexit(most_recent);
UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
}
most_recent++;
@@ -3137,27 +3062,7 @@ run:
while (end < base) {
oop lockee = end->obj();
if (lockee != nullptr) {
BasicLock* lock = end->lock();
bool success = false;
if (LockingMode == LM_LEGACY) {
markWord header = lock->displaced_header();
end->set_obj(nullptr);
// If it isn't recursive we either must swap old header or call the runtime
success = true;
if (header.to_pointer() != nullptr) {
markWord old_header = markWord::encode(lock);
if (lockee->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
end->set_obj(lockee);
success = false;
}
}
}
if (!success) {
InterpreterRuntime::monitorexit(end);
}
InterpreterRuntime::monitorexit(end);
// One error is plenty
if (illegal_state_oop() == nullptr && !suppress_error) {
@@ -3204,32 +3109,12 @@ run:
illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
THREAD->clear_pending_exception();
}
} else if (LockingMode != LM_LEGACY) {
} else {
InterpreterRuntime::monitorexit(base);
if (THREAD->has_pending_exception()) {
if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
THREAD->clear_pending_exception();
}
} else {
BasicLock* lock = base->lock();
markWord header = lock->displaced_header();
base->set_obj(nullptr);
// If it isn't recursive we either must swap old header or call the runtime
bool dec_monitor_count = true;
if (header.to_pointer() != nullptr) {
markWord old_header = markWord::encode(lock);
if (rcvr->cas_set_mark(header, old_header) != old_header) {
// restore object for the slow case
base->set_obj(rcvr);
dec_monitor_count = false;
InterpreterRuntime::monitorexit(base);
if (THREAD->has_pending_exception()) {
if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
THREAD->clear_pending_exception();
}
}
}
}
}
}

@@ -565,8 +565,8 @@ JRT_LEAF(void, JVMCIRuntime::log_object(JavaThread* thread, oopDesc* obj, bool a
if (obj == nullptr) {
tty->print("null");
} else if (oopDesc::is_oop_or_null(obj, true) && (!as_string || !java_lang_String::is_instance(obj))) {
if (oopDesc::is_oop_or_null(obj, true)) {
} else if (oopDesc::is_oop_or_null(obj) && (!as_string || !java_lang_String::is_instance(obj))) {
if (oopDesc::is_oop_or_null(obj)) {
char buf[O_BUFLEN];
tty->print("%s@" INTPTR_FORMAT, obj->klass()->name()->as_C_string(buf, O_BUFLEN), p2i(obj));
} else {

@@ -341,7 +341,6 @@
volatile_nonstatic_field(ObjectMonitor, _recursions, intptr_t) \
volatile_nonstatic_field(ObjectMonitor, _entry_list, ObjectWaiter*) \
volatile_nonstatic_field(ObjectMonitor, _succ, int64_t) \
volatile_nonstatic_field(ObjectMonitor, _stack_locker, BasicLock*) \
\
volatile_nonstatic_field(oopDesc, _mark, markWord) \
volatile_nonstatic_field(oopDesc, _metadata._klass, Klass*) \
@@ -780,10 +779,6 @@
declare_constant(InstanceKlass::being_initialized) \
declare_constant(InstanceKlass::fully_initialized) \
\
declare_constant(LockingMode::LM_MONITOR) \
declare_constant(LockingMode::LM_LEGACY) \
declare_constant(LockingMode::LM_LIGHTWEIGHT) \
\
/*********************************/ \
/* InstanceKlass _misc_flags */ \
/*********************************/ \

@@ -164,10 +164,6 @@ void InstanceStackChunkKlass::oop_oop_iterate_stack_slow(stackChunkOop chunk, Oo
template <typename OopT>
void InstanceStackChunkKlass::oop_oop_iterate_lockstack(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr) {
if (LockingMode != LM_LIGHTWEIGHT) {
return;
}
StackChunkOopIterateFilterClosure<OopIterateClosure> cl(closure, mr);
if (chunk->has_bitmap()) {
chunk->iterate_lockstack<OopT>(&cl);

@@ -36,35 +36,18 @@ STATIC_ASSERT(markWord::klass_shift == markWord::hash_bits + markWord::hash_shif
markWord markWord::displaced_mark_helper() const {
assert(has_displaced_mark_helper(), "check");
if (has_monitor()) {
// Has an inflated monitor. Must be checked before has_locker().
ObjectMonitor* monitor = this->monitor();
return monitor->header();
}
if (has_locker()) { // has a stack lock
BasicLock* locker = this->locker();
return locker->displaced_header();
}
// This should never happen:
fatal("bad header=" INTPTR_FORMAT, value());
return markWord(value());
// Make sure we have an inflated monitor.
guarantee(has_monitor(), "bad header=" INTPTR_FORMAT, value());
ObjectMonitor* monitor = this->monitor();
return monitor->header();
}
void markWord::set_displaced_mark_helper(markWord m) const {
assert(has_displaced_mark_helper(), "check");
if (has_monitor()) {
// Has an inflated monitor. Must be checked before has_locker().
ObjectMonitor* monitor = this->monitor();
monitor->set_header(m);
return;
}
if (has_locker()) { // has a stack lock
BasicLock* locker = this->locker();
locker->set_displaced_header(m);
return;
}
// This should never happen:
fatal("bad header=" INTPTR_FORMAT, value());
// Make sure we have an inflated monitor.
guarantee(has_monitor(), "bad header=" INTPTR_FORMAT, value());
ObjectMonitor* monitor = this->monitor();
monitor->set_header(m);
}
void markWord::print_on(outputStream* st, bool print_monitor_info) const {

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -198,17 +198,8 @@ class markWord {
markWord set_unlocked() const {
return markWord(value() | unlocked_value);
}
bool has_locker() const {
assert(LockingMode == LM_LEGACY, "should only be called with legacy stack locking");
return (value() & lock_mask_in_place) == locked_value;
}
BasicLock* locker() const {
assert(has_locker(), "check");
return (BasicLock*) value();
}
bool is_fast_locked() const {
assert(LockingMode == LM_LIGHTWEIGHT, "should only be called with new lightweight locking");
return (value() & lock_mask_in_place) == locked_value;
}
markWord set_fast_locked() const {
@@ -227,11 +218,7 @@
}
bool has_displaced_mark_helper() const {
intptr_t lockbits = value() & lock_mask_in_place;
if (LockingMode == LM_LIGHTWEIGHT) {
return !UseObjectMonitorTable && lockbits == monitor_value;
}
// monitor (0b10) | stack-locked (0b00)?
return (lockbits & unlocked_value) == 0;
return !UseObjectMonitorTable && lockbits == monitor_value;
}
markWord displaced_mark_helper() const;
void set_displaced_mark_helper(markWord m) const;
@@ -304,17 +291,14 @@ class markWord {
inline void* decode_pointer() const { return (void*)clear_lock_bits().value(); }
inline bool is_self_forwarded() const {
NOT_LP64(assert(LockingMode != LM_LEGACY, "incorrect with LM_LEGACY on 32 bit");)
return mask_bits(value(), self_fwd_mask_in_place) != 0;
}
inline markWord set_self_forwarded() const {
NOT_LP64(assert(LockingMode != LM_LEGACY, "incorrect with LM_LEGACY on 32 bit");)
return markWord(value() | self_fwd_mask_in_place);
}
inline markWord unset_self_forwarded() const {
NOT_LP64(assert(LockingMode != LM_LEGACY, "incorrect with LM_LEGACY on 32 bit");)
return markWord(value() & ~self_fwd_mask_in_place);
}
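
With has_locker() and locker() removed, the low two mark-word bits are unambiguous: 0b00 now always means fast-locked rather than possibly a legacy stack lock. Below is a small self-contained model of the remaining states; the bit values mirror HotSpot's markWord constants, everything else is simplified and illustrative:

    // Minimal model of the remaining mark-word lock states.
    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t lock_mask      = 0b11;
    constexpr uintptr_t locked_value   = 0b00;  // fast-locked (formerly also legacy stack-lock)
    constexpr uintptr_t unlocked_value = 0b01;
    constexpr uintptr_t monitor_value  = 0b10;  // inflated ObjectMonitor
    constexpr uintptr_t marked_value   = 0b11;  // GC forwarding

    bool is_unlocked(uintptr_t m)    { return (m & lock_mask) == unlocked_value; }
    bool is_fast_locked(uintptr_t m) { return (m & lock_mask) == locked_value; }
    bool has_monitor(uintptr_t m)    { return (m & lock_mask) == monitor_value; }

    int main() {
      uintptr_t m = unlocked_value;
      assert(is_unlocked(m));
      assert(is_fast_locked(m & ~lock_mask));               // clearing the bits = fast-locked
      assert(has_monitor((m & ~lock_mask) | monitor_value));
      assert(!is_unlocked(marked_value));                   // 0b11 is reserved for GC
      return 0;
    }
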

@@ -110,27 +110,13 @@ intptr_t oopDesc::slow_identity_hash() {
}
// used only for asserts and guarantees
bool oopDesc::is_oop(oop obj, bool ignore_mark_word) {
if (!Universe::heap()->is_oop(obj)) {
return false;
}
// Header verification: the mark is typically non-zero. If we're
// at a safepoint, it must not be zero, except when using the new lightweight locking.
// Outside of a safepoint, the header could be changing (for example,
// another thread could be inflating a lock on this object).
if (ignore_mark_word) {
return true;
}
if (obj->mark().value() != 0) {
return true;
}
return LockingMode == LM_LIGHTWEIGHT || !SafepointSynchronize::is_at_safepoint();
bool oopDesc::is_oop(oop obj) {
return Universe::heap()->is_oop(obj);
}
// used only for asserts and guarantees
bool oopDesc::is_oop_or_null(oop obj, bool ignore_mark_word) {
return obj == nullptr ? true : is_oop(obj, ignore_mark_word);
bool oopDesc::is_oop_or_null(oop obj) {
return obj == nullptr ? true : is_oop(obj);
}
VerifyOopClosure VerifyOopClosure::verify_oop;

@@ -261,8 +261,8 @@ class oopDesc {
inline bool is_unlocked() const;
// asserts and guarantees
static bool is_oop(oop obj, bool ignore_mark_word = false);
static bool is_oop_or_null(oop obj, bool ignore_mark_word = false);
static bool is_oop(oop obj);
static bool is_oop_or_null(oop obj);
// garbage collection
inline bool is_gc_marked() const;

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -195,7 +195,6 @@ void stackChunkOopDesc::do_barriers(const StackChunkFrameStream<frame_kind>& f,
template <typename OopT, class StackChunkLockStackClosureType>
inline void stackChunkOopDesc::iterate_lockstack(StackChunkLockStackClosureType* closure) {
assert(LockingMode == LM_LIGHTWEIGHT, "");
int cnt = lockstack_size();
intptr_t* lockstart_addr = start_address();
for (int i = 0; i < cnt; i++) {

@@ -4788,19 +4788,11 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
// Test the header to see if it is safe to read w.r.t. locking.
Node *lock_mask = _gvn.MakeConX(markWord::lock_mask_in_place);
Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
if (LockingMode == LM_LIGHTWEIGHT) {
Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
generate_slow_guard(test_monitor, slow_region);
} else {
Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
Node *chk_unlocked = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
generate_slow_guard(test_not_unlocked, slow_region);
}
generate_slow_guard(test_monitor, slow_region);
}
// Get the hash value and check to see that it has been properly assigned.
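
Only an inflated monitor can displace the header (and with it the hash), so the single monitor guard above suffices; the hash itself is then a shift-and-mask of the header, as the code following this hunk does. Here is a toy model of the extraction, with illustrative shift and mask values rather than HotSpot's exact layout (which varies with build configuration):

    // Toy model of reading an identity hash out of a mark word.
    // The shift/mask values are illustrative, not HotSpot's exact layout.
    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t hash_shift = 8;
    constexpr uintptr_t hash_mask  = 0x7FFFFFFFu;

    uintptr_t get_hash(uintptr_t mark) {
      return (mark >> hash_shift) & hash_mask;   // shift-and-mask of the header
    }

    int main() {
      uintptr_t mark = (0x1234u << hash_shift) | 0b01;  // unlocked, hash installed
      assert(get_hash(mark) == 0x1234);
      return 0;
    }
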

@@ -1670,6 +1670,7 @@ bool PhaseIterGVN::verify_Ideal_for(Node* n, bool can_reshape) {
// Found (linux x64 only?) with:
// serviceability/sa/ClhsdbThreadContext.java
// -XX:+UnlockExperimentalVMOptions -XX:LockingMode=1 -XX:+IgnoreUnrecognizedVMOptions -XX:VerifyIterativeGVN=1110
// Note: The -XX:LockingMode option is not available anymore.
case Op_StrEquals:
return false;

@@ -1367,11 +1367,6 @@ JvmtiEnv::GetOwnedMonitorInfo(jthread thread, jint* owned_monitor_count_ptr, job
return err;
}
if (LockingMode == LM_LEGACY && java_thread == nullptr) {
*owned_monitor_count_ptr = 0;
return JVMTI_ERROR_NONE;
}
// growable array of jvmti monitors info on the C-heap
GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list =
new (mtServiceability) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, mtServiceability);
@@ -1427,11 +1422,6 @@ JvmtiEnv::GetOwnedMonitorStackDepthInfo(jthread thread, jint* monitor_info_count
return err;
}
if (LockingMode == LM_LEGACY && java_thread == nullptr) {
*monitor_info_count_ptr = 0;
return JVMTI_ERROR_NONE;
}
// growable array of jvmti monitors info on the C-heap
GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list =
new (mtServiceability) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, mtServiceability);

@@ -1854,24 +1854,6 @@ bool Arguments::check_vm_args_consistency() {
}
#endif
if (UseObjectMonitorTable && LockingMode != LM_LIGHTWEIGHT) {
// ObjectMonitorTable requires lightweight locking.
FLAG_SET_CMDLINE(UseObjectMonitorTable, false);
warning("UseObjectMonitorTable requires LM_LIGHTWEIGHT");
}
#if !defined(X86) && !defined(AARCH64) && !defined(PPC64) && !defined(RISCV64) && !defined(S390)
if (LockingMode == LM_MONITOR) {
jio_fprintf(defaultStream::error_stream(),
"LockingMode == 0 (LM_MONITOR) is not fully implemented on this architecture\n");
return false;
}
#endif
if (VerifyHeavyMonitors && LockingMode != LM_MONITOR) {
jio_fprintf(defaultStream::error_stream(),
"-XX:+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)\n");
return false;
}
return status;
}

@@ -34,13 +34,6 @@ void BasicLock::print_on(outputStream* st, oop owner) const {
if (mon != nullptr) {
mon->print_on(st);
}
} else if (LockingMode == LM_LEGACY) {
markWord mark_word = displaced_header();
if (mark_word.value() != 0) {
// Print monitor info if there's an owning oop and it refers to this BasicLock.
bool print_monitor_info = (owner != nullptr) && (owner->mark() == markWord::from_pointer((void*)this));
mark_word.print_on(st, print_monitor_info);
}
}
}
@@ -73,23 +66,7 @@ void BasicLock::move_to(oop obj, BasicLock* dest) {
// small (given the support for inflated fast-path locking in the fast_lock, etc)
// we'll leave that optimization for another time.
if (LockingMode == LM_LEGACY) {
if (displaced_header().is_neutral()) {
// The object is locked and the resulting ObjectMonitor* will also be
// locked so it can't be async deflated until ownership is dropped.
ObjectSynchronizer::inflate_helper(obj);
// WARNING: We cannot put a check here, because the inflation
// will not update the displaced header. Once BasicLock is inflated,
// no one should ever look at its content.
} else {
// Typically the displaced header will be 0 (recursive stack lock) or
// unused_mark. Naively we'd like to assert that the displaced mark
// value is either 0, neutral, or 3. But with the advent of the
// store-before-CAS avoidance in fast_lock/compiler_lock_object
// we can find any flavor mark in the displaced mark.
}
dest->set_displaced_header(displaced_header());
} else if (UseObjectMonitorTable) {
if (UseObjectMonitorTable) {
// Preserve the ObjectMonitor*, the cache is cleared when a box is reused
// and only read while the lock is held, so no stale ObjectMonitor* is
// encountered.

@@ -35,12 +35,6 @@ class BasicLock {
friend class VMStructs;
friend class JVMCIVMStructs;
private:
// * For LM_MONITOR
// Unused.
// * For LM_LEGACY
// This is either the actual displaced header from a locked object, or
// a sentinel zero value indicating a recursive stack-lock.
// * For LM_LIGHTWEIGHT
// Used as a cache of the ObjectMonitor* used when locking. Must either
// be nullptr or the ObjectMonitor* used when locking.
volatile uintptr_t _metadata;
@@ -52,15 +46,10 @@
public:
BasicLock() : _metadata(0) {}
// LM_MONITOR
void set_bad_metadata_deopt() { set_metadata(badDispHeaderDeopt); }
// LM_LEGACY
inline markWord displaced_header() const;
inline void set_displaced_header(markWord header);
static int displaced_header_offset_in_bytes() { return metadata_offset_in_bytes(); }
// LM_LIGHTWEIGHT
inline ObjectMonitor* object_monitor_cache() const;
inline void clear_object_monitor_cache();
inline void set_object_monitor_cache(ObjectMonitor* mon);
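
After this change a BasicLock's _metadata has just two roles: the deopt sentinel, and, under UseObjectMonitorTable, a cache that is either nullptr or the ObjectMonitor* used when locking. A minimal, illustrative model of the cache role follows; memory ordering is simplified here since the real cache is only read while the lock is held:

    // Minimal model of BasicLock's remaining role as a monitor cache.
    // Illustrative only; HotSpot's BasicLock also carries a deopt sentinel.
    #include <atomic>
    #include <cassert>
    #include <cstdint>

    struct ToyMonitor {};

    class ToyBasicLock {
      std::atomic<uintptr_t> _metadata{0};
     public:
      ToyMonitor* monitor_cache() const {
        return reinterpret_cast<ToyMonitor*>(_metadata.load(std::memory_order_relaxed));
      }
      void set_monitor_cache(ToyMonitor* m) {
        _metadata.store(reinterpret_cast<uintptr_t>(m), std::memory_order_relaxed);
      }
      void clear_monitor_cache() { _metadata.store(0, std::memory_order_relaxed); }
    };

    int main() {
      ToyBasicLock l;
      ToyMonitor m;
      l.set_monitor_cache(&m);            // cache the monitor used when locking
      assert(l.monitor_cache() == &m);
      l.clear_monitor_cache();            // cleared when the box is reused
      assert(l.monitor_cache() == nullptr);
      return 0;
    }
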

@@ -29,16 +29,6 @@
#include "runtime/objectMonitor.inline.hpp"
inline markWord BasicLock::displaced_header() const {
assert(LockingMode == LM_LEGACY, "must be");
return markWord(get_metadata());
}
inline void BasicLock::set_displaced_header(markWord header) {
assert(LockingMode == LM_LEGACY, "must be");
Atomic::store(&_metadata, header.value());
}
inline ObjectMonitor* BasicLock::object_monitor_cache() const {
assert(UseObjectMonitorTable, "must be");
#if !defined(ZERO) && (defined(X86) || defined(AARCH64) || defined(RISCV64) || defined(PPC64) || defined(S390))

@@ -146,10 +146,6 @@ static void verify_preempt_preconditions(JavaThread* current, oop continuation)
freeze_result Continuation::try_preempt(JavaThread* current, oop continuation) {
verify_preempt_preconditions(current, continuation);
if (LockingMode == LM_LEGACY) {
return freeze_unsupported;
}
if (!is_vthread_safe_to_preempt(current, current->vthread())) {
return freeze_pinned_native;
}

@@ -533,11 +533,7 @@ FreezeBase::FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t*
cont_size(), _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
assert(cont_size() > 0, "");
if (LockingMode != LM_LIGHTWEIGHT) {
_monitors_in_lockstack = 0;
} else {
_monitors_in_lockstack = _thread->lock_stack().monitor_count();
}
_monitors_in_lockstack = _thread->lock_stack().monitor_count();
}
void FreezeBase::init_rest() { // we want to postpone some initialization after chunk handling
@@ -587,33 +583,12 @@ static void assert_frames_in_continuation_are_safe(JavaThread* thread) {
#endif // ASSERT
}
#ifdef ASSERT
static bool monitors_on_stack(JavaThread* thread) {
assert_frames_in_continuation_are_safe(thread);
ContinuationEntry* ce = thread->last_continuation();
RegisterMap map(thread,
RegisterMap::UpdateMap::include,
RegisterMap::ProcessFrames::skip,
RegisterMap::WalkContinuation::skip);
map.set_include_argument_oops(false);
for (frame f = thread->last_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map)) {
if ((f.is_interpreted_frame() && ContinuationHelper::InterpretedFrame::is_owning_locks(f)) ||
(f.is_compiled_frame() && ContinuationHelper::CompiledFrame::is_owning_locks(map.thread(), &map, f)) ||
(f.is_native_frame() && ContinuationHelper::NativeFrame::is_owning_locks(map.thread(), f))) {
return true;
}
}
return false;
}
#endif // ASSERT
// Called _after_ the last possible safepoint during the freeze operation (chunk allocation)
void FreezeBase::unwind_frames() {
ContinuationEntry* entry = _cont.entry();
entry->flush_stack_processing(_thread);
assert_frames_in_continuation_are_safe(_thread);
JFR_ONLY(Jfr::check_and_process_sample_request(_thread);)
assert(LockingMode != LM_LEGACY || !monitors_on_stack(_thread), "unexpected monitors on stack");
set_anchor_to_entry(_thread, entry);
}
@@ -1762,8 +1737,8 @@ static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const
assert(entry->is_virtual_thread() == (entry->scope(current) == java_lang_VirtualThread::vthread_scope()), "");
assert(LockingMode == LM_LEGACY || (current->held_monitor_count() == 0 && current->jni_monitor_count() == 0),
"Held monitor count should only be used for LM_LEGACY: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count());
assert((current->held_monitor_count() == 0 && current->jni_monitor_count() == 0),
"Held monitor count should not be used for lightweight locking: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count());
if (entry->is_pinned() || current->held_monitor_count() > 0) {
log_develop_debug(continuations)("PINNED due to critical section/hold monitor");

@@ -1645,26 +1645,17 @@ bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInf
Handle obj(thread, mon_info->owner());
markWord mark = obj->mark();
if (exec_mode == Unpack_none) {
if (LockingMode == LM_LEGACY && mark.has_locker() && fr.sp() > (intptr_t*)mark.locker()) {
// With exec_mode == Unpack_none obj may be thread local and locked in
// a callee frame. Make the lock in the callee a recursive lock and restore the displaced header.
markWord dmw = mark.displaced_mark_helper();
mark.locker()->set_displaced_header(markWord::encode((BasicLock*) nullptr));
obj->set_mark(dmw);
}
if (mark.has_monitor()) {
// defer relocking if the deoptee thread is currently waiting for obj
ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor();
if (waiting_monitor != nullptr && waiting_monitor->object() == obj()) {
assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization");
if (LockingMode == LM_LEGACY) {
mon_info->lock()->set_displaced_header(markWord::unused_mark());
} else if (UseObjectMonitorTable) {
if (UseObjectMonitorTable) {
mon_info->lock()->clear_object_monitor_cache();
}
#ifdef ASSERT
else {
assert(LockingMode == LM_MONITOR || !UseObjectMonitorTable, "must be");
assert(!UseObjectMonitorTable, "must be");
mon_info->lock()->set_bad_metadata_deopt();
}
#endif
@@ -1674,29 +1665,24 @@ bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInf
}
}
BasicLock* lock = mon_info->lock();
if (LockingMode == LM_LIGHTWEIGHT) {
// We have lost information about the correct state of the lock stack.
// Entering may create an invalid lock stack. Inflate the lock if it
// was fast_locked to restore the valid lock stack.
if (UseObjectMonitorTable) {
// UseObjectMonitorTable expects the BasicLock cache to be either a
// valid ObjectMonitor* or nullptr. Right now it is garbage, set it
// to nullptr.
lock->clear_object_monitor_cache();
}
ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
if (deoptee_thread->lock_stack().contains(obj())) {
LightweightSynchronizer::inflate_fast_locked_object(obj(), ObjectSynchronizer::InflateCause::inflate_cause_vm_internal,
deoptee_thread, thread);
}
assert(mon_info->owner()->is_locked(), "object must be locked now");
assert(obj->mark().has_monitor(), "must be");
assert(!deoptee_thread->lock_stack().contains(obj()), "must be");
assert(ObjectSynchronizer::read_monitor(thread, obj(), obj->mark())->has_owner(deoptee_thread), "must be");
} else {
ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
assert(mon_info->owner()->is_locked(), "object must be locked now");
// We have lost information about the correct state of the lock stack.
// Entering may create an invalid lock stack. Inflate the lock if it
// was fast_locked to restore the valid lock stack.
if (UseObjectMonitorTable) {
// UseObjectMonitorTable expects the BasicLock cache to be either a
// valid ObjectMonitor* or nullptr. Right now it is garbage, set it
// to nullptr.
lock->clear_object_monitor_cache();
}
ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
if (deoptee_thread->lock_stack().contains(obj())) {
LightweightSynchronizer::inflate_fast_locked_object(obj(), ObjectSynchronizer::InflateCause::inflate_cause_vm_internal,
deoptee_thread, thread);
}
assert(mon_info->owner()->is_locked(), "object must be locked now");
assert(obj->mark().has_monitor(), "must be");
assert(!deoptee_thread->lock_stack().contains(obj()), "must be");
assert(ObjectSynchronizer::read_monitor(thread, obj(), obj->mark())->has_owner(deoptee_thread), "must be");
}
}
}

@@ -1048,10 +1048,6 @@ const int ObjectAlignmentInBytes = 8;
product(bool, ErrorFileToStdout, false, \
"If true, error data is printed to stdout instead of a file") \
\
develop(bool, VerifyHeavyMonitors, false, \
"Checks that no stack locking happens when using " \
"-XX:LockingMode=0 (LM_MONITOR)") \
\
product(bool, PrintStringTableStatistics, false, \
"print statistics about the StringTable and SymbolTable") \
\

@@ -553,7 +553,7 @@ class SignatureChekker : public SignatureIterator {
"Bad JNI oop argument %d: " PTR_FORMAT, _pos, v);
// Verify the pointee.
oop vv = resolve_indirect_oop(v, _value_state[_pos]);
guarantee(oopDesc::is_oop_or_null(vv, true),
guarantee(oopDesc::is_oop_or_null(vv),
"Bad JNI oop argument %d: " PTR_FORMAT " -> " PTR_FORMAT,
_pos, v, p2i(vv));
}

@@ -1063,11 +1063,6 @@ JavaThread* JavaThread::active() {
}
}
bool JavaThread::is_lock_owned(address adr) const {
assert(LockingMode != LM_LIGHTWEIGHT, "should not be called with new lightweight locking");
return is_in_full_stack(adr);
}
oop JavaThread::exception_oop() const {
return Atomic::load(&_exception_oop);
}
@@ -1437,9 +1432,8 @@ void JavaThread::oops_do_no_frames(OopClosure* f, NMethodClosure* cf) {
entry = entry->parent();
}
if (LockingMode == LM_LIGHTWEIGHT) {
lock_stack().oops_do(f);
}
// Due to lightweight locking
lock_stack().oops_do(f);
}
void JavaThread::oops_do_frames(OopClosure* f, NMethodClosure* cf) {
@@ -1999,22 +1993,9 @@ void JavaThread::trace_stack() {
// this slow-path.
void JavaThread::inc_held_monitor_count(intx i, bool jni) {
#ifdef SUPPORT_MONITOR_COUNT
if (LockingMode != LM_LEGACY) {
// Nothing to do. Just do some sanity check.
assert(_held_monitor_count == 0, "counter should not be used");
assert(_jni_monitor_count == 0, "counter should not be used");
return;
}
assert(_held_monitor_count >= 0, "Must always be non-negative: %zd", _held_monitor_count);
_held_monitor_count += i;
if (jni) {
assert(_jni_monitor_count >= 0, "Must always be non-negative: %zd", _jni_monitor_count);
_jni_monitor_count += i;
}
assert(_held_monitor_count >= _jni_monitor_count, "Monitor count discrepancy detected - held count "
"%zd is less than JNI count %zd", _held_monitor_count, _jni_monitor_count);
// Nothing to do. Just do some sanity check.
assert(_held_monitor_count == 0, "counter should not be used");
assert(_jni_monitor_count == 0, "counter should not be used");
#endif // SUPPORT_MONITOR_COUNT
}
@@ -2022,26 +2003,9 @@ void JavaThread::inc_held_monitor_count(intx i, bool jni) {
// this slow-path.
void JavaThread::dec_held_monitor_count(intx i, bool jni) {
#ifdef SUPPORT_MONITOR_COUNT
if (LockingMode != LM_LEGACY) {
// Nothing to do. Just do some sanity check.
assert(_held_monitor_count == 0, "counter should not be used");
assert(_jni_monitor_count == 0, "counter should not be used");
return;
}
_held_monitor_count -= i;
assert(_held_monitor_count >= 0, "Must always be non-negative: %zd", _held_monitor_count);
if (jni) {
_jni_monitor_count -= i;
assert(_jni_monitor_count >= 0, "Must always be non-negative: %zd", _jni_monitor_count);
}
// When a thread is detaching with still owned JNI monitors, the logic that releases
// the monitors doesn't know to set the "jni" flag and so the counts can get out of sync.
// So we skip this assert if the thread is exiting. Once all monitors are unlocked the
// JNI count is directly set to zero.
assert(_held_monitor_count >= _jni_monitor_count || is_exiting(), "Monitor count discrepancy detected - held count "
"%zd is less than JNI count %zd", _held_monitor_count, _jni_monitor_count);
// Nothing to do. Just do some sanity check.
assert(_held_monitor_count == 0, "counter should not be used");
assert(_jni_monitor_count == 0, "counter should not be used");
#endif // SUPPORT_MONITOR_COUNT
}

@@ -755,9 +755,6 @@ public:
return (_suspend_flags & _obj_deopt) != 0;
}
// Stack-locking support (not for LM_LIGHTWEIGHT)
bool is_lock_owned(address adr) const;
// Accessors for vframe array top
// The linked list of vframe arrays are sorted on sp. This means when we
// unpack the head must contain the vframe array to unpack.

@@ -333,8 +333,6 @@ size_t ObjectMonitorTable::_table_size = 0;
volatile bool ObjectMonitorTable::_resize = false;
ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
ObjectMonitor* monitor = get_monitor_from_table(current, object);
if (monitor != nullptr) {
*inserted = false;
@@ -628,7 +626,6 @@ bool LightweightSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stac
}
void LightweightSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "must be cleared");
JavaThread* current = JavaThread::current();
VerifyThreadState vts(locking_thread, current);
@@ -657,7 +654,6 @@ void LightweightSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread*
}
void LightweightSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
assert(current == JavaThread::current(), "must be");
if (obj->klass()->is_value_based()) {
@@ -718,7 +714,6 @@ void LightweightSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* cur
}
void LightweightSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
assert(current == Thread::current(), "must be");
markWord mark = object->mark();
@@ -770,18 +765,14 @@ void LightweightSynchronizer::exit(oop object, BasicLock* lock, JavaThread* curr
monitor->exit(current);
}
// LightweightSynchronizer::inflate_locked_or_imse is used to to get an inflated
// ObjectMonitor* with LM_LIGHTWEIGHT. It is used from contexts which require
// an inflated ObjectMonitor* for a monitor, and expects to throw a
// java.lang.IllegalMonitorStateException if it is not held by the current
// thread. Such as notify/wait and jni_exit. LM_LIGHTWEIGHT keeps it invariant
// that it only inflates if it is already locked by the current thread or the
// current thread is in the process of entering. To maintain this invariant we
// need to throw a java.lang.IllegalMonitorStateException before inflating if
// the current thread is not the owner.
// LightweightSynchronizer::inflate_locked_or_imse facilitates this.
// LightweightSynchronizer::inflate_locked_or_imse is used to get an
// inflated ObjectMonitor* from contexts which require that, such as
// notify/wait and jni_exit. Lightweight locking keeps the invariant that it
// only inflates if it is already locked by the current thread or the current
// thread is in the process of entering. To maintain this invariant we need to
// throw a java.lang.IllegalMonitorStateException before inflating if the
// current thread is not the owner.
ObjectMonitor* LightweightSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
JavaThread* current = THREAD;
for (;;) {
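
The rewritten comment's invariant can be made concrete: ownership (fast-locked and on the caller's lock stack, or an inflated monitor owned by the caller) is checked, and IllegalMonitorStateException thrown, before any inflation happens. A toy model of that ordering follows; names are illustrative, and HotSpot's version additionally retries when it races with deflation:

    // Toy model of "throw IMSE before inflating": illustrative only.
    #include <algorithm>
    #include <cassert>
    #include <stdexcept>
    #include <vector>

    struct ToyObj { bool fast_locked = false; void* monitor_owner = nullptr; };

    thread_local std::vector<ToyObj*> lock_stack;  // models the thread's LockStack
    thread_local char self;                        // identity token for this thread

    void check_owner_then_inflate(ToyObj* o) {
      bool owns_fast = o->fast_locked &&
          std::find(lock_stack.begin(), lock_stack.end(), o) != lock_stack.end();
      bool owns_monitor = o->monitor_owner == &self;
      if (!owns_fast && !owns_monitor) {
        throw std::runtime_error("IllegalMonitorStateException");  // thrown first
      }
      // ... only a lock already owned by this thread is inflated past this point ...
    }

    int main() {
      ToyObj o;
      try { check_owner_then_inflate(&o); assert(false); }
      catch (const std::runtime_error&) { /* expected: caller is not the owner */ }
      o.fast_locked = true;
      lock_stack.push_back(&o);
      check_owner_then_inflate(&o);  // owned: proceeds to inflate
      return 0;
    }
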
@@ -826,12 +817,11 @@ ObjectMonitor* LightweightSynchronizer::inflate_locked_or_imse(oop obj, ObjectSy
ObjectMonitor* LightweightSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current) {
// The JavaThread* locking_thread parameter is only used by LM_LIGHTWEIGHT and requires
// that the locking_thread == Thread::current() or is suspended throughout the call by
// some other mechanism.
// Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a non
// The JavaThread* locking parameter requires that the locking_thread == JavaThread::current,
// or is suspended throughout the call by some other mechanism.
// Even with lightweight locking the thread might be nullptr when called from a non
// JavaThread. (As may still be the case from FastHashCode). However it is only
// important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
// important for the correctness of the lightweight locking algorithm that the thread
// is set when called from ObjectSynchronizer::enter from the owning thread,
// ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
EventJavaMonitorInflate event;
@@ -943,7 +933,6 @@ ObjectMonitor* LightweightSynchronizer::inflate_into_object_header(oop object, O
}
ObjectMonitor* LightweightSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
VerifyThreadState vts(locking_thread, current);
assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack");
@@ -1000,7 +989,6 @@ ObjectMonitor* LightweightSynchronizer::inflate_fast_locked_object(oop object, O
}
ObjectMonitor* LightweightSynchronizer::inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
VerifyThreadState vts(locking_thread, current);
// Note: In some paths (deoptimization) the 'current' thread inflates and

@@ -77,7 +77,6 @@ uint32_t LockStack::end_offset() {
#ifndef PRODUCT
void LockStack::verify(const char* msg) const {
assert(LockingMode == LM_LIGHTWEIGHT, "never use lock-stack when light weight locking is disabled");
assert((_top <= end_offset()), "lockstack overflow: _top %d end_offset %d", _top, end_offset());
assert((_top >= start_offset()), "lockstack underflow: _top %d start_offset %d", _top, start_offset());
if (SafepointSynchronize::is_at_safepoint() || (Thread::current()->is_Java_thread() && is_owning_thread())) {

@@ -296,8 +296,7 @@ ObjectMonitor::ObjectMonitor(oop object) :
_contentions(0),
_wait_set(nullptr),
_waiters(0),
_wait_set_lock(0),
_stack_locker(nullptr)
_wait_set_lock(0)
{ }
ObjectMonitor::~ObjectMonitor() {

@@ -160,9 +160,9 @@ class ObjectMonitor : public CHeapObj<mtObjectMonitor> {
// Because of frequent access, the metadata field is at offset zero (0).
// Enforced by the assert() in metadata_addr().
// * LM_LIGHTWEIGHT with UseObjectMonitorTable:
// Contains the _object's hashCode.
// * LM_LEGACY, LM_MONITOR, LM_LIGHTWEIGHT without UseObjectMonitorTable:
// * Lightweight locking with UseObjectMonitorTable:
// Contains the _object's hashCode.
// * Lightweight locking without UseObjectMonitorTable:
// Contains the displaced object header word - mark
volatile uintptr_t _metadata; // metadata
WeakHandle _object; // backward object pointer
@@ -204,9 +204,6 @@ class ObjectMonitor : public CHeapObj<mtObjectMonitor> {
volatile int _waiters; // number of waiting threads
volatile int _wait_set_lock; // protects wait set queue - simple spinlock
// Used in LM_LEGACY mode to store BasicLock* in case of inflation by contending thread.
BasicLock* volatile _stack_locker;
public:
static void Initialize();
@@ -318,10 +315,6 @@ class ObjectMonitor : public CHeapObj<mtObjectMonitor> {
set_owner_from(ANONYMOUS_OWNER, owner);
}
// Get and set _stack_locker.
BasicLock* stack_locker() const;
void set_stack_locker(BasicLock* locker);
// Simply get _next_om field.
ObjectMonitor* next_om() const;
// Simply set _next_om field to new_value.
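
With _stack_locker gone, the corrected comment above is the whole story for _metadata: it holds the identity hash when UseObjectMonitorTable is enabled, and the displaced mark word (header) otherwise. A compact, illustrative model of that either/or:

    // Toy model of ObjectMonitor::_metadata after the change: either the
    // object's identity hash (UseObjectMonitorTable) or the displaced mark
    // word — never a BasicLock*, since _stack_locker is gone. Illustrative only.
    #include <cassert>
    #include <cstdint>

    struct ToyMonitor {
      bool use_table;          // models UseObjectMonitorTable
      uintptr_t metadata;      // identity hash or displaced mark word
      uintptr_t hash() const   { assert(use_table);  return metadata; }
      uintptr_t header() const { assert(!use_table); return metadata; }
    };

    int main() {
      ToyMonitor with_table{true, 0x1234};     // hash lives in _metadata
      assert(with_table.hash() == 0x1234);
      ToyMonitor without_table{false, 0b01};   // displaced (neutral) mark word
      assert(without_table.header() == 0b01);
      return 0;
    }
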

@@ -52,11 +52,7 @@ inline int64_t ObjectMonitor::owner_id_from(oop vthread) {
inline bool ObjectMonitor::is_entered(JavaThread* current) const {
if (has_anonymous_owner()) {
if (LockingMode == LM_LIGHTWEIGHT) {
return current->lock_stack().contains(object());
} else {
return current->is_lock_owned((address)stack_locker());
}
return current->lock_stack().contains(object());
} else {
return has_owner(current);
}
@@ -116,14 +112,6 @@ inline int64_t ObjectMonitor::owner_raw() const {
return Atomic::load(&_owner);
}
inline BasicLock* ObjectMonitor::stack_locker() const {
return Atomic::load(&_stack_locker);
}
inline void ObjectMonitor::set_stack_locker(BasicLock* locker) {
Atomic::store(&_stack_locker, locker);
}
// Returns true if owner field == DEFLATER_MARKER and false otherwise.
inline bool ObjectMonitor::owner_is_DEFLATER_MARKER() const {
return owner_raw() == DEFLATER_MARKER;

@@ -3348,18 +3348,7 @@ JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) )
kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
if (kptr2->obj() != nullptr) { // Avoid 'holes' in the monitor array
BasicLock *lock = kptr2->lock();
if (LockingMode == LM_LEGACY) {
// Inflate so the object's header no longer refers to the BasicLock.
if (lock->displaced_header().is_unlocked()) {
// The object is locked and the resulting ObjectMonitor* will also be
// locked so it can't be async deflated until ownership is dropped.
// See the big comment in basicLock.cpp: BasicLock::move_to().
ObjectSynchronizer::inflate_helper(kptr2->obj());
}
// Now the displaced header is free to move because the
// object's header no longer refers to it.
buf[i] = (intptr_t)lock->displaced_header().value();
} else if (UseObjectMonitorTable) {
if (UseObjectMonitorTable) {
buf[i] = (intptr_t)lock->object_monitor_cache();
}
#ifdef ASSERT

@@ -281,9 +281,7 @@ void ObjectSynchronizer::initialize() {
// Start the timer for deflations, so it does not trigger immediately.
_last_async_deflation_time_ns = os::javaTimeNanos();
if (LockingMode == LM_LIGHTWEIGHT) {
LightweightSynchronizer::initialize();
}
LightweightSynchronizer::initialize();
}
MonitorList ObjectSynchronizer::_in_use_list;
@@ -342,23 +340,15 @@ bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool al
if (obj == nullptr) return false; // slow-path for invalid obj
const markWord mark = obj->mark();
if (LockingMode == LM_LIGHTWEIGHT) {
if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
// Degenerate notify
// fast-locked by caller so by definition the implied waitset is empty.
return true;
}
} else if (LockingMode == LM_LEGACY) {
if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// Degenerate notify
// stack-locked by caller so by definition the implied waitset is empty.
return true;
}
if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
// Degenerate notify
// fast-locked by caller so by definition the implied waitset is empty.
return true;
}
if (mark.has_monitor()) {
ObjectMonitor* const mon = read_monitor(current, obj, mark);
if (LockingMode == LM_LIGHTWEIGHT && mon == nullptr) {
if (mon == nullptr) {
// Racing with inflation/deflation go slow path
return false;
}
@@ -381,79 +371,6 @@ bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool al
return false;
}
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
return LockingMode == LM_MONITOR;
#else
return false;
#endif
}
// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.
bool ObjectSynchronizer::quick_enter_legacy(oop obj, BasicLock* lock, JavaThread* current) {
assert(current->thread_state() == _thread_in_Java, "invariant");
if (useHeavyMonitors()) {
return false; // Slow path
}
assert(LockingMode == LM_LEGACY, "legacy mode below");
const markWord mark = obj->mark();
if (mark.has_monitor()) {
ObjectMonitor* const m = read_monitor(mark);
// An async deflation or GC can race us before we manage to make
// the ObjectMonitor busy by setting the owner below. If we detect
// that race we just bail out to the slow-path here.
if (m->object_peek() == nullptr) {
return false;
}
// Lock contention and Transactional Lock Elision (TLE) diagnostics
// and observability
// Case: light contention possibly amenable to TLE
// Case: TLE inimical operations such as nested/recursive synchronization
if (m->has_owner(current)) {
m->increment_recursions(current);
current->inc_held_monitor_count();
return true;
}
// This Java Monitor is inflated so obj's header will never be
// displaced to this thread's BasicLock. Make the displaced header
// non-null so this BasicLock is not seen as recursive nor as
// being locked. We do this unconditionally so that this thread's
// BasicLock cannot be mis-interpreted by any stack walkers. For
// performance reasons, stack walkers generally first check for
// stack-locking in the object's header, the second check is for
// recursive stack-locking in the displaced header in the BasicLock,
// and last are the inflated Java Monitor (ObjectMonitor) checks.
lock->set_displaced_header(markWord::unused_mark());
if (!m->has_owner() && m->try_set_owner(current)) {
assert(m->recursions() == 0, "invariant");
current->inc_held_monitor_count();
return true;
}
}
// Note that we could inflate in quick_enter.
// This is likely a useful optimization
// Critically, in quick_enter() we must not:
// -- block indefinitely, or
// -- reach a safepoint
return false; // revert to slow-path
}
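
The deleted quick_enter_legacy handled two inflated-monitor cases inline: recursive re-entry by the owning thread, and an uncontended owner acquisition. The lightweight path keeps equivalent logic in its own quick path. Below is a toy model of that inflated fast path; the real owner update is a CAS, and as the removed comment notes, the quick path must neither block nor reach a safepoint:

    // Toy model of the inflated-monitor quick path: illustrative only.
    #include <cassert>

    struct ToyMonitor {
      void* owner = nullptr;
      int recursions = 0;
      bool try_quick_enter(void* self) {
        if (owner == self)    { recursions++; return true; }  // recursive re-entry
        if (owner == nullptr) { owner = self; return true; }  // uncontended (CAS in real code)
        return false;                                         // contended: slow path
      }
    };

    int main() {
      ToyMonitor m;
      char t1, t2;
      assert(m.try_quick_enter(&t1));
      assert(m.try_quick_enter(&t1) && m.recursions == 1);
      assert(!m.try_quick_enter(&t2));   // other thread must take the slow path
      return 0;
    }
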
// Handle notifications when synchronizing on value based classes
void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
@@ -512,144 +429,7 @@ void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* lock
// the locking_thread with respect to the current thread. Currently only used when
// deoptimizing and re-locking locks. See Deoptimization::relock_objects
assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
if (LockingMode == LM_LIGHTWEIGHT) {
return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
}
if (!enter_fast_impl(obj, lock, locking_thread)) {
// Inflated ObjectMonitor::enter_for is required
// An async deflation can race after the inflate_for() call and before
// enter_for() can make the ObjectMonitor busy. enter_for() returns false
// if we have lost the race to async deflation and we simply try again.
while (true) {
ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
if (monitor->enter_for(locking_thread)) {
return;
}
assert(monitor->is_being_async_deflated(), "must be");
}
}
}
void ObjectSynchronizer::enter_legacy(Handle obj, BasicLock* lock, JavaThread* current) {
if (!enter_fast_impl(obj, lock, current)) {
// Inflated ObjectMonitor::enter is required
// An async deflation can race after the inflate() call and before
// enter() can make the ObjectMonitor busy. enter() returns false if
// we have lost the race to async deflation and we simply try again.
while (true) {
ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
if (monitor->enter(current)) {
return;
}
}
}
}
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race condition. Be careful.
bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");
if (obj->klass()->is_value_based()) {
handle_sync_on_value_based_class(obj, locking_thread);
}
locking_thread->inc_held_monitor_count();
if (!useHeavyMonitors()) {
if (LockingMode == LM_LEGACY) {
markWord mark = obj->mark();
if (mark.is_unlocked()) {
// Anticipate successful CAS -- the ST of the displaced mark must
// be visible <= the ST performed by the CAS.
lock->set_displaced_header(mark);
if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
return true;
}
} else if (mark.has_locker() &&
locking_thread->is_lock_owned((address) mark.locker())) {
assert(lock != mark.locker(), "must not re-lock the same lock");
assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
lock->set_displaced_header(markWord::from_pointer(nullptr));
return true;
}
// The object header will never be displaced to this lock,
// so it does not matter what the value is, except that it
// must be non-zero to avoid looking like a re-entrant lock,
// and must not look locked either.
lock->set_displaced_header(markWord::unused_mark());
// Failed to fast lock.
return false;
}
} else if (VerifyHeavyMonitors) {
guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
}
return false;
}
void ObjectSynchronizer::exit_legacy(oop object, BasicLock* lock, JavaThread* current) {
assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");
if (!useHeavyMonitors()) {
markWord mark = object->mark();
if (LockingMode == LM_LEGACY) {
markWord dhw = lock->displaced_header();
if (dhw.value() == 0) {
// If the displaced header is null, then this exit matches up with
// a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
if (mark != markWord::INFLATING()) {
// Only do diagnostics if we are not racing an inflation. Simply
// exiting a recursive enter of a Java Monitor that is being
// inflated is safe; see the has_monitor() comment below.
assert(!mark.is_unlocked(), "invariant");
assert(!mark.has_locker() ||
current->is_lock_owned((address)mark.locker()), "invariant");
if (mark.has_monitor()) {
// The BasicLock's displaced_header is marked as a recursive
// enter and we have an inflated Java Monitor (ObjectMonitor).
// This is a special case where the Java Monitor was inflated
// after this thread entered the stack-lock recursively. When a
// Java Monitor is inflated, we cannot safely walk the Java
// Monitor owner's stack and update the BasicLocks because a
// Java Monitor can be asynchronously inflated by a thread that
// does not own the Java Monitor.
ObjectMonitor* m = read_monitor(mark);
assert(m->object()->mark() == mark, "invariant");
assert(m->is_entered(current), "invariant");
}
}
#endif
return;
}
if (mark == markWord::from_pointer(lock)) {
// If the object is stack-locked by the current thread, try to
// swing the displaced header from the BasicLock back to the mark.
assert(dhw.is_neutral(), "invariant");
if (object->cas_set_mark(dhw, mark) == mark) {
return;
}
}
}
} else if (VerifyHeavyMonitors) {
guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
}
// We have to take the slow-path of possible inflation and then exit.
// The ObjectMonitor* can't be async deflated until ownership is
// dropped inside exit() and the ObjectMonitor* must be !is_busy().
ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
assert(!monitor->has_anonymous_owner(), "must not be");
monitor->exit(current);
return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
}
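
The deleted wrappers shared one idiom that survives elsewhere: monitor entry can fail if the ObjectMonitor is asynchronously deflated between inflation and enter(), in which case the caller simply loops and inflates again. A toy model of that retry loop (illustrative only):

    // Toy model of the retry-on-async-deflation loop: illustrative only.
    struct ToyMonitor {
      bool deflated;
      bool enter() { return !deflated; }   // fails if deflated raced us
    };

    ToyMonitor* inflate(int attempt) {
      // The first attempt models losing the race with deflation; the retry
      // observes a fresh monitor.
      static ToyMonitor stale{true}, fresh{false};
      return attempt == 0 ? &stale : &fresh;
    }

    int main() {
      for (int attempt = 0; ; attempt++) {
        ToyMonitor* m = inflate(attempt);
        if (m->enter()) break;   // lost the race? loop and inflate again
      }
      return 0;
    }
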
// -----------------------------------------------------------------------------
@@ -670,17 +450,8 @@ void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
// enter() can make the ObjectMonitor busy. enter() returns false if
// we have lost the race to async deflation and we simply try again.
while (true) {
ObjectMonitor* monitor;
bool entered;
if (LockingMode == LM_LIGHTWEIGHT) {
BasicLock lock;
entered = LightweightSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr;
} else {
monitor = inflate(current, obj(), inflate_cause_jni_enter);
entered = monitor->enter(current);
}
if (entered) {
BasicLock lock;
if (LightweightSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
current->inc_held_monitor_count(1, true);
break;
}
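// For illustration only -- the retry shape used above, sketched with a
// hypothetical monitor type (not ObjectMonitor). enter() can lose the race
// to async deflation, in which case the loop simply inflates again.
#include <atomic>
struct MonitorModel {
  std::atomic<bool> deflated{false};
  bool enter() { return !deflated.load(std::memory_order_acquire); }
};
inline void enter_with_retry(MonitorModel& (*inflate_fn)()) {
  while (true) {
    if (inflate_fn().enter()) {
      break;                 // entered; the held-monitor count is bumped here
    }
    // lost to async deflation -- loop around and inflate a fresh monitor
  }
}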
@@ -693,13 +464,7 @@ void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
JavaThread* current = THREAD;
ObjectMonitor* monitor;
if (LockingMode == LM_LIGHTWEIGHT) {
monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
} else {
// The ObjectMonitor* can't be async deflated until ownership is
// dropped inside exit() and the ObjectMonitor* must be !is_busy().
monitor = inflate(current, obj, inflate_cause_jni_exit);
}
monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
// If this thread has locked the object, exit the monitor. We
// intentionally do not use CHECK on check_owner because we must exit the
// monitor even if an exception was already pending.
@@ -740,14 +505,7 @@ int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
}
ObjectMonitor* monitor;
if (LockingMode == LM_LIGHTWEIGHT) {
monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
} else {
// The ObjectMonitor* can't be async deflated because the _waiters
// field is incremented before ownership is dropped and decremented
// after ownership is regained.
monitor = inflate(current, obj(), inflate_cause_wait);
}
monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
@@ -766,11 +524,7 @@ void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
}
ObjectMonitor* monitor;
if (LockingMode == LM_LIGHTWEIGHT) {
monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
} else {
monitor = inflate(THREAD, obj(), inflate_cause_wait);
}
monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
monitor->wait(millis, false, THREAD);
}
@@ -779,26 +533,11 @@ void ObjectSynchronizer::notify(Handle obj, TRAPS) {
JavaThread* current = THREAD;
markWord mark = obj->mark();
if (LockingMode == LM_LIGHTWEIGHT) {
if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
// Not inflated so there can't be any waiters to notify.
return;
}
} else if (LockingMode == LM_LEGACY) {
if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// Not inflated so there can't be any waiters to notify.
return;
}
}
ObjectMonitor* monitor;
if (LockingMode == LM_LIGHTWEIGHT) {
monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
} else {
// The ObjectMonitor* can't be async deflated until ownership is
// dropped by the calling thread.
monitor = inflate(current, obj(), inflate_cause_notify);
if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
// Not inflated so there can't be any waiters to notify.
return;
}
ObjectMonitor* monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
monitor->notify(CHECK);
}
@@ -807,26 +546,12 @@ void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
JavaThread* current = THREAD;
markWord mark = obj->mark();
if (LockingMode == LM_LIGHTWEIGHT) {
if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
// Not inflated so there can't be any waiters to notify.
return;
}
} else if (LockingMode == LM_LEGACY) {
if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// Not inflated so there can't be any waiters to notify.
return;
}
if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
// Not inflated so there can't be any waiters to notify.
return;
}
ObjectMonitor* monitor;
if (LockingMode == LM_LIGHTWEIGHT) {
monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
} else {
// The ObjectMonitor* can't be async deflated until ownership is
// dropped by the calling thread.
monitor = inflate(current, obj(), inflate_cause_notify);
}
ObjectMonitor* monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
monitor->notifyAll(CHECK);
}
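// For illustration only -- the shape shared by notify()/notifyAll() above,
// sketched with hypothetical predicates: wait() always inflates to a full
// ObjectMonitor, so an object still fast-locked by the caller can have no
// wait set; otherwise the object is inflated, and inflate_locked_or_imse()
// throws IllegalMonitorStateException for callers that do not own the lock.
enum class NotifyResult { kNoWaiters, kNotified, kIllegalState };
inline NotifyResult notify_shape(bool fast_locked_by_self,
                                 bool owns_inflated_monitor) {
  if (fast_locked_by_self) {
    return NotifyResult::kNoWaiters;       // never inflated => no waiters
  }
  if (!owns_inflated_monitor) {
    return NotifyResult::kIllegalState;    // the "_or_imse" part
  }
  return NotifyResult::kNotified;          // monitor->notify()/notifyAll()
}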
@@ -846,67 +571,6 @@ struct SharedGlobals {
static SharedGlobals GVars;
static markWord read_stable_mark(oop obj) {
markWord mark = obj->mark_acquire();
if (!mark.is_being_inflated() || LockingMode == LM_LIGHTWEIGHT) {
// New lightweight locking does not use the markWord::INFLATING() protocol.
return mark; // normal fast-path return
}
int its = 0;
for (;;) {
markWord mark = obj->mark_acquire();
if (!mark.is_being_inflated()) {
return mark; // normal fast-path return
}
// The object is being inflated by some other thread.
// The caller of read_stable_mark() must wait for inflation to complete.
// Avoid live-lock.
++its;
if (its > 10000 || !os::is_MP()) {
if (its & 1) {
os::naked_yield();
} else {
// Note that the following code attenuates the livelock problem but is not
// a complete remedy. A more complete solution would require that the inflating
// thread hold the associated inflation lock. The following code simply restricts
// the number of spinners to at most one. We'll have N-2 threads blocked
// on the inflationlock, 1 thread holding the inflation lock and using
// a yield/park strategy, and 1 thread in the midst of inflation.
// A more refined approach would be to change the encoding of INFLATING
// to allow encapsulation of a native thread pointer. Threads waiting for
// inflation to complete would use CAS to push themselves onto a singly linked
// list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
// and calling park(). When inflation was complete the thread that accomplished inflation
// would detach the list and set the markword to inflated with a single CAS and
// then for each thread on the list, set the flag and unpark() the thread.
// Index into the lock array based on the current object address.
static_assert(is_power_of_2(inflation_lock_count()), "must be");
size_t ix = (cast_from_oop<intptr_t>(obj) >> 5) & (inflation_lock_count() - 1);
int YieldThenBlock = 0;
assert(ix < inflation_lock_count(), "invariant");
inflation_lock(ix)->lock();
while (obj->mark_acquire() == markWord::INFLATING()) {
// Beware: naked_yield() is advisory and has almost no effect on some platforms
// so we periodically call current->_ParkEvent->park(1).
// We use a mixed spin/yield/block mechanism.
if ((YieldThenBlock++) >= 16) {
Thread::current()->_ParkEvent->park(1);
} else {
os::naked_yield();
}
}
inflation_lock(ix)->unlock();
}
} else {
SpinPause(); // SMP-polite spinning
}
}
}
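// For illustration only -- a compact model of the spin/yield strategy and
// the striped-lock indexing that read_stable_mark() used while the mark held
// the transient INFLATING value. The thresholds and lock count are assumed;
// 0 plays the role of markWord::INFLATING().
#include <atomic>
#include <cstdint>
#include <thread>
constexpr size_t kInflationLockCount = 256;   // assumed; power of two as asserted above
inline size_t inflation_lock_index(uintptr_t obj_addr) {
  static_assert((kInflationLockCount & (kInflationLockCount - 1)) == 0,
                "must be a power of two");
  return (obj_addr >> 5) & (kInflationLockCount - 1);   // same hashing as above
}
inline uintptr_t wait_for_stable(const std::atomic<uintptr_t>& mark) {
  int its = 0;
  for (;;) {
    uintptr_t m = mark.load(std::memory_order_acquire);
    if (m != 0) {
      return m;                              // inflation finished
    }
    if (++its > 10000) {
      std::this_thread::yield();             // past the spin budget
    }
    // below the budget HotSpot uses SpinPause(); a busy retry stands in here
  }
}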
// hashCode() generation :
//
// Possibilities:
@@ -965,7 +629,7 @@ static intptr_t get_next_hash(Thread* current, oop obj) {
}
static intptr_t install_hash_code(Thread* current, oop obj) {
assert(UseObjectMonitorTable && LockingMode == LM_LIGHTWEIGHT, "must be");
assert(UseObjectMonitorTable, "must be");
markWord mark = obj->mark_acquire();
for (;;) {
@@ -996,12 +660,8 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
ObjectMonitor* monitor = nullptr;
markWord temp, test;
intptr_t hash;
markWord mark = read_stable_mark(obj);
if (VerifyHeavyMonitors) {
assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
}
if (mark.is_unlocked() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
markWord mark = obj->mark_acquire();
if (mark.is_unlocked() || mark.is_fast_locked()) {
hash = mark.hash();
if (hash != 0) { // if it has a hash, just return it
return hash;
@@ -1013,10 +673,9 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
if (test == mark) { // if the hash was installed, return it
return hash;
}
if (LockingMode == LM_LIGHTWEIGHT) {
// CAS failed, retry
continue;
}
// CAS failed, retry
continue;
// Failed to install the hash. It could be that another thread
// installed the hash just before our attempt or inflation has
// occurred or... so we fall thru to inflate the monitor for
@@ -1048,34 +707,14 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
}
// Fall thru so we only have one place that installs the hash in
// the ObjectMonitor.
} else if (LockingMode == LM_LEGACY && mark.has_locker()
&& current->is_Java_thread()
&& JavaThread::cast(current)->is_lock_owned((address)mark.locker())) {
// This is a stack-lock owned by the calling thread so fetch the
// displaced markWord from the BasicLock on the stack.
temp = mark.displaced_mark_helper();
assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
hash = temp.hash();
if (hash != 0) { // if it has a hash, just return it
return hash;
}
// WARNING:
// The displaced header in the BasicLock on a thread's stack
// is strictly immutable. It CANNOT be changed in ANY cases.
// So we have to inflate the stack-lock into an ObjectMonitor
// even if the current thread owns the lock. The BasicLock on
// a thread's stack can be asynchronously read by other threads
// during an inflate() call so any change to that stack memory
// may not propagate to other threads correctly.
}
// Inflate the monitor to set the hash.
// There's no need to inflate if the mark has already got a monitor.
// NOTE: an async deflation can race after we get the monitor and
// before we can update the ObjectMonitor's header with the hash
// value below.
monitor = mark.has_monitor() ? mark.monitor() : inflate(current, obj, inflate_cause_hash_code);
assert(mark.has_monitor(), "must be");
monitor = mark.monitor();
// Load ObjectMonitor's header/dmw field and see if it has a hash.
mark = monitor->header();
assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
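// For illustration only -- the lock-free hash installation shape used by
// FastHashCode() above, against a simplified mark layout. kHashShift and
// kHashMask are assumed stand-ins, not the real markWord bit layout.
#include <atomic>
#include <cstdint>
constexpr uintptr_t kHashMask  = 0x7fffffff;
constexpr int       kHashShift = 8;
inline intptr_t install_hash(std::atomic<uintptr_t>& mark, intptr_t fresh) {
  for (;;) {
    uintptr_t m = mark.load(std::memory_order_acquire);
    intptr_t h = static_cast<intptr_t>((m >> kHashShift) & kHashMask);
    if (h != 0) {
      return h;                    // another thread won the race; reuse its hash
    }
    uintptr_t with_hash =
        m | ((static_cast<uintptr_t>(fresh) & kHashMask) << kHashShift);
    if (mark.compare_exchange_strong(m, with_hash)) {
      return fresh;                // our hash is now published in the mark
    }
    // CAS failed: the hash was installed or the lock state changed -- retry
  }
}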
@@ -1115,19 +754,14 @@ bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
assert(current == JavaThread::current(), "Can only be called on current thread");
oop obj = h_obj();
markWord mark = read_stable_mark(obj);
markWord mark = obj->mark_acquire();
if (LockingMode == LM_LEGACY && mark.has_locker()) {
// stack-locked case, header points into owner's stack
return current->is_lock_owned((address)mark.locker());
}
if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
if (mark.is_fast_locked()) {
// fast-locking case, see if lock is in current's lock stack
return current->lock_stack().contains(h_obj());
}
while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
while (mark.has_monitor()) {
ObjectMonitor* monitor = read_monitor(current, obj, mark);
if (monitor != nullptr) {
return monitor->is_entered(current) != 0;
@@ -1141,13 +775,6 @@ bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
}
}
if (LockingMode != LM_LIGHTWEIGHT && mark.has_monitor()) {
// Inflated monitor so header points to ObjectMonitor (tagged pointer).
// The first stage of async deflation does not affect any field
// used by this comparison so the ObjectMonitor* is usable here.
ObjectMonitor* monitor = read_monitor(mark);
return monitor->is_entered(current) != 0;
}
// Unlocked case, header in place
assert(mark.is_unlocked(), "sanity check");
return false;
@@ -1155,21 +782,15 @@ bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
oop obj = h_obj();
markWord mark = read_stable_mark(obj);
markWord mark = obj->mark_acquire();
if (LockingMode == LM_LEGACY && mark.has_locker()) {
// stack-locked so header points into owner's stack.
// owning_thread_from_monitor_owner() may also return null here:
return Threads::owning_thread_from_stacklock(t_list, (address) mark.locker());
}
if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
if (mark.is_fast_locked()) {
// fast-locked so get owner from the object.
// owning_thread_from_object() may also return null here:
return Threads::owning_thread_from_object(t_list, h_obj());
}
while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
while (mark.has_monitor()) {
ObjectMonitor* monitor = read_monitor(Thread::current(), obj, mark);
if (monitor != nullptr) {
return Threads::owning_thread_from_monitor(t_list, monitor);
@@ -1183,16 +804,6 @@ JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_ob
}
}
if (LockingMode != LM_LIGHTWEIGHT && mark.has_monitor()) {
// Inflated monitor so header points to ObjectMonitor (tagged pointer).
// The first stage of async deflation does not affect any field
// used by this comparison so the ObjectMonitor* is usable here.
ObjectMonitor* monitor = read_monitor(mark);
assert(monitor != nullptr, "monitor should be non-null");
// owning_thread_from_monitor() may also return null here:
return Threads::owning_thread_from_monitor(t_list, monitor);
}
// Unlocked case, header in place
// Cannot have assertion since this object may have been
// locked by another thread when reaching here.
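// For illustration only -- the deflation-tolerant read loop used by the two
// functions above, with hypothetical types: reading the monitor can observe
// nullptr when it is concurrently deflated, so the mark is re-read and the
// object's lock state re-classified on each iteration.
#include <atomic>
#include <cstdint>
struct MonSketch;                              // opaque stand-in for ObjectMonitor
constexpr uintptr_t kMonitorTag = 0x2;         // assumed "has monitor" tag bit
inline MonSketch* read_monitor_or_reclassify(std::atomic<uintptr_t>& mark,
                                             std::atomic<MonSketch*>& table_slot) {
  while (mark.load(std::memory_order_acquire) & kMonitorTag) {
    MonSketch* m = table_slot.load(std::memory_order_acquire);
    if (m != nullptr) {
      return m;                                // stable monitor observed
    }
    // concurrently deflated: loop, re-read the mark, re-classify the state
  }
  return nullptr;                              // not monitor-locked any more
}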
@@ -1415,230 +1026,6 @@ jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
}
static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
const oop obj,
ObjectSynchronizer::InflateCause cause) {
assert(event != nullptr, "invariant");
const Klass* monitor_klass = obj->klass();
if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
return;
}
event->set_monitorClass(monitor_klass);
event->set_address((uintptr_t)(void*)obj);
event->set_cause((u1)cause);
event->commit();
}
// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(oop obj) {
assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter");
markWord mark = obj->mark_acquire();
if (mark.has_monitor()) {
ObjectMonitor* monitor = read_monitor(mark);
markWord dmw = monitor->header();
assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
return;
}
(void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
}
ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
assert(current == Thread::current(), "must be");
assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter");
return inflate_impl(current->is_Java_thread() ? JavaThread::cast(current) : nullptr, obj, cause);
}
ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_for");
return inflate_impl(thread, obj, cause);
}
ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* locking_thread, oop object, const InflateCause cause) {
// The JavaThread* locking_thread requires that the locking_thread == Thread::current() or
// is suspended throughout the call by some other mechanism.
// The thread might be nullptr when called from a non JavaThread. (As may still be
// the case from FastHashCode). However it is only important for correctness that the
// thread is set when called from ObjectSynchronizer::enter from the owning thread,
// ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_impl");
EventJavaMonitorInflate event;
for (;;) {
const markWord mark = object->mark_acquire();
// The mark can be in one of the following states:
// * inflated - If the ObjectMonitor owner is anonymous and the
// locking_thread owns the object lock, then we
// make the locking_thread the ObjectMonitor owner.
// * stack-locked - Coerce it to inflated from stack-locked.
// * INFLATING - Busy wait for conversion from stack-locked to
// inflated.
// * unlocked - Aggressively inflate the object.
// CASE: inflated
if (mark.has_monitor()) {
ObjectMonitor* inf = mark.monitor();
markWord dmw = inf->header();
assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
if (inf->has_anonymous_owner() && locking_thread != nullptr) {
assert(LockingMode == LM_LEGACY, "invariant");
if (locking_thread->is_lock_owned((address)inf->stack_locker())) {
inf->set_stack_locker(nullptr);
inf->set_owner_from_anonymous(locking_thread);
}
}
return inf;
}
// CASE: inflation in progress - inflating over a stack-lock.
// Some other thread is converting from stack-locked to inflated.
// Only that thread can complete inflation -- other threads must wait.
// The INFLATING value is transient.
// Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
// We could always eliminate polling by parking the thread on some auxiliary list.
if (mark == markWord::INFLATING()) {
read_stable_mark(object);
continue;
}
// CASE: stack-locked
// Could be stack-locked either by current or by some other thread.
//
// Note that we allocate the ObjectMonitor speculatively, _before_ attempting
// to install INFLATING into the mark word. We originally installed INFLATING,
// allocated the ObjectMonitor, and then finally STed the address of the
// ObjectMonitor into the mark. This was correct, but artificially lengthened
// the interval in which INFLATING appeared in the mark, thus increasing
// the odds of inflation contention. If we lose the race to set INFLATING,
// then we just delete the ObjectMonitor and loop around again.
//
LogStreamHandle(Trace, monitorinflation) lsh;
if (LockingMode == LM_LEGACY && mark.has_locker()) {
ObjectMonitor* m = new ObjectMonitor(object);
// Optimistically prepare the ObjectMonitor - anticipate successful CAS
// We do this before the CAS in order to minimize the length of time
// in which INFLATING appears in the mark.
markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
if (cmp != mark) {
delete m;
continue; // Interference -- just retry
}
// We've successfully installed INFLATING (0) into the mark-word.
// This is the only case where 0 will appear in a mark-word.
// Only the singular thread that successfully swings the mark-word
// to 0 can perform (or more precisely, complete) inflation.
//
// Why do we CAS a 0 into the mark-word instead of just CASing the
// mark-word from the stack-locked value directly to the new inflated state?
// Consider what happens when a thread unlocks a stack-locked object.
// It attempts to use CAS to swing the displaced header value from the
// on-stack BasicLock back into the object header. Recall also that the
// header value (hash code, etc) can reside in (a) the object header, or
// (b) a displaced header associated with the stack-lock, or (c) a displaced
// header in an ObjectMonitor. The inflate() routine must copy the header
// value from the BasicLock on the owner's stack to the ObjectMonitor, all
// the while preserving the hashCode stability invariants. If the owner
// decides to release the lock while the value is 0, the unlock will fail
// and control will eventually pass from slow_exit() to inflate. The owner
// will then spin, waiting for the 0 value to disappear. Put another way,
// the 0 causes the owner to stall if the owner happens to try to
// drop the lock (restoring the header from the BasicLock to the object)
// while inflation is in-progress. This protocol avoids races that might
// otherwise permit hashCode values to change or "flicker" for an object.
// Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
// 0 serves as a "BUSY" inflate-in-progress indicator.
// fetch the displaced mark from the owner's stack.
// The owner can't die or unwind past the lock while our INFLATING
// object is in the mark. Furthermore the owner can't complete
// an unlock on the object, either.
markWord dmw = mark.displaced_mark_helper();
// Catch if the object's header is not neutral (not locked and
// not marked is what we care about here).
assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
// Setup monitor fields to proper values -- prepare the monitor
m->set_header(dmw);
// Note that a thread can inflate an object
// that it has stack-locked -- as might happen in wait() -- directly
// with CAS. That is, we can avoid the xchg-nullptr .... ST idiom.
if (locking_thread != nullptr && locking_thread->is_lock_owned((address)mark.locker())) {
m->set_owner(locking_thread);
} else {
// Use ANONYMOUS_OWNER to indicate that the owner is the BasicLock on the stack,
// and set the stack locker field in the monitor.
m->set_stack_locker(mark.locker());
m->set_anonymous_owner();
}
// TODO-FIXME: assert BasicLock->dhw != 0.
// Must preserve store ordering. The monitor state must
// be stable at the time of publishing the monitor address.
guarantee(object->mark() == markWord::INFLATING(), "invariant");
// Release semantics so that above set_object() is seen first.
object->release_set_mark(markWord::encode(m));
// Once ObjectMonitor is configured and the object is associated
// with the ObjectMonitor, it is safe to allow async deflation:
_in_use_list.add(m);
if (log_is_enabled(Trace, monitorinflation)) {
ResourceMark rm;
lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(object),
object->mark().value(), object->klass()->external_name());
}
if (event.should_commit()) {
post_monitor_inflate_event(&event, object, cause);
}
return m;
}
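// For illustration only -- the two-step INFLATING protocol described above,
// with simplified stand-ins (MonitorSketch is not ObjectMonitor). CASing 0
// into the mark first pins the displaced header: the owner cannot complete
// an unlock while the mark is 0, so the header can be copied into the
// monitor safely before the monitor pointer is published with a release store.
#include <atomic>
#include <cstdint>
struct MonitorSketch { uintptr_t header = 0; };
inline MonitorSketch* inflate_stack_locked(std::atomic<uintptr_t>& mark,
                                           uintptr_t stack_locked_mark,
                                           uintptr_t displaced_header) {
  auto* m = new MonitorSketch();               // allocated before the CAS, as above
  uintptr_t expected = stack_locked_mark;
  if (!mark.compare_exchange_strong(expected, 0 /* INFLATING */)) {
    delete m;                                  // interference: caller retries
    return nullptr;
  }
  m->header = displaced_header;                // stable while the mark is 0
  mark.store(reinterpret_cast<uintptr_t>(m), std::memory_order_release);
  return m;
}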
// CASE: unlocked
// TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
// If we know we're inflating for entry it's better to inflate by swinging a
// pre-locked ObjectMonitor pointer into the object header. A successful
// CAS inflates the object *and* confers ownership to the inflating thread.
// In the current implementation we use a 2-step mechanism where we CAS()
// to inflate and then CAS() again to try to swing _owner from null to current.
// An inflateTry() method that we could call from enter() would be useful.
assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value());
ObjectMonitor* m = new ObjectMonitor(object);
// prepare m for installation - set monitor to initial state
m->set_header(mark);
if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
delete m;
m = nullptr;
continue;
// interference - the markword changed - just retry.
// The state-transitions are one-way, so there's no chance of
// live-lock -- "Inflated" is an absorbing state.
}
// Once the ObjectMonitor is configured and object is associated
// with the ObjectMonitor, it is safe to allow async deflation:
_in_use_list.add(m);
if (log_is_enabled(Trace, monitorinflation)) {
ResourceMark rm;
lsh.print_cr("inflate(unlocked): object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(object),
object->mark().value(), object->klass()->external_name());
}
if (event.should_commit()) {
post_monitor_inflate_event(&event, object, cause);
}
return m;
}
}
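// For illustration only -- the unlocked case sketched with the same
// MonitorSketch stand-in from the previous sketch: one CAS from the neutral
// mark straight to the monitor pointer, deleting and retrying on
// interference ("Inflated" is an absorbing state, so the loop cannot live-lock).
inline MonitorSketch* inflate_unlocked(std::atomic<uintptr_t>& mark,
                                       uintptr_t neutral_mark) {
  auto* m = new MonitorSketch();
  m->header = neutral_mark;                    // monitor carries the old header
  uintptr_t expected = neutral_mark;
  if (!mark.compare_exchange_strong(expected,
                                    reinterpret_cast<uintptr_t>(m))) {
    delete m;
    return nullptr;                            // caller re-reads the mark and retries
  }
  return m;
}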
// Walk the in-use list and deflate (at most MonitorDeflationMax) idle
// ObjectMonitors. Returns the number of deflated ObjectMonitors.
//
@@ -1914,7 +1301,6 @@ const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
case inflate_cause_monitor_enter: return "Monitor Enter";
case inflate_cause_wait: return "Monitor Wait";
case inflate_cause_notify: return "Monitor Notify";
case inflate_cause_hash_code: return "Monitor Hash Code";
case inflate_cause_jni_enter: return "JNI Monitor Enter";
case inflate_cause_jni_exit: return "JNI Monitor Exit";
default:

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -73,16 +73,15 @@ class ObjectSynchronizer : AllStatic {
friend class ObjectMonitorDeflationLogging;
friend class WhiteBox;
public:
typedef enum {
inflate_cause_vm_internal = 0,
inflate_cause_monitor_enter = 1,
inflate_cause_wait = 2,
inflate_cause_notify = 3,
inflate_cause_hash_code = 4,
inflate_cause_jni_enter = 5,
inflate_cause_jni_exit = 6,
inflate_cause_nof = 7 // Number of causes
inflate_cause_jni_enter = 4,
inflate_cause_jni_exit = 5,
inflate_cause_nof = 6 // Number of causes
} InflateCause;
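// For illustration only -- a hypothetical compile-time guard for the
// renumbered causes: the name table mirrors the inflate_cause_name() switch
// shown earlier, and the static_assert catches a future enum/table mismatch.
// kCauseNames and the "VM Internal" string are assumptions, not HotSpot code.
constexpr const char* kCauseNames[] = {
  "VM Internal",        // inflate_cause_vm_internal   = 0
  "Monitor Enter",      // inflate_cause_monitor_enter = 1
  "Monitor Wait",       // inflate_cause_wait          = 2
  "Monitor Notify",     // inflate_cause_notify        = 3
  "JNI Monitor Enter",  // inflate_cause_jni_enter     = 4
  "JNI Monitor Exit"    // inflate_cause_jni_exit      = 5
};
static_assert(sizeof(kCauseNames) / sizeof(kCauseNames[0]) == 6 /* inflate_cause_nof */,
              "cause name table must match the number of causes");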
typedef enum {
@@ -104,15 +103,7 @@ class ObjectSynchronizer : AllStatic {
// locked on is either already locked by the locking_thread or cannot
// escape the locking_thread.
static void enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread);
private:
// Shared implementation for enter and enter_for. Performs all but
// inflated monitor enter.
static bool enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread);
static bool quick_enter_legacy(oop obj, BasicLock* lock, JavaThread* current);
static void enter_legacy(Handle obj, BasicLock* lock, JavaThread* current);
static void exit_legacy(oop obj, BasicLock* lock, JavaThread* current);
public:
// Used only to handle jni locks or other unmatched monitor enter/exit
// Internally they will use heavy weight monitor.
static void jni_enter(Handle obj, JavaThread* current);
@@ -131,18 +122,7 @@ public:
// throwing unexpected InterruptedExecutionExceptions.
static void waitUninterruptibly(Handle obj, jlong millis, TRAPS);
// Inflate light weight monitor to heavy weight monitor
static ObjectMonitor* inflate(Thread* current, oop obj, const InflateCause cause);
// Used to inflate a monitor as if it was done from the thread JavaThread.
static ObjectMonitor* inflate_for(JavaThread* thread, oop obj, const InflateCause cause);
private:
// Shared implementation between the different LockingMode.
static ObjectMonitor* inflate_impl(JavaThread* locking_thread, oop obj, const InflateCause cause);
public:
// This version is only for internal use
static void inflate_helper(oop obj);
static const char* inflate_cause_name(const InflateCause cause);
inline static ObjectMonitor* read_monitor(markWord mark);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,11 +45,7 @@ inline ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj,
inline void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
assert(current == Thread::current(), "must be");
if (LockingMode == LM_LIGHTWEIGHT) {
LightweightSynchronizer::enter(obj, lock, current);
} else {
enter_legacy(obj, lock, current);
}
}
inline bool ObjectSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
@@ -61,21 +57,13 @@ inline bool ObjectSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread
return false;
}
if (LockingMode == LM_LIGHTWEIGHT) {
return LightweightSynchronizer::quick_enter(obj, lock, current);
} else {
return quick_enter_legacy(obj, lock, current);
}
return LightweightSynchronizer::quick_enter(obj, lock, current);
}
inline void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
current->dec_held_monitor_count();
if (LockingMode == LM_LIGHTWEIGHT) {
LightweightSynchronizer::exit(object, lock, current);
} else {
exit_legacy(object, lock, current);
}
LightweightSynchronizer::exit(object, lock, current);
}
#endif // SHARE_RUNTIME_SYNCHRONIZER_INLINE_HPP

View File

@@ -1294,21 +1294,7 @@ GrowableArray<JavaThread*>* Threads::get_pending_threads(ThreadsList * t_list,
}
#endif // INCLUDE_JVMTI
JavaThread *Threads::owning_thread_from_stacklock(ThreadsList * t_list, address basicLock) {
assert(LockingMode == LM_LEGACY, "Not with new lightweight locking");
JavaThread* the_owner = nullptr;
for (JavaThread* q : *t_list) {
if (q->is_lock_owned(basicLock)) {
the_owner = q;
break;
}
}
return the_owner;
}
JavaThread* Threads::owning_thread_from_object(ThreadsList * t_list, oop obj) {
assert(LockingMode == LM_LIGHTWEIGHT, "Only with new lightweight locking");
for (JavaThread* q : *t_list) {
// Need to start processing before accessing oops in the thread.
StackWatermark* watermark = StackWatermarkSet::get(q, StackWatermarkKind::gc);
@@ -1325,12 +1311,7 @@ JavaThread* Threads::owning_thread_from_object(ThreadsList * t_list, oop obj) {
JavaThread* Threads::owning_thread_from_monitor(ThreadsList* t_list, ObjectMonitor* monitor) {
if (monitor->has_anonymous_owner()) {
if (LockingMode == LM_LIGHTWEIGHT) {
return owning_thread_from_object(t_list, monitor->object());
} else {
assert(LockingMode == LM_LEGACY, "invariant");
return owning_thread_from_stacklock(t_list, (address)monitor->stack_locker());
}
return owning_thread_from_object(t_list, monitor->object());
} else {
JavaThread* the_owner = nullptr;
for (JavaThread* q : *t_list) {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -134,9 +134,6 @@ public:
static GrowableArray<JavaThread*>* get_pending_threads(ThreadsList * t_list,
int count, address monitor);
// Get owning Java thread from the basicLock address.
static JavaThread *owning_thread_from_stacklock(ThreadsList * t_list, address basicLock);
static JavaThread* owning_thread_from_object(ThreadsList* t_list, oop obj);
static JavaThread* owning_thread_from_monitor(ThreadsList* t_list, ObjectMonitor* owner);

View File

@@ -673,7 +673,6 @@
volatile_nonstatic_field(ObjectMonitor, _metadata, uintptr_t) \
unchecked_nonstatic_field(ObjectMonitor, _object, sizeof(void *)) /* NOTE: no type */ \
volatile_nonstatic_field(ObjectMonitor, _owner, int64_t) \
volatile_nonstatic_field(ObjectMonitor, _stack_locker, BasicLock*) \
volatile_nonstatic_field(ObjectMonitor, _next_om, ObjectMonitor*) \
volatile_nonstatic_field(BasicLock, _metadata, uintptr_t) \
nonstatic_field(ObjectMonitor, _contentions, int) \
@@ -1640,14 +1639,6 @@
declare_constant(T_NARROWKLASS_size) \
declare_constant(T_VOID_size) \
\
/**********************************************/ \
/* LockingMode enum (globalDefinitions.hpp) */ \
/**********************************************/ \
\
declare_constant(LM_MONITOR) \
declare_constant(LM_LEGACY) \
declare_constant(LM_LIGHTWEIGHT) \
\
/*********************************************/ \
/* MethodCompilation (globalDefinitions.hpp) */ \
/*********************************************/ \

View File

@@ -56,8 +56,6 @@ int LogMinObjAlignmentInBytes = -1;
// Oop encoding heap max
uint64_t OopEncodingHeapMax = 0;
const int LockingMode = LM_LIGHTWEIGHT;
// Something to help porters sleep at night
#ifdef ASSERT

View File

@@ -1004,17 +1004,6 @@ enum JavaThreadState {
_thread_max_state = 12 // maximum thread state+1 - used for statistics allocation
};
enum LockingMode {
// Use only heavy monitors for locking
LM_MONITOR = 0,
// Legacy stack-locking, with monitors as 2nd tier
LM_LEGACY = 1,
// New lightweight locking, with monitors as 2nd tier
LM_LIGHTWEIGHT = 2
};
extern const int LockingMode;
//----------------------------------------------------------------------------------------------------
// Special constants for debugging

View File

@@ -1078,7 +1078,7 @@ void VMError::report(outputStream* st, bool _verbose) {
print_stack_location(st, _context, continuation);
st->cr();
STEP_IF("printing lock stack", _verbose && _thread != nullptr && _thread->is_Java_thread() && LockingMode == LM_LIGHTWEIGHT);
STEP_IF("printing lock stack", _verbose && _thread != nullptr && _thread->is_Java_thread());
st->print_cr("Lock stack of current Java thread (top to bottom):");
JavaThread::cast(_thread)->lock_stack().print_on(st);
st->cr();

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,8 +51,6 @@ public class ObjectMonitor extends VMObject {
objectFieldOffset = f.getOffset();
f = type.getField("_owner");
ownerFieldOffset = f.getOffset();
f = type.getField("_stack_locker");
stackLockerFieldOffset = f.getOffset();
f = type.getField("_next_om");
nextOMFieldOffset = f.getOffset();
contentionsField = new CIntField(type.getCIntegerField("_contentions"), 0);
@@ -89,7 +87,6 @@
}
public Address owner() { return addr.getAddressAt(ownerFieldOffset); }
public Address stackLocker() { return addr.getAddressAt(stackLockerFieldOffset); }
// FIXME
// void set_owner(void* owner);
@@ -120,7 +117,6 @@
private static long metadataFieldOffset;
private static long objectFieldOffset;
private static long ownerFieldOffset;
private static long stackLockerFieldOffset;
private static long nextOMFieldOffset;
private static CIntField contentionsField;
private static CIntField waitersField;

View File

@@ -63,7 +63,7 @@ public:
} while (false)
TEST_VM_F(LockStackTest, is_recursive) {
if (LockingMode != LM_LIGHTWEIGHT || !VM_Version::supports_recursive_lightweight_locking()) {
if (!VM_Version::supports_recursive_lightweight_locking()) {
return;
}
@@ -130,7 +130,7 @@ TEST_VM_F(LockStackTest, is_recursive) {
}
TEST_VM_F(LockStackTest, try_recursive_enter) {
if (LockingMode != LM_LIGHTWEIGHT || !VM_Version::supports_recursive_lightweight_locking()) {
if (!VM_Version::supports_recursive_lightweight_locking()) {
return;
}
@@ -197,10 +197,6 @@ TEST_VM_F(LockStackTest, try_recursive_enter) {
}
TEST_VM_F(LockStackTest, contains) {
if (LockingMode != LM_LIGHTWEIGHT) {
return;
}
const bool test_recursive = VM_Version::supports_recursive_lightweight_locking();
JavaThread* THREAD = JavaThread::current();
@@ -263,10 +259,6 @@ TEST_VM_F(LockStackTest, contains) {
}
TEST_VM_F(LockStackTest, remove) {
if (LockingMode != LM_LIGHTWEIGHT) {
return;
}
const bool test_recursive = VM_Version::supports_recursive_lightweight_locking();
JavaThread* THREAD = JavaThread::current();

View File

@@ -53,9 +53,6 @@ public class TestRecursiveMonitorChurn {
public static volatile Monitor monitor;
public static void main(String[] args) {
if (WB.getBooleanVMFlag("VerifyHeavyMonitors")) {
throw new SkippedException("VerifyHeavyMonitors always inflates. Invalid test.");
}
final long pre_monitor_count = WB.getInUseMonitorCount();
System.out.println(" Precount = " + pre_monitor_count);
for (int i = 0; i < COUNT; i++) {