From 20a81450dc39f76cd571fa300db9a174725161d0 Mon Sep 17 00:00:00 2001
From: Anton Artemov
Date: Wed, 21 Jan 2026 14:20:42 +0100
Subject: [PATCH] 8366659: Addressed reviewers' comments.

---
 src/hotspot/share/runtime/objectMonitor.cpp | 126 +++++++-------------
 src/hotspot/share/runtime/objectMonitor.hpp |   1 -
 2 files changed, 45 insertions(+), 82 deletions(-)

diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp
index ec57e4f987c..bd54cfb5122 100644
--- a/src/hotspot/share/runtime/objectMonitor.cpp
+++ b/src/hotspot/share/runtime/objectMonitor.cpp
@@ -564,7 +564,7 @@ void ObjectMonitor::enter_with_contention_mark(JavaThread* current, ObjectMonito
     // _entry_list so cancel preemption. We will still go through the preempt stub
     // but instead of unmounting we will call thaw to continue execution.
     current->set_preemption_cancelled(true);
-    if (JvmtiExport::should_post_monitor_contended_entered()) {
+    if (post_jvmti_events && JvmtiExport::should_post_monitor_contended_entered()) {
       // We are going to call thaw again after this and finish the VMTS
       // transition so no need to do it here. We will post the event there.
       current->set_contended_entered_monitor(this);
@@ -1866,7 +1866,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
 
   // while (!timeout && !interrupted && node.TState == TS_WAIT) park()
   int ret = OS_OK;
-  bool was_notified = true;
+  bool was_notified = false;
 
   // Need to check interrupt state whilst still _thread_in_vm
   bool interrupted = interruptible && current->is_interrupted(false);
@@ -1880,7 +1880,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
   {
     ThreadBlockInVM tbivm(current, false /* allow_suspend */);
     if (interrupted || HAS_PENDING_EXCEPTION) {
-      was_notified = false;
+      // Intentionally empty
     } else if (node.TState == ObjectWaiter::TS_WAIT) {
       if (millis <= 0) {
         current->_ParkEvent->park();
@@ -1904,7 +1904,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
     // highly unlikely). If the following LD fetches a stale TS_WAIT value
     // then we'll acquire the lock and then re-fetch a fresh TState value.
     // That is, we fail toward safety.
-
+    was_notified = true;
     if (node.TState == ObjectWaiter::TS_WAIT) {
       SpinCriticalSection scs(&_wait_set_lock);
       if (node.TState == ObjectWaiter::TS_WAIT) {
@@ -1929,8 +1929,8 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
     // (Don't cache naked oops over safepoints, of course).
 
     // Post monitor waited event. Note that this is past-tense, we are done waiting.
-    // A thread which should post monitor_waited event is never in TS_ENTER state.
-    if (JvmtiExport::should_post_monitor_waited()) {
+    // The event could have been enabled after the notification, so we need to check the state.
+    if (JvmtiExport::should_post_monitor_waited() && node.TState != ObjectWaiter::TS_ENTER) {
      // Process suspend requests now if any, before posting the event.
      {
@@ -2032,6 +2032,8 @@ bool ObjectMonitor::notify_internal(JavaThread* current) {
   ObjectWaiter* iterator = dequeue_waiter();
   if (iterator != nullptr) {
     guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
+    iterator->_notifier_tid = JFR_THREAD_ID(current);
+    did_notify = true;
 
     if (iterator->is_vthread()) {
       oop vthread = iterator->vthread();
@@ -2047,61 +2049,46 @@ bool ObjectMonitor::notify_internal(JavaThread* current) {
           old_state == java_lang_VirtualThread::TIMED_WAIT) {
         java_lang_VirtualThread::cmpxchg_state(vthread, old_state, java_lang_VirtualThread::BLOCKED);
       }
-      // If we will add the vthread to the entry list below then we need to
-      // increment the counter *before* doing so.
-      // Adding to _entry_list uses Atomic::cmpxchg() which already provides
-      // a fence that prevents reordering of the stores.
       if (!JvmtiExport::should_post_monitor_waited()) {
-        // This is is now conditional as if the monitor_waited event
-        // is allowed, then a thread, even virtual, should not be moved to
-        // the entry_list, but rather unparked and let run. See the comment below.
+        // Increment counter *before* adding the vthread to the _entry_list.
+        // Adding to _entry_list uses Atomic::cmpxchg() which already provides
+        // a fence that prevents reordering of the stores.
         inc_unmounted_vthreads();
-      }
-    }
-
-    iterator->_notifier_tid = JFR_THREAD_ID(current);
-    did_notify = true;
-
-    if (!JvmtiExport::should_post_monitor_waited()) {
-      // If the monitor_waited JVMTI event is not allowed, a thread is
-      // transferred to the entry_list, and it will eventually be unparked
-      // only when it is chosen to become the successor.
-      add_to_entry_list(current, iterator);
-    } else {
-      // However, if the monitor_waited event is allowed, then
-      // the thread is set to state TS_RUN and unparked. The thread
-      // will then contend directly to reacquire the monitor and
-      // avoids being flagged as the successor. This avoids the problem
-      // of having a thread suspend whilst it is the successor.
-
-      iterator->TState = ObjectWaiter::TS_RUN;
-
-      OrderAccess::fence();
-
-      oop vthread = nullptr;
-      ParkEvent* evt;
-      if (!iterator->is_vthread()) {
-        iterator->wait_reenter_begin(this);
-        if (has_unmounted_vthreads()) {
-          iterator->_do_timed_park = true;
+        add_to_entry_list(current, iterator);
+      } else {
+        iterator->TState = ObjectWaiter::TS_RUN;
+        if (java_lang_VirtualThread::set_onWaitingList(vthread, vthread_list_head())) {
+          ParkEvent* pe = ObjectMonitor::vthread_unparker_ParkEvent();
+          pe->unpark();
         }
+      }
+    } else {
+      if (!JvmtiExport::should_post_monitor_waited()) {
+        add_to_entry_list(current, iterator);
+        // Read counter *after* adding the thread to the _entry_list.
+        // Adding to _entry_list uses Atomic::cmpxchg() which already provides
+        // a fence that prevents this load from floating up previous store.
+        if (has_unmounted_vthreads()) {
+          // Wake up the thread to alleviate some deadlock cases where the successor
+          // that will be picked up when this thread releases the monitor is an unmounted
+          // virtual thread that cannot run due to having run out of carriers. Upon waking
+          // up, the thread will call reenter_internal() which will use timed-park in case
+          // there is contention and there are still vthreads in the _entry_list.
+          // If the target was interrupted or the wait timed-out at the same time, it could
+          // have reached reenter_internal and read a false value of has_unmounted_vthreads()
+          // before we added it to the _entry_list above. To deal with that case, we set _do_timed_park
+          // which will be read by the target on the next loop iteration in reenter_internal.
+          iterator->_do_timed_park = true;
+          JavaThread* t = iterator->thread();
+          t->_ParkEvent->unpark();
+        }
+        iterator->wait_reenter_begin(this);
+      } else {
+        iterator->TState = ObjectWaiter::TS_RUN;
         JavaThread* t = iterator->thread();
         assert(t != nullptr, "");
-        evt = t->_ParkEvent;
-      } else {
-        vthread = iterator->vthread();
-        assert(vthread != nullptr, "");
-        evt = ObjectMonitor::vthread_unparker_ParkEvent();
+        t->_ParkEvent->unpark();
       }
-
-      if (vthread == nullptr) {
-        // Platform thread case.
-        evt->unpark();
-      } else if (java_lang_VirtualThread::set_onWaitingList(vthread, vthread_list_head())) {
-        // Virtual thread case.
-        evt->unpark();
-      }
-      return did_notify;
     }
 
     // _wait_set_lock protects the wait queue, not the entry_list. We could
@@ -2111,28 +2098,6 @@ bool ObjectMonitor::notify_internal(JavaThread* current) {
     // is the only thread that grabs _wait_set_lock. There's almost no contention
     // on _wait_set_lock so it's not profitable to reduce the length of the
     // critical section.
-
-    if (!iterator->is_vthread()) {
-      iterator->wait_reenter_begin(this);
-
-      // Read counter *after* adding the thread to the _entry_list.
-      // Adding to _entry_list uses Atomic::cmpxchg() which already provides
-      // a fence that prevents this load from floating up previous store.
-      if (has_unmounted_vthreads()) {
-        // Wake up the thread to alleviate some deadlock cases where the successor
-        // that will be picked up when this thread releases the monitor is an unmounted
-        // virtual thread that cannot run due to having run out of carriers. Upon waking
-        // up, the thread will call reenter_internal() which will use timed-park in case
-        // there is contention and there are still vthreads in the _entry_list.
-        // If the target was interrupted or the wait timed-out at the same time, it could
-        // have reached reenter_internal and read a false value of has_unmounted_vthreads()
-        // before we added it to the _entry_list above. To deal with that case, we set _do_timed_park
-        // which will be read by the target on the next loop iteration in reenter_internal.
-        iterator->_do_timed_park = true;
-        JavaThread* t = iterator->thread();
-        t->_ParkEvent->unpark();
-      }
-    }
   }
   return did_notify;
 }
@@ -2285,10 +2250,9 @@ bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node
   // Mark that we are at reenter so that we don't call this method again.
   node->_at_reenter = true;
 
-  // We check against the state, rather than if was_notified,
-  // because, if monitor_waited JVMTI event is allowed, there can be a vthead which
-  // is not on the entry_list, but has been notified in the sense that it has been
-  // unparked directly in notify_internal(). Its state is then TS_RUN.
+  // We check the state rather than was_notified because, when the JVMTI
+  // monitor_waited event is enabled, the notifier only unparks the waiter
+  // without adding it to the entry_list.
   if (state == ObjectWaiter::TS_RUN) {
     bool acquired = vthread_monitor_enter(current, node);
     if (acquired) {
diff --git a/src/hotspot/share/runtime/objectMonitor.hpp b/src/hotspot/share/runtime/objectMonitor.hpp
index 0f8f9570cfa..1b3246c13e3 100644
--- a/src/hotspot/share/runtime/objectMonitor.hpp
+++ b/src/hotspot/share/runtime/objectMonitor.hpp
@@ -364,7 +364,6 @@ class ObjectMonitor : public CHeapObj<mtObjectMonitor> {
   bool enter_is_async_deflating();
 
   void notify_contended_enter(JavaThread *current, bool post_jvmti_events = true);
-  void post_waited_event(JavaThread* current, EventJavaMonitorWait* wait_event, bool was_notified, ObjectWaiter* node, jlong millis, int ret);
  public:
   void enter_for_with_contention_mark(JavaThread* locking_thread, ObjectMonitorContentionMark& contention_mark);
   bool enter_for(JavaThread* locking_thread);
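
The new comments in notify_internal() describe a simple handshake for the race they mention: the notifier publishes _do_timed_park before unparking the waiter, and the waiter re-reads the flag on every iteration of its reenter loop, so the hint is picked up on the next pass even if the waiter already checked has_unmounted_vthreads() and saw false. Below is a minimal standalone sketch of that pattern, assuming illustrative stand-in names (DemoParkEvent, DemoWaiter, demo_notify) and an arbitrary timed-park interval; it is not HotSpot code.

// Standalone sketch of the "set flag, then unpark" handshake described in
// the notify_internal() comments above. Illustrative only; none of these
// types exist in HotSpot.
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

// A tiny stand-in for a park event: unpark() posts a permit, park()
// consumes it, and the timed overload gives up after the timeout.
class DemoParkEvent {
  std::mutex _lock;
  std::condition_variable _cv;
  bool _permit = false;
 public:
  void unpark() {
    std::lock_guard<std::mutex> g(_lock);
    _permit = true;
    _cv.notify_one();
  }
  void park() {
    std::unique_lock<std::mutex> g(_lock);
    _cv.wait(g, [this] { return _permit; });
    _permit = false;
  }
  void park(std::chrono::milliseconds timeout) {
    std::unique_lock<std::mutex> g(_lock);
    _cv.wait_for(g, timeout, [this] { return _permit; });
    _permit = false;
  }
};

struct DemoWaiter {
  DemoParkEvent event;
  std::atomic<bool> do_timed_park{false};   // models ObjectWaiter::_do_timed_park
  std::atomic<bool> lock_available{false};  // stands in for "monitor can be acquired"

  // Models the reenter loop: the flag is re-read on every iteration, so a
  // notifier that sets it before unpark() is observed on the next pass even
  // if this thread already checked it once and found it false.
  void reenter() {
    while (!lock_available.load(std::memory_order_acquire)) {
      if (do_timed_park.load(std::memory_order_acquire)) {
        event.park(std::chrono::milliseconds(1));   // timed park, as reenter_internal() would use
      } else {
        event.park();                               // plain park
      }
    }
  }
};

// Notifier side: publish the flag first, then unpark, mirroring the order
// used for _do_timed_park in notify_internal().
void demo_notify(DemoWaiter& w) {
  w.do_timed_park.store(true, std::memory_order_release);
  w.event.unpark();
}

int main() {
  DemoWaiter w;
  std::thread waiter([&] { w.reenter(); });
  std::thread releaser([&] {
    demo_notify(w);                                            // flag, then wake-up
    std::this_thread::sleep_for(std::chrono::milliseconds(5));
    w.lock_available.store(true, std::memory_order_release);   // "monitor released"
    w.event.unpark();                                          // final wake-up
  });
  waiter.join();
  releaser.join();
  return 0;
}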