More lock rank tweaks

This commit is contained in:
William Kemper 2026-01-16 11:52:41 -08:00
parent 1d8e7a2a14
commit 688c2f8cca
4 changed files with 9 additions and 9 deletions

View File

@@ -44,7 +44,7 @@ ShenandoahControlThread::ShenandoahControlThread() :
ShenandoahController(),
_requested_gc_cause(GCCause::_no_gc),
_degen_point(ShenandoahGC::_degenerated_outside_cycle),
_control_lock(LOCK_RANK - 1, "ShenandoahControl_lock", true) {
_control_lock(Mutex::nosafepoint - 3, "ShenandoahControl_lock", true) {
set_name("Shenandoah Control Thread");
create_and_start();
}
@@ -380,7 +380,7 @@ void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
// opportunities for cleanup that were made available before the caller
// requested the GC.
MonitorLocker ml(&_gc_waiters_lock, Mutex::_no_safepoint_check_flag);
MonitorLocker ml(&_gc_waiters_lock);
size_t current_gc_id = get_gc_id();
size_t required_gc_id = current_gc_id + 1;
while (current_gc_id < required_gc_id && !should_terminate()) {
@@ -392,6 +392,6 @@
void ShenandoahControlThread::notify_gc_waiters() {
_gc_requested.unset();
MonitorLocker ml(&_gc_waiters_lock, Mutex::_no_safepoint_check_flag);
MonitorLocker ml(&_gc_waiters_lock);
ml.notify_all();
}

View File

@@ -51,7 +51,7 @@ void ShenandoahController::handle_alloc_failure(const ShenandoahAllocRequest& re
}
if (block) {
MonitorLocker ml(&_alloc_failure_waiters_lock, Mutex::_no_safepoint_check_flag);
MonitorLocker ml(&_alloc_failure_waiters_lock);
while (!should_terminate() && ShenandoahCollectorPolicy::is_allocation_failure(heap->cancelled_cause())) {
ml.wait();
}
@@ -70,6 +70,6 @@ void ShenandoahController::handle_alloc_failure_evac(size_t words) {
}
void ShenandoahController::notify_alloc_failure_waiters() {
MonitorLocker ml(&_alloc_failure_waiters_lock, Mutex::_no_safepoint_check_flag);
MonitorLocker ml(&_alloc_failure_waiters_lock);
ml.notify_all();
}

View File

@@ -42,7 +42,7 @@ private:
shenandoah_padding(1);
protected:
const Mutex::Rank LOCK_RANK = Mutex::service;
const Mutex::Rank LOCK_RANK = Mutex::safepoint - 3;
// While we could have a single lock for these, it may risk unblocking
// GC waiters when alloc failure GC cycle finishes. We want instead

View File

@@ -47,7 +47,7 @@
#include "utilities/events.hpp"
ShenandoahGenerationalControlThread::ShenandoahGenerationalControlThread() :
_control_lock(LOCK_RANK - 1, "ShenandoahControl_lock", true),
_control_lock(Mutex::nosafepoint - 3, "ShenandoahGCRequest_lock", true),
_requested_gc_cause(GCCause::_no_gc),
_requested_generation(nullptr),
_gc_mode(none),
@@ -759,7 +759,7 @@ void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cau
// opportunities for cleanup that were made available before the caller
// requested the GC.
MonitorLocker ml(&_gc_waiters_lock, Mutex::_no_safepoint_check_flag);
MonitorLocker ml(&_gc_waiters_lock);
size_t current_gc_id = get_gc_id();
const size_t required_gc_id = current_gc_id + 1;
while (current_gc_id < required_gc_id && !should_terminate()) {
@@ -771,7 +771,7 @@ void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cau
}
void ShenandoahGenerationalControlThread::notify_gc_waiters() {
MonitorLocker ml(&_gc_waiters_lock, Mutex::_no_safepoint_check_flag);
MonitorLocker ml(&_gc_waiters_lock);
ml.notify_all();
}