8366671: Refactor Thread::SpinAcquire and Thread::SpinRelease

Co-authored-by: Axel Boldt-Christmas <aboldtch@openjdk.org>
Reviewed-by: coleenp, kbarrett, dholmes, aboldtch
This commit is contained in:
Anton Artemov 2025-12-08 16:07:01 +00:00
parent ac81ce51fa
commit 355755d35d
12 changed files with 155 additions and 119 deletions

View File

@ -24,11 +24,11 @@
*/
#include "jfr/recorder/service/jfrEventThrottler.hpp"
#include "jfr/utilities/jfrSpinlockHelper.hpp"
#include "jfrfiles/jfrEventIds.hpp"
#include "logging/log.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/spinCriticalSection.hpp"
constexpr static const JfrSamplerParams _disabled_params = {
0, // sample points per window
@ -128,7 +128,7 @@ JfrEventThrottler* JfrEventThrottler::create_throttler(JfrEventId id) {
* - period_ms time period expressed in milliseconds
*/
void JfrEventThrottler::configure(int64_t sample_size, int64_t period_ms) {
JfrSpinlockHelper mutex(&_lock);
SpinCriticalSection scs(&_lock);
_sample_size = sample_size;
_period_ms = period_ms;
_update = true;

View File

@ -25,13 +25,13 @@
#include "jfr/support/jfrAdaptiveSampler.hpp"
#include "jfr/utilities/jfrRandom.inline.hpp"
#include "jfr/utilities/jfrSpinlockHelper.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTimeConverter.hpp"
#include "jfr/utilities/jfrTryLock.hpp"
#include "logging/log.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/spinCriticalSection.hpp"
#include <cmath>
@ -342,7 +342,7 @@ JfrGTestFixedRateSampler::JfrGTestFixedRateSampler(size_t sample_points_per_wind
bool JfrGTestFixedRateSampler::initialize() {
const bool result = JfrAdaptiveSampler::initialize();
JfrSpinlockHelper mutex(&_lock);
SpinCriticalSection scs(&_lock);
reconfigure();
return result;
}

View File

@ -36,7 +36,6 @@
#include "jfr/recorder/storage/jfrStorage.hpp"
#include "jfr/support/jfrThreadId.inline.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/utilities/jfrSpinlockHelper.hpp"
#include "jfr/writers/jfrJavaEventWriter.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"

View File

@ -1,44 +0,0 @@
/*
* Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_JFR_UTILITIES_JFRSPINLOCKHELPER_HPP
#define SHARE_JFR_UTILITIES_JFRSPINLOCKHELPER_HPP
#include "runtime/javaThread.hpp"
// RAII guard over a simple spin lock word: the constructor spins until the
// lock is acquired and the destructor releases it when the scope exits.
// The lock word must outlive this helper.
class JfrSpinlockHelper {
private:
volatile int* const _lock;  // lock word: 0 = free, 1 = held
public:
JfrSpinlockHelper(volatile int* lock) : _lock(lock) {
Thread::SpinAcquire(_lock);  // spins (yielding/sleeping under contention) until held
}
~JfrSpinlockHelper() {
Thread::SpinRelease(_lock);  // release-store of 0 into the lock word
}
};
#endif // SHARE_JFR_UTILITIES_JFRSPINLOCKHELPER_HPP

View File

@ -59,6 +59,7 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/spinCriticalSection.hpp"
#if INCLUDE_JFR
#include "jfr/support/jfrFlush.hpp"
#endif
@ -1863,9 +1864,10 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
// returns because of a timeout or interrupt. Contention is exceptionally rare
// so we use a simple spin-lock instead of a heavier-weight blocking lock.
Thread::SpinAcquire(&_wait_set_lock);
add_waiter(&node);
Thread::SpinRelease(&_wait_set_lock);
{
SpinCriticalSection scs(&_wait_set_lock);
add_waiter(&node);
}
intx save = _recursions; // record the old recursion count
_waiters++; // increment the number of waiters
@ -1922,12 +1924,11 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
// That is, we fail toward safety.
if (node.TState == ObjectWaiter::TS_WAIT) {
Thread::SpinAcquire(&_wait_set_lock);
SpinCriticalSection scs(&_wait_set_lock);
if (node.TState == ObjectWaiter::TS_WAIT) {
dequeue_specific_waiter(&node); // unlink from wait_set
node.TState = ObjectWaiter::TS_RUN;
}
Thread::SpinRelease(&_wait_set_lock);
}
// The thread is now either on off-list (TS_RUN),
@ -2036,7 +2037,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
bool ObjectMonitor::notify_internal(JavaThread* current) {
bool did_notify = false;
Thread::SpinAcquire(&_wait_set_lock);
SpinCriticalSection scs(&_wait_set_lock);
ObjectWaiter* iterator = dequeue_waiter();
if (iterator != nullptr) {
guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
@ -2095,7 +2096,6 @@ bool ObjectMonitor::notify_internal(JavaThread* current) {
}
}
}
Thread::SpinRelease(&_wait_set_lock);
return did_notify;
}
@ -2198,9 +2198,10 @@ void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis, bool interru
// returns because of a timeout or interrupt. Contention is exceptionally rare
// so we use a simple spin-lock instead of a heavier-weight blocking lock.
Thread::SpinAcquire(&_wait_set_lock);
add_waiter(node);
Thread::SpinRelease(&_wait_set_lock);
{
SpinCriticalSection scs(&_wait_set_lock);
add_waiter(node);
}
node->_recursions = _recursions; // record the old recursion count
_recursions = 0; // set the recursion level to be 0
@ -2221,12 +2222,11 @@ bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node
// need to check if we were interrupted or the wait timed-out, and
// in that case remove ourselves from the _wait_set queue.
if (node->TState == ObjectWaiter::TS_WAIT) {
Thread::SpinAcquire(&_wait_set_lock);
SpinCriticalSection scs(&_wait_set_lock);
if (node->TState == ObjectWaiter::TS_WAIT) {
dequeue_specific_waiter(node); // unlink from wait_set
node->TState = ObjectWaiter::TS_RUN;
}
Thread::SpinRelease(&_wait_set_lock);
}
// If this was an interrupted case, set the _interrupted boolean so that

View File

@ -25,6 +25,7 @@
#include "memory/allocation.inline.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/javaThread.hpp"
#include "utilities/spinCriticalSection.hpp"
// Lifecycle management for TSM ParkEvents.
// ParkEvents are type-stable (TSM).
@ -60,14 +61,13 @@ ParkEvent * ParkEvent::Allocate (Thread * t) {
// Using a spin lock since we are part of the mutex impl.
// 8028280: using concurrent free list without memory management can leak
// pretty badly it turns out.
Thread::SpinAcquire(&ListLock);
{
SpinCriticalSection scs(&ListLock);
ev = FreeList;
if (ev != nullptr) {
FreeList = ev->FreeNext;
}
}
Thread::SpinRelease(&ListLock);
if (ev != nullptr) {
guarantee (ev->AssociatedWith == nullptr, "invariant") ;
@ -88,12 +88,11 @@ void ParkEvent::Release (ParkEvent * ev) {
ev->AssociatedWith = nullptr ;
// Note that if we didn't have the TSM/immortal constraint, then
// when reattaching we could trim the list.
Thread::SpinAcquire(&ListLock);
{
SpinCriticalSection scs(&ListLock);
ev->FreeNext = FreeList;
FreeList = ev;
}
Thread::SpinRelease(&ListLock);
}
// Override operator new and delete so we can ensure that the

View File

@ -30,7 +30,9 @@
#ifdef ASSERT
NoSafepointVerifier::NoSafepointVerifier(bool active) : _thread(Thread::current()), _active(active) {
NoSafepointVerifier::NoSafepointVerifier(bool active)
: _thread(active ? Thread::current() : nullptr),
_active(active) {
if (!_active) {
return;
}

View File

@ -566,49 +566,3 @@ bool Thread::set_as_starting_thread(JavaThread* jt) {
DEBUG_ONLY(_starting_thread = jt;)
return os::create_main_thread(jt);
}
// Ad-hoc mutual exclusion primitive: spin lock
//
// We employ a spin lock _only for low-contention, fixed-length
// short-duration critical sections where we're concerned
// about native mutex_t or HotSpot Mutex:: latency.
// Acquire the spin lock at *adr (0 = free, 1 = held), spinning with a
// yield/sleep back-off under contention. Does not block on a VM mutex.
void Thread::SpinAcquire(volatile int * adr) {
if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) {
return; // normal fast-path return
}
// Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
int ctr = 0;     // total iterations spent observing the lock as held
int Yields = 0;  // yields performed; after 5 we sleep instead of yielding
for (;;) {
// Test-and-test-and-set: spin on a plain load until the lock looks free,
// rather than hammering the line with failing cmpxchg attempts.
while (*adr != 0) {
++ctr;
// Back off every 4096 iterations -- or always on a uniprocessor,
// where spinning cannot make the owner progress.
if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
if (Yields > 5) {
os::naked_short_sleep(1);  // short nap once yielding has not helped
} else {
os::naked_yield();
++Yields;
}
} else {
SpinPause();  // CPU pause hint to cheapen the busy-wait
}
}
// Lock word observed as 0; try to claim it, otherwise resume spinning.
if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) return;
}
}
// Release the spin lock previously taken with SpinAcquire by storing 0
// into the lock word with release semantics.
void Thread::SpinRelease(volatile int * adr) {
assert(*adr != 0, "invariant");  // must currently be held
// Roach-motel semantics.
// It's safe if subsequent LDs and STs float "up" into the critical section,
// but prior LDs and STs within the critical section can't be allowed
// to reorder or float past the ST that releases the lock.
// Loads and stores in the critical section - which appear in program
// order before the store that releases the lock - must also appear
// before the store that releases the lock in memory visibility order.
// So we need a #loadstore|#storestore "release" memory barrier before
// the ST of 0 into the lock-word which releases the lock.
AtomicAccess::release_store(adr, 0);
}

View File

@ -602,11 +602,6 @@ protected:
jint _hashStateY;
jint _hashStateZ;
// Low-level leaf-lock primitives used to implement synchronization.
// Not for general synchronization use.
static void SpinAcquire(volatile int * Lock);
static void SpinRelease(volatile int * Lock);
#if defined(__APPLE__) && defined(AARCH64)
private:
DEBUG_ONLY(bool _wx_init);

View File

@ -0,0 +1,69 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "runtime/atomicAccess.hpp"
#include "utilities/spinCriticalSection.hpp"
// Acquire the spin lock at *adr (0 = free, 1 = held), spinning with a
// yield/sleep back-off under contention. Does not block on a VM mutex.
void SpinCriticalSection::spin_acquire(volatile int* adr) {
if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) {
return; // normal fast-path return
}
// Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
int ctr = 0;     // total iterations spent observing the lock as held
int Yields = 0;  // yields performed; after 5 we sleep instead of yielding
for (;;) {
// Test-and-test-and-set: spin on a plain load until the lock looks free,
// rather than hammering the line with failing cmpxchg attempts.
while (*adr != 0) {
++ctr;
// Back off every 4096 iterations -- or always on a uniprocessor,
// where spinning cannot make the owner progress.
if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
if (Yields > 5) {
os::naked_short_sleep(1);  // short nap once yielding has not helped
} else {
os::naked_yield();
++Yields;
}
} else {
SpinPause();  // CPU pause hint to cheapen the busy-wait
}
}
// Lock word observed as 0; try to claim it, otherwise resume spinning.
if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) return;
}
}
// Release the spin lock previously taken with spin_acquire by storing 0
// into the lock word with release semantics.
void SpinCriticalSection::spin_release(volatile int* adr) {
assert(*adr != 0, "invariant");  // must currently be held
// Roach-motel semantics.
// It's safe if subsequent LDs and STs float "up" into the critical section,
// but prior LDs and STs within the critical section can't be allowed
// to reorder or float past the ST that releases the lock.
// Loads and stores in the critical section - which appear in program
// order before the store that releases the lock - must also appear
// before the store that releases the lock in memory visibility order.
// So we need a #loadstore|#storestore "release" memory barrier before
// the ST of 0 into the lock-word which releases the lock.
AtomicAccess::release_store(adr, 0);
}

View File

@ -0,0 +1,63 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_UTILITIES_SPINCRITICALSECTION_HPP
#define SHARE_UTILITIES_SPINCRITICALSECTION_HPP
#include "runtime/javaThread.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
// Ad-hoc mutual exclusion primitive: spin critical section,
// which employs a spin lock.
//
// We use this critical section only for low-contention code, and
// when it is known that the duration is short. To be used where
// we're concerned about native mutex_t or HotSpot Mutex:: latency.
// This class uses low-level leaf-lock primitives to implement
// synchronization and is not for general synchronization use.
// Should not be used in signal-handling contexts.
class SpinCriticalSection {
private:
// We use int type as 32-bit atomic operation is the most performant
// compared to smaller/larger types.
volatile int* const _lock;  // lock word: 0 = free, 1 = held
// Debug-only check that no safepoint occurs while the lock is held;
// armed only when constructed on an attached thread (see constructor).
DEBUG_ONLY(NoSafepointVerifier _nsv;)
static void spin_acquire(volatile int* Lock);  // spin until the lock is taken
static void spin_release(volatile int* Lock);  // release-store 0 into the lock word
public:
NONCOPYABLE(SpinCriticalSection);
// Acquires the lock; spins (with yield/sleep back-off) until it is held.
SpinCriticalSection(volatile int* lock)
: _lock(lock)
// Arm the safepoint verifier only if a current thread exists, so the
// guard is also usable from not-yet-attached / non-VM threads.
DEBUG_ONLY(COMMA _nsv(Thread::current_or_null() != nullptr)) {
spin_acquire(_lock);
}
// RAII: the lock is released when the scope exits.
~SpinCriticalSection() {
spin_release(_lock);
}
};
#endif // SHARE_UTILITIES_SPINCRITICALSECTION_HPP

View File

@ -34,13 +34,12 @@
#include "jfr/utilities/jfrAllocation.hpp"
#include "jfr/utilities/jfrRandom.inline.hpp"
#include "jfr/utilities/jfrSpinlockHelper.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTimeConverter.hpp"
#include "jfr/utilities/jfrTryLock.hpp"
#include "logging/log.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/spinCriticalSection.hpp"
#include "unittest.hpp"
#include <cmath>