From 355755d35de5c3155d1ea8d1afdd0debe5296a13 Mon Sep 17 00:00:00 2001 From: Anton Artemov Date: Mon, 8 Dec 2025 16:07:01 +0000 Subject: [PATCH] 8366671: Refactor Thread::SpinAcquire and Thread::SpinRelease Co-authored-by: Axel Boldt-Christmas Reviewed-by: coleenp, kbarrett, dholmes, aboldtch --- .../recorder/service/jfrEventThrottler.cpp | 4 +- .../share/jfr/support/jfrAdaptiveSampler.cpp | 4 +- .../share/jfr/support/jfrThreadLocal.cpp | 1 - .../share/jfr/utilities/jfrSpinlockHelper.hpp | 44 ------------ src/hotspot/share/runtime/objectMonitor.cpp | 24 +++---- src/hotspot/share/runtime/park.cpp | 7 +- .../share/runtime/safepointVerifiers.cpp | 4 +- src/hotspot/share/runtime/thread.cpp | 46 ------------- src/hotspot/share/runtime/thread.hpp | 5 -- .../share/utilities/spinCriticalSection.cpp | 69 +++++++++++++++++++ .../share/utilities/spinCriticalSection.hpp | 63 +++++++++++++++++ .../gtest/jfr/test_adaptiveSampler.cpp | 3 +- 12 files changed, 155 insertions(+), 119 deletions(-) delete mode 100644 src/hotspot/share/jfr/utilities/jfrSpinlockHelper.hpp create mode 100644 src/hotspot/share/utilities/spinCriticalSection.cpp create mode 100644 src/hotspot/share/utilities/spinCriticalSection.hpp diff --git a/src/hotspot/share/jfr/recorder/service/jfrEventThrottler.cpp b/src/hotspot/share/jfr/recorder/service/jfrEventThrottler.cpp index 787b2d7456b..e5420d29d94 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrEventThrottler.cpp +++ b/src/hotspot/share/jfr/recorder/service/jfrEventThrottler.cpp @@ -24,11 +24,11 @@ */ #include "jfr/recorder/service/jfrEventThrottler.hpp" -#include "jfr/utilities/jfrSpinlockHelper.hpp" #include "jfrfiles/jfrEventIds.hpp" #include "logging/log.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/spinCriticalSection.hpp" constexpr static const JfrSamplerParams _disabled_params = { 0, // sample points per window @@ -128,7 +128,7 @@ JfrEventThrottler* JfrEventThrottler::create_throttler(JfrEventId id) { * - period_ms time period expressed in milliseconds */ void JfrEventThrottler::configure(int64_t sample_size, int64_t period_ms) { - JfrSpinlockHelper mutex(&_lock); + SpinCriticalSection scs(&_lock); _sample_size = sample_size; _period_ms = period_ms; _update = true; diff --git a/src/hotspot/share/jfr/support/jfrAdaptiveSampler.cpp b/src/hotspot/share/jfr/support/jfrAdaptiveSampler.cpp index 22399f42bbb..571b5656576 100644 --- a/src/hotspot/share/jfr/support/jfrAdaptiveSampler.cpp +++ b/src/hotspot/share/jfr/support/jfrAdaptiveSampler.cpp @@ -25,13 +25,13 @@ #include "jfr/support/jfrAdaptiveSampler.hpp" #include "jfr/utilities/jfrRandom.inline.hpp" -#include "jfr/utilities/jfrSpinlockHelper.hpp" #include "jfr/utilities/jfrTime.hpp" #include "jfr/utilities/jfrTimeConverter.hpp" #include "jfr/utilities/jfrTryLock.hpp" #include "logging/log.hpp" #include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/spinCriticalSection.hpp" #include @@ -342,7 +342,7 @@ JfrGTestFixedRateSampler::JfrGTestFixedRateSampler(size_t sample_points_per_wind bool JfrGTestFixedRateSampler::initialize() { const bool result = JfrAdaptiveSampler::initialize(); - JfrSpinlockHelper mutex(&_lock); + SpinCriticalSection scs(&_lock); reconfigure(); return result; } diff --git a/src/hotspot/share/jfr/support/jfrThreadLocal.cpp b/src/hotspot/share/jfr/support/jfrThreadLocal.cpp index 5fe5546e0a9..39b0eb3656c 100644 --- a/src/hotspot/share/jfr/support/jfrThreadLocal.cpp +++ 
b/src/hotspot/share/jfr/support/jfrThreadLocal.cpp @@ -36,7 +36,6 @@ #include "jfr/recorder/storage/jfrStorage.hpp" #include "jfr/support/jfrThreadId.inline.hpp" #include "jfr/support/jfrThreadLocal.hpp" -#include "jfr/utilities/jfrSpinlockHelper.hpp" #include "jfr/writers/jfrJavaEventWriter.hpp" #include "logging/log.hpp" #include "memory/allocation.inline.hpp" diff --git a/src/hotspot/share/jfr/utilities/jfrSpinlockHelper.hpp b/src/hotspot/share/jfr/utilities/jfrSpinlockHelper.hpp deleted file mode 100644 index 4b5ca80470e..00000000000 --- a/src/hotspot/share/jfr/utilities/jfrSpinlockHelper.hpp +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_JFR_UTILITIES_JFRSPINLOCKHELPER_HPP -#define SHARE_JFR_UTILITIES_JFRSPINLOCKHELPER_HPP - -#include "runtime/javaThread.hpp" - -class JfrSpinlockHelper { - private: - volatile int* const _lock; - - public: - JfrSpinlockHelper(volatile int* lock) : _lock(lock) { - Thread::SpinAcquire(_lock); - } - - ~JfrSpinlockHelper() { - Thread::SpinRelease(_lock); - } -}; - -#endif // SHARE_JFR_UTILITIES_JFRSPINLOCKHELPER_HPP diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp index ee7629ec6f5..785ee2af592 100644 --- a/src/hotspot/share/runtime/objectMonitor.cpp +++ b/src/hotspot/share/runtime/objectMonitor.cpp @@ -59,6 +59,7 @@ #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" #include "utilities/preserveException.hpp" +#include "utilities/spinCriticalSection.hpp" #if INCLUDE_JFR #include "jfr/support/jfrFlush.hpp" #endif @@ -1863,9 +1864,10 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { // returns because of a timeout of interrupt. Contention is exceptionally rare // so we use a simple spin-lock instead of a heavier-weight blocking lock. - Thread::SpinAcquire(&_wait_set_lock); - add_waiter(&node); - Thread::SpinRelease(&_wait_set_lock); + { + SpinCriticalSection scs(&_wait_set_lock); + add_waiter(&node); + } intx save = _recursions; // record the old recursion count _waiters++; // increment the number of waiters @@ -1922,12 +1924,11 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { // That is, we fail toward safety. 
if (node.TState == ObjectWaiter::TS_WAIT) { - Thread::SpinAcquire(&_wait_set_lock); + SpinCriticalSection scs(&_wait_set_lock); if (node.TState == ObjectWaiter::TS_WAIT) { dequeue_specific_waiter(&node); // unlink from wait_set node.TState = ObjectWaiter::TS_RUN; } - Thread::SpinRelease(&_wait_set_lock); } // The thread is now either on off-list (TS_RUN), @@ -2036,7 +2037,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { bool ObjectMonitor::notify_internal(JavaThread* current) { bool did_notify = false; - Thread::SpinAcquire(&_wait_set_lock); + SpinCriticalSection scs(&_wait_set_lock); ObjectWaiter* iterator = dequeue_waiter(); if (iterator != nullptr) { guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant"); @@ -2095,7 +2096,6 @@ bool ObjectMonitor::notify_internal(JavaThread* current) { } } } - Thread::SpinRelease(&_wait_set_lock); return did_notify; } @@ -2198,9 +2198,10 @@ void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis, bool interru // returns because of a timeout or interrupt. Contention is exceptionally rare // so we use a simple spin-lock instead of a heavier-weight blocking lock. - Thread::SpinAcquire(&_wait_set_lock); - add_waiter(node); - Thread::SpinRelease(&_wait_set_lock); + { + SpinCriticalSection scs(&_wait_set_lock); + add_waiter(node); + } node->_recursions = _recursions; // record the old recursion count _recursions = 0; // set the recursion level to be 0 @@ -2221,12 +2222,11 @@ bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node // need to check if we were interrupted or the wait timed-out, and // in that case remove ourselves from the _wait_set queue. if (node->TState == ObjectWaiter::TS_WAIT) { - Thread::SpinAcquire(&_wait_set_lock); + SpinCriticalSection scs(&_wait_set_lock); if (node->TState == ObjectWaiter::TS_WAIT) { dequeue_specific_waiter(node); // unlink from wait_set node->TState = ObjectWaiter::TS_RUN; } - Thread::SpinRelease(&_wait_set_lock); } // If this was an interrupted case, set the _interrupted boolean so that diff --git a/src/hotspot/share/runtime/park.cpp b/src/hotspot/share/runtime/park.cpp index 37dfe6fcc3d..338a01bbfb9 100644 --- a/src/hotspot/share/runtime/park.cpp +++ b/src/hotspot/share/runtime/park.cpp @@ -25,6 +25,7 @@ #include "memory/allocation.inline.hpp" #include "nmt/memTracker.hpp" #include "runtime/javaThread.hpp" +#include "utilities/spinCriticalSection.hpp" // Lifecycle management for TSM ParkEvents. // ParkEvents are type-stable (TSM). @@ -60,14 +61,13 @@ ParkEvent * ParkEvent::Allocate (Thread * t) { // Using a spin lock since we are part of the mutex impl. // 8028280: using concurrent free list without memory management can leak // pretty badly it turns out. - Thread::SpinAcquire(&ListLock); { + SpinCriticalSection scs(&ListLock); ev = FreeList; if (ev != nullptr) { FreeList = ev->FreeNext; } } - Thread::SpinRelease(&ListLock); if (ev != nullptr) { guarantee (ev->AssociatedWith == nullptr, "invariant") ; @@ -88,12 +88,11 @@ void ParkEvent::Release (ParkEvent * ev) { ev->AssociatedWith = nullptr ; // Note that if we didn't have the TSM/immortal constraint, then // when reattaching we could trim the list. 
- Thread::SpinAcquire(&ListLock); { + SpinCriticalSection scs(&ListLock); ev->FreeNext = FreeList; FreeList = ev; } - Thread::SpinRelease(&ListLock); } // Override operator new and delete so we can ensure that the diff --git a/src/hotspot/share/runtime/safepointVerifiers.cpp b/src/hotspot/share/runtime/safepointVerifiers.cpp index 6eb61efe0ca..0c6f2e9add3 100644 --- a/src/hotspot/share/runtime/safepointVerifiers.cpp +++ b/src/hotspot/share/runtime/safepointVerifiers.cpp @@ -30,7 +30,9 @@ #ifdef ASSERT -NoSafepointVerifier::NoSafepointVerifier(bool active) : _thread(Thread::current()), _active(active) { +NoSafepointVerifier::NoSafepointVerifier(bool active) + : _thread(active ? Thread::current() : nullptr), + _active(active) { if (!_active) { return; } diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp index d018c8a1a3a..f3c08ae62f7 100644 --- a/src/hotspot/share/runtime/thread.cpp +++ b/src/hotspot/share/runtime/thread.cpp @@ -566,49 +566,3 @@ bool Thread::set_as_starting_thread(JavaThread* jt) { DEBUG_ONLY(_starting_thread = jt;) return os::create_main_thread(jt); } - -// Ad-hoc mutual exclusion primitive: spin lock -// -// We employ a spin lock _only for low-contention, fixed-length -// short-duration critical sections where we're concerned -// about native mutex_t or HotSpot Mutex:: latency. - -void Thread::SpinAcquire(volatile int * adr) { - if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) { - return; // normal fast-path return - } - - // Slow-path : We've encountered contention -- Spin/Yield/Block strategy. - int ctr = 0; - int Yields = 0; - for (;;) { - while (*adr != 0) { - ++ctr; - if ((ctr & 0xFFF) == 0 || !os::is_MP()) { - if (Yields > 5) { - os::naked_short_sleep(1); - } else { - os::naked_yield(); - ++Yields; - } - } else { - SpinPause(); - } - } - if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) return; - } -} - -void Thread::SpinRelease(volatile int * adr) { - assert(*adr != 0, "invariant"); - // Roach-motel semantics. - // It's safe if subsequent LDs and STs float "up" into the critical section, - // but prior LDs and STs within the critical section can't be allowed - // to reorder or float past the ST that releases the lock. - // Loads and stores in the critical section - which appear in program - // order before the store that releases the lock - must also appear - // before the store that releases the lock in memory visibility order. - // So we need a #loadstore|#storestore "release" memory barrier before - // the ST of 0 into the lock-word which releases the lock. - AtomicAccess::release_store(adr, 0); -} diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp index 240821e90bd..181dfc46f87 100644 --- a/src/hotspot/share/runtime/thread.hpp +++ b/src/hotspot/share/runtime/thread.hpp @@ -602,11 +602,6 @@ protected: jint _hashStateY; jint _hashStateZ; - // Low-level leaf-lock primitives used to implement synchronization. - // Not for general synchronization use. - static void SpinAcquire(volatile int * Lock); - static void SpinRelease(volatile int * Lock); - #if defined(__APPLE__) && defined(AARCH64) private: DEBUG_ONLY(bool _wx_init); diff --git a/src/hotspot/share/utilities/spinCriticalSection.cpp b/src/hotspot/share/utilities/spinCriticalSection.cpp new file mode 100644 index 00000000000..0bbbc82c12b --- /dev/null +++ b/src/hotspot/share/utilities/spinCriticalSection.cpp @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "runtime/atomicAccess.hpp" +#include "utilities/spinCriticalSection.hpp" + +void SpinCriticalSection::spin_acquire(volatile int* adr) { + if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) { + return; // normal fast-path return + } + + // Slow-path : We've encountered contention -- Spin/Yield/Block strategy. + int ctr = 0; + int Yields = 0; + for (;;) { + while (*adr != 0) { + ++ctr; + if ((ctr & 0xFFF) == 0 || !os::is_MP()) { + if (Yields > 5) { + os::naked_short_sleep(1); + } + else { + os::naked_yield(); + ++Yields; + } + } + else { + SpinPause(); + } + } + if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) return; + } +} + +void SpinCriticalSection::spin_release(volatile int* adr) { + assert(*adr != 0, "invariant"); + // Roach-motel semantics. + // It's safe if subsequent LDs and STs float "up" into the critical section, + // but prior LDs and STs within the critical section can't be allowed + // to reorder or float past the ST that releases the lock. + // Loads and stores in the critical section - which appear in program + // order before the store that releases the lock - must also appear + // before the store that releases the lock in memory visibility order. + // So we need a #loadstore|#storestore "release" memory barrier before + // the ST of 0 into the lock-word which releases the lock. + AtomicAccess::release_store(adr, 0); +} + diff --git a/src/hotspot/share/utilities/spinCriticalSection.hpp b/src/hotspot/share/utilities/spinCriticalSection.hpp new file mode 100644 index 00000000000..0ebdc5de63e --- /dev/null +++ b/src/hotspot/share/utilities/spinCriticalSection.hpp @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_UTILITIES_SPINCRITICALSECTION_HPP +#define SHARE_UTILITIES_SPINCRITICALSECTION_HPP + +#include "runtime/javaThread.hpp" +#include "runtime/safepointVerifiers.hpp" +#include "runtime/thread.hpp" +#include "utilities/macros.hpp" + +// Ad-hoc mutual exclusion primitive: spin critical section, +// which employs a spin lock. +// +// We use this critical section only for low-contention code, and +// when it is known that the duration is short. To be used where +// we're concerned about native mutex_t or HotSpot Mutex:: latency. +// This class uses low-level leaf-lock primitives to implement +// synchronization and is not for general synchronization use. +// Should not be used in signal-handling contexts. +class SpinCriticalSection { +private: + // We use int type as 32-bit atomic operations are the most performant + // compared to smaller/larger types. + volatile int* const _lock; + DEBUG_ONLY(NoSafepointVerifier _nsv;) + + static void spin_acquire(volatile int* Lock); + static void spin_release(volatile int* Lock); +public: + NONCOPYABLE(SpinCriticalSection); + SpinCriticalSection(volatile int* lock) + : _lock(lock) + DEBUG_ONLY(COMMA _nsv(Thread::current_or_null() != nullptr)) { + spin_acquire(_lock); + } + ~SpinCriticalSection() { + spin_release(_lock); + } +}; + +#endif // SHARE_UTILITIES_SPINCRITICALSECTION_HPP diff --git a/test/hotspot/gtest/jfr/test_adaptiveSampler.cpp b/test/hotspot/gtest/jfr/test_adaptiveSampler.cpp index 69548b06e51..8625f64099d 100644 --- a/test/hotspot/gtest/jfr/test_adaptiveSampler.cpp +++ b/test/hotspot/gtest/jfr/test_adaptiveSampler.cpp @@ -34,13 +34,12 @@ #include "jfr/utilities/jfrAllocation.hpp" #include "jfr/utilities/jfrRandom.inline.hpp" -#include "jfr/utilities/jfrSpinlockHelper.hpp" #include "jfr/utilities/jfrTime.hpp" #include "jfr/utilities/jfrTimeConverter.hpp" #include "jfr/utilities/jfrTryLock.hpp" #include "logging/log.hpp" -#include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/spinCriticalSection.hpp" #include "unittest.hpp" #include
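
For context when reviewing the call-site changes above, a minimal usage sketch of the new guard follows, modelled on the park.cpp free-list hunks; it is not part of the patch. The FreeNode type, the _free_list_lock word, the _free_list head and pop_free_node() are illustrative placeholders, while SpinCriticalSection itself and its header path come from this change.

#include "utilities/spinCriticalSection.hpp"

// Illustrative free-list node type; not part of the patch.
struct FreeNode {
  FreeNode* _next;
};

// Illustrative globals: the lock word must start at 0 (unlocked).
static volatile int _free_list_lock = 0;
static FreeNode*    _free_list      = nullptr;

FreeNode* pop_free_node() {
  FreeNode* node = nullptr;
  {
    SpinCriticalSection scs(&_free_list_lock);  // constructor spin-acquires the lock word
    node = _free_list;
    if (node != nullptr) {
      _free_list = node->_next;
    }
  }  // destructor release-stores 0 into the lock word when scs goes out of scope
  return node;
}

Compared with the paired Thread::SpinAcquire/Thread::SpinRelease calls it replaces, the scoped guard ties the release to scope exit, and in debug builds the embedded NoSafepointVerifier asserts that the holder does not stop at a safepoint while the spin lock is held.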