diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp
index f3c08ae62f7..bfbd4727e9a 100644
--- a/src/hotspot/share/runtime/thread.cpp
+++ b/src/hotspot/share/runtime/thread.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -36,6 +36,7 @@
 #include "memory/resourceArea.hpp"
 #include "nmt/memTracker.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/atomicAccess.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaThread.inline.hpp"
@@ -82,7 +83,7 @@ Thread::Thread(MemTag mem_tag) {
   _threads_hazard_ptr = nullptr;
   _threads_list_ptr = nullptr;
   _nested_threads_hazard_ptr_cnt = 0;
-  _rcu_counter = 0;
+  _rcu_counter.store_relaxed(0);
 
   // the handle mark links itself to last_handle_mark
   new HandleMark(this);
diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp
index 181dfc46f87..dcd6fb2d3fd 100644
--- a/src/hotspot/share/runtime/thread.hpp
+++ b/src/hotspot/share/runtime/thread.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -30,6 +30,7 @@
 #include "gc/shared/threadLocalAllocBuffer.hpp"
 #include "jni.h"
 #include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/atomicAccess.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/os.hpp"
@@ -238,9 +239,9 @@ class Thread: public ThreadShadow {
 
   // Support for GlobalCounter
  private:
-  volatile uintx _rcu_counter;
+  Atomic<uintx> _rcu_counter;
  public:
-  volatile uintx* get_rcu_counter() {
+  Atomic<uintx>* get_rcu_counter() {
     return &_rcu_counter;
   }
 
diff --git a/src/hotspot/share/utilities/globalCounter.cpp b/src/hotspot/share/utilities/globalCounter.cpp
index 7019273d937..114d3b5fed1 100644
--- a/src/hotspot/share/utilities/globalCounter.cpp
+++ b/src/hotspot/share/utilities/globalCounter.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  */
 
 #include "memory/iterator.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/javaThread.hpp"
 #include "runtime/threadSMR.inline.hpp"
 #include "runtime/vmThread.hpp"
@@ -41,7 +41,7 @@ class GlobalCounter::CounterThreadCheck : public ThreadClosure {
     SpinYield yield;
     // Loops on this thread until it has exited the critical read section.
     while(true) {
-      uintx cnt = AtomicAccess::load_acquire(thread->get_rcu_counter());
+      uintx cnt = thread->get_rcu_counter()->load_acquire();
       // This checks if the thread's counter is active. And if so is the counter
       // for a pre-existing reader (belongs to this grace period). A pre-existing
       // reader will have a lower counter than the global counter version for this
@@ -57,9 +57,9 @@ class GlobalCounter::CounterThreadCheck : public ThreadClosure {
 };
 
 void GlobalCounter::write_synchronize() {
-  assert((*Thread::current()->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "must be outside a critcal section");
-  // AtomicAccess::add must provide fence since we have storeload dependency.
-  uintx gbl_cnt = AtomicAccess::add(&_global_counter._counter, COUNTER_INCREMENT);
+  assert((Thread::current()->get_rcu_counter()->load_relaxed() & COUNTER_ACTIVE) == 0x0, "must be outside a critcal section");
+  // Atomic add must provide fence since we have storeload dependency.
+  uintx gbl_cnt = _global_counter._counter.add_then_fetch(COUNTER_INCREMENT);
 
   // Do all RCU threads.
   CounterThreadCheck ctc(gbl_cnt);
diff --git a/src/hotspot/share/utilities/globalCounter.hpp b/src/hotspot/share/utilities/globalCounter.hpp
index c78831acfa5..80fc1b3e94e 100644
--- a/src/hotspot/share/utilities/globalCounter.hpp
+++ b/src/hotspot/share/utilities/globalCounter.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 #include "memory/allStatic.hpp"
 #include "memory/padded.hpp"
+#include "runtime/atomic.hpp"
 
 class Thread;
 
@@ -47,7 +48,7 @@ class GlobalCounter : public AllStatic {
   // counter is on a separate cacheline.
   struct PaddedCounter {
     DEFINE_PAD_MINUS_SIZE(0, DEFAULT_PADDING_SIZE, 0);
-    volatile uintx _counter;
+    Atomic<uintx> _counter;
     DEFINE_PAD_MINUS_SIZE(1, DEFAULT_PADDING_SIZE, sizeof(volatile uintx));
   };
 
diff --git a/src/hotspot/share/utilities/globalCounter.inline.hpp b/src/hotspot/share/utilities/globalCounter.inline.hpp
index 9cc746173b8..ed37b8a878d 100644
--- a/src/hotspot/share/utilities/globalCounter.inline.hpp
+++ b/src/hotspot/share/utilities/globalCounter.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,30 +27,29 @@
 
 #include "utilities/globalCounter.hpp"
 
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/javaThread.hpp"
 
 inline GlobalCounter::CSContext
 GlobalCounter::critical_section_begin(Thread *thread) {
   assert(thread == Thread::current(), "must be current thread");
-  uintx old_cnt = AtomicAccess::load(thread->get_rcu_counter());
+  uintx old_cnt = thread->get_rcu_counter()->load_relaxed();
   // Retain the old counter value if already active, e.g. nested.
   // Otherwise, set the counter to the current version + active bit.
   uintx new_cnt = old_cnt;
   if ((new_cnt & COUNTER_ACTIVE) == 0) {
-    new_cnt = AtomicAccess::load(&_global_counter._counter) | COUNTER_ACTIVE;
+    new_cnt = _global_counter._counter.load_relaxed() | COUNTER_ACTIVE;
   }
-  AtomicAccess::release_store_fence(thread->get_rcu_counter(), new_cnt);
+  thread->get_rcu_counter()->release_store_fence(new_cnt);
   return static_cast<CSContext>(old_cnt);
 }
 
 inline void
 GlobalCounter::critical_section_end(Thread *thread, CSContext context) {
   assert(thread == Thread::current(), "must be current thread");
-  assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section");
+  assert((thread->get_rcu_counter()->load_relaxed() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section");
   // Restore the counter value from before the associated begin.
-  AtomicAccess::release_store(thread->get_rcu_counter(),
-                              static_cast<uintx>(context));
+  thread->get_rcu_counter()->release_store(static_cast<uintx>(context));
}
 
 class GlobalCounter::CriticalSection {
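
Notes for reviewers (not part of the patch):

The patch migrates from the free-function AtomicAccess API to member calls on the
Atomic<T> wrapper from runtime/atomic.hpp. As a rough guide to the orderings the
member names imply, here is a standard-C++ analogue built on std::atomic. The
chosen memory orders are my reading of the names, not the authoritative
definitions in runtime/atomic.hpp:

    #include <atomic>
    #include <cstdint>

    using uintx = uintptr_t;  // HotSpot's uintx is an unsigned pointer-sized int

    // Sketch of the Atomic<uintx> member calls used in this patch.
    struct AtomicUintx {
      std::atomic<uintx> _v{0};

      uintx load_relaxed() const { return _v.load(std::memory_order_relaxed); }
      uintx load_acquire() const { return _v.load(std::memory_order_acquire); }
      void  store_relaxed(uintx x) { _v.store(x, std::memory_order_relaxed); }
      void  release_store(uintx x) { _v.store(x, std::memory_order_release); }

      // release_store_fence: a release store followed by a full fence, so the
      // store cannot be reordered with the loads that follow it -- the ordering
      // critical_section_begin() needs before the reader touches shared data.
      void release_store_fence(uintx x) {
        _v.store(x, std::memory_order_release);
        std::atomic_thread_fence(std::memory_order_seq_cst);
      }

      // add_then_fetch: fully-fenced add returning the *new* value, matching
      // the storeload requirement noted in write_synchronize().
      uintx add_then_fetch(uintx d) {
        return _v.fetch_add(d, std::memory_order_seq_cst) + d;
      }
    };

The detail that matters for write_synchronize() is that add_then_fetch returns
the freshly incremented version, which is then handed to CounterThreadCheck as
the boundary of the new grace period.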
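For context, this is the shape of a typical GlobalCounter use. The Node type,
_head, and both functions are illustrative only and are not part of this change;
a production reader would also load _head with acquire semantics:

    #include "runtime/thread.hpp"
    #include "utilities/globalCounter.inline.hpp"

    struct Node { Node* _next; int _payload; };  // hypothetical node type
    static Node* _head = nullptr;                // hypothetical shared list head

    int reader_peek() {
      // RAII reader: the constructor calls critical_section_begin(), publishing
      // this thread's counter with COUNTER_ACTIVE set; the destructor restores
      // the previous value via critical_section_end().
      GlobalCounter::CriticalSection cs(Thread::current());
      Node* n = _head;
      return n != nullptr ? n->_payload : -1;
    }

    void writer_retire(Node* n) {
      // Called after n has been unlinked. write_synchronize() bumps the global
      // counter and spins until every thread that was ACTIVE under an older
      // counter version has left its read section.
      GlobalCounter::write_synchronize();
      delete n;  // no pre-existing reader can still hold n
    }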