8371923: Update LockFreeStack for Atomic<T>

Reviewed-by: iwalulya, dholmes
Kim Barrett 2025-11-19 20:05:09 +00:00
parent 223cc64518
commit 6f1c5733ed
2 changed files with 53 additions and 32 deletions

View File

@@ -25,6 +25,7 @@
#ifndef SHARE_UTILITIES_LOCKFREESTACK_HPP
#define SHARE_UTILITIES_LOCKFREESTACK_HPP
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -34,11 +35,14 @@
// a result, there is no allocation involved in adding objects to the stack
// or removing them from the stack.
//
// To be used in a LockFreeStack of objects of type T, an object of
// type T must have a list entry member of type T* volatile, with a
// non-member accessor function returning a pointer to that member. A
// LockFreeStack is associated with the class of its elements and an
// entry member from that class.
// To be used in a LockFreeStack of objects of type T, an object of type T
// must have a list entry member. A list entry member is a data member whose
// type is either (1) Atomic<T*>, or (2) T* volatile. There must be a
// non-member or static member function returning a pointer to that member,
// which is used to provide access to it by a LockFreeStack. A LockFreeStack
// is associated with the class of its elements and an entry member from that
// class by being specialized on the element class and a pointer to the
// function for accessing that entry member.
//
// An object can be in multiple stacks at the same time, so long as
// each stack uses a different entry member. That is, the class of the
@@ -52,12 +56,12 @@
//
// \tparam T is the class of the elements in the stack.
//
// \tparam next_ptr is a function pointer. Applying this function to
// \tparam next_accessor is a function pointer. Applying this function to
// an object of type T must return a pointer to the list entry member
// of the object associated with the LockFreeStack type.
template<typename T, T* volatile* (*next_ptr)(T&)>
template<typename T, auto next_accessor>
class LockFreeStack {
T* volatile _top;
Atomic<T*> _top;
void prepend_impl(T* first, T* last) {
T* cur = top();
@@ -65,12 +69,21 @@ class LockFreeStack {
do {
old = cur;
set_next(*last, cur);
cur = AtomicAccess::cmpxchg(&_top, cur, first);
cur = _top.compare_exchange(cur, first);
} while (old != cur);
}
NONCOPYABLE(LockFreeStack);
template<typename NextAccessor>
static constexpr void use_atomic_access_impl(NextAccessor) {
static_assert(DependentAlwaysFalse<NextAccessor>, "Invalid next accessor");
}
static constexpr bool use_atomic_access_impl(T* volatile* (*)(T&)) { return true; }
static constexpr bool use_atomic_access_impl(Atomic<T*>* (*)(T&)) { return false; }
static constexpr bool use_atomic_access = use_atomic_access_impl(next_accessor);
public:
LockFreeStack() : _top(nullptr) {}
~LockFreeStack() { assert(empty(), "stack not empty"); }
@@ -89,7 +102,7 @@ public:
new_top = next(*result);
}
// CAS even on empty pop, for consistent membar behavior.
result = AtomicAccess::cmpxchg(&_top, result, new_top);
result = _top.compare_exchange(result, new_top);
} while (result != old);
if (result != nullptr) {
set_next(*result, nullptr);
@@ -101,7 +114,7 @@ public:
// list of elements. Acts as a full memory barrier.
// postcondition: empty()
T* pop_all() {
return AtomicAccess::xchg(&_top, (T*)nullptr);
return _top.exchange(nullptr);
}
// Atomically adds value to the top of this stack. Acts as a full
@@ -143,9 +156,9 @@ public:
// Return true if the stack is empty.
bool empty() const { return top() == nullptr; }
// Return the most recently pushed element, or nullptr if the stack is empty.
// Return the most recently pushed element, or null if the stack is empty.
// The returned element is not removed from the stack.
T* top() const { return AtomicAccess::load(&_top); }
T* top() const { return _top.load_relaxed(); }
// Return the number of objects in the stack. There must be no concurrent
// pops while the length is being determined.
@@ -160,7 +173,11 @@ public:
// Return the entry following value in the list used by the
// specialized LockFreeStack class.
static T* next(const T& value) {
return AtomicAccess::load(next_ptr(const_cast<T&>(value)));
if constexpr (use_atomic_access) {
return AtomicAccess::load(next_accessor(const_cast<T&>(value)));
} else {
return next_accessor(const_cast<T&>(value))->load_relaxed();
}
}
// Set the entry following value to new_next in the list used by the
@@ -168,7 +185,11 @@ public:
// if value is in an instance of this specialization of LockFreeStack,
// there must be no concurrent push or pop operations on that stack.
static void set_next(T& value, T* new_next) {
AtomicAccess::store(next_ptr(value), new_next);
if constexpr (use_atomic_access) {
AtomicAccess::store(next_accessor(value), new_next);
} else {
next_accessor(value)->store_relaxed(new_next);
}
}
};
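
For orientation, here is a minimal usage sketch of the updated template. It is not part of this change: the Node type, the next_of accessor, and push_then_pop are hypothetical names, and it assumes the push(T&) / pop() interface shown above. It illustrates the new-style Atomic<T*> entry member described in the header comment, the non-member accessor whose type selects the Atomic path, and the specialization that binds them together.

// Hypothetical usage sketch, assuming the push(T&) / pop() interface above.
#include "runtime/atomic.hpp"
#include "utilities/lockFreeStack.hpp"

struct Node {
  Atomic<Node*> _next;     // list entry member, in the new Atomic<T*> form
  int _payload;
  Node(int payload = 0) : _next(), _payload(payload) {}
};

// Non-member accessor returning a pointer to the entry member.  Its type,
// Atomic<Node*>* (*)(Node&), makes use_atomic_access false, so next() and
// set_next() go through load_relaxed() / store_relaxed() on the member.
inline Atomic<Node*>* next_of(Node& n) { return &n._next; }

typedef LockFreeStack<Node, &next_of> NodeStack;

// Push and pop are lock-free and allocation-free; the links live inside the
// elements themselves.
inline Node* push_then_pop(NodeStack& stack, Node& n) {
  stack.push(n);
  return stack.pop();
}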

View File

@@ -22,7 +22,7 @@
*/
#include "memory/allocation.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/lockFreeStack.hpp"
#include "threadHelper.inline.hpp"
@@ -33,12 +33,12 @@
class LockFreeStackTestElement {
typedef LockFreeStackTestElement Element;
Element* volatile _entry;
Element* volatile _entry1;
Atomic<Element*> _entry;
Atomic<Element*> _entry1;
size_t _id;
static Element* volatile* entry_ptr(Element& e) { return &e._entry; }
static Element* volatile* entry1_ptr(Element& e) { return &e._entry1; }
static Atomic<Element*>* entry_ptr(Element& e) { return &e._entry; }
static Atomic<Element*>* entry1_ptr(Element& e) { return &e._entry1; }
public:
LockFreeStackTestElement(size_t id = 0) : _entry(), _entry1(), _id(id) {}
@@ -202,17 +202,17 @@ class LockFreeStackTestThread : public JavaTestThread {
uint _id;
TestStack* _from;
TestStack* _to;
volatile size_t* _processed;
Atomic<size_t>* _processed;
size_t _process_limit;
size_t _local_processed;
volatile bool _ready;
Atomic<bool> _ready;
public:
LockFreeStackTestThread(Semaphore* post,
uint id,
TestStack* from,
TestStack* to,
volatile size_t* processed,
Atomic<size_t>* processed,
size_t process_limit) :
JavaTestThread(post),
_id(id),
@@ -225,21 +225,21 @@ public:
{}
virtual void main_run() {
AtomicAccess::release_store_fence(&_ready, true);
_ready.release_store_fence(true);
while (true) {
Element* e = _from->pop();
if (e != nullptr) {
_to->push(*e);
AtomicAccess::inc(_processed);
_processed->fetch_then_add(1u);
++_local_processed;
} else if (AtomicAccess::load_acquire(_processed) == _process_limit) {
} else if (_processed->load_acquire() == _process_limit) {
tty->print_cr("thread %u processed %zu", _id, _local_processed);
return;
}
}
}
bool ready() const { return AtomicAccess::load_acquire(&_ready); }
bool ready() const { return _ready.load_acquire(); }
};
TEST_VM(LockFreeStackTest, stress) {
@@ -248,8 +248,8 @@ TEST_VM(LockFreeStackTest, stress) {
TestStack start_stack;
TestStack middle_stack;
TestStack final_stack;
volatile size_t stage1_processed = 0;
volatile size_t stage2_processed = 0;
Atomic<size_t> stage1_processed{0};
Atomic<size_t> stage2_processed{0};
const size_t nelements = 10000;
Element* elements = NEW_C_HEAP_ARRAY(Element, nelements, mtOther);
@@ -272,7 +272,7 @@ TEST_VM(LockFreeStackTest, stress) {
for (uint i = 0; i < ARRAY_SIZE(threads); ++i) {
TestStack* from = &start_stack;
TestStack* to = &middle_stack;
volatile size_t* processed = &stage1_processed;
Atomic<size_t>* processed = &stage1_processed;
if (i >= stage1_threads) {
from = &middle_stack;
to = &final_stack;
@@ -293,8 +293,8 @@ TEST_VM(LockFreeStackTest, stress) {
}
// Verify expected state.
ASSERT_EQ(nelements, stage1_processed);
ASSERT_EQ(nelements, stage2_processed);
ASSERT_EQ(nelements, stage1_processed.load_relaxed());
ASSERT_EQ(nelements, stage2_processed.load_relaxed());
ASSERT_EQ(0u, initial_stack.length());
ASSERT_EQ(0u, start_stack.length());
ASSERT_EQ(0u, middle_stack.length());
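
As background on the accessor classification introduced in lockFreeStack.hpp above (use_atomic_access_impl), the following standalone sketch shows the same idiom in plain C++17, outside HotSpot: an auto non-type template parameter carries the accessor function pointer, constexpr overload resolution on its type picks between the two supported entry-member kinds, and a value-dependent static_assert rejects anything else. AtomicBox, dependent_false, StackSketch, and the element names are illustrative stand-ins, not HotSpot types.

// Standalone illustration (not HotSpot code) of the overload-based dispatch.
#include <cstdio>

template<typename T> struct AtomicBox { T value; };                // stand-in for Atomic<T>
template<typename> inline constexpr bool dependent_false = false;  // stand-in for DependentAlwaysFalse

template<typename T, auto next_accessor>
struct StackSketch {
  // Catch-all overload: chosen only for unsupported accessor types, in which
  // case the value-dependent static_assert fires at compile time.
  template<typename NextAccessor>
  static constexpr bool classify(NextAccessor) {
    static_assert(dependent_false<NextAccessor>, "Invalid next accessor");
    return false;
  }
  // Legacy entry member: T* volatile, accessed via the AtomicAccess functions.
  static constexpr bool classify(T* volatile* (*)(T&)) { return true; }
  // New entry member: Atomic<T*>, accessed via its own member functions.
  static constexpr bool classify(AtomicBox<T*>* (*)(T&)) { return false; }

  static constexpr bool use_atomic_access = classify(next_accessor);
};

struct Elem {
  AtomicBox<Elem*> _entry{};
  Elem* volatile _legacy_entry{};
};
AtomicBox<Elem*>* entry(Elem& e) { return &e._entry; }
Elem* volatile* legacy_entry(Elem& e) { return &e._legacy_entry; }

int main() {
  // Prints "0 1": Atomic-style accessor -> false, legacy accessor -> true.
  printf("%d %d\n",
         StackSketch<Elem, &entry>::use_atomic_access,
         StackSketch<Elem, &legacy_entry>::use_atomic_access);
  return 0;
}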