8372650: Convert GenericWaitBarrier to use Atomic<T>

Reviewed-by: shade, iwalulya
This commit is contained in:
Kim Barrett 2025-11-28 22:50:18 +00:00
parent e071afbfe4
commit 52568bf483
2 changed files with 22 additions and 22 deletions

View File

@ -23,7 +23,6 @@
*
*/
-#include "runtime/atomicAccess.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/spinYield.hpp"
@ -79,10 +78,10 @@
void GenericWaitBarrier::arm(int barrier_tag) {
assert(barrier_tag != 0, "Pre arm: Should be arming with armed value");
-assert(AtomicAccess::load(&_barrier_tag) == 0,
+assert(_barrier_tag.load_relaxed() == 0,
"Pre arm: Should not be already armed. Tag: %d",
-AtomicAccess::load(&_barrier_tag));
-AtomicAccess::release_store(&_barrier_tag, barrier_tag);
+_barrier_tag.load_relaxed());
+_barrier_tag.release_store(barrier_tag);
Cell &cell = tag_to_cell(barrier_tag);
cell.arm(barrier_tag);
@ -92,9 +91,9 @@ void GenericWaitBarrier::arm(int barrier_tag) {
}
void GenericWaitBarrier::disarm() {
-int barrier_tag = AtomicAccess::load_acquire(&_barrier_tag);
+int barrier_tag = _barrier_tag.load_acquire();
assert(barrier_tag != 0, "Pre disarm: Should be armed. Tag: %d", barrier_tag);
-AtomicAccess::release_store(&_barrier_tag, 0);
+_barrier_tag.release_store(0);
Cell &cell = tag_to_cell(barrier_tag);
cell.disarm(barrier_tag);
@ -121,7 +120,7 @@ void GenericWaitBarrier::Cell::arm(int32_t requested_tag) {
SpinYield sp;
while (true) {
-state = AtomicAccess::load_acquire(&_state);
+state = _state.load_acquire();
assert(decode_tag(state) == 0,
"Pre arm: Should not be armed. "
"Tag: " INT32_FORMAT "; Waiters: " INT32_FORMAT,
@ -134,7 +133,7 @@ void GenericWaitBarrier::Cell::arm(int32_t requested_tag) {
// Try to swing cell to armed. This should always succeed after the check above.
int64_t new_state = encode(requested_tag, 0);
-int64_t prev_state = AtomicAccess::cmpxchg(&_state, state, new_state);
+int64_t prev_state = _state.compare_exchange(state, new_state);
if (prev_state != state) {
fatal("Cannot arm the wait barrier. "
"Tag: " INT32_FORMAT "; Waiters: " INT32_FORMAT,
@ -145,14 +144,14 @@ void GenericWaitBarrier::Cell::arm(int32_t requested_tag) {
int GenericWaitBarrier::Cell::signal_if_needed(int max) {
int signals = 0;
while (true) {
-int cur = AtomicAccess::load_acquire(&_outstanding_wakeups);
+int cur = _outstanding_wakeups.load_acquire();
if (cur == 0) {
// All done, no more waiters.
return 0;
}
assert(cur > 0, "Sanity");
-int prev = AtomicAccess::cmpxchg(&_outstanding_wakeups, cur, cur - 1);
+int prev = _outstanding_wakeups.compare_exchange(cur, cur - 1);
if (prev != cur) {
// Contention, return to caller for early return or backoff.
return prev;
@ -172,7 +171,7 @@ void GenericWaitBarrier::Cell::disarm(int32_t expected_tag) {
int32_t waiters;
while (true) {
-int64_t state = AtomicAccess::load_acquire(&_state);
+int64_t state = _state.load_acquire();
int32_t tag = decode_tag(state);
waiters = decode_waiters(state);
@ -182,7 +181,7 @@ void GenericWaitBarrier::Cell::disarm(int32_t expected_tag) {
tag, waiters);
int64_t new_state = encode(0, waiters);
-if (AtomicAccess::cmpxchg(&_state, state, new_state) == state) {
+if (_state.compare_exchange(state, new_state) == state) {
// Successfully disarmed.
break;
}
@ -191,19 +190,19 @@ void GenericWaitBarrier::Cell::disarm(int32_t expected_tag) {
// Wake up waiters, if we have at least one.
// Allow other threads to assist with wakeups, if possible.
if (waiters > 0) {
-AtomicAccess::release_store(&_outstanding_wakeups, waiters);
+_outstanding_wakeups.release_store(waiters);
SpinYield sp;
while (signal_if_needed(INT_MAX) > 0) {
sp.wait();
}
}
-assert(AtomicAccess::load(&_outstanding_wakeups) == 0, "Post disarm: Should not have outstanding wakeups");
+assert(_outstanding_wakeups.load_relaxed() == 0, "Post disarm: Should not have outstanding wakeups");
}
void GenericWaitBarrier::Cell::wait(int32_t expected_tag) {
// Try to register ourselves as pending waiter.
while (true) {
-int64_t state = AtomicAccess::load_acquire(&_state);
+int64_t state = _state.load_acquire();
int32_t tag = decode_tag(state);
if (tag != expected_tag) {
// Cell tag had changed while waiting here. This means either the cell had
@ -219,7 +218,7 @@ void GenericWaitBarrier::Cell::wait(int32_t expected_tag) {
tag, waiters);
int64_t new_state = encode(tag, waiters + 1);
-if (AtomicAccess::cmpxchg(&_state, state, new_state) == state) {
+if (_state.compare_exchange(state, new_state) == state) {
// Success! Proceed to wait.
break;
}
@ -238,7 +237,7 @@ void GenericWaitBarrier::Cell::wait(int32_t expected_tag) {
// Register ourselves as completed waiter before leaving.
while (true) {
-int64_t state = AtomicAccess::load_acquire(&_state);
+int64_t state = _state.load_acquire();
int32_t tag = decode_tag(state);
int32_t waiters = decode_waiters(state);
@ -248,7 +247,7 @@ void GenericWaitBarrier::Cell::wait(int32_t expected_tag) {
tag, waiters);
int64_t new_state = encode(tag, waiters - 1);
-if (AtomicAccess::cmpxchg(&_state, state, new_state) == state) {
+if (_state.compare_exchange(state, new_state) == state) {
// Success!
break;
}

View File

@ -1,5 +1,5 @@
/*
-* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/padded.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/semaphore.hpp"
#include "utilities/globalDefinitions.hpp"
@ -43,10 +44,10 @@ private:
Semaphore _sem;
// Cell state, tracks the arming + waiters status
-volatile int64_t _state;
+Atomic<int64_t> _state;
// Wakeups to deliver for current waiters
-volatile int _outstanding_wakeups;
+Atomic<int> _outstanding_wakeups;
int signal_if_needed(int max);
@ -83,7 +84,7 @@ private:
// Trailing padding to protect the last cell.
DEFINE_PAD_MINUS_SIZE(0, DEFAULT_PADDING_SIZE, 0);
-volatile int _barrier_tag;
+Atomic<int> _barrier_tag;
// Trailing padding to insulate the rest of the barrier from adjacent
// data structures. The leading padding is not needed, as cell padding