8372251: Convert PartialArrayStepper/State to use Atomic<T>

Reviewed-by: iwalulya, tschatzl
Author: Kim Barrett
Date:   2025-11-26 06:17:44 +00:00
Parent: cdf5fbed9b
Commit: f3fddd6e90

5 changed files with 27 additions and 26 deletions
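The pattern of the change is uniform across all five files: fields declared `volatile T` and accessed through `AtomicAccess` free functions become `Atomic<T>` fields with member-function operations. A minimal sketch of the shape of such a wrapper, written here against C++11 `std::atomic` purely for illustration (the real HotSpot `Atomic<T>` template differs in detail):

    #include <atomic>
    #include <cstddef>

    // Hypothetical, simplified sketch of an Atomic<T> wrapper with the
    // member names used in this commit; HotSpot's real template differs.
    template<typename T>
    class Atomic {
      std::atomic<T> _value{};
    public:
      T load_relaxed() const { return _value.load(std::memory_order_relaxed); }
      void store_relaxed(T v) { _value.store(v, std::memory_order_relaxed); }

      // Returns the value *before* the addition.
      T fetch_then_add(T arg, std::memory_order order) {
        return _value.fetch_add(arg, order);
      }

      // Return the value *after* the addition/subtraction.
      T add_then_fetch(T arg, std::memory_order order) {
        return _value.fetch_add(arg, order) + arg;
      }
      T sub_then_fetch(T arg, std::memory_order order) {
        return _value.fetch_sub(arg, order) - arg;
      }
    };

Seen through this sketch, `AtomicAccess::add(&_refcount, count, ...)` becomes `_refcount.add_then_fetch(count, ...)`, which is exactly the rewrite in the hunks below.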


@@ -47,7 +47,7 @@ PartialArrayState::PartialArrayState(oop src, oop dst,
 }
 
 void PartialArrayState::add_references(size_t count) {
-  size_t new_count = AtomicAccess::add(&_refcount, count, memory_order_relaxed);
+  size_t new_count = _refcount.add_then_fetch(count, memory_order_relaxed);
   assert(new_count >= count, "reference count overflow");
 }
@@ -92,7 +92,7 @@ PartialArrayState* PartialArrayStateAllocator::allocate(oop src, oop dst,
 }
 
 void PartialArrayStateAllocator::release(PartialArrayState* state) {
-  size_t refcount = AtomicAccess::sub(&state->_refcount, size_t(1), memory_order_release);
+  size_t refcount = state->_refcount.sub_then_fetch(1u, memory_order_release);
   if (refcount != 0) {
     assert(refcount + 1 != 0, "refcount underflow");
   } else {
@@ -116,25 +116,25 @@ PartialArrayStateManager::~PartialArrayStateManager() {
 }
 
 Arena* PartialArrayStateManager::register_allocator() {
-  uint idx = AtomicAccess::fetch_then_add(&_registered_allocators, 1u, memory_order_relaxed);
+  uint idx = _registered_allocators.fetch_then_add(1u, memory_order_relaxed);
   assert(idx < _max_allocators, "exceeded configured max number of allocators");
   return ::new (&_arenas[idx]) Arena(mtGC);
 }
 
 #ifdef ASSERT
 void PartialArrayStateManager::release_allocator() {
-  uint old = AtomicAccess::fetch_then_add(&_released_allocators, 1u, memory_order_relaxed);
-  assert(old < AtomicAccess::load(&_registered_allocators), "too many releases");
+  uint old = _released_allocators.fetch_then_add(1u, memory_order_relaxed);
+  assert(old < _registered_allocators.load_relaxed(), "too many releases");
 }
 #endif // ASSERT
 
 void PartialArrayStateManager::reset() {
-  uint count = AtomicAccess::load(&_registered_allocators);
-  assert(count == AtomicAccess::load(&_released_allocators),
+  uint count = _registered_allocators.load_relaxed();
+  assert(count == _released_allocators.load_relaxed(),
          "some allocators still active");
   for (uint i = 0; i < count; ++i) {
     _arenas[i].~Arena();
   }
-  AtomicAccess::store(&_registered_allocators, 0u);
-  DEBUG_ONLY(AtomicAccess::store(&_released_allocators, 0u);)
+  _registered_allocators.store_relaxed(0u);
+  DEBUG_ONLY(_released_allocators.store_relaxed(0u);)
 }
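The refcount changes above preserve the usual split in ordering: the increment in add_references is relaxed (a caller can only add references while already holding one, so the state cannot be freed out from under it), while the decrement in release uses release ordering so each releasing thread's prior writes are published to whichever thread ultimately frees the state. A standalone sketch of the same idiom with std::atomic; the acquire fence on the final decrement is an assumption based on the common pattern, since the hunk above is cut off before the zero-refcount branch:

    #include <atomic>
    #include <cassert>
    #include <cstddef>

    // Sketch of the refcount idiom above, using std::atomic. The acquire
    // fence before cleanup is assumed from the usual pattern; the diff is
    // truncated before the zero-refcount branch.
    struct RefCounted {
      std::atomic<size_t> _refcount{1};

      void add_references(size_t count) {
        // Relaxed: the caller already holds a reference, so no release can
        // drop the count to zero concurrently with this increment.
        size_t new_count =
            _refcount.fetch_add(count, std::memory_order_relaxed) + count;
        assert(new_count >= count && "reference count overflow");
      }

      // Returns true if the caller dropped the last reference and owns cleanup.
      bool release() {
        // Release: publish this thread's writes to whichever thread sees zero.
        size_t old = _refcount.fetch_sub(1, std::memory_order_release);
        assert(old != 0 && "refcount underflow");
        if (old == 1) {
          // Pairs with the release decrements of all other former holders.
          std::atomic_thread_fence(std::memory_order_acquire);
          return true;
        }
        return false;
      }
    };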


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 #include "memory/allocation.hpp"
 #include "oops/oopsHierarchy.hpp"
+#include "runtime/atomic.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
@@ -60,8 +61,8 @@ class PartialArrayState {
   oop _source;
   oop _destination;
   size_t _length;
-  volatile size_t _index;
-  volatile size_t _refcount;
+  Atomic<size_t> _index;
+  Atomic<size_t> _refcount;
 
   friend class PartialArrayStateAllocator;
@@ -90,7 +91,7 @@ public:
   // A pointer to the start index for the next segment to process, for atomic
   // update.
-  volatile size_t* index_addr() { return &_index; }
+  Atomic<size_t>* index_addr() { return &_index; }
 };
 
 // This class provides memory management for PartialArrayStates.
@@ -178,8 +179,8 @@ class PartialArrayStateManager : public CHeapObj<mtGC> {
   // The number of allocators that have been registered/released.
   // Atomic to support concurrent registration, and concurrent release.
   // Phasing restriction forbids registration concurrent with release.
-  volatile uint _registered_allocators;
-  DEBUG_ONLY(volatile uint _released_allocators;)
+  Atomic<uint> _registered_allocators;
+  DEBUG_ONLY(Atomic<uint> _released_allocators;)
 
   // These are all for sole use of the befriended allocator class.
   Arena* register_allocator();
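The comment in this hunk captures why relaxed ordering is sufficient for both counters: registrations may race with each other and releases may race with each other, but the two phases never overlap, so neither counter is read while the other phase is still mutating it. A hypothetical standalone rendering of that protocol (the class, member names, and slot limit are invented for illustration):

    #include <atomic>
    #include <cassert>

    // Hypothetical rendering of the two-phase register/release protocol;
    // max_slots stands in for the manager's configured maximum.
    class SlotRegistry {
      static const unsigned max_slots = 16;
      std::atomic<unsigned> _registered{0};
      std::atomic<unsigned> _released{0};
    public:
      // Phase 1: any number of threads may claim slots concurrently.
      unsigned register_slot() {
        unsigned idx = _registered.fetch_add(1, std::memory_order_relaxed);
        assert(idx < max_slots && "exceeded configured max number of slots");
        return idx;
      }

      // Phase 2: concurrent with other releases, never with registration,
      // so a relaxed read of _registered observes its final value.
      void release_slot() {
        unsigned old = _released.fetch_add(1, std::memory_order_relaxed);
        assert(old < _registered.load(std::memory_order_relaxed) &&
               "too many releases");
      }
    };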


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #define SHARE_GC_SHARED_PARTIALARRAYTASKSTEPPER_HPP
 
 #include "oops/arrayOop.hpp"
+#include "runtime/atomic.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 class PartialArrayState;
@@ -73,7 +74,7 @@ private:
   uint _task_fanout;
 
   // For unit tests.
-  inline Step next_impl(size_t length, volatile size_t* index_addr) const;
+  inline Step next_impl(size_t length, Atomic<size_t>* index_addr) const;
 };
 
 #endif // SHARE_GC_SHARED_PARTIALARRAYTASKSTEPPER_HPP


@@ -46,15 +46,13 @@ PartialArrayTaskStepper::start(size_t length) const {
 }
 
 PartialArrayTaskStepper::Step
-PartialArrayTaskStepper::next_impl(size_t length, volatile size_t* index_addr) const {
+PartialArrayTaskStepper::next_impl(size_t length, Atomic<size_t>* index_addr) const {
   // The start of the next task is in the state's index.
   // Atomically increment by the chunk size to claim the associated chunk.
   // Because we limit the number of enqueued tasks to being no more than the
   // number of remaining chunks to process, we can use an atomic add for the
   // claim, rather than a CAS loop.
-  size_t start = AtomicAccess::fetch_then_add(index_addr,
-                                              _chunk_size,
-                                              memory_order_relaxed);
+  size_t start = index_addr->fetch_then_add(_chunk_size, memory_order_relaxed);
   assert(start < length, "invariant: start %zu, length %zu", start, length);
   assert(((length - start) % _chunk_size) == 0,

@@ -24,6 +24,7 @@
 
 #include "gc/shared/partialArrayTaskStepper.inline.hpp"
 #include "memory/allStatic.hpp"
+#include "runtime/atomic.hpp"
 #include "unittest.hpp"
 
 using Step = PartialArrayTaskStepper::Step;
@@ -33,7 +34,7 @@ class PartialArrayTaskStepper::TestSupport : AllStatic {
 public:
   static Step next(const Stepper* stepper,
                    size_t length,
-                   size_t* to_length_addr) {
+                   Atomic<size_t>* to_length_addr) {
     return stepper->next_impl(length, to_length_addr);
   }
 };
@@ -42,9 +43,9 @@ using StepperSupport = PartialArrayTaskStepper::TestSupport;
 
 static uint simulate(const Stepper* stepper,
                      size_t length,
-                     size_t* to_length_addr) {
+                     Atomic<size_t>* to_length_addr) {
   Step init = stepper->start(length);
-  *to_length_addr = init._index;
+  to_length_addr->store_relaxed(init._index);
   uint queue_count = init._ncreate;
   uint task = 0;
   for ( ; queue_count > 0; ++task) {
@@ -57,9 +58,9 @@ static uint simulate(const Stepper* stepper,
 
 static void run_test(size_t length, size_t chunk_size, uint n_workers) {
   const PartialArrayTaskStepper stepper(n_workers, chunk_size);
-  size_t to_length;
+  Atomic<size_t> to_length;
   uint tasks = simulate(&stepper, length, &to_length);
-  ASSERT_EQ(length, to_length);
+  ASSERT_EQ(length, to_length.load_relaxed());
   ASSERT_EQ(tasks, length / chunk_size);
 }