8367014: Rename class Atomic to AtomicAccess

Reviewed-by: dholmes, aph, stefank
Kim Barrett 2025-09-12 06:35:55 +00:00
parent 5abd18426d
commit 9e843f56ec
428 changed files with 2554 additions and 2552 deletions
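To make the scope of the rename concrete, here is a minimal before/after sketch of a typical call site (an editor's illustration, not a hunk from this patch; it assumes the HotSpot source tree on the include path, and the epoch variable is hypothetical):

// Before: the header was runtime/atomic.hpp and calls were spelled Atomic::...
// After:  both the header and the class are renamed.
#include "runtime/atomicAccess.hpp"

static volatile int _hypothetical_epoch = 0;   // stand-in for fields like _patching_epoch

void bump_epoch() {
  AtomicAccess::inc(&_hypothetical_epoch);                  // was Atomic::inc(...)
}

int read_epoch_acquire() {
  return AtomicAccess::load_acquire(&_hypothetical_epoch);  // was Atomic::load_acquire(...)
}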

View File

@ -275,7 +275,7 @@ address BarrierSetAssembler::patching_epoch_addr() {
}
void BarrierSetAssembler::increment_patching_epoch() {
Atomic::inc(&_patching_epoch);
AtomicAccess::inc(&_patching_epoch);
}
void BarrierSetAssembler::clear_patching_epoch() {

View File

@ -112,22 +112,22 @@ public:
}
int get_value() {
return Atomic::load_acquire(guard_addr());
return AtomicAccess::load_acquire(guard_addr());
}
void set_value(int value, int bit_mask) {
if (bit_mask == ~0) {
Atomic::release_store(guard_addr(), value);
AtomicAccess::release_store(guard_addr(), value);
return;
}
assert((value & ~bit_mask) == 0, "trying to set bits outside the mask");
value &= bit_mask;
int old_value = Atomic::load(guard_addr());
int old_value = AtomicAccess::load(guard_addr());
while (true) {
// Only bits in the mask are changed
int new_value = value | (old_value & ~bit_mask);
if (new_value == old_value) break;
int v = Atomic::cmpxchg(guard_addr(), old_value, new_value, memory_order_release);
int v = AtomicAccess::cmpxchg(guard_addr(), old_value, new_value, memory_order_release);
if (v == old_value) break;
old_value = v;
}

View File

@ -42,7 +42,7 @@
#include "prims/methodHandles.hpp"
#include "prims/upcallLinker.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/frame.inline.hpp"
@ -10265,7 +10265,7 @@ class StubGenerator: public StubCodeGenerator {
#if defined (LINUX) && !defined (__ARM_FEATURE_ATOMICS)
// ARMv8.1 LSE versions of the atomic stubs used by Atomic::PlatformXX.
// ARMv8.1 LSE versions of the atomic stubs used by AtomicAccess::PlatformXX.
//
// If LSE is in use, generate LSE versions of all the stubs. The
// non-LSE versions are in atomic_aarch64.S.

View File

@ -48,22 +48,22 @@ class NativeNMethodBarrier: public NativeInstruction {
public:
int get_value() {
return Atomic::load_acquire(guard_addr());
return AtomicAccess::load_acquire(guard_addr());
}
void set_value(int value, int bit_mask) {
if (bit_mask == ~0) {
Atomic::release_store(guard_addr(), value);
AtomicAccess::release_store(guard_addr(), value);
return;
}
assert((value & ~bit_mask) == 0, "trying to set bits outside the mask");
value &= bit_mask;
int old_value = Atomic::load(guard_addr());
int old_value = AtomicAccess::load(guard_addr());
while (true) {
// Only bits in the mask are changed
int new_value = value | (old_value & ~bit_mask);
if (new_value == old_value) break;
int v = Atomic::cmpxchg(guard_addr(), old_value, new_value, memory_order_release);
int v = AtomicAccess::cmpxchg(guard_addr(), old_value, new_value, memory_order_release);
if (v == old_value) break;
old_value = v;
}

View File

@ -421,7 +421,8 @@ class StubGenerator: public StubCodeGenerator {
}
// As per atomic.hpp the Atomic read-modify-write operations must be logically implemented as:
// As per atomicAccess.hpp the atomic read-modify-write operations must be
// logically implemented as:
// <fence>; <op>; <membar StoreLoad|StoreStore>
// But for load-linked/store-conditional based systems a fence here simply means
// no load/store can be reordered with respect to the initial load-linked, so we have:
@ -440,7 +441,7 @@ class StubGenerator: public StubCodeGenerator {
// be removed in the future.
// Implementation of atomic_add(jint add_value, volatile jint* dest)
// used by Atomic::add(volatile jint* dest, jint add_value)
// used by AtomicAccess::add(volatile jint* dest, jint add_value)
//
// Arguments :
//
@ -492,7 +493,7 @@ class StubGenerator: public StubCodeGenerator {
}
// Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest)
// used by Atomic::add(volatile jint* dest, jint exchange_value)
// used by AtomicAccess::add(volatile jint* dest, jint exchange_value)
//
// Arguments :
//
@ -542,7 +543,7 @@ class StubGenerator: public StubCodeGenerator {
}
// Implementation of jint atomic_cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value)
// used by Atomic::cmpxchg(volatile jint *dest, jint compare_value, jint exchange_value)
// used by AtomicAccess::cmpxchg(volatile jint *dest, jint compare_value, jint exchange_value)
//
// Arguments :
//
@ -582,7 +583,7 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
// Support for jlong AtomicAccess::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
// reordered before by a wrapper to (jlong compare_value, jlong exchange_value, volatile jlong *dest)
//
// Arguments :
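
The stub comments above describe the ordering contract these hand-written assembly
stubs implement for AtomicAccess::add, AtomicAccess::xchg and AtomicAccess::cmpxchg:
a conservative read-modify-write is logically <fence>; <op>; <membar StoreLoad|StoreStore>.
As a rough editor's sketch of that shape using portable GCC __atomic builtins
(illustration only; this is not part of the patch and not how the stubs here are generated):

// Conservative atomic add: full barrier, relaxed op, trailing full barrier.
inline int conservative_atomic_add(volatile int* dest, int add_value) {
  __atomic_thread_fence(__ATOMIC_SEQ_CST);                             // <fence>
  int result = __atomic_add_fetch(dest, add_value, __ATOMIC_RELAXED);  // <op>
  __atomic_thread_fence(__ATOMIC_SEQ_CST);                             // <membar>
  return result;
}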

View File

@ -73,7 +73,7 @@ public:
u_char buf[NativeMovRegMem::instruction_size];
uint64_t u64;
} new_mov_instr, old_mov_instr;
new_mov_instr.u64 = old_mov_instr.u64 = Atomic::load(instr);
new_mov_instr.u64 = old_mov_instr.u64 = AtomicAccess::load(instr);
while (true) {
// Only bits in the mask are changed
int old_value = nativeMovRegMem_at(old_mov_instr.buf)->offset();
@ -81,7 +81,7 @@ public:
if (new_value == old_value) return; // skip icache flush if nothing changed
nativeMovRegMem_at(new_mov_instr.buf)->set_offset(new_value, false /* no icache flush */);
// Swap in the new value
uint64_t v = Atomic::cmpxchg(instr, old_mov_instr.u64, new_mov_instr.u64, memory_order_relaxed);
uint64_t v = AtomicAccess::cmpxchg(instr, old_mov_instr.u64, new_mov_instr.u64, memory_order_relaxed);
if (v == old_mov_instr.u64) break;
old_mov_instr.u64 = v;
}

View File

@ -347,7 +347,7 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer)
// Finally patch out the jump.
volatile juint *jump_addr = (volatile juint*)instr_addr;
// Release not needed because caller uses invalidate_range after copying the remaining bytes.
//Atomic::release_store(jump_addr, *((juint*)code_buffer));
//AtomicAccess::release_store(jump_addr, *((juint*)code_buffer));
*jump_addr = *((juint*)code_buffer); // atomically store code over branch instruction
ICache::ppc64_flush_icache_bytes(instr_addr, NativeGeneralJump::instruction_size);
}

View File

@ -217,7 +217,7 @@ address BarrierSetAssembler::patching_epoch_addr() {
}
void BarrierSetAssembler::increment_patching_epoch() {
Atomic::inc(&_patching_epoch);
AtomicAccess::inc(&_patching_epoch);
}
void BarrierSetAssembler::clear_patching_epoch() {

View File

@ -106,22 +106,22 @@ public:
}
int get_value() {
return Atomic::load_acquire(guard_addr());
return AtomicAccess::load_acquire(guard_addr());
}
void set_value(int value, int bit_mask) {
if (bit_mask == ~0) {
Atomic::release_store(guard_addr(), value);
AtomicAccess::release_store(guard_addr(), value);
return;
}
assert((value & ~bit_mask) == 0, "trying to set bits outside the mask");
value &= bit_mask;
int old_value = Atomic::load(guard_addr());
int old_value = AtomicAccess::load(guard_addr());
while (true) {
// Only bits in the mask are changed
int new_value = value | (old_value & ~bit_mask);
if (new_value == old_value) break;
int v = Atomic::cmpxchg(guard_addr(), old_value, new_value, memory_order_release);
int v = AtomicAccess::cmpxchg(guard_addr(), old_value, new_value, memory_order_release);
if (v == old_value) break;
old_value = v;
}

View File

@ -64,12 +64,12 @@ class NativeMethodBarrier: public NativeInstruction {
assert((value & ~bit_mask) == 0, "trying to set bits outside the mask");
value &= bit_mask;
int32_t* data_addr = (int32_t*)get_patchable_data_address();
int old_value = Atomic::load(data_addr);
int old_value = AtomicAccess::load(data_addr);
while (true) {
// Only bits in the mask are changed
int new_value = value | (old_value & ~bit_mask);
if (new_value == old_value) break;
int v = Atomic::cmpxchg(data_addr, old_value, new_value, memory_order_release);
int v = AtomicAccess::cmpxchg(data_addr, old_value, new_value, memory_order_release);
if (v == old_value) break;
old_value = v;
}

View File

@ -65,12 +65,12 @@ public:
assert(align_up(immediate_address(), sizeof(jint)) ==
align_down(immediate_address(), sizeof(jint)), "immediate not aligned");
jint* data_addr = (jint*)immediate_address();
jint old_value = Atomic::load(data_addr);
jint old_value = AtomicAccess::load(data_addr);
while (true) {
// Only bits in the mask are changed
jint new_value = imm | (old_value & ~bit_mask);
if (new_value == old_value) break;
jint v = Atomic::cmpxchg(data_addr, old_value, new_value, memory_order_release);
jint v = AtomicAccess::cmpxchg(data_addr, old_value, new_value, memory_order_release);
if (v == old_value) break;
old_value = v;
}

View File

@ -43,7 +43,7 @@
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"

View File

@ -39,7 +39,7 @@
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@ -809,7 +809,7 @@ jlong os::javaTimeNanos() {
if (now <= prev) {
return prev; // same or retrograde time;
}
const uint64_t obsv = Atomic::cmpxchg(&Bsd::_max_abstime, prev, now);
const uint64_t obsv = AtomicAccess::cmpxchg(&Bsd::_max_abstime, prev, now);
assert(obsv >= prev, "invariant"); // Monotonicity
// If the CAS succeeded then we're done and return "now".
// If the CAS failed and the observed value "obsv" is >= now then
@ -2133,14 +2133,14 @@ uint os::processor_id() {
__asm__ ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : );
uint apic_id = (ebx >> 24) & (processor_id_map_size - 1);
int processor_id = Atomic::load(&processor_id_map[apic_id]);
int processor_id = AtomicAccess::load(&processor_id_map[apic_id]);
while (processor_id < 0) {
// Assign processor id to APIC id
processor_id = Atomic::cmpxchg(&processor_id_map[apic_id], processor_id_unassigned, processor_id_assigning);
processor_id = AtomicAccess::cmpxchg(&processor_id_map[apic_id], processor_id_unassigned, processor_id_assigning);
if (processor_id == processor_id_unassigned) {
processor_id = Atomic::fetch_then_add(&processor_id_next, 1) % os::processor_count();
Atomic::store(&processor_id_map[apic_id], processor_id);
processor_id = AtomicAccess::fetch_then_add(&processor_id_next, 1) % os::processor_count();
AtomicAccess::store(&processor_id_map[apic_id], processor_id);
}
}

View File

@ -42,7 +42,7 @@
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
@ -4781,8 +4781,8 @@ static bool should_warn_invalid_processor_id() {
static volatile int warn_once = 1;
if (Atomic::load(&warn_once) == 0 ||
Atomic::xchg(&warn_once, 0) == 0) {
if (AtomicAccess::load(&warn_once) == 0 ||
AtomicAccess::xchg(&warn_once, 0) == 0) {
// Don't warn more than once
return false;
}

View File

@ -31,7 +31,7 @@
#include "nmt/memTracker.hpp"
#include "os_posix.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@ -1691,7 +1691,7 @@ void PlatformEvent::park() { // AKA "down()"
// atomically decrement _event
for (;;) {
v = _event;
if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
if (AtomicAccess::cmpxchg(&_event, v, v - 1) == v) break;
}
guarantee(v >= 0, "invariant");
@ -1738,7 +1738,7 @@ int PlatformEvent::park_nanos(jlong nanos) {
// atomically decrement _event
for (;;) {
v = _event;
if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
if (AtomicAccess::cmpxchg(&_event, v, v - 1) == v) break;
}
guarantee(v >= 0, "invariant");
@ -1794,7 +1794,7 @@ void PlatformEvent::unpark() {
// but only in the correctly written condition checking loops of ObjectMonitor,
// Mutex/Monitor, and JavaThread::sleep
if (Atomic::xchg(&_event, 1) >= 0) return;
if (AtomicAccess::xchg(&_event, 1) >= 0) return;
int status = pthread_mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
@ -1847,9 +1847,9 @@ void Parker::park(bool isAbsolute, jlong time) {
// Optional fast-path check:
// Return immediately if a permit is available.
// We depend on Atomic::xchg() having full barrier semantics
// We depend on AtomicAccess::xchg() having full barrier semantics
// since we are doing a lock-free update to _counter.
if (Atomic::xchg(&_counter, 0) > 0) return;
if (AtomicAccess::xchg(&_counter, 0) > 0) return;
JavaThread *jt = JavaThread::current();

View File

@ -28,7 +28,7 @@
#include "jvm.h"
#include "logging/log.hpp"
#include "os_posix.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
@ -356,7 +356,7 @@ static void jdk_misc_signal_init() {
void os::signal_notify(int sig) {
if (sig_semaphore != nullptr) {
Atomic::inc(&pending_signals[sig]);
AtomicAccess::inc(&pending_signals[sig]);
sig_semaphore->signal();
} else {
// Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
@ -369,7 +369,7 @@ static int check_pending_signals() {
for (;;) {
for (int i = 0; i < NSIG + 1; i++) {
jint n = pending_signals[i];
if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
if (n > 0 && n == AtomicAccess::cmpxchg(&pending_signals[i], n, n - 1)) {
return i;
}
}

View File

@ -22,7 +22,7 @@
*
*/
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "suspendResume_posix.hpp"
/* try to switch state from state "from" to state "to"
@ -31,7 +31,7 @@
SuspendResume::State SuspendResume::switch_state(SuspendResume::State from,
SuspendResume::State to)
{
SuspendResume::State result = Atomic::cmpxchg(&_state, from, to);
SuspendResume::State result = AtomicAccess::cmpxchg(&_state, from, to);
if (result == from) {
// success
return to;

View File

@ -42,7 +42,7 @@
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@ -2462,7 +2462,7 @@ static void jdk_misc_signal_init() {
void os::signal_notify(int sig) {
if (sig_sem != nullptr) {
Atomic::inc(&pending_signals[sig]);
AtomicAccess::inc(&pending_signals[sig]);
sig_sem->signal();
} else {
// Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
@ -2475,7 +2475,7 @@ static int check_pending_signals() {
while (true) {
for (int i = 0; i < NSIG + 1; i++) {
jint n = pending_signals[i];
if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
if (n > 0 && n == AtomicAccess::cmpxchg(&pending_signals[i], n, n - 1)) {
return i;
}
}
@ -4297,15 +4297,15 @@ static void exit_process_or_thread(Ept what, int exit_code) {
// The first thread that reached this point, initializes the critical section.
if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, nullptr)) {
warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
} else if (Atomic::load_acquire(&process_exiting) == 0) {
} else if (AtomicAccess::load_acquire(&process_exiting) == 0) {
if (what != EPT_THREAD) {
// Atomically set process_exiting before the critical section
// to increase the visibility between racing threads.
Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
AtomicAccess::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
}
EnterCriticalSection(&crit_sect);
if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
if (what == EPT_THREAD && AtomicAccess::load_acquire(&process_exiting) == 0) {
// Remove from the array those handles of the threads that have completed exiting.
for (i = 0, j = 0; i < handle_count; ++i) {
res = WaitForSingleObject(handles[i], 0 /* don't wait */);
@ -4418,7 +4418,7 @@ static void exit_process_or_thread(Ept what, int exit_code) {
}
if (!registered &&
Atomic::load_acquire(&process_exiting) != 0 &&
AtomicAccess::load_acquire(&process_exiting) != 0 &&
process_exiting != GetCurrentThreadId()) {
// Some other thread is about to call exit(), so we don't let
// the current unregistered thread proceed to exit() or _endthreadex()
@ -5584,7 +5584,7 @@ int PlatformEvent::park(jlong Millis) {
int v;
for (;;) {
v = _Event;
if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
if (AtomicAccess::cmpxchg(&_Event, v, v-1) == v) break;
}
guarantee((v == 0) || (v == 1), "invariant");
if (v != 0) return OS_OK;
@ -5647,7 +5647,7 @@ void PlatformEvent::park() {
int v;
for (;;) {
v = _Event;
if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
if (AtomicAccess::cmpxchg(&_Event, v, v-1) == v) break;
}
guarantee((v == 0) || (v == 1), "invariant");
if (v != 0) return;
@ -5694,7 +5694,7 @@ void PlatformEvent::unpark() {
// from the first park() call after an unpark() call which will help
// shake out uses of park() and unpark() without condition variables.
if (Atomic::xchg(&_Event, 1) >= 0) return;
if (AtomicAccess::xchg(&_Event, 1) >= 0) return;
::SetEvent(_ParkHandle);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -93,7 +93,7 @@ inline void post_membar(atomic_memory_order order) {
template<size_t byte_size>
struct Atomic::PlatformAdd {
struct AtomicAccess::PlatformAdd {
template<typename D, typename I>
D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
@ -105,8 +105,8 @@ struct Atomic::PlatformAdd {
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
@ -131,8 +131,8 @@ inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
@ -156,9 +156,9 @@ inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value,
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
// Note that xchg doesn't necessarily do an acquire
// (see synchronizer.cpp).
@ -195,9 +195,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
// Note that xchg doesn't necessarily do an acquire
// (see synchronizer.cpp).
@ -235,15 +235,15 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T));
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
// specified otherwise (see atomic.hpp).
// specified otherwise (see atomicAccess.hpp).
// Using 32 bit internally.
volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
@ -305,15 +305,15 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
// specified otherwise (see atomic.hpp).
// specified otherwise (see atomicAccess.hpp).
T old_value;
const uint64_t zero = 0;
@ -355,15 +355,15 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
// specified otherwise (see atomic.hpp).
// specified otherwise (see atomicAccess.hpp).
T old_value;
const uint64_t zero = 0;
@ -404,10 +404,10 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
}
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE> {
struct AtomicAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE> {
template <typename T>
T operator()(const volatile T* p) const {
T t = Atomic::load(p);
T t = AtomicAccess::load(p);
// Use twi-isync for load_acquire (faster than lwsync).
__asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory");
return t;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -34,7 +34,7 @@
// See https://patchwork.kernel.org/patch/3575821/
template<size_t byte_size>
struct Atomic::PlatformAdd {
struct AtomicAccess::PlatformAdd {
template<typename D, typename I>
D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
if (order == memory_order_relaxed) {
@ -54,9 +54,9 @@ struct Atomic::PlatformAdd {
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<byte_size>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(byte_size == sizeof(T));
T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
FULL_MEM_BARRIER;
@ -65,10 +65,10 @@ inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(byte_size == sizeof(T));
if (order == memory_order_conservative) {
T value = compare_value;
@ -109,21 +109,21 @@ inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
}
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
struct AtomicAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
template <typename T>
T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
};
template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
struct AtomicAccess::PlatformOrderedStore<byte_size, RELEASE_X>
{
template <typename T>
void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
};
template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
struct AtomicAccess::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
{
template <typename T>
void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
// Implementation of class atomic
template<size_t byte_size>
struct Atomic::PlatformAdd {
struct AtomicAccess::PlatformAdd {
template<typename D, typename I>
D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;
@ -40,8 +40,8 @@ struct Atomic::PlatformAdd {
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value,
atomic_memory_order /* order */) const {
inline D AtomicAccess::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
D old_value;
@ -54,9 +54,9 @@ inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value,
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order /* order */) const {
inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (exchange_value)
@ -67,10 +67,10 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order /* order */) const {
inline T AtomicAccess::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(1 == sizeof(T));
__asm__ volatile ( "lock cmpxchgb %1,(%3)"
: "=a" (exchange_value)
@ -81,10 +81,10 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order /* order */) const {
inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "lock cmpxchgl %1,(%3)"
: "=a" (exchange_value)
@ -96,8 +96,8 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
#ifdef AMD64
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value,
atomic_memory_order /* order */) const {
inline D AtomicAccess::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
D old_value;
@ -110,9 +110,9 @@ inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value,
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order /* order */) const {
inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ("xchgq (%2),%0"
: "=r" (exchange_value)
@ -123,10 +123,10 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order /* order */) const {
inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ( "lock cmpxchgq %1,(%3)"
: "=a" (exchange_value)
@ -145,25 +145,25 @@ extern "C" {
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order /* order */) const {
inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(8 == sizeof(T));
return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}
// No direct support for 8-byte xchg; emulate using cmpxchg.
template<>
struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {};
struct AtomicAccess::PlatformXchg<8> : AtomicAccess::XchgUsingCmpxchg<8> {};
// No direct support for 8-byte add; emulate using cmpxchg.
template<>
struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {};
struct AtomicAccess::PlatformAdd<8> : AtomicAccess::AddUsingCmpxchg<8> {};
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
inline T AtomicAccess::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
volatile int64_t dest;
_Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
@ -172,8 +172,8 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
inline void AtomicAccess::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
_Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}
@ -181,7 +181,7 @@ inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
#endif // AMD64
template<>
struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
struct AtomicAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
template <typename T>
void operator()(volatile T* p, T v) const {
@ -193,7 +193,7 @@ struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
};
template<>
struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
struct AtomicAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
template <typename T>
void operator()(volatile T* p, T v) const {
@ -205,7 +205,7 @@ struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
};
template<>
struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
struct AtomicAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
template <typename T>
void operator()(volatile T* p, T v) const {
@ -218,7 +218,7 @@ struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
#ifdef AMD64
template<>
struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
struct AtomicAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
template <typename T>
void operator()(volatile T* p, T v) const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -32,7 +32,7 @@
// Implementation of class atomic
template<size_t byte_size>
struct Atomic::PlatformAdd {
struct AtomicAccess::PlatformAdd {
template<typename D, typename I>
D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
@ -44,8 +44,8 @@ struct Atomic::PlatformAdd {
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
@ -56,8 +56,8 @@ inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
@ -68,9 +68,9 @@ inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value,
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
FULL_MEM_BARRIER;
T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
@ -80,9 +80,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
FULL_MEM_BARRIER;
T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
@ -92,14 +92,14 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
struct AtomicAccess::PlatformCmpxchg<1> : AtomicAccess::CmpxchgByteUsingInt {};
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
T value = compare_value;
FULL_MEM_BARRIER;
@ -111,10 +111,10 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
T value = compare_value;
@ -134,7 +134,7 @@ inline void atomic_copy64(const volatile void *src, volatile void *dst) {
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
inline T AtomicAccess::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
T dest;
__atomic_load(const_cast<T*>(src), &dest, __ATOMIC_RELAXED);
@ -143,8 +143,8 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
inline void AtomicAccess::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
__atomic_store(dest, &store_value, __ATOMIC_RELAXED);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -70,7 +70,7 @@ inline D atomic_fastcall(F stub, volatile D *dest, T1 arg1, T2 arg2) {
}
template<size_t byte_size>
struct Atomic::PlatformAdd {
struct AtomicAccess::PlatformAdd {
template<typename D, typename I>
D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const;
@ -83,8 +83,8 @@ struct Atomic::PlatformAdd {
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
aarch64_atomic_stub_t stub;
@ -99,8 +99,8 @@ inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value,
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
aarch64_atomic_stub_t stub;
@ -115,9 +115,9 @@ inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value,
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
T old_value = atomic_fastcall(aarch64_atomic_xchg_4_impl, dest, exchange_value);
return old_value;
@ -125,8 +125,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
T old_value = atomic_fastcall(aarch64_atomic_xchg_8_impl, dest, exchange_value);
return old_value;
@ -134,10 +134,10 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T));
aarch64_atomic_stub_t stub;
switch (order) {
@ -152,10 +152,10 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
aarch64_atomic_stub_t stub;
switch (order) {
@ -175,10 +175,10 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
aarch64_atomic_stub_t stub;
switch (order) {
@ -197,21 +197,21 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
}
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
struct AtomicAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
template <typename T>
T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
};
template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
struct AtomicAccess::PlatformOrderedStore<byte_size, RELEASE_X>
{
template <typename T>
void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
};
template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
struct AtomicAccess::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
{
template <typename T>
void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,7 +78,7 @@ public:
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
inline T AtomicAccess::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>(
(*ARMAtomicFuncs::_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
@ -86,20 +86,20 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
inline void AtomicAccess::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
(*ARMAtomicFuncs::_store_long_func)(
PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
}
// As per atomic.hpp all read-modify-write operations have to provide two-way
// As per atomicAccess.hpp all read-modify-write operations have to provide two-way
// barriers semantics.
//
// For ARMv7 we add explicit barriers in the stubs.
template<size_t byte_size>
struct Atomic::PlatformAdd {
struct AtomicAccess::PlatformAdd {
template<typename D, typename I>
D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
@ -111,8 +111,8 @@ struct Atomic::PlatformAdd {
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
return add_using_helper<int32_t>(ARMAtomicFuncs::_add_func, dest, add_value);
@ -121,26 +121,26 @@ inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
return xchg_using_helper<int32_t>(ARMAtomicFuncs::_xchg_func, dest, exchange_value);
}
// No direct support for 8-byte xchg; emulate using cmpxchg.
template<>
struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {};
struct AtomicAccess::PlatformXchg<8> : AtomicAccess::XchgUsingCmpxchg<8> {};
// No direct support for 8-byte add; emulate using cmpxchg.
template<>
struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {};
struct AtomicAccess::PlatformAdd<8> : AtomicAccess::AddUsingCmpxchg<8> {};
// The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering
// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
struct AtomicAccess::PlatformCmpxchg<1> : AtomicAccess::CmpxchgByteUsingInt {};
inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
@ -160,20 +160,20 @@ inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, dest, compare_value, exchange_value);
}
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, dest, compare_value, exchange_value);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -93,7 +93,7 @@ inline void post_membar(atomic_memory_order order) {
template<size_t byte_size>
struct Atomic::PlatformAdd {
struct AtomicAccess::PlatformAdd {
template<typename D, typename I>
D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
@ -105,8 +105,8 @@ struct Atomic::PlatformAdd {
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
@ -131,8 +131,8 @@ inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
@ -156,9 +156,9 @@ inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value,
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
// Note that xchg doesn't necessarily do an acquire
// (see synchronizer.cpp).
@ -195,9 +195,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
// Note that xchg doesn't necessarily do an acquire
// (see synchronizer.cpp).
@ -235,15 +235,15 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T));
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
// specified otherwise (see atomic.hpp).
// specified otherwise (see atomicAccess.hpp).
// Using 32 bit internally.
unsigned int old_value, loaded_value;
@ -282,15 +282,15 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
// specified otherwise (see atomic.hpp).
// specified otherwise (see atomicAccess.hpp).
T old_value;
const uint64_t zero = 0;
@ -332,15 +332,15 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
// specified otherwise (see atomic.hpp).
// specified otherwise (see atomicAccess.hpp).
T old_value;
const uint64_t zero = 0;
@ -381,11 +381,11 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
}
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
struct AtomicAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
template <typename T>
T operator()(const volatile T* p) const {
T t = Atomic::load(p);
T t = AtomicAccess::load(p);
// Use twi-isync for load_acquire (faster than lwsync).
__asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory");
return t;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -40,7 +40,7 @@
#endif
template<size_t byte_size>
struct Atomic::PlatformAdd {
struct AtomicAccess::PlatformAdd {
template<typename D, typename I>
D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
@ -71,10 +71,10 @@ struct Atomic::PlatformAdd {
#ifndef FULL_COMPILER_ATOMIC_SUPPORT
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest __attribute__((unused)),
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<1>::operator()(T volatile* dest __attribute__((unused)),
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T));
if (order != memory_order_relaxed) {
@ -122,10 +122,10 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest __attribute__((
// See also JDK-8326936.
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest __attribute__((unused)),
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest __attribute__((unused)),
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
int32_t old_value;
@ -154,9 +154,9 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest __attribute__((
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<byte_size>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
#ifndef FULL_COMPILER_ATOMIC_SUPPORT
// If we add xchg for sub word and are using older compiler
// it must be added here due to not using lib atomic.
@ -180,10 +180,10 @@ inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
// __attribute__((unused)) on dest is to get rid of spurious GCC warnings.
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest __attribute__((unused)),
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<byte_size>::operator()(T volatile* dest __attribute__((unused)),
T compare_value,
T exchange_value,
atomic_memory_order order) const {
#ifndef FULL_COMPILER_ATOMIC_SUPPORT
STATIC_ASSERT(byte_size > 4);
@ -204,21 +204,21 @@ inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest __attri
}
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
struct AtomicAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
template <typename T>
T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
};
template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
struct AtomicAccess::PlatformOrderedStore<byte_size, RELEASE_X>
{
template <typename T>
void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
};
template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
struct AtomicAccess::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
{
template <typename T>
void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,7 +26,7 @@
#ifndef OS_CPU_LINUX_S390_ATOMIC_LINUX_S390_HPP
#define OS_CPU_LINUX_S390_ATOMIC_LINUX_S390_HPP
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
@ -55,7 +55,7 @@
// before the other store becomes visible.
//------------
// Atomic::add
// AtomicAccess::add
//------------
// These methods force the value in memory to be augmented by the passed increment.
// Both, memory value and increment, are treated as 32bit signed binary integers.
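
As an editor's usage sketch of the operation documented above (not part of the patch;
it assumes the HotSpot headers are available and the counter is hypothetical):

#include "runtime/atomicAccess.hpp"

static volatile int32_t _hypothetical_counter = 0;

int32_t bump_counter(int32_t inc) {
  // Augments the value in memory by 'inc' and returns the updated value.
  return AtomicAccess::add(&_hypothetical_counter, inc);
}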
@ -75,7 +75,7 @@ inline void z196_fast_sync() {
}
template<size_t byte_size>
struct Atomic::PlatformAdd {
struct AtomicAccess::PlatformAdd {
template<typename D, typename I>
D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
@ -87,8 +87,8 @@ struct Atomic::PlatformAdd {
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I inc,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<4>::add_then_fetch(D volatile* dest, I inc,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
@ -141,8 +141,8 @@ inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I inc,
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I inc,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I inc,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
@ -194,7 +194,7 @@ inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I inc,
//-------------
// Atomic::xchg
// AtomicAccess::xchg
//-------------
// These methods force the value in memory to be replaced by the new value passed
// in as argument.
@ -211,9 +211,9 @@ inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I inc,
// replacement succeeded.
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order unused) const {
inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order unused) const {
STATIC_ASSERT(4 == sizeof(T));
T old;
@ -235,9 +235,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order unused) const {
inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order unused) const {
STATIC_ASSERT(8 == sizeof(T));
T old;
@ -258,7 +258,7 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
}
//----------------
// Atomic::cmpxchg
// AtomicAccess::cmpxchg
//----------------
// These methods compare the value in memory with a given compare value.
// If both values compare equal, the value in memory is replaced with
@ -288,14 +288,14 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
struct AtomicAccess::PlatformCmpxchg<1> : AtomicAccess::CmpxchgByteUsingInt {};
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T cmp_val,
T xchg_val,
atomic_memory_order unused) const {
inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest,
T cmp_val,
T xchg_val,
atomic_memory_order unused) const {
STATIC_ASSERT(4 == sizeof(T));
T old;
@ -316,10 +316,10 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T cmp_val,
T xchg_val,
atomic_memory_order unused) const {
inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
T cmp_val,
T xchg_val,
atomic_memory_order unused) const {
STATIC_ASSERT(8 == sizeof(T));
T old;
@ -339,7 +339,7 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
}
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
struct AtomicAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
template <typename T>
T operator()(const volatile T* p) const { T t = *p; OrderAccess::acquire(); return t; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
// Implementation of class atomic
template<size_t byte_size>
struct Atomic::PlatformAdd {
struct AtomicAccess::PlatformAdd {
template<typename D, typename I>
D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const;
@ -40,8 +40,8 @@ struct Atomic::PlatformAdd {
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
D old_value;
@ -54,9 +54,9 @@ inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value,
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (exchange_value)
@ -67,10 +67,10 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order /* order */) const {
inline T AtomicAccess::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(1 == sizeof(T));
__asm__ volatile ("lock cmpxchgb %1,(%3)"
: "=a" (exchange_value)
@ -81,10 +81,10 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order /* order */) const {
inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ("lock cmpxchgl %1,(%3)"
: "=a" (exchange_value)
@ -97,8 +97,8 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
D old_value;
@ -111,8 +111,8 @@ inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value,
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ("xchgq (%2),%0"
: "=r" (exchange_value)
@ -123,10 +123,10 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order /* order */) const {
inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
: "=a" (exchange_value)
@ -145,25 +145,25 @@ extern "C" {
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}
// No direct support for 8-byte xchg; emulate using cmpxchg.
template<>
struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {};
struct AtomicAccess::PlatformXchg<8> : AtomicAccess::XchgUsingCmpxchg<8> {};
// No direct support for 8-byte add; emulate using cmpxchg.
template<>
struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {};
struct AtomicAccess::PlatformAdd<8> : AtomicAccess::AddUsingCmpxchg<8> {};
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
inline T AtomicAccess::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
volatile int64_t dest;
_Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
@ -172,8 +172,8 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
inline void AtomicAccess::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
_Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}
@ -181,7 +181,7 @@ inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
#endif // AMD64
template<>
struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
struct AtomicAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
template <typename T>
void operator()(volatile T* p, T v) const {
@ -193,7 +193,7 @@ struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
};
template<>
struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
struct AtomicAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
template <typename T>
void operator()(volatile T* p, T v) const {
@ -205,7 +205,7 @@ struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
};
template<>
struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
struct AtomicAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
template <typename T>
void operator()(volatile T* p, T v) const {
@ -218,7 +218,7 @@ struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
#ifdef AMD64
template<>
struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
struct AtomicAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
template <typename T>
void operator()(volatile T* p, T v) const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -31,7 +31,7 @@
// Implementation of class atomic
template<size_t byte_size>
struct Atomic::PlatformAdd {
struct AtomicAccess::PlatformAdd {
template<typename D, typename I>
D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
@ -43,8 +43,8 @@ struct Atomic::PlatformAdd {
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
@ -55,8 +55,8 @@ inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
@ -67,9 +67,9 @@ inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value,
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
FULL_MEM_BARRIER;
T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
@ -79,9 +79,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
FULL_MEM_BARRIER;
T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
@ -91,14 +91,14 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
struct AtomicAccess::PlatformCmpxchg<1> : AtomicAccess::CmpxchgByteUsingInt {};
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
T value = compare_value;
@ -111,10 +111,10 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
FULL_MEM_BARRIER;
@ -134,7 +134,7 @@ inline void atomic_copy64(const volatile void *src, volatile void *dst) {
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
inline T AtomicAccess::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
T dest;
__atomic_load(const_cast<T*>(src), &dest, __ATOMIC_RELAXED);
@ -143,8 +143,8 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
inline void AtomicAccess::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
__atomic_store(dest, &store_value, __ATOMIC_RELAXED);
}

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -30,14 +31,14 @@
#include "runtime/vm_version.hpp"
// As per atomic.hpp all read-modify-write operations have to provide two-way
// As per atomicAccess.hpp all read-modify-write operations have to provide two-way
// barriers semantics. The memory_order parameter is ignored - we always provide
// the strongest/most-conservative ordering
//
// For AARCH64 we add explicit barriers in the stubs.
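
Sketched out, that conservative two-way ordering has the same shape as the Linux AArch64 hunks earlier in this change, which bracket a relaxed primitive with full fences; the helper name here is made up and FULL_MEM_BARRIER is the platform macro those hunks use:

template <typename T>
inline T fully_fenced_xchg(T volatile* dest, T exchange_value) {
  FULL_MEM_BARRIER;   // earlier accesses cannot sink below the exchange
  T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
  FULL_MEM_BARRIER;   // later accesses cannot hoist above the exchange
  return result;
}
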
template<size_t byte_size>
struct Atomic::PlatformAdd {
struct AtomicAccess::PlatformAdd {
template<typename D, typename I>
D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
@ -53,9 +54,9 @@ struct Atomic::PlatformAdd {
#define DEFINE_INTRINSIC_ADD(IntrinsicName, IntrinsicType) \
template<> \
template<typename D, typename I> \
inline D Atomic::PlatformAdd<sizeof(IntrinsicType)>::add_then_fetch(D volatile* dest, \
I add_value, \
atomic_memory_order order) const { \
inline D AtomicAccess::PlatformAdd<sizeof(IntrinsicType)>::add_then_fetch(D volatile* dest, \
I add_value, \
atomic_memory_order order) const { \
STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(D)); \
return PrimitiveConversions::cast<D>( \
IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \
@ -70,9 +71,9 @@ DEFINE_INTRINSIC_ADD(InterlockedAdd64, __int64)
#define DEFINE_INTRINSIC_XCHG(IntrinsicName, IntrinsicType) \
template<> \
template<typename T> \
inline T Atomic::PlatformXchg<sizeof(IntrinsicType)>::operator()(T volatile* dest, \
T exchange_value, \
atomic_memory_order order) const { \
inline T AtomicAccess::PlatformXchg<sizeof(IntrinsicType)>::operator()(T volatile* dest, \
T exchange_value, \
atomic_memory_order order) const { \
STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \
return PrimitiveConversions::cast<T>( \
IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \
@ -85,16 +86,16 @@ DEFINE_INTRINSIC_XCHG(InterlockedExchange64, __int64)
#undef DEFINE_INTRINSIC_XCHG
// Note: the order of the parameters is different between
// Atomic::PlatformCmpxchg<*>::operator() and the
// AtomicAccess::PlatformCmpxchg<*>::operator() and the
// InterlockedCompareExchange* API.
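
Spelled out for the 4-byte LONG case, the argument swap the note warns about looks like this (dest, compare_value and exchange_value follow the operator above):

// HotSpot: observed = cmpxchg(dest, compare_value, exchange_value)
// Win32 takes the exchange value second and the comparand third:
LONG observed = InterlockedCompareExchange(dest, exchange_value, compare_value);
bool swapped  = (observed == compare_value);   // both APIs return the prior contents
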
#define DEFINE_INTRINSIC_CMPXCHG(IntrinsicName, IntrinsicType) \
template<> \
template<typename T> \
inline T Atomic::PlatformCmpxchg<sizeof(IntrinsicType)>::operator()(T volatile* dest, \
T compare_value, \
T exchange_value, \
atomic_memory_order order) const { \
inline T AtomicAccess::PlatformCmpxchg<sizeof(IntrinsicType)>::operator()(T volatile* dest, \
T compare_value, \
T exchange_value, \
atomic_memory_order order) const { \
STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \
return PrimitiveConversions::cast<T>( \
IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#ifndef OS_CPU_WINDOWS_AARCH64_COPY_WINDOWS_AARCH64_HPP
#define OS_CPU_WINDOWS_AARCH64_COPY_WINDOWS_AARCH64_HPP
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include <string.h>
@ -35,14 +35,14 @@ static void pd_conjoint_atomic_helper(const T* from, T* to, size_t count) {
if (from > to) {
while (count-- > 0) {
// Copy forwards
Atomic::store(to++, Atomic::load(from++));
AtomicAccess::store(to++, AtomicAccess::load(from++));
}
} else {
from += count - 1;
to += count - 1;
while (count-- > 0) {
// Copy backwards
Atomic::store(to--, Atomic::load(from--));
AtomicAccess::store(to--, AtomicAccess::load(from--));
}
}
}
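
The forward/backward split gives memmove-like behaviour for overlapping ranges while each element is still read and written atomically; a hypothetical call site, with elems and count purely illustrative:

// from > to: must walk forwards, or stores would clobber elements not yet read
pd_conjoint_atomic_helper(elems + 1, elems, count);
// from < to: must walk backwards for the same reason
pd_conjoint_atomic_helper(elems, elems + 1, count);
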

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,15 +32,15 @@
// guaranteed to have acquire release semantics (w.r.t. compiler
// reordering) and therefore does not even need a compiler barrier
// for normal acquire release accesses. And all generalized
// bound calls like release_store go through Atomic::load
// and Atomic::store which do volatile memory accesses.
// bound calls like release_store go through AtomicAccess::load
// and AtomicAccess::store which do volatile memory accesses.
template<> inline void ScopedFence<X_ACQUIRE>::postfix() { }
template<> inline void ScopedFence<RELEASE_X>::prefix() { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix() { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
template<size_t byte_size>
struct Atomic::PlatformAdd {
struct AtomicAccess::PlatformAdd {
template<typename D, typename I>
D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
@ -56,9 +56,9 @@ struct Atomic::PlatformAdd {
#define DEFINE_INTRINSIC_ADD(IntrinsicName, IntrinsicType) \
template<> \
template<typename D, typename I> \
inline D Atomic::PlatformAdd<sizeof(IntrinsicType)>::add_then_fetch(D volatile* dest, \
I add_value, \
atomic_memory_order order) const { \
inline D AtomicAccess::PlatformAdd<sizeof(IntrinsicType)>::add_then_fetch(D volatile* dest, \
I add_value, \
atomic_memory_order order) const { \
STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(D)); \
return PrimitiveConversions::cast<D>( \
IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \
@ -73,9 +73,9 @@ DEFINE_INTRINSIC_ADD(InterlockedAdd64, __int64)
#define DEFINE_INTRINSIC_XCHG(IntrinsicName, IntrinsicType) \
template<> \
template<typename T> \
inline T Atomic::PlatformXchg<sizeof(IntrinsicType)>::operator()(T volatile* dest, \
T exchange_value, \
atomic_memory_order order) const { \
inline T AtomicAccess::PlatformXchg<sizeof(IntrinsicType)>::operator()(T volatile* dest, \
T exchange_value, \
atomic_memory_order order) const { \
STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \
return PrimitiveConversions::cast<T>( \
IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \
@ -88,16 +88,16 @@ DEFINE_INTRINSIC_XCHG(InterlockedExchange64, __int64)
#undef DEFINE_INTRINSIC_XCHG
// Note: the order of the parameters is different between
// Atomic::PlatformCmpxchg<*>::operator() and the
// AtomicAccess::PlatformCmpxchg<*>::operator() and the
// InterlockedCompareExchange* API.
#define DEFINE_INTRINSIC_CMPXCHG(IntrinsicName, IntrinsicType) \
template<> \
template<typename T> \
inline T Atomic::PlatformCmpxchg<sizeof(IntrinsicType)>::operator()(T volatile* dest, \
T compare_value, \
T exchange_value, \
atomic_memory_order order) const { \
inline T AtomicAccess::PlatformCmpxchg<sizeof(IntrinsicType)>::operator()(T volatile* dest, \
T compare_value, \
T exchange_value, \
atomic_memory_order order) const { \
STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \
return PrimitiveConversions::cast<T>( \
IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,21 +25,21 @@
#ifndef OS_CPU_WINDOWS_X86_COPY_WINDOWS_X86_HPP
#define OS_CPU_WINDOWS_X86_COPY_WINDOWS_X86_HPP
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
template <typename T>
static void pd_conjoint_atomic_helper(const T* from, T* to, size_t count) {
if (from > to) {
while (count-- > 0) {
// Copy forwards
Atomic::store(to++, Atomic::load(from++));
AtomicAccess::store(to++, AtomicAccess::load(from++));
}
} else {
from += count - 1;
to += count - 1;
while (count-- > 0) {
// Copy backwards
Atomic::store(to--, Atomic::load(from--));
AtomicAccess::store(to--, AtomicAccess::load(from--));
}
}
}

View File

@ -54,7 +54,7 @@
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"

View File

@ -57,7 +57,7 @@ bool AOTLinkedClassBulkLoader::class_preloading_finished() {
// The ConstantPools of preloaded classes have references to other preloaded classes. We don't
// want any Java code (including JVMCI compiler) to use these classes until all of them
// are loaded.
return Atomic::load_acquire(&_all_completed);
return AtomicAccess::load_acquire(&_all_completed);
}
}
@ -90,7 +90,7 @@ void AOTLinkedClassBulkLoader::load_non_javabase_classes(JavaThread* current) {
}
_app_completed = true;
Atomic::release_store(&_all_completed, true);
AtomicAccess::release_store(&_all_completed, true);
}
void AOTLinkedClassBulkLoader::load_classes_in_loader(JavaThread* current, AOTLinkedClassCategory class_category, oop class_loader_oop) {

View File

@ -418,7 +418,7 @@ ArchiveWorkers::ArchiveWorkers() :
_task(nullptr) {}
ArchiveWorkers::~ArchiveWorkers() {
assert(Atomic::load(&_state) != WORKING, "Should not be working");
assert(AtomicAccess::load(&_state) != WORKING, "Should not be working");
}
int ArchiveWorkers::max_workers() {
@ -435,11 +435,11 @@ bool ArchiveWorkers::is_parallel() {
void ArchiveWorkers::start_worker_if_needed() {
while (true) {
int cur = Atomic::load(&_started_workers);
int cur = AtomicAccess::load(&_started_workers);
if (cur >= _num_workers) {
return;
}
if (Atomic::cmpxchg(&_started_workers, cur, cur + 1, memory_order_relaxed) == cur) {
if (AtomicAccess::cmpxchg(&_started_workers, cur, cur + 1, memory_order_relaxed) == cur) {
new ArchiveWorkerThread(this);
return;
}
@ -447,9 +447,9 @@ void ArchiveWorkers::start_worker_if_needed() {
}
void ArchiveWorkers::run_task(ArchiveWorkerTask* task) {
assert(Atomic::load(&_state) == UNUSED, "Should be unused yet");
assert(Atomic::load(&_task) == nullptr, "Should not have running tasks");
Atomic::store(&_state, WORKING);
assert(AtomicAccess::load(&_state) == UNUSED, "Should be unused yet");
assert(AtomicAccess::load(&_task) == nullptr, "Should not have running tasks");
AtomicAccess::store(&_state, WORKING);
if (is_parallel()) {
run_task_multi(task);
@ -457,8 +457,8 @@ void ArchiveWorkers::run_task(ArchiveWorkerTask* task) {
run_task_single(task);
}
assert(Atomic::load(&_state) == WORKING, "Should be working");
Atomic::store(&_state, SHUTDOWN);
assert(AtomicAccess::load(&_state) == WORKING, "Should be working");
AtomicAccess::store(&_state, SHUTDOWN);
}
void ArchiveWorkers::run_task_single(ArchiveWorkerTask* task) {
@ -475,8 +475,8 @@ void ArchiveWorkers::run_task_multi(ArchiveWorkerTask* task) {
// Set up the run and publish the task. Issue one additional finish token
// to cover the semaphore shutdown path, see below.
Atomic::store(&_finish_tokens, _num_workers + 1);
Atomic::release_store(&_task, task);
AtomicAccess::store(&_finish_tokens, _num_workers + 1);
AtomicAccess::release_store(&_task, task);
// Kick off pool startup by starting a single worker, and proceed
// immediately to executing the task locally.
@ -494,19 +494,19 @@ void ArchiveWorkers::run_task_multi(ArchiveWorkerTask* task) {
// on semaphore first, and then spin-wait for all workers to terminate.
_end_semaphore.wait();
SpinYield spin;
while (Atomic::load(&_finish_tokens) != 0) {
while (AtomicAccess::load(&_finish_tokens) != 0) {
spin.wait();
}
OrderAccess::fence();
assert(Atomic::load(&_finish_tokens) == 0, "All tokens are consumed");
assert(AtomicAccess::load(&_finish_tokens) == 0, "All tokens are consumed");
}
void ArchiveWorkers::run_as_worker() {
assert(is_parallel(), "Should be in parallel mode");
ArchiveWorkerTask* task = Atomic::load_acquire(&_task);
ArchiveWorkerTask* task = AtomicAccess::load_acquire(&_task);
task->run();
// All work done in threads should be visible to caller.
@ -514,22 +514,22 @@ void ArchiveWorkers::run_as_worker() {
// Signal the pool the work is complete, and we are exiting.
// Worker cannot do anything else with the pool after this.
if (Atomic::sub(&_finish_tokens, 1, memory_order_relaxed) == 1) {
if (AtomicAccess::sub(&_finish_tokens, 1, memory_order_relaxed) == 1) {
// Last worker leaving. Notify the pool it can unblock to spin-wait.
// Then consume the last token and leave.
_end_semaphore.signal();
int last = Atomic::sub(&_finish_tokens, 1, memory_order_relaxed);
int last = AtomicAccess::sub(&_finish_tokens, 1, memory_order_relaxed);
assert(last == 0, "Should be");
}
}
void ArchiveWorkerTask::run() {
while (true) {
int chunk = Atomic::load(&_chunk);
int chunk = AtomicAccess::load(&_chunk);
if (chunk >= _max_chunks) {
return;
}
if (Atomic::cmpxchg(&_chunk, chunk, chunk + 1, memory_order_relaxed) == chunk) {
if (AtomicAccess::cmpxchg(&_chunk, chunk, chunk + 1, memory_order_relaxed) == chunk) {
assert(0 <= chunk && chunk < _max_chunks, "Sanity");
work(chunk, _max_chunks);
}

View File

@ -47,7 +47,7 @@
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/constantPool.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
@ -87,7 +87,7 @@ ClassListParser::ClassListParser(const char* file, ParseMode parse_mode) :
// _instance should only be accessed by the thread that created _instance.
assert(_instance == nullptr, "must be singleton");
_instance = this;
Atomic::store(&_parsing_thread, Thread::current());
AtomicAccess::store(&_parsing_thread, Thread::current());
}
FILE* ClassListParser::do_open(const char* file) {
@ -104,11 +104,11 @@ FILE* ClassListParser::do_open(const char* file) {
}
bool ClassListParser::is_parsing_thread() {
return Atomic::load(&_parsing_thread) == Thread::current();
return AtomicAccess::load(&_parsing_thread) == Thread::current();
}
ClassListParser::~ClassListParser() {
Atomic::store(&_parsing_thread, (Thread*)nullptr);
AtomicAccess::store(&_parsing_thread, (Thread*)nullptr);
delete _indy_items;
delete _interfaces;
_instance = nullptr;

View File

@ -5728,8 +5728,8 @@ void ClassFileParser::mangle_hidden_class_name(InstanceKlass* const ik) {
// occupied by the archive at run time, so we know that no dynamically
// loaded InstanceKlass will be placed under there.
static volatile size_t counter = 0;
Atomic::cmpxchg(&counter, (size_t)0, Arguments::default_SharedBaseAddress()); // initialize it
size_t new_id = Atomic::add(&counter, (size_t)1);
AtomicAccess::cmpxchg(&counter, (size_t)0, Arguments::default_SharedBaseAddress()); // initialize it
size_t new_id = AtomicAccess::add(&counter, (size_t)1);
jio_snprintf(addr_buf, 20, "0x%zx", new_id);
} else {
jio_snprintf(addr_buf, 20, INTPTR_FORMAT, p2i(ik));

View File

@ -750,7 +750,7 @@ void ClassLoader::add_to_boot_append_entries(ClassPathEntry *new_entry) {
if (_last_append_entry == nullptr) {
_last_append_entry = new_entry;
assert(first_append_entry() == nullptr, "boot loader's append class path entry list not empty");
Atomic::release_store(&_first_append_entry_list, new_entry);
AtomicAccess::release_store(&_first_append_entry_list, new_entry);
} else {
_last_append_entry->set_next(new_entry);
_last_append_entry = new_entry;

View File

@ -212,7 +212,7 @@ class ClassLoader: AllStatic {
// Note: boot loader append path does not support named modules.
static ClassPathEntry* volatile _first_append_entry_list;
static ClassPathEntry* first_append_entry() {
return Atomic::load_acquire(&_first_append_entry_list);
return AtomicAccess::load_acquire(&_first_append_entry_list);
}
// Last entry in linked list of appended ClassPathEntry instances

View File

@ -27,14 +27,14 @@
#include "classfile/classLoader.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
// Next entry in class path
inline ClassPathEntry* ClassPathEntry::next() const { return Atomic::load_acquire(&_next); }
inline ClassPathEntry* ClassPathEntry::next() const { return AtomicAccess::load_acquire(&_next); }
inline void ClassPathEntry::set_next(ClassPathEntry* next) {
// may have unlocked readers, so ensure visibility.
Atomic::release_store(&_next, next);
AtomicAccess::release_store(&_next, next);
}
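
The "unlocked readers" comment relies on the usual release/acquire publication pairing; a sketch with a hypothetical Node type: the writer finishes initialization before the release-store, and lock-free readers use load_acquire so the contents become visible together with the pointer:

#include "runtime/atomicAccess.hpp"

struct Node {
  int payload;
  Node* volatile next;
};

static void publish(Node* volatile* head, Node* n) {
  n->payload = 42;                        // fully initialize first
  AtomicAccess::release_store(head, n);   // then make the node reachable
}

static Node* peek(Node* volatile* head) {
  // Pairs with the release_store above: seeing n implies seeing n->payload.
  return AtomicAccess::load_acquire(head);
}
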
inline ClassPathEntry* ClassLoader::classpath_entry(int n) {

View File

@ -72,7 +72,7 @@
#include "oops/verifyOopClosure.hpp"
#include "oops/weakHandle.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/safepoint.hpp"
@ -192,19 +192,19 @@ ClassLoaderData::ChunkedHandleList::~ChunkedHandleList() {
OopHandle ClassLoaderData::ChunkedHandleList::add(oop o) {
if (_head == nullptr || _head->_size == Chunk::CAPACITY) {
Chunk* next = new Chunk(_head);
Atomic::release_store(&_head, next);
AtomicAccess::release_store(&_head, next);
}
oop* handle = &_head->_data[_head->_size];
NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
Atomic::release_store(&_head->_size, _head->_size + 1);
AtomicAccess::release_store(&_head->_size, _head->_size + 1);
return OopHandle(handle);
}
int ClassLoaderData::ChunkedHandleList::count() const {
int count = 0;
Chunk* chunk = Atomic::load_acquire(&_head);
Chunk* chunk = AtomicAccess::load_acquire(&_head);
while (chunk != nullptr) {
count += Atomic::load(&chunk->_size);
count += AtomicAccess::load(&chunk->_size);
chunk = chunk->_next;
}
return count;
@ -217,10 +217,10 @@ inline void ClassLoaderData::ChunkedHandleList::oops_do_chunk(OopClosure* f, Chu
}
void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
Chunk* head = Atomic::load_acquire(&_head);
Chunk* head = AtomicAccess::load_acquire(&_head);
if (head != nullptr) {
// Must be careful when reading size of head
oops_do_chunk(f, head, Atomic::load_acquire(&head->_size));
oops_do_chunk(f, head, AtomicAccess::load_acquire(&head->_size));
for (Chunk* c = head->_next; c != nullptr; c = c->_next) {
oops_do_chunk(f, c, c->_size);
}
@ -258,9 +258,9 @@ bool ClassLoaderData::ChunkedHandleList::contains(oop p) {
#ifndef PRODUCT
bool ClassLoaderData::ChunkedHandleList::owner_of(oop* oop_handle) {
Chunk* chunk = Atomic::load_acquire(&_head);
Chunk* chunk = AtomicAccess::load_acquire(&_head);
while (chunk != nullptr) {
if (&(chunk->_data[0]) <= oop_handle && oop_handle < &(chunk->_data[Atomic::load(&chunk->_size)])) {
if (&(chunk->_data[0]) <= oop_handle && oop_handle < &(chunk->_data[AtomicAccess::load(&chunk->_size)])) {
return true;
}
chunk = chunk->_next;
@ -271,12 +271,12 @@ bool ClassLoaderData::ChunkedHandleList::owner_of(oop* oop_handle) {
void ClassLoaderData::clear_claim(int claim) {
for (;;) {
int old_claim = Atomic::load(&_claim);
int old_claim = AtomicAccess::load(&_claim);
if ((old_claim & claim) == 0) {
return;
}
int new_claim = old_claim & ~claim;
if (Atomic::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
if (AtomicAccess::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
return;
}
}
@ -290,12 +290,12 @@ void ClassLoaderData::verify_not_claimed(int claim) {
bool ClassLoaderData::try_claim(int claim) {
for (;;) {
int old_claim = Atomic::load(&_claim);
int old_claim = AtomicAccess::load(&_claim);
if ((old_claim & claim) == claim) {
return false;
}
int new_claim = old_claim | claim;
if (Atomic::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
if (AtomicAccess::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
return true;
}
}
@ -383,7 +383,7 @@ void ClassLoaderData::oops_do(OopClosure* f, int claim_value, bool clear_mod_oop
void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
// Lock-free access requires load_acquire
for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
klass_closure->do_klass(k);
assert(k != k->next_link(), "no loops!");
}
@ -391,7 +391,7 @@ void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
void ClassLoaderData::classes_do(void f(Klass * const)) {
// Lock-free access requires load_acquire
for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
f(k);
assert(k != k->next_link(), "no loops!");
}
@ -399,7 +399,7 @@ void ClassLoaderData::classes_do(void f(Klass * const)) {
void ClassLoaderData::methods_do(void f(Method*)) {
// Lock-free access requires load_acquire
for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
InstanceKlass::cast(k)->methods_do(f);
}
@ -408,7 +408,7 @@ void ClassLoaderData::methods_do(void f(Method*)) {
void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
// Lock-free access requires load_acquire
for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
// Filter out InstanceKlasses (or their ObjArrayKlasses) that have not entered the
// loaded state.
if (k->is_instance_klass()) {
@ -436,7 +436,7 @@ void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
// Lock-free access requires load_acquire
for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
if (k->is_instance_klass()) {
f(InstanceKlass::cast(k));
}
@ -498,7 +498,7 @@ void ClassLoaderData::record_dependency(const Klass* k) {
// It's a dependency we won't find through GC, add it.
if (!_handles.contains(to)) {
NOT_PRODUCT(Atomic::inc(&_dependency_count));
NOT_PRODUCT(AtomicAccess::inc(&_dependency_count));
LogTarget(Trace, class, loader, data) lt;
if (lt.is_enabled()) {
ResourceMark rm;
@ -523,7 +523,7 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
k->set_next_link(old_value);
// Link the new item into the list, making sure the linked class is stable
// since the list can be walked without a lock
Atomic::release_store(&_klasses, k);
AtomicAccess::release_store(&_klasses, k);
if (k->is_array_klass()) {
ClassLoaderDataGraph::inc_array_classes(1);
} else {
@ -635,7 +635,7 @@ void ClassLoaderData::unload() {
ModuleEntryTable* ClassLoaderData::modules() {
// Lazily create the module entry table at first request.
// Lock-free access requires load_acquire.
ModuleEntryTable* modules = Atomic::load_acquire(&_modules);
ModuleEntryTable* modules = AtomicAccess::load_acquire(&_modules);
if (modules == nullptr) {
MutexLocker m1(Module_lock);
// Check if _modules got allocated while we were waiting for this lock.
@ -645,7 +645,7 @@ ModuleEntryTable* ClassLoaderData::modules() {
{
MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
// Ensure _modules is stable, since it is examined without a lock
Atomic::release_store(&_modules, modules);
AtomicAccess::release_store(&_modules, modules);
}
}
}
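
The same lazily-created, lock-free-read shape recurs for the metaspace just below; reduced to a sketch, with Foo and Foo_lock as placeholders:

static Foo* volatile _foo = nullptr;

Foo* get_foo() {
  Foo* foo = AtomicAccess::load_acquire(&_foo);   // fast path, no lock
  if (foo == nullptr) {
    MutexLocker ml(Foo_lock);
    foo = _foo;                                   // re-check under the lock
    if (foo == nullptr) {
      foo = new Foo();
      AtomicAccess::release_store(&_foo, foo);    // publish the fully constructed object
    }
  }
  return foo;
}
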
@ -819,7 +819,7 @@ ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
// The reason for the delayed allocation is because some class loaders are
// simply for delegating with no metadata of their own.
// Lock-free access requires load_acquire.
ClassLoaderMetaspace* metaspace = Atomic::load_acquire(&_metaspace);
ClassLoaderMetaspace* metaspace = AtomicAccess::load_acquire(&_metaspace);
if (metaspace == nullptr) {
MutexLocker ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
// Check if _metaspace got allocated while we were waiting for this lock.
@ -833,7 +833,7 @@ ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
}
// Ensure _metaspace is stable, since it is examined without a lock
Atomic::release_store(&_metaspace, metaspace);
AtomicAccess::release_store(&_metaspace, metaspace);
}
}
return metaspace;
@ -1120,7 +1120,7 @@ void ClassLoaderData::verify() {
bool ClassLoaderData::contains_klass(Klass* klass) {
// Lock-free access requires load_acquire
for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) {
if (k == klass) return true;
}
return false;

View File

@ -28,7 +28,7 @@
#include "memory/allocation.hpp"
#include "oops/oopHandle.hpp"
#include "oops/weakHandle.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/mutex.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,16 +35,16 @@
inline void ClassLoaderData::set_next(ClassLoaderData* next) {
assert(this->next() == nullptr, "only link once");
Atomic::store(&_next, next);
AtomicAccess::store(&_next, next);
}
inline ClassLoaderData* ClassLoaderData::next() const {
return Atomic::load(&_next);
return AtomicAccess::load(&_next);
}
inline void ClassLoaderData::unlink_next() {
assert(next()->is_unloading(), "only remove unloading clds");
Atomic::store(&_next, _next->_next);
AtomicAccess::store(&_next, _next->_next);
}
inline void ClassLoaderData::set_unloading_next(ClassLoaderData* unloading_next) {

View File

@ -36,7 +36,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/safepoint.hpp"
@ -61,20 +61,20 @@ void ClassLoaderDataGraph::clear_claimed_marks() {
//
// Any ClassLoaderData added after or during walking the list are prepended to
// _head. Their claim mark need not be handled here.
for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != nullptr; cld = cld->next()) {
for (ClassLoaderData* cld = AtomicAccess::load_acquire(&_head); cld != nullptr; cld = cld->next()) {
cld->clear_claim();
}
}
void ClassLoaderDataGraph::clear_claimed_marks(int claim) {
for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != nullptr; cld = cld->next()) {
for (ClassLoaderData* cld = AtomicAccess::load_acquire(&_head); cld != nullptr; cld = cld->next()) {
cld->clear_claim(claim);
}
}
void ClassLoaderDataGraph::verify_claimed_marks_cleared(int claim) {
#ifdef ASSERT
for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != nullptr; cld = cld->next()) {
for (ClassLoaderData* cld = AtomicAccess::load_acquire(&_head); cld != nullptr; cld = cld->next()) {
cld->verify_not_claimed(claim);
}
#endif
@ -155,7 +155,7 @@ ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool has_clas
// First install the new CLD to the Graph.
cld->set_next(_head);
Atomic::release_store(&_head, cld);
AtomicAccess::release_store(&_head, cld);
// Next associate with the class_loader.
if (!has_class_mirror_holder) {
@ -192,14 +192,14 @@ inline void assert_is_safepoint_or_gc() {
// These are functions called by the GC, which require all of the CLDs, including not yet unlinked CLDs.
void ClassLoaderDataGraph::cld_do(CLDClosure* cl) {
assert_is_safepoint_or_gc();
for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != nullptr; cld = cld->next()) {
for (ClassLoaderData* cld = AtomicAccess::load_acquire(&_head); cld != nullptr; cld = cld->next()) {
cl->do_cld(cld);
}
}
void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
assert_is_safepoint_or_gc();
for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != nullptr; cld = cld->next()) {
for (ClassLoaderData* cld = AtomicAccess::load_acquire(&_head); cld != nullptr; cld = cld->next()) {
CLDClosure* closure = (cld->keep_alive_ref_count() > 0) ? strong : weak;
if (closure != nullptr) {
closure->do_cld(cld);
@ -428,7 +428,7 @@ bool ClassLoaderDataGraph::do_unloading() {
} else {
assert(data == _head, "sanity check");
// The GC might be walking this concurrently
Atomic::store(&_head, data->next());
AtomicAccess::store(&_head, data->next());
}
}
}
@ -533,7 +533,7 @@ Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
while (head != nullptr) {
Klass* next = next_klass_in_cldg(head);
Klass* old_head = Atomic::cmpxchg(&_next_klass, head, next);
Klass* old_head = AtomicAccess::cmpxchg(&_next_klass, head, next);
if (old_head == head) {
return head; // Won the CAS.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
#include "classfile/javaClasses.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/orderAccess.hpp"
inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader) {
@ -44,28 +44,28 @@ inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader) {
}
size_t ClassLoaderDataGraph::num_instance_classes() {
return Atomic::load(&_num_instance_classes);
return AtomicAccess::load(&_num_instance_classes);
}
size_t ClassLoaderDataGraph::num_array_classes() {
return Atomic::load(&_num_array_classes);
return AtomicAccess::load(&_num_array_classes);
}
void ClassLoaderDataGraph::inc_instance_classes(size_t count) {
Atomic::add(&_num_instance_classes, count, memory_order_relaxed);
AtomicAccess::add(&_num_instance_classes, count, memory_order_relaxed);
}
void ClassLoaderDataGraph::dec_instance_classes(size_t count) {
size_t old_count = Atomic::fetch_then_add(&_num_instance_classes, -count, memory_order_relaxed);
size_t old_count = AtomicAccess::fetch_then_add(&_num_instance_classes, -count, memory_order_relaxed);
assert(old_count >= count, "Sanity");
}
void ClassLoaderDataGraph::inc_array_classes(size_t count) {
Atomic::add(&_num_array_classes, count, memory_order_relaxed);
AtomicAccess::add(&_num_array_classes, count, memory_order_relaxed);
}
void ClassLoaderDataGraph::dec_array_classes(size_t count) {
size_t old_count = Atomic::fetch_then_add(&_num_array_classes, -count, memory_order_relaxed);
size_t old_count = AtomicAccess::fetch_then_add(&_num_array_classes, -count, memory_order_relaxed);
assert(old_count >= count, "Sanity");
}

View File

@ -204,11 +204,11 @@ bool java_lang_String::_initialized;
bool java_lang_String::test_and_set_flag(oop java_string, uint8_t flag_mask) {
uint8_t* addr = flags_addr(java_string);
uint8_t value = Atomic::load(addr);
uint8_t value = AtomicAccess::load(addr);
while ((value & flag_mask) == 0) {
uint8_t old_value = value;
value |= flag_mask;
value = Atomic::cmpxchg(addr, old_value, value);
value = AtomicAccess::cmpxchg(addr, old_value, value);
if (value == old_value) return false; // Flag bit changed from 0 to 1.
}
return true; // Flag bit is already 1.
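
A hypothetical caller, to make the return convention explicit: false means this thread flipped the bit from 0 to 1, true means an earlier caller had already set it (the mask name is illustrative):

static void claim_once(oop java_string, uint8_t some_flag_mask) {
  if (!java_lang_String::test_and_set_flag(java_string, some_flag_mask)) {
    // The bit was clear and this thread set it; exactly one caller takes this branch.
  } else {
    // Another thread (or an earlier call) had already set the bit.
  }
}
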
@ -2140,7 +2140,7 @@ void java_lang_VirtualThread::set_state(oop vthread, int state) {
int java_lang_VirtualThread::cmpxchg_state(oop vthread, int old_state, int new_state) {
jint* addr = vthread->field_addr<jint>(_state_offset);
int res = Atomic::cmpxchg(addr, old_state, new_state);
int res = AtomicAccess::cmpxchg(addr, old_state, new_state);
return res;
}
@ -2158,9 +2158,9 @@ void java_lang_VirtualThread::set_next(oop vthread, oop next_vthread) {
// Method returns true if we added vthread to the list, false otherwise.
bool java_lang_VirtualThread::set_onWaitingList(oop vthread, OopHandle& list_head) {
jboolean* addr = vthread->field_addr<jboolean>(_onWaitingList_offset);
jboolean vthread_on_list = Atomic::load(addr);
jboolean vthread_on_list = AtomicAccess::load(addr);
if (!vthread_on_list) {
vthread_on_list = Atomic::cmpxchg(addr, (jboolean)JNI_FALSE, (jboolean)JNI_TRUE);
vthread_on_list = AtomicAccess::cmpxchg(addr, (jboolean)JNI_FALSE, (jboolean)JNI_TRUE);
if (!vthread_on_list) {
for (;;) {
oop head = list_head.resolve();
@ -4760,7 +4760,7 @@ int java_lang_ClassLoader::_parent_offset;
ClassLoaderData* java_lang_ClassLoader::loader_data_acquire(oop loader) {
assert(loader != nullptr, "loader must not be null");
assert(oopDesc::is_oop(loader), "loader must be oop");
return Atomic::load_acquire(loader->field_addr<ClassLoaderData*>(_loader_data_offset));
return AtomicAccess::load_acquire(loader->field_addr<ClassLoaderData*>(_loader_data_offset));
}
ClassLoaderData* java_lang_ClassLoader::loader_data(oop loader) {
@ -4772,7 +4772,7 @@ ClassLoaderData* java_lang_ClassLoader::loader_data(oop loader) {
void java_lang_ClassLoader::release_set_loader_data(oop loader, ClassLoaderData* new_data) {
assert(loader != nullptr, "loader must not be null");
assert(oopDesc::is_oop(loader), "loader must be oop");
Atomic::release_store(loader->field_addr<ClassLoaderData*>(_loader_data_offset), new_data);
AtomicAccess::release_store(loader->field_addr<ClassLoaderData*>(_loader_data_offset), new_data);
}
#define CLASSLOADER_FIELDS_DO(macro) \

View File

@ -80,7 +80,7 @@ uint8_t* java_lang_String::flags_addr(oop java_string) {
}
bool java_lang_String::is_flag_set(oop java_string, uint8_t flag_mask) {
return (Atomic::load(flags_addr(java_string)) & flag_mask) != 0;
return (AtomicAccess::load(flags_addr(java_string)) & flag_mask) != 0;
}
bool java_lang_String::deduplication_forbidden(oop java_string) {

View File

@ -28,7 +28,7 @@
#include "classfile/moduleEntry.hpp"
#include "oops/symbol.hpp"
#include "oops/symbolHandle.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/hashTable.hpp"
#include "utilities/macros.hpp"
@ -222,11 +222,11 @@ public:
bool is_defined_by_cds_in_class_path(int idx) const {
assert(idx < max_index_for_defined_in_class_path(), "sanity");
return((Atomic::load(&_defined_by_cds_in_class_path) & ((int)1 << idx)) != 0);
return((AtomicAccess::load(&_defined_by_cds_in_class_path) & ((int)1 << idx)) != 0);
}
void set_defined_by_cds_in_class_path(int idx) {
assert(idx < max_index_for_defined_in_class_path(), "sanity");
Atomic::fetch_then_or(&_defined_by_cds_in_class_path, ((int)1 << idx));
AtomicAccess::fetch_then_or(&_defined_by_cds_in_class_path, ((int)1 << idx));
}
};

View File

@ -47,7 +47,7 @@
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "oops/weakHandle.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
@ -325,11 +325,11 @@ void StringTable::create_table() {
}
void StringTable::item_added() {
Atomic::inc(&_items_count);
AtomicAccess::inc(&_items_count);
}
void StringTable::item_removed() {
Atomic::dec(&_items_count);
AtomicAccess::dec(&_items_count);
}
double StringTable::get_load_factor() {
@ -345,18 +345,18 @@ size_t StringTable::table_size() {
}
bool StringTable::has_work() {
return Atomic::load_acquire(&_has_work);
return AtomicAccess::load_acquire(&_has_work);
}
size_t StringTable::items_count_acquire() {
return Atomic::load_acquire(&_items_count);
return AtomicAccess::load_acquire(&_items_count);
}
void StringTable::trigger_concurrent_work() {
// Avoid churn on ServiceThread
if (!has_work()) {
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
Atomic::store(&_has_work, true);
AtomicAccess::store(&_has_work, true);
Service_lock->notify_all();
}
}
@ -510,7 +510,7 @@ oop StringTable::intern(const char* utf8_string, TRAPS) {
}
oop StringTable::intern(const StringWrapper& name, TRAPS) {
assert(!Atomic::load_acquire(&_disable_interning_during_cds_dump),
assert(!AtomicAccess::load_acquire(&_disable_interning_during_cds_dump),
"All threads that may intern strings should have been stopped before CDS starts copying the interned string table");
// shared table always uses java_lang_String::hash_code
@ -666,7 +666,7 @@ void StringTable::do_concurrent_work(JavaThread* jt) {
// Rehash if needed. Rehashing goes to a safepoint but the rest of this
// work is concurrent.
if (needs_rehashing() && maybe_rehash_table()) {
Atomic::release_store(&_has_work, false);
AtomicAccess::release_store(&_has_work, false);
return; // done, else grow
}
log_debug(stringtable, perf)("Concurrent work, live factor: %g", get_load_factor());
@ -676,7 +676,7 @@ void StringTable::do_concurrent_work(JavaThread* jt) {
} else {
clean_dead_entries(jt);
}
Atomic::release_store(&_has_work, false);
AtomicAccess::release_store(&_has_work, false);
}
// Called at VM_Operation safepoint
@ -966,7 +966,7 @@ void StringTable::allocate_shared_strings_array(TRAPS) {
// This flag will be cleared after intern table dumping has completed, so we can run the
// compiler again (for future AOT method compilation, etc).
DEBUG_ONLY(Atomic::release_store(&_disable_interning_during_cds_dump, true));
DEBUG_ONLY(AtomicAccess::release_store(&_disable_interning_during_cds_dump, true));
if (items_count_acquire() > (size_t)max_jint) {
fatal("Too many strings to be archived: %zu", items_count_acquire());
@ -1105,7 +1105,7 @@ void StringTable::write_shared_table() {
_local_table->do_safepoint_scan(copy_into_shared_table);
writer.dump(&_shared_table, "string");
DEBUG_ONLY(Atomic::release_store(&_disable_interning_during_cds_dump, false));
DEBUG_ONLY(AtomicAccess::release_store(&_disable_interning_during_cds_dump, false));
}
void StringTable::set_shared_strings_array_index(int root_index) {

View File

@ -34,7 +34,7 @@
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/trimNativeHeap.hpp"
@ -216,17 +216,17 @@ void SymbolTable::create_table () {
}
}
void SymbolTable::reset_has_items_to_clean() { Atomic::store(&_has_items_to_clean, false); }
void SymbolTable::mark_has_items_to_clean() { Atomic::store(&_has_items_to_clean, true); }
bool SymbolTable::has_items_to_clean() { return Atomic::load(&_has_items_to_clean); }
void SymbolTable::reset_has_items_to_clean() { AtomicAccess::store(&_has_items_to_clean, false); }
void SymbolTable::mark_has_items_to_clean() { AtomicAccess::store(&_has_items_to_clean, true); }
bool SymbolTable::has_items_to_clean() { return AtomicAccess::load(&_has_items_to_clean); }
void SymbolTable::item_added() {
Atomic::inc(&_items_count);
AtomicAccess::inc(&_items_count);
}
void SymbolTable::item_removed() {
Atomic::inc(&(_symbols_removed));
Atomic::dec(&_items_count);
AtomicAccess::inc(&(_symbols_removed));
AtomicAccess::dec(&_items_count);
}
double SymbolTable::get_load_factor() {
@ -237,7 +237,7 @@ size_t SymbolTable::table_size() {
return ((size_t)1) << _local_table->get_size_log2(Thread::current());
}
bool SymbolTable::has_work() { return Atomic::load_acquire(&_has_work); }
bool SymbolTable::has_work() { return AtomicAccess::load_acquire(&_has_work); }
void SymbolTable::trigger_cleanup() {
// Avoid churn on ServiceThread
@ -786,7 +786,7 @@ void SymbolTable::clean_dead_entries(JavaThread* jt) {
bdt.done(jt);
}
Atomic::add(&_symbols_counted, stdc._processed);
AtomicAccess::add(&_symbols_counted, stdc._processed);
log_debug(symboltable)("Cleaned %zu of %zu",
stdd._deleted, stdc._processed);
@ -814,7 +814,7 @@ void SymbolTable::do_concurrent_work(JavaThread* jt) {
// Rehash if needed. Rehashing goes to a safepoint but the rest of this
// work is concurrent.
if (needs_rehashing() && maybe_rehash_table()) {
Atomic::release_store(&_has_work, false);
AtomicAccess::release_store(&_has_work, false);
return; // done, else grow
}
log_debug(symboltable, perf)("Concurrent work, live factor: %g", get_load_factor());
@ -824,7 +824,7 @@ void SymbolTable::do_concurrent_work(JavaThread* jt) {
} else {
clean_dead_entries(jt);
}
Atomic::release_store(&_has_work, false);
AtomicAccess::release_store(&_has_work, false);
}
// Called at VM_Operation safepoint

View File

@ -66,7 +66,7 @@
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
@ -1079,7 +1079,7 @@ InstanceKlass* SystemDictionary::load_shared_class(InstanceKlass* ik,
assert(ik != nullptr, "sanity");
assert(ik->in_aot_cache(), "sanity");
assert(!ik->is_unshareable_info_restored(), "shared class can be restored only once");
assert(Atomic::add(&ik->_shared_class_load_count, 1) == 1, "shared class loaded more than once");
assert(AtomicAccess::add(&ik->_shared_class_load_count, 1) == 1, "shared class loaded more than once");
Symbol* class_name = ik->name();
if (!is_shared_class_visible(class_name, ik, pkg_entry, class_loader)) {

View File

@ -51,7 +51,7 @@
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
@ -580,7 +580,7 @@ void CodeCache::free(CodeBlob* cb) {
if (cb->is_nmethod()) {
heap->set_nmethod_count(heap->nmethod_count() - 1);
if (((nmethod *)cb)->has_dependencies()) {
Atomic::dec(&_number_of_nmethods_with_dependencies);
AtomicAccess::dec(&_number_of_nmethods_with_dependencies);
}
}
if (cb->is_adapter_blob()) {
@ -616,7 +616,7 @@ void CodeCache::commit(CodeBlob* cb) {
if (cb->is_nmethod()) {
heap->set_nmethod_count(heap->nmethod_count() + 1);
if (((nmethod *)cb)->has_dependencies()) {
Atomic::inc(&_number_of_nmethods_with_dependencies);
AtomicAccess::inc(&_number_of_nmethods_with_dependencies);
}
}
if (cb->is_adapter_blob()) {
@ -786,7 +786,7 @@ void CodeCache::gc_on_allocation() {
double free_ratio = double(free) / double(max);
if (free_ratio <= StartAggressiveSweepingAt / 100.0) {
// In case the GC is concurrent, we make sure only one thread requests the GC.
if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
if (AtomicAccess::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
}
@ -812,7 +812,7 @@ void CodeCache::gc_on_allocation() {
// it is eventually invoked to avoid trouble.
if (allocated_since_last_ratio > threshold) {
// In case the GC is concurrent, we make sure only one thread requests the GC.
if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
if (AtomicAccess::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
Universe::heap()->collect(GCCause::_codecache_GC_threshold);
@ -899,9 +899,9 @@ void CodeCache::release_exception_cache(ExceptionCache* entry) {
delete entry;
} else {
for (;;) {
ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
ExceptionCache* purge_list_head = AtomicAccess::load(&_exception_cache_purge_list);
entry->set_purge_list_next(purge_list_head);
if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
if (AtomicAccess::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
break;
}
}
@ -1152,7 +1152,7 @@ void codeCache_init() {
//------------------------------------------------------------------------------------------------
bool CodeCache::has_nmethods_with_dependencies() {
return Atomic::load_acquire(&_number_of_nmethods_with_dependencies) != 0;
return AtomicAccess::load_acquire(&_number_of_nmethods_with_dependencies) != 0;
}
void CodeCache::clear_inline_caches() {
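Not part of the patch, but for context: the two gc_on_allocation() hunks above rely on a claim-once idiom — whichever thread first flips _unloading_threshold_gc_requested from false to true requests the GC, every other racer backs off. A minimal sketch of that idiom with illustrative names:

#include "runtime/atomicAccess.hpp"

static volatile bool _gc_requested = false;   // illustrative flag, not a HotSpot field

// Exactly one of the racing callers sees 'false' come back from cmpxchg
// and therefore performs the request.
static bool try_claim_gc_request() {
  return AtomicAccess::cmpxchg(&_gc_requested, false, true) == false;
}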

View File

@ -32,7 +32,7 @@
#include "oops/compressedKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/continuationEntry.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@ -104,8 +104,8 @@ void CompiledICData::clean_metadata() {
// subsequent miss handlers will upgrade the callsite to megamorphic,
// which makes sense as it obviously is megamorphic then.
if (!speculated_klass()->is_loader_alive()) {
Atomic::store(&_speculated_klass, (uintptr_t)0);
Atomic::store(&_speculated_method, (Method*)nullptr);
AtomicAccess::store(&_speculated_klass, (uintptr_t)0);
AtomicAccess::store(&_speculated_method, (Method*)nullptr);
}
assert(_speculated_method == nullptr || _speculated_method->method_holder()->is_loader_alive(),

View File

@ -28,7 +28,7 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
@ -107,7 +107,7 @@ void DependencyContext::add_dependent_nmethod(nmethod* nm) {
// to skip list scans. The individual method checks are cheap, but walking the large
// list of dependencies gets expensive.
nmethodBucket* head = Atomic::load(_dependency_context_addr);
nmethodBucket* head = AtomicAccess::load(_dependency_context_addr);
if (head != nullptr && nm == head->get_nmethod()) {
return;
}
@ -121,10 +121,10 @@ void DependencyContext::add_dependent_nmethod(nmethod* nm) {
nmethodBucket* new_head = new nmethodBucket(nm, nullptr);
for (;;) {
new_head->set_next(head);
if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
if (AtomicAccess::cmpxchg(_dependency_context_addr, head, new_head) == head) {
break;
}
head = Atomic::load(_dependency_context_addr);
head = AtomicAccess::load(_dependency_context_addr);
}
if (UsePerfData) {
_perf_total_buckets_allocated_count->inc();
@ -142,9 +142,9 @@ void DependencyContext::release(nmethodBucket* b) {
// Mark the context as having stale entries, since it is not safe to
// expunge the list right now.
for (;;) {
nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
nmethodBucket* purge_list_head = AtomicAccess::load(&_purge_list);
b->set_purge_list_next(purge_list_head);
if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
if (AtomicAccess::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
break;
}
}
@ -196,7 +196,7 @@ void DependencyContext::remove_all_dependents() {
// purge list when calling this.
assert(!delete_on_release(), "should not delete on release");
nmethodBucket* first = Atomic::load_acquire(_dependency_context_addr);
nmethodBucket* first = AtomicAccess::load_acquire(_dependency_context_addr);
if (first == nullptr) {
return;
}
@ -211,10 +211,10 @@ void DependencyContext::remove_all_dependents() {
}
// Add the whole list to the purge list at once.
nmethodBucket* old_purge_list_head = Atomic::load(&_purge_list);
nmethodBucket* old_purge_list_head = AtomicAccess::load(&_purge_list);
for (;;) {
last->set_purge_list_next(old_purge_list_head);
nmethodBucket* next_purge_list_head = Atomic::cmpxchg(&_purge_list, old_purge_list_head, first);
nmethodBucket* next_purge_list_head = AtomicAccess::cmpxchg(&_purge_list, old_purge_list_head, first);
if (old_purge_list_head == next_purge_list_head) {
break;
}
@ -264,16 +264,16 @@ bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
// dependency context was cleaned. GC threads claim cleanup tasks by performing
// a CAS on this value.
bool DependencyContext::claim_cleanup() {
uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
uint64_t cleaning_epoch = AtomicAccess::load(&_cleaning_epoch);
uint64_t last_cleanup = AtomicAccess::load(_last_cleanup_addr);
if (last_cleanup >= cleaning_epoch) {
return false;
}
return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
return AtomicAccess::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
}
bool DependencyContext::delete_on_release() {
return Atomic::load(&_cleaning_epoch) == 0;
return AtomicAccess::load(&_cleaning_epoch) == 0;
}
// Retrieve the first nmethodBucket that has a dependent that does not correspond to
@ -282,17 +282,17 @@ bool DependencyContext::delete_on_release() {
nmethodBucket* DependencyContext::dependencies_not_unloading() {
for (;;) {
// Need acquire because the read value could come from a concurrent insert.
nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
nmethodBucket* head = AtomicAccess::load_acquire(_dependency_context_addr);
if (head == nullptr || !head->get_nmethod()->is_unloading()) {
return head;
}
nmethodBucket* head_next = head->next();
OrderAccess::loadload();
if (Atomic::load(_dependency_context_addr) != head) {
if (AtomicAccess::load(_dependency_context_addr) != head) {
// Unstable load of head w.r.t. head->next
continue;
}
if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
if (AtomicAccess::cmpxchg(_dependency_context_addr, head, head_next) == head) {
// Release is_unloading entries if unlinking was claimed
DependencyContext::release(head);
}
@ -301,11 +301,11 @@ nmethodBucket* DependencyContext::dependencies_not_unloading() {
// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
Atomic::store(_dependency_context_addr, b);
AtomicAccess::store(_dependency_context_addr, b);
}
nmethodBucket* DependencyContext::dependencies() {
return Atomic::load(_dependency_context_addr);
return AtomicAccess::load(_dependency_context_addr);
}
// After the gc_prologue, the dependency contexts may be claimed by the GC
@ -314,7 +314,7 @@ nmethodBucket* DependencyContext::dependencies() {
void DependencyContext::cleaning_start() {
assert(SafepointSynchronize::is_at_safepoint(), "must be");
uint64_t epoch = ++_cleaning_epoch_monotonic;
Atomic::store(&_cleaning_epoch, epoch);
AtomicAccess::store(&_cleaning_epoch, epoch);
}
// The epilogue marks the end of dependency context cleanup by the GC,
@ -324,7 +324,7 @@ void DependencyContext::cleaning_start() {
// was called. That allows dependency contexts to be cleaned concurrently.
void DependencyContext::cleaning_end() {
uint64_t epoch = 0;
Atomic::store(&_cleaning_epoch, epoch);
AtomicAccess::store(&_cleaning_epoch, epoch);
}
// This function skips over nmethodBuckets in the list corresponding to
@ -336,17 +336,17 @@ nmethodBucket* nmethodBucket::next_not_unloading() {
for (;;) {
// Do not need acquire because the loaded entry can never be
// concurrently inserted.
nmethodBucket* next = Atomic::load(&_next);
nmethodBucket* next = AtomicAccess::load(&_next);
if (next == nullptr || !next->get_nmethod()->is_unloading()) {
return next;
}
nmethodBucket* next_next = Atomic::load(&next->_next);
nmethodBucket* next_next = AtomicAccess::load(&next->_next);
OrderAccess::loadload();
if (Atomic::load(&_next) != next) {
if (AtomicAccess::load(&_next) != next) {
// Unstable load of next w.r.t. next->next
continue;
}
if (Atomic::cmpxchg(&_next, next, next_next) == next) {
if (AtomicAccess::cmpxchg(&_next, next, next_next) == next) {
// Release is_unloading entries if unlinking was claimed
DependencyContext::release(next);
}
@ -355,17 +355,17 @@ nmethodBucket* nmethodBucket::next_not_unloading() {
// Relaxed accessors
nmethodBucket* nmethodBucket::next() {
return Atomic::load(&_next);
return AtomicAccess::load(&_next);
}
void nmethodBucket::set_next(nmethodBucket* b) {
Atomic::store(&_next, b);
AtomicAccess::store(&_next, b);
}
nmethodBucket* nmethodBucket::purge_list_next() {
return Atomic::load(&_purge_list_next);
return AtomicAccess::load(&_purge_list_next);
}
void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
Atomic::store(&_purge_list_next, b);
AtomicAccess::store(&_purge_list_next, b);
}
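For context (illustrative code, not from the patch): release() and remove_all_dependents() above push onto _purge_list with the classic lock-free prepend loop — read the head, link the new node in front, publish with cmpxchg, retry on failure. A single-node sketch of that loop; Node is an illustrative stand-in for nmethodBucket:

#include "runtime/atomicAccess.hpp"

struct Node {
  Node* volatile _next;
};

static Node* volatile _purge_head = nullptr;   // illustrative list head

static void push(Node* n) {
  for (;;) {
    Node* head = AtomicAccess::load(&_purge_head);
    n->_next = head;                                   // link before publishing
    if (AtomicAccess::cmpxchg(&_purge_head, head, n) == head) {
      break;                                           // published
    }
    // another pusher won; re-read the head and try again
  }
}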

View File

@ -59,7 +59,7 @@
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/continuation.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/flags/flagSetting.hpp"
@ -376,11 +376,11 @@ bool ExceptionCache::add_address_and_handler(address addr, address handler) {
}
ExceptionCache* ExceptionCache::next() {
return Atomic::load(&_next);
return AtomicAccess::load(&_next);
}
void ExceptionCache::set_next(ExceptionCache *ec) {
Atomic::store(&_next, ec);
AtomicAccess::store(&_next, ec);
}
//-----------------------------------------------------------------------------
@ -492,12 +492,12 @@ const char* nmethod::state() const {
void nmethod::set_deoptimized_done() {
ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
if (_deoptimization_status != deoptimize_done) { // can't go backwards
Atomic::store(&_deoptimization_status, deoptimize_done);
AtomicAccess::store(&_deoptimization_status, deoptimize_done);
}
}
ExceptionCache* nmethod::exception_cache_acquire() const {
return Atomic::load_acquire(&_exception_cache);
return AtomicAccess::load_acquire(&_exception_cache);
}
void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
@ -517,7 +517,7 @@ void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
// next pointers always point at live ExceptionCaches, that are not removed due
// to concurrent ExceptionCache cleanup.
ExceptionCache* next = ec->next();
if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
if (AtomicAccess::cmpxchg(&_exception_cache, ec, next) == ec) {
CodeCache::release_exception_cache(ec);
}
continue;
@ -527,7 +527,7 @@ void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
new_entry->set_next(ec);
}
}
if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
if (AtomicAccess::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
return;
}
}
@ -560,7 +560,7 @@ void nmethod::clean_exception_cache() {
// Try to clean head; this is contended by concurrent inserts, that
// both lazily clean the head, and insert entries at the head. If
// the CAS fails, the operation is restarted.
if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
if (AtomicAccess::cmpxchg(&_exception_cache, curr, next) != curr) {
prev = nullptr;
curr = exception_cache_acquire();
continue;
@ -919,7 +919,7 @@ void nmethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all
if (md != nullptr && md->is_method()) {
Method* method = static_cast<Method*>(md);
if (!method->method_holder()->is_loader_alive()) {
Atomic::store(r->metadata_addr(), (Method*)nullptr);
AtomicAccess::store(r->metadata_addr(), (Method*)nullptr);
if (!r->metadata_is_immediate()) {
r->fix_metadata_relocation();
@ -1923,13 +1923,13 @@ void nmethod::verify_clean_inline_caches() {
}
void nmethod::mark_as_maybe_on_stack() {
Atomic::store(&_gc_epoch, CodeCache::gc_epoch());
AtomicAccess::store(&_gc_epoch, CodeCache::gc_epoch());
}
bool nmethod::is_maybe_on_stack() {
// If the condition below is true, it means that the nmethod was found to
// be alive the previous completed marking cycle.
return Atomic::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
return AtomicAccess::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
}
void nmethod::inc_decompile_count() {
@ -1956,7 +1956,7 @@ bool nmethod::try_transition(signed char new_state_int) {
// Ensure monotonicity of transitions.
return false;
}
Atomic::store(&_state, new_state);
AtomicAccess::store(&_state, new_state);
return true;
}
@ -2007,7 +2007,7 @@ bool nmethod::make_not_entrant(InvalidationReason invalidation_reason) {
return false;
}
if (Atomic::load(&_state) == not_entrant) {
if (AtomicAccess::load(&_state) == not_entrant) {
// Avoid taking the lock if already in required state.
// This is safe from races because the state is an end-state,
// which the nmethod cannot back out of once entered.
@ -2019,7 +2019,7 @@ bool nmethod::make_not_entrant(InvalidationReason invalidation_reason) {
// Enter critical section. Does not block for safepoint.
ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
if (Atomic::load(&_state) == not_entrant) {
if (AtomicAccess::load(&_state) == not_entrant) {
// another thread already performed this transition so nothing
// to do, but return false to indicate this.
return false;
@ -2390,7 +2390,7 @@ public:
};
bool nmethod::is_unloading() {
uint8_t state = Atomic::load(&_is_unloading_state);
uint8_t state = AtomicAccess::load(&_is_unloading_state);
bool state_is_unloading = IsUnloadingState::is_unloading(state);
if (state_is_unloading) {
return true;
@ -2413,7 +2413,7 @@ bool nmethod::is_unloading() {
// different outcomes, so we guard the computed result with a CAS
// to ensure all threads have a shared view of whether an nmethod
// is_unloading or not.
uint8_t found_state = Atomic::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed);
uint8_t found_state = AtomicAccess::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed);
if (found_state == state) {
// First to change state, we win
@ -2426,7 +2426,7 @@ bool nmethod::is_unloading() {
void nmethod::clear_unloading_state() {
uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle());
Atomic::store(&_is_unloading_state, state);
AtomicAccess::store(&_is_unloading_state, state);
}
@ -2511,7 +2511,7 @@ bool nmethod::oops_do_try_claim_weak_request() {
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
if ((_oops_do_mark_link == nullptr) &&
(Atomic::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) {
(AtomicAccess::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) {
oops_do_log_change("oops_do, mark weak request");
return true;
}
@ -2525,7 +2525,7 @@ void nmethod::oops_do_set_strong_done(nmethod* old_head) {
nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() {
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, mark_link(nullptr, claim_weak_request_tag), mark_link(this, claim_strong_done_tag));
oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, mark_link(nullptr, claim_weak_request_tag), mark_link(this, claim_strong_done_tag));
if (old_next == nullptr) {
oops_do_log_change("oops_do, mark strong done");
}
@ -2536,7 +2536,7 @@ nmethod::oops_do_mark_link* nmethod::oops_do_try_add_strong_request(nmethod::oop
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak");
oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag));
oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag));
if (old_next == next) {
oops_do_log_change("oops_do, mark strong request");
}
@ -2547,7 +2547,7 @@ bool nmethod::oops_do_try_claim_weak_done_as_strong_done(nmethod::oops_do_mark_l
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done");
oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag));
oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag));
if (old_next == next) {
oops_do_log_change("oops_do, mark weak done -> mark strong done");
return true;
@ -2562,13 +2562,13 @@ nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() {
extract_state(_oops_do_mark_link) == claim_strong_request_tag,
"must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
nmethod* old_head = AtomicAccess::xchg(&_oops_do_mark_nmethods, this);
// Self-loop if needed.
if (old_head == nullptr) {
old_head = this;
}
// Try to install end of list and weak done tag.
if (Atomic::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) {
if (AtomicAccess::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) {
oops_do_log_change("oops_do, mark weak done");
return nullptr;
} else {
@ -2579,7 +2579,7 @@ nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() {
void nmethod::oops_do_add_to_list_as_strong_done() {
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
nmethod* old_head = AtomicAccess::xchg(&_oops_do_mark_nmethods, this);
// Self-loop if needed.
if (old_head == nullptr) {
old_head = this;
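As an aside (not from the patch): is_unloading() above may be computed concurrently with different answers, since the unloading cycle can advance mid-computation, so the first thread to CAS its result into _is_unloading_state wins and later threads adopt the stored answer. A reduced sketch of that guard, using 0 as an illustrative "not yet computed" value:

#include "runtime/atomicAccess.hpp"

static volatile uint8_t _cached_state = 0;   // 0 means "not yet computed" (illustrative)

static uint8_t publish_once(uint8_t computed) {
  // Racing threads may arrive with different 'computed' values; the first
  // CAS installs its answer, everyone else returns whatever was installed.
  uint8_t found = AtomicAccess::cmpxchg(&_cached_state, (uint8_t)0, computed,
                                        memory_order_relaxed);
  return (found == 0) ? computed : found;
}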

View File

@ -286,7 +286,7 @@ class nmethod : public CodeBlob {
volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
DeoptimizationStatus deoptimization_status() const {
return Atomic::load(&_deoptimization_status);
return AtomicAccess::load(&_deoptimization_status);
}
// Initialize fields to their default values

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
#include "code/nmethod.hpp"
#include "code/nativeInst.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/frame.hpp"
inline bool nmethod::is_deopt_pc(address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
@ -43,7 +43,7 @@ inline bool nmethod::is_deopt_mh_entry(address pc) {
// class ExceptionCache methods
inline int ExceptionCache::count() { return Atomic::load_acquire(&_count); }
inline int ExceptionCache::count() { return AtomicAccess::load_acquire(&_count); }
address ExceptionCache::pc_at(int index) {
assert(index >= 0 && index < count(),"");
@ -56,7 +56,7 @@ address ExceptionCache::handler_at(int index) {
}
// increment_count is only called under lock, but there may be concurrent readers.
inline void ExceptionCache::increment_count() { Atomic::release_store(&_count, _count + 1); }
inline void ExceptionCache::increment_count() { AtomicAccess::release_store(&_count, _count + 1); }
#endif // SHARE_CODE_NMETHOD_INLINE_HPP

View File

@ -127,7 +127,7 @@ void VtableStubs::initialize() {
{
MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
for (int i = 0; i < N; i++) {
Atomic::store(&_table[i], (VtableStub*)nullptr);
AtomicAccess::store(&_table[i], (VtableStub*)nullptr);
}
}
}
@ -268,7 +268,7 @@ inline uint VtableStubs::unsafe_hash(address entry_point) {
VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
assert_lock_strong(VtableStubs_lock);
unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
VtableStub* s = Atomic::load(&_table[hash]);
VtableStub* s = AtomicAccess::load(&_table[hash]);
while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
return s;
}
@ -279,9 +279,9 @@ void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
// Insert s at the beginning of the corresponding list.
s->set_next(Atomic::load(&_table[h]));
s->set_next(AtomicAccess::load(&_table[h]));
// Make sure that concurrent readers not taking the mutex observe the writing of "next".
Atomic::release_store(&_table[h], s);
AtomicAccess::release_store(&_table[h], s);
}
VtableStub* VtableStubs::entry_point(address pc) {
@ -292,7 +292,7 @@ VtableStub* VtableStubs::entry_point(address pc) {
MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
uint hash = VtableStubs::unsafe_hash(pc);
VtableStub* s;
for (s = Atomic::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {}
for (s = AtomicAccess::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {}
return (s != nullptr && s->entry_point() == pc) ? s : nullptr;
}
@ -305,7 +305,7 @@ bool VtableStubs::contains(address pc) {
VtableStub* VtableStubs::stub_containing(address pc) {
for (int i = 0; i < N; i++) {
for (VtableStub* s = Atomic::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
for (VtableStub* s = AtomicAccess::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
if (s->contains(pc)) return s;
}
}
@ -318,7 +318,7 @@ void vtableStubs_init() {
void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
for (int i = 0; i < N; i++) {
for (VtableStub* s = Atomic::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
for (VtableStub* s = AtomicAccess::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
f(s);
}
}
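Context note — the following sketch is not from the patch. VtableStubs::enter() above shows the publication discipline this rename touches in many places: the new stub's next pointer is written first, then the bucket head is stored with release ordering, so lock-free readers that traverse from the head also see a fully linked node. Sketched with an illustrative Stub type:

#include "runtime/atomicAccess.hpp"

struct Stub {              // stand-in for VtableStub
  Stub* _next;
  int   _key;
};

static Stub* volatile _bucket = nullptr;   // one illustrative hash bucket

// Writers are assumed to be serialized by a mutex (as in the patch);
// readers take no lock.
static void insert(Stub* s) {
  s->_next = AtomicAccess::load(&_bucket);    // link first...
  AtomicAccess::release_store(&_bucket, s);   // ...then publish
}

static Stub* find(int key) {
  for (Stub* s = AtomicAccess::load_acquire(&_bucket); s != nullptr; s = s->_next) {
    if (s->_key == key) return s;
  }
  return nullptr;
}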

View File

@ -37,7 +37,7 @@
#include "nmt/nmtCommon.hpp"
#include "oops/method.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "utilities/checkedCast.hpp"
@ -902,7 +902,7 @@ void CompilationMemoryStatistic::on_arena_chunk_allocation(size_t size, int aren
// Store this ArenaStat. If other threads also run into OOMs, let them sleep.
// We will never return, so the global store will not contain this info. We will
// print the stored ArenaStat in hs-err (see print_error_report)
if (Atomic::cmpxchg(&_arenastat_oom_crash, (ArenaStatCounter*) nullptr, arena_stat) != nullptr) {
if (AtomicAccess::cmpxchg(&_arenastat_oom_crash, (ArenaStatCounter*) nullptr, arena_stat) != nullptr) {
os::infinite_sleep();
}
}
@ -992,7 +992,7 @@ static bool check_before_reporting(outputStream* st) {
}
bool CompilationMemoryStatistic::in_oom_crash() {
return Atomic::load(&_arenastat_oom_crash) != nullptr;
return AtomicAccess::load(&_arenastat_oom_crash) != nullptr;
}
void CompilationMemoryStatistic::print_error_report(outputStream* st) {
@ -1000,7 +1000,7 @@ void CompilationMemoryStatistic::print_error_report(outputStream* st) {
return;
}
StreamIndentor si(tty, 4);
const ArenaStatCounter* const oom_stats = Atomic::load(&_arenastat_oom_crash);
const ArenaStatCounter* const oom_stats = AtomicAccess::load(&_arenastat_oom_crash);
if (oom_stats != nullptr) {
// we crashed due to a compiler limit hit. Lead with a printout of the offending stats
// in detail.

View File

@ -53,7 +53,7 @@
#include "prims/jvmtiExport.hpp"
#include "prims/nativeLookup.hpp"
#include "prims/whitebox.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/escapeBarrier.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
@ -1587,14 +1587,14 @@ int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) {
assert(!is_osr, "can't be osr");
// Adapters, native wrappers and method handle intrinsics
// should be generated always.
return Atomic::add(CICountNative ? &_native_compilation_id : &_compilation_id, 1);
return AtomicAccess::add(CICountNative ? &_native_compilation_id : &_compilation_id, 1);
} else if (CICountOSR && is_osr) {
id = Atomic::add(&_osr_compilation_id, 1);
id = AtomicAccess::add(&_osr_compilation_id, 1);
if (CIStartOSR <= id && id < CIStopOSR) {
return id;
}
} else {
id = Atomic::add(&_compilation_id, 1);
id = AtomicAccess::add(&_compilation_id, 1);
if (CIStart <= id && id < CIStop) {
return id;
}
@ -1606,7 +1606,7 @@ int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) {
#else
// CICountOSR is a develop flag and set to 'false' by default. In a product built,
// only _compilation_id is incremented.
return Atomic::add(&_compilation_id, 1);
return AtomicAccess::add(&_compilation_id, 1);
#endif
}

View File

@ -30,7 +30,7 @@
#include "compiler/compilerDirectives.hpp"
#include "compiler/compilerThread.hpp"
#include "compiler/compileTask.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/perfDataTypes.hpp"
#include "utilities/stack.hpp"
#if INCLUDE_JVMCI
@ -362,7 +362,7 @@ public:
static inline bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
static bool set_should_compile_new_jobs(jint new_state) {
// Return success if the current caller set it
jint old = Atomic::cmpxchg(&_should_compile_new_jobs, 1-new_state, new_state);
jint old = AtomicAccess::cmpxchg(&_should_compile_new_jobs, 1-new_state, new_state);
bool success = (old == (1-new_state));
if (success) {
if (new_state == run_compilation) {
@ -377,11 +377,11 @@ public:
static void disable_compilation_forever() {
UseCompiler = false;
AlwaysCompileLoopMethods = false;
Atomic::xchg(&_should_compile_new_jobs, jint(shutdown_compilation));
AtomicAccess::xchg(&_should_compile_new_jobs, jint(shutdown_compilation));
}
static bool is_compilation_disabled_forever() {
return Atomic::load(&_should_compile_new_jobs) == shutdown_compilation;
return AtomicAccess::load(&_should_compile_new_jobs) == shutdown_compilation;
}
static void wait_for_no_active_tasks();
@ -389,7 +389,7 @@ public:
static void handle_full_code_cache(CodeBlobType code_blob_type);
// Ensures that warning is only printed once.
static bool should_print_compiler_warning() {
jint old = Atomic::cmpxchg(&_print_compilation_warning, 0, 1);
jint old = AtomicAccess::cmpxchg(&_print_compilation_warning, 0, 1);
return old == 0;
}
// Return total compilation ticks

View File

@ -28,7 +28,7 @@
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "oops/method.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
@ -51,9 +51,9 @@ CompileLog::CompileLog(const char* file_name, FILE* fp, intx thread_id)
// link into the global list
while (true) {
CompileLog* head = Atomic::load_acquire(&_list_head);
CompileLog* head = AtomicAccess::load_acquire(&_list_head);
_next = head;
if (Atomic::cmpxchg(&_list_head, head, this) == head) {
if (AtomicAccess::cmpxchg(&_list_head, head, this) == head) {
break;
}
}
@ -206,7 +206,7 @@ void CompileLog::finish_log_on_error(outputStream* file, char* buf, int buflen)
if (called_exit) return;
called_exit = true;
CompileLog* log = Atomic::load_acquire(&_list_head);
CompileLog* log = AtomicAccess::load_acquire(&_list_head);
while (log != nullptr) {
log->flush();
const char* partial_file = log->file();
@ -294,7 +294,7 @@ void CompileLog::finish_log_on_error(outputStream* file, char* buf, int buflen)
delete log; // Removes partial file
log = next_log;
}
Atomic::store(&_list_head, (CompileLog*)nullptr);
AtomicAccess::store(&_list_head, (CompileLog*)nullptr);
}
// ------------------------------------------------------------------

View File

@ -76,7 +76,7 @@ CompileTask::CompileTask(int compile_id,
_next = nullptr;
_prev = nullptr;
Atomic::add(&_active_tasks, 1, memory_order_relaxed);
AtomicAccess::add(&_active_tasks, 1, memory_order_relaxed);
}
CompileTask::~CompileTask() {
@ -91,7 +91,7 @@ CompileTask::~CompileTask() {
_failure_reason_on_C_heap = false;
}
if (Atomic::sub(&_active_tasks, 1, memory_order_relaxed) == 0) {
if (AtomicAccess::sub(&_active_tasks, 1, memory_order_relaxed) == 0) {
MonitorLocker wait_ml(CompileTaskWait_lock);
wait_ml.notify_all();
}
@ -99,7 +99,7 @@ CompileTask::~CompileTask() {
void CompileTask::wait_for_no_active_tasks() {
MonitorLocker locker(CompileTaskWait_lock);
while (Atomic::load(&_active_tasks) > 0) {
while (AtomicAccess::load(&_active_tasks) > 0) {
locker.wait();
}
}
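For context (illustrative, not from the patch): the CompileTask hunks above count live tasks with relaxed add/sub; sub() returns the new value, so exactly the destructor that drops the count to zero wakes up wait_for_no_active_tasks(). The counter half of that protocol, sketched with illustrative names:

#include "runtime/atomicAccess.hpp"

static volatile int _live_tasks = 0;   // illustrative counter

static void task_created() {
  AtomicAccess::add(&_live_tasks, 1, memory_order_relaxed);
}

static void task_destroyed() {
  if (AtomicAccess::sub(&_live_tasks, 1, memory_order_relaxed) == 0) {
    // last one out: notify waiters (the patch does this under CompileTaskWait_lock)
  }
}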

View File

@ -34,7 +34,7 @@
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"

View File

@ -34,7 +34,7 @@
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals.hpp"
#include "utilities/ostream.hpp"
@ -154,7 +154,7 @@ HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) {
// Allocation successful, update counters
if (verbose) {
size_t last = _last_counter_update;
if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) {
if ((used - last >= _step_counter_update) && AtomicAccess::cmpxchg(&_last_counter_update, last, used) == last) {
_monitoring_support->update_counters();
}
}
@ -162,7 +162,7 @@ HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) {
// ...and print the occupancy line, if needed
if (verbose) {
size_t last = _last_heap_print;
if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) {
if ((used - last >= _step_heap_print) && AtomicAccess::cmpxchg(&_last_heap_print, last, used) == last) {
print_heap_info(used);
print_metaspace_info();
}
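Side note (sketch only, not part of the patch): EpsilonHeap::allocate_work() above throttles its counter updates and heap printouts by advancing a shared watermark with cmpxchg; only the allocating thread whose CAS succeeds pays for the side effect. An illustrative reduction:

#include "runtime/atomicAccess.hpp"

static volatile size_t _last_report = 0;          // illustrative watermark
static const size_t    _report_step = 1024 * 1024;

// Called on every allocation; 'used' is the current occupancy.
static void maybe_report(size_t used) {
  size_t last = _last_report;
  if (used - last >= _report_step &&
      AtomicAccess::cmpxchg(&_last_report, last, used) == last) {
    // only the CAS winner reports, so concurrent allocators don't all print
  }
}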

View File

@ -26,7 +26,7 @@
#include "gc/g1/g1BatchedTask.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCParPhaseTimesTracker.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/growableArray.hpp"
void G1AbstractSubTask::record_work_item(uint worker_id, uint index, size_t count) {
@ -40,7 +40,7 @@ const char* G1AbstractSubTask::name() const {
}
bool G1BatchedTask::try_claim_serial_task(int& task) {
task = Atomic::fetch_then_add(&_num_serial_tasks_done, 1);
task = AtomicAccess::fetch_then_add(&_num_serial_tasks_done, 1);
return task < _serial_tasks.length();
}
@ -96,8 +96,8 @@ void G1BatchedTask::work(uint worker_id) {
}
G1BatchedTask::~G1BatchedTask() {
assert(Atomic::load(&_num_serial_tasks_done) >= _serial_tasks.length(),
"Only %d tasks of %d claimed", Atomic::load(&_num_serial_tasks_done), _serial_tasks.length());
assert(AtomicAccess::load(&_num_serial_tasks_done) >= _serial_tasks.length(),
"Only %d tasks of %d claimed", AtomicAccess::load(&_num_serial_tasks_done), _serial_tasks.length());
for (G1AbstractSubTask* task : _parallel_tasks) {
delete task;
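Not from the patch, for context: try_claim_serial_task() above hands out serial task indices with fetch_then_add, which returns the pre-increment value, so every worker gets a distinct index and claims past the end simply fail the bounds check. A minimal version of that claim protocol with an illustrative bound:

#include "runtime/atomicAccess.hpp"

static volatile int _next_task = 0;
static const int    _num_tasks = 8;   // illustrative upper bound

// Each caller either claims a unique index into the task array or
// learns that everything has already been claimed.
static bool try_claim(int& task) {
  task = AtomicAccess::fetch_then_add(&_next_task, 1);   // old value
  return task < _num_tasks;
}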

View File

@ -49,7 +49,7 @@ G1BlockOffsetTable::G1BlockOffsetTable(MemRegion heap, G1RegionToSpaceMapper* st
void G1BlockOffsetTable::set_offset_array(uint8_t* addr, uint8_t offset) {
check_address(addr, "Block offset table address out of range");
Atomic::store(addr, offset);
AtomicAccess::store(addr, offset);
}
void G1BlockOffsetTable::set_offset_array(uint8_t* addr, HeapWord* high, HeapWord* low) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,7 @@
#include "gc/shared/cardTable.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
inline HeapWord* G1BlockOffsetTable::block_start_reaching_into_card(const void* addr) const {
assert(_reserved.contains(addr), "invalid address");
@ -52,7 +52,7 @@ inline HeapWord* G1BlockOffsetTable::block_start_reaching_into_card(const void*
uint8_t G1BlockOffsetTable::offset_array(uint8_t* addr) const {
check_address(addr, "Block offset table address out of range");
return Atomic::load(addr);
return AtomicAccess::load(addr);
}
inline uint8_t* G1BlockOffsetTable::entry_for_addr(const void* const p) const {

View File

@ -29,7 +29,7 @@
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "utilities/bitMap.inline.hpp"
@ -215,9 +215,9 @@ void G1CardSetCoarsenStats::subtract_from(G1CardSetCoarsenStats& other) {
void G1CardSetCoarsenStats::record_coarsening(uint tag, bool collision) {
assert(tag < ARRAY_SIZE(_coarsen_from), "tag %u out of bounds", tag);
Atomic::inc(&_coarsen_from[tag], memory_order_relaxed);
AtomicAccess::inc(&_coarsen_from[tag], memory_order_relaxed);
if (collision) {
Atomic::inc(&_coarsen_collision[tag], memory_order_relaxed);
AtomicAccess::inc(&_coarsen_collision[tag], memory_order_relaxed);
}
}
@ -314,7 +314,7 @@ public:
if (!_inserted_card && inserted) {
// It does not matter to us who is setting the flag so a regular atomic store
// is sufficient.
Atomic::store(&_inserted_card, true);
AtomicAccess::store(&_inserted_card, true);
}
return found.value();
@ -343,9 +343,9 @@ public:
}
void reset() {
if (Atomic::load(&_inserted_card)) {
if (AtomicAccess::load(&_inserted_card)) {
_table.unsafe_reset(InitialLogTableSize);
Atomic::store(&_inserted_card, false);
AtomicAccess::store(&_inserted_card, false);
}
}
@ -462,7 +462,7 @@ G1CardSet::ContainerPtr G1CardSet::acquire_container(ContainerPtr volatile* cont
GlobalCounter::CriticalSection cs(Thread::current());
while (true) {
// Get ContainerPtr and increment refcount atomically wrt to memory reuse.
ContainerPtr container = Atomic::load_acquire(container_addr);
ContainerPtr container = AtomicAccess::load_acquire(container_addr);
uint cs_type = container_type(container);
if (container == FullCardSet || cs_type == ContainerInlinePtr) {
return container;
@ -505,13 +505,13 @@ class G1ReleaseCardsets : public StackObj {
void coarsen_to_full(ContainerPtr* container_addr) {
while (true) {
ContainerPtr cur_container = Atomic::load_acquire(container_addr);
ContainerPtr cur_container = AtomicAccess::load_acquire(container_addr);
uint cs_type = G1CardSet::container_type(cur_container);
if (cur_container == G1CardSet::FullCardSet) {
return;
}
ContainerPtr old_value = Atomic::cmpxchg(container_addr, cur_container, G1CardSet::FullCardSet);
ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, G1CardSet::FullCardSet);
if (old_value == cur_container) {
_card_set->release_and_maybe_free_container(cur_container);
@ -547,7 +547,7 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
ContainerPtr volatile* bucket_entry = howl->container_addr(bucket);
while (true) {
if (Atomic::load(&howl->_num_entries) >= _config->cards_in_howl_threshold()) {
if (AtomicAccess::load(&howl->_num_entries) >= _config->cards_in_howl_threshold()) {
return Overflow;
}
@ -571,7 +571,7 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container,
}
if (increment_total && add_result == Added) {
Atomic::inc(&howl->_num_entries, memory_order_relaxed);
AtomicAccess::inc(&howl->_num_entries, memory_order_relaxed);
}
if (to_transfer != nullptr) {
@ -640,7 +640,7 @@ bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr,
ShouldNotReachHere();
}
ContainerPtr old_value = Atomic::cmpxchg(container_addr, cur_container, new_container); // Memory order?
ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, new_container); // Memory order?
if (old_value == cur_container) {
// Success. Indicate that the cards from the current card set must be transferred
// by this caller.
@ -687,7 +687,7 @@ void G1CardSet::transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPt
assert(container_type(source_container) == ContainerHowl, "must be");
// Need to correct for that the Full remembered set occupies more cards than the
// AoCS before.
Atomic::add(&_num_occupied, _config->max_cards_in_region() - table_entry->_num_occupied, memory_order_relaxed);
AtomicAccess::add(&_num_occupied, _config->max_cards_in_region() - table_entry->_num_occupied, memory_order_relaxed);
}
}
@ -713,14 +713,14 @@ void G1CardSet::transfer_cards_in_howl(ContainerPtr parent_container,
diff -= 1;
G1CardSetHowl* howling_array = container_ptr<G1CardSetHowl>(parent_container);
Atomic::add(&howling_array->_num_entries, diff, memory_order_relaxed);
AtomicAccess::add(&howling_array->_num_entries, diff, memory_order_relaxed);
G1CardSetHashTableValue* table_entry = get_container(card_region);
assert(table_entry != nullptr, "Table entry not found for transferred cards");
Atomic::add(&table_entry->_num_occupied, diff, memory_order_relaxed);
AtomicAccess::add(&table_entry->_num_occupied, diff, memory_order_relaxed);
Atomic::add(&_num_occupied, diff, memory_order_relaxed);
AtomicAccess::add(&_num_occupied, diff, memory_order_relaxed);
}
}
@ -827,8 +827,8 @@ G1AddCardResult G1CardSet::add_card(uint card_region, uint card_in_region, bool
}
if (increment_total && add_result == Added) {
Atomic::inc(&table_entry->_num_occupied, memory_order_relaxed);
Atomic::inc(&_num_occupied, memory_order_relaxed);
AtomicAccess::inc(&table_entry->_num_occupied, memory_order_relaxed);
AtomicAccess::inc(&_num_occupied, memory_order_relaxed);
}
if (should_grow_table) {
_table->grow();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
#include "gc/g1/g1CardSet.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/globalDefinitions.hpp"
@ -151,7 +151,7 @@ protected:
public:
G1CardSetContainer() : _ref_count(3) { }
uintptr_t refcount() const { return Atomic::load_acquire(&_ref_count); }
uintptr_t refcount() const { return AtomicAccess::load_acquire(&_ref_count); }
bool try_increment_refcount();
@ -192,7 +192,7 @@ private:
}
~G1CardSetArrayLocker() {
Atomic::release_store(_num_entries_addr, _local_num_entries);
AtomicAccess::release_store(_num_entries_addr, _local_num_entries);
}
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,7 @@ inline G1AddCardResult G1CardSetInlinePtr::add(uint card_idx, uint bits_per_card
return Overflow;
}
ContainerPtr new_value = merge(_value, card_idx, num_cards, bits_per_card);
ContainerPtr old_value = Atomic::cmpxchg(_value_addr, _value, new_value, memory_order_relaxed);
ContainerPtr old_value = AtomicAccess::cmpxchg(_value_addr, _value, new_value, memory_order_relaxed);
if (_value == old_value) {
return Added;
}
@ -126,7 +126,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
}
uintptr_t new_value = old_value + 2;
uintptr_t ref_count = Atomic::cmpxchg(&_ref_count, old_value, new_value);
uintptr_t ref_count = AtomicAccess::cmpxchg(&_ref_count, old_value, new_value);
if (ref_count == old_value) {
return true;
}
@ -137,7 +137,7 @@ inline bool G1CardSetContainer::try_increment_refcount() {
inline uintptr_t G1CardSetContainer::decrement_refcount() {
uintptr_t old_value = refcount();
assert((old_value & 0x1) != 0 && old_value >= 3, "precondition");
return Atomic::sub(&_ref_count, 2u);
return AtomicAccess::sub(&_ref_count, 2u);
}
inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_cards) :
@ -152,11 +152,11 @@ inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_ca
inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(EntryCountType volatile* num_entries_addr) :
_num_entries_addr(num_entries_addr) {
SpinYield s;
EntryCountType num_entries = Atomic::load(_num_entries_addr) & EntryMask;
EntryCountType num_entries = AtomicAccess::load(_num_entries_addr) & EntryMask;
while (true) {
EntryCountType old_value = Atomic::cmpxchg(_num_entries_addr,
num_entries,
(EntryCountType)(num_entries | LockBitMask));
EntryCountType old_value = AtomicAccess::cmpxchg(_num_entries_addr,
num_entries,
(EntryCountType)(num_entries | LockBitMask));
if (old_value == num_entries) {
// Succeeded locking the array.
_local_num_entries = num_entries;
@ -189,7 +189,7 @@ inline G1CardSetArray::EntryDataType G1CardSetArray::at(EntryCountType index) co
inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
assert(card_idx < (1u << (sizeof(EntryDataType) * BitsPerByte)),
"Card index %u does not fit allowed card value range.", card_idx);
EntryCountType num_entries = Atomic::load_acquire(&_num_entries) & EntryMask;
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
EntryCountType idx = 0;
for (; idx < num_entries; idx++) {
if (at(idx) == card_idx) {
@ -223,7 +223,7 @@ inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
}
inline bool G1CardSetArray::contains(uint card_idx) {
EntryCountType num_entries = Atomic::load_acquire(&_num_entries) & EntryMask;
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) {
if (at(idx) == card_idx) {
@ -235,7 +235,7 @@ inline bool G1CardSetArray::contains(uint card_idx) {
template <class CardVisitor>
void G1CardSetArray::iterate(CardVisitor& found) {
EntryCountType num_entries = Atomic::load_acquire(&_num_entries) & EntryMask;
EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask;
for (EntryCountType idx = 0; idx < num_entries; idx++) {
found(at(idx));
}
@ -260,7 +260,7 @@ inline G1AddCardResult G1CardSetBitMap::add(uint card_idx, size_t threshold, siz
return bm.at(card_idx) ? Found : Overflow;
}
if (bm.par_set_bit(card_idx)) {
Atomic::inc(&_num_bits_set, memory_order_relaxed);
AtomicAccess::inc(&_num_bits_set, memory_order_relaxed);
return Added;
}
return Found;
@ -311,7 +311,7 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf
inline bool G1CardSetHowl::contains(uint card_idx, G1CardSetConfiguration* config) {
EntryCountType bucket = config->howl_bucket_index(card_idx);
ContainerPtr* array_entry = container_addr(bucket);
ContainerPtr container = Atomic::load_acquire(array_entry);
ContainerPtr container = AtomicAccess::load_acquire(array_entry);
switch (G1CardSet::container_type(container)) {
case G1CardSet::ContainerArrayOfCards: {

View File

@ -26,7 +26,7 @@
#include "gc/g1/g1CardSetContainers.inline.hpp"
#include "gc/g1/g1CardSetMemory.inline.hpp"
#include "gc/g1/g1MonotonicArena.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/ostream.hpp"
G1CardSetAllocator::G1CardSetAllocator(const char* name,

View File

@ -28,7 +28,7 @@
#include "gc/g1/g1HeapRegion.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
@ -120,7 +120,7 @@ public:
bool grow_hint = false;
bool inserted = _table.insert(Thread::current(), lookup, method, &grow_hint);
if (inserted) {
Atomic::inc(&_num_entries);
AtomicAccess::inc(&_num_entries);
}
if (grow_hint) {
_table.grow(Thread::current());
@ -131,7 +131,7 @@ public:
HashTableLookUp lookup(method);
bool removed = _table.remove(Thread::current(), lookup);
if (removed) {
Atomic::dec(&_num_entries);
AtomicAccess::dec(&_num_entries);
}
return removed;
}
@ -182,7 +182,7 @@ public:
guarantee(succeeded, "unable to clean table");
if (num_deleted != 0) {
size_t current_size = Atomic::sub(&_num_entries, num_deleted);
size_t current_size = AtomicAccess::sub(&_num_entries, num_deleted);
shrink_to_match(current_size);
}
}
@ -226,7 +226,7 @@ public:
size_t mem_size() { return sizeof(*this) + _table.get_mem_size(Thread::current()); }
size_t number_of_entries() const { return Atomic::load(&_num_entries); }
size_t number_of_entries() const { return AtomicAccess::load(&_num_entries); }
};
uintx G1CodeRootSetHashTable::HashTableLookUp::get_hash() const {

View File

@ -105,7 +105,7 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"

View File

@ -41,7 +41,7 @@
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/stackChunkOop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "utilities/bitMap.inline.hpp"
@ -53,10 +53,10 @@ inline bool G1STWIsAliveClosure::do_object_b(oop p) {
inline JavaThread* const* G1JavaThreadsListClaimer::claim(uint& count) {
count = 0;
if (Atomic::load(&_cur_claim) >= _list.length()) {
if (AtomicAccess::load(&_cur_claim) >= _list.length()) {
return nullptr;
}
uint claim = Atomic::fetch_then_add(&_cur_claim, _claim_step);
uint claim = AtomicAccess::fetch_then_add(&_cur_claim, _claim_step);
if (claim >= _list.length()) {
return nullptr;
}

View File

@ -27,7 +27,7 @@
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/shared/space.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/quickSort.hpp"
// Determine collection set candidates (from marking): For all regions determine
@ -105,7 +105,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
// Claim a new chunk, returning its bounds [from, to[.
void claim_chunk(uint& from, uint& to) {
uint result = Atomic::add(&_cur_claim_idx, _chunk_size);
uint result = AtomicAccess::add(&_cur_claim_idx, _chunk_size);
assert(_max_size > result - 1,
"Array too small, is %u should be %u with chunk size %u.",
_max_size, result, _chunk_size);
@ -208,7 +208,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
void update_totals(uint num_regions) {
if (num_regions > 0) {
Atomic::add(&_num_regions_added, num_regions);
AtomicAccess::add(&_num_regions_added, num_regions);
}
}
@ -220,7 +220,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
void prune(G1HeapRegion** data) {
G1Policy* p = G1CollectedHeap::heap()->policy();
uint num_candidates = Atomic::load(&_num_regions_added);
uint num_candidates = AtomicAccess::load(&_num_regions_added);
uint min_old_cset_length = p->calc_min_old_cset_length(num_candidates);
uint num_pruned = 0;
@ -253,7 +253,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
wasted_bytes,
allowed_waste);
Atomic::sub(&_num_regions_added, num_pruned, memory_order_relaxed);
AtomicAccess::sub(&_num_regions_added, num_pruned, memory_order_relaxed);
}
public:

View File

@ -66,7 +66,7 @@
#include "nmt/memTracker.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
@ -151,21 +151,21 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::ChunkAllocator::allocate_new_
return nullptr;
}
size_t cur_idx = Atomic::fetch_then_add(&_size, 1u);
size_t cur_idx = AtomicAccess::fetch_then_add(&_size, 1u);
if (cur_idx >= _max_capacity) {
return nullptr;
}
size_t bucket = get_bucket(cur_idx);
if (Atomic::load_acquire(&_buckets[bucket]) == nullptr) {
if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) {
if (!_should_grow) {
// Prefer to restart the CM.
return nullptr;
}
MutexLocker x(G1MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
if (Atomic::load_acquire(&_buckets[bucket]) == nullptr) {
if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) {
size_t desired_capacity = bucket_size(bucket) * 2;
if (!try_expand_to(desired_capacity)) {
return nullptr;
@ -258,7 +258,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
// and the new capacity (new_capacity). This step ensures that there are no gaps in the
// array and that the capacity accurately reflects the reserved memory.
for (; i <= highest_bucket; i++) {
if (Atomic::load_acquire(&_buckets[i]) != nullptr) {
if (AtomicAccess::load_acquire(&_buckets[i]) != nullptr) {
continue; // Skip over already allocated buckets.
}
@ -278,7 +278,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
return false;
}
_capacity += bucket_capacity;
Atomic::release_store(&_buckets[i], bucket_base);
AtomicAccess::release_store(&_buckets[i], bucket_base);
}
return true;
}
@ -383,7 +383,7 @@ void G1CMRootMemRegions::reset() {
void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
assert_at_safepoint();
size_t idx = Atomic::fetch_then_add(&_num_root_regions, 1u);
size_t idx = AtomicAccess::fetch_then_add(&_num_root_regions, 1u);
assert(idx < _max_regions, "Trying to add more root MemRegions than there is space %zu", _max_regions);
assert(start != nullptr && end != nullptr && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
"end (" PTR_FORMAT ")", p2i(start), p2i(end));
@ -411,7 +411,7 @@ const MemRegion* G1CMRootMemRegions::claim_next() {
return nullptr;
}
size_t claimed_index = Atomic::fetch_then_add(&_claimed_root_regions, 1u);
size_t claimed_index = AtomicAccess::fetch_then_add(&_claimed_root_regions, 1u);
if (claimed_index < _num_root_regions) {
return &_root_regions[claimed_index];
}
@ -1109,7 +1109,7 @@ void G1ConcurrentMark::concurrent_cycle_start() {
}
uint G1ConcurrentMark::completed_mark_cycles() const {
return Atomic::load(&_completed_mark_cycles);
return AtomicAccess::load(&_completed_mark_cycles);
}
void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
@ -1118,7 +1118,7 @@ void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
_g1h->trace_heap_after_gc(_gc_tracer_cm);
if (mark_cycle_completed) {
Atomic::inc(&_completed_mark_cycles, memory_order_relaxed);
AtomicAccess::inc(&_completed_mark_cycles, memory_order_relaxed);
}
if (has_aborted()) {
@ -1320,7 +1320,7 @@ public:
G1OnRegionClosure on_region_cl(_g1h, _cm, &local_cleanup_list);
_g1h->heap_region_par_iterate_from_worker_offset(&on_region_cl, &_hrclaimer, worker_id);
Atomic::add(&_total_selected_for_rebuild, on_region_cl._num_selected_for_rebuild);
AtomicAccess::add(&_total_selected_for_rebuild, on_region_cl._num_selected_for_rebuild);
// Update the old/humongous region sets
_g1h->remove_from_old_gen_sets(on_region_cl._num_old_regions_removed,
@ -1903,7 +1903,7 @@ G1HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
HeapWord* end = curr_region != nullptr ? curr_region->end() : finger + G1HeapRegion::GrainWords;
// Is the gap between reading the finger and doing the CAS too long?
HeapWord* res = Atomic::cmpxchg(&_finger, finger, end);
HeapWord* res = AtomicAccess::cmpxchg(&_finger, finger, end);
if (res == finger && curr_region != nullptr) {
// we succeeded
HeapWord* bottom = curr_region->bottom();
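For context (sketch only, not from the patch): claim_region() above advances the shared marking finger with a cmpxchg — a worker reads the finger, computes where its claim would end, and owns the region only if its CAS actually moved the finger; a failed CAS means another worker claimed first and the caller re-reads. The cursor-bumping core of that claim, with an illustrative granularity and no bounds handling:

#include "runtime/atomicAccess.hpp"

typedef unsigned char* Addr;

static Addr volatile _finger = nullptr;   // illustrative shared cursor
static const size_t  _chunk  = 4096;      // illustrative claim granularity

// Returns the start of the claimed chunk, or nullptr if another thread won.
static Addr try_claim_chunk() {
  Addr cur = AtomicAccess::load(&_finger);
  Addr end = cur + _chunk;
  if (AtomicAccess::cmpxchg(&_finger, cur, end) == cur) {
    return cur;      // this thread owns [cur, end)
  }
  return nullptr;    // lost the race; caller re-reads the finger and retries
}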

View File

@ -338,7 +338,7 @@ void G1ConcurrentRefine::adjust_threads_wanted(size_t available_bytes) {
assert_current_thread_is_primary_refinement_thread();
size_t num_cards = _dcqs.num_cards();
size_t mutator_threshold = SIZE_MAX;
uint old_wanted = Atomic::load(&_threads_wanted);
uint old_wanted = AtomicAccess::load(&_threads_wanted);
_threads_needed.update(old_wanted,
available_bytes,
@ -360,7 +360,7 @@ void G1ConcurrentRefine::adjust_threads_wanted(size_t available_bytes) {
// worse.
mutator_threshold = _pending_cards_target;
}
Atomic::store(&_threads_wanted, new_wanted);
AtomicAccess::store(&_threads_wanted, new_wanted);
_dcqs.set_mutator_refinement_threshold(mutator_threshold);
log_debug(gc, refine)("Concurrent refinement: wanted %u, cards: %zu, "
"predicted: %zu, time: %1.2fms",
@ -374,7 +374,7 @@ void G1ConcurrentRefine::adjust_threads_wanted(size_t available_bytes) {
if (!_thread_control.activate(i)) {
// Failed to allocate and activate thread. Stop trying to activate, and
// instead use mutator threads to make up the gap.
Atomic::store(&_threads_wanted, i);
AtomicAccess::store(&_threads_wanted, i);
_dcqs.set_mutator_refinement_threshold(_pending_cards_target);
break;
}
@ -384,9 +384,9 @@ void G1ConcurrentRefine::adjust_threads_wanted(size_t available_bytes) {
void G1ConcurrentRefine::reduce_threads_wanted() {
assert_current_thread_is_primary_refinement_thread();
if (!_needs_adjust) { // Defer if adjustment request is active.
uint wanted = Atomic::load(&_threads_wanted);
uint wanted = AtomicAccess::load(&_threads_wanted);
if (wanted > 0) {
Atomic::store(&_threads_wanted, --wanted);
AtomicAccess::store(&_threads_wanted, --wanted);
}
// If very little time remains until GC, enable mutator refinement. If
// the target has been reached, this keeps the number of pending cards on
@ -398,7 +398,7 @@ void G1ConcurrentRefine::reduce_threads_wanted() {
}
bool G1ConcurrentRefine::is_thread_wanted(uint worker_id) const {
return worker_id < Atomic::load(&_threads_wanted);
return worker_id < AtomicAccess::load(&_threads_wanted);
}
bool G1ConcurrentRefine::is_thread_adjustment_needed() const {

View File

@ -37,7 +37,7 @@
#include "gc/shared/bufferNodeList.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "memory/iterator.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
@ -117,14 +117,14 @@ void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) {
}
size_t G1DirtyCardQueueSet::num_cards() const {
return Atomic::load(&_num_cards);
return AtomicAccess::load(&_num_cards);
}
void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
assert(cbn != nullptr, "precondition");
// Increment _num_cards before adding to queue, so queue removal doesn't
// need to deal with _num_cards possibly going negative.
Atomic::add(&_num_cards, cbn->size());
AtomicAccess::add(&_num_cards, cbn->size());
// Perform push in CS. The old tail may be popped while the push is
// observing it (attaching it to the new buffer). We need to ensure it
// can't be reused until the push completes, to avoid ABA problems.
@ -160,7 +160,7 @@ BufferNode* G1DirtyCardQueueSet::get_completed_buffer() {
result = dequeue_completed_buffer();
if (result == nullptr) return nullptr;
}
Atomic::sub(&_num_cards, result->size());
AtomicAccess::sub(&_num_cards, result->size());
return result;
}
@ -172,9 +172,9 @@ void G1DirtyCardQueueSet::verify_num_cards() const {
cur = cur->next()) {
actual += cur->size();
}
assert(actual == Atomic::load(&_num_cards),
assert(actual == AtomicAccess::load(&_num_cards),
"Num entries in completed buffers should be %zu but are %zu",
Atomic::load(&_num_cards), actual);
AtomicAccess::load(&_num_cards), actual);
}
#endif // ASSERT
@ -185,7 +185,7 @@ G1DirtyCardQueueSet::PausedBuffers::PausedList::PausedList() :
#ifdef ASSERT
G1DirtyCardQueueSet::PausedBuffers::PausedList::~PausedList() {
assert(Atomic::load(&_head) == nullptr, "precondition");
assert(AtomicAccess::load(&_head) == nullptr, "precondition");
assert(_tail == nullptr, "precondition");
}
#endif // ASSERT
@ -198,7 +198,7 @@ bool G1DirtyCardQueueSet::PausedBuffers::PausedList::is_next() const {
void G1DirtyCardQueueSet::PausedBuffers::PausedList::add(BufferNode* node) {
assert_not_at_safepoint();
assert(is_next(), "precondition");
BufferNode* old_head = Atomic::xchg(&_head, node);
BufferNode* old_head = AtomicAccess::xchg(&_head, node);
if (old_head == nullptr) {
assert(_tail == nullptr, "invariant");
_tail = node;
@ -208,9 +208,9 @@ void G1DirtyCardQueueSet::PausedBuffers::PausedList::add(BufferNode* node) {
}
G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::PausedList::take() {
BufferNode* head = Atomic::load(&_head);
BufferNode* head = AtomicAccess::load(&_head);
BufferNode* tail = _tail;
Atomic::store(&_head, (BufferNode*)nullptr);
AtomicAccess::store(&_head, (BufferNode*)nullptr);
_tail = nullptr;
return HeadTail(head, tail);
}
@ -219,17 +219,17 @@ G1DirtyCardQueueSet::PausedBuffers::PausedBuffers() : _plist(nullptr) {}
#ifdef ASSERT
G1DirtyCardQueueSet::PausedBuffers::~PausedBuffers() {
assert(Atomic::load(&_plist) == nullptr, "invariant");
assert(AtomicAccess::load(&_plist) == nullptr, "invariant");
}
#endif // ASSERT
void G1DirtyCardQueueSet::PausedBuffers::add(BufferNode* node) {
assert_not_at_safepoint();
PausedList* plist = Atomic::load_acquire(&_plist);
PausedList* plist = AtomicAccess::load_acquire(&_plist);
if (plist == nullptr) {
// Try to install a new next list.
plist = new PausedList();
PausedList* old_plist = Atomic::cmpxchg(&_plist, (PausedList*)nullptr, plist);
PausedList* old_plist = AtomicAccess::cmpxchg(&_plist, (PausedList*)nullptr, plist);
if (old_plist != nullptr) {
// Some other thread installed a new next list. Use it instead.
delete plist;
@ -247,11 +247,11 @@ G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_previous(
// Deal with plist in a critical section, to prevent it from being
// deleted out from under us by a concurrent take_previous().
GlobalCounter::CriticalSection cs(Thread::current());
previous = Atomic::load_acquire(&_plist);
previous = AtomicAccess::load_acquire(&_plist);
if ((previous == nullptr) || // Nothing to take.
previous->is_next() || // Not from a previous safepoint.
// Some other thread stole it.
(Atomic::cmpxchg(&_plist, previous, (PausedList*)nullptr) != previous)) {
(AtomicAccess::cmpxchg(&_plist, previous, (PausedList*)nullptr) != previous)) {
return HeadTail();
}
}
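take_previous() above claims the whole paused list in one step: the thread that successfully swings _plist from the pointer it observed to nullptr owns every node and may free the list. A standalone sketch of just that claim step with std::atomic follows; the GlobalCounter critical section that protects concurrent readers in HotSpot is deliberately omitted, and the names are illustrative.

#include <atomic>

struct Node { Node* next; };

struct ClaimableList {
  std::atomic<Node*> head{nullptr};

  // Returns the whole list if this thread won the claim, nullptr otherwise.
  Node* take_all() {
    Node* observed = head.load(std::memory_order_acquire);
    if (observed == nullptr) return nullptr;               // nothing to take
    // Only one thread can move head from 'observed' to nullptr.
    if (head.compare_exchange_strong(observed, nullptr,
                                     std::memory_order_acq_rel)) {
      return observed;                                      // we own every node
    }
    return nullptr;                                         // someone else took it
  }
};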
@ -268,9 +268,9 @@ G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_previous(
G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_all() {
assert_at_safepoint();
HeadTail result;
PausedList* plist = Atomic::load(&_plist);
PausedList* plist = AtomicAccess::load(&_plist);
if (plist != nullptr) {
Atomic::store(&_plist, (PausedList*)nullptr);
AtomicAccess::store(&_plist, (PausedList*)nullptr);
result = plist->take();
delete plist;
}
@ -286,7 +286,7 @@ void G1DirtyCardQueueSet::record_paused_buffer(BufferNode* node) {
// notification checking after the coming safepoint if it doesn't GC.
// Note that this means the queue's _num_cards differs from the number
// of cards in the queued buffers when there are paused buffers.
Atomic::add(&_num_cards, node->size());
AtomicAccess::add(&_num_cards, node->size());
_paused.add(node);
}
@ -325,7 +325,7 @@ void G1DirtyCardQueueSet::merge_bufferlists(G1RedirtyCardsQueueSet* src) {
assert(allocator() == src->allocator(), "precondition");
const BufferNodeList from = src->take_all_completed_buffers();
if (from._head != nullptr) {
Atomic::add(&_num_cards, from._entry_count);
AtomicAccess::add(&_num_cards, from._entry_count);
_completed.append(*from._head, *from._tail);
}
}
@ -334,8 +334,8 @@ BufferNodeList G1DirtyCardQueueSet::take_all_completed_buffers() {
enqueue_all_paused_buffers();
verify_num_cards();
Pair<BufferNode*, BufferNode*> pair = _completed.take_all();
size_t num_cards = Atomic::load(&_num_cards);
Atomic::store(&_num_cards, size_t(0));
size_t num_cards = AtomicAccess::load(&_num_cards);
AtomicAccess::store(&_num_cards, size_t(0));
return BufferNodeList(pair.first, pair.second, num_cards);
}
@ -480,7 +480,7 @@ void G1DirtyCardQueueSet::handle_completed_buffer(BufferNode* new_node,
enqueue_completed_buffer(new_node);
// No need for mutator refinement if number of cards is below limit.
if (Atomic::load(&_num_cards) <= Atomic::load(&_mutator_refinement_threshold)) {
if (AtomicAccess::load(&_num_cards) <= AtomicAccess::load(&_mutator_refinement_threshold)) {
return;
}
@ -514,7 +514,7 @@ bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_id,
size_t stop_at,
G1ConcurrentRefineStats* stats) {
// Not enough cards to trigger processing.
if (Atomic::load(&_num_cards) <= stop_at) return false;
if (AtomicAccess::load(&_num_cards) <= stop_at) return false;
BufferNode* node = get_completed_buffer();
if (node == nullptr) return false; // Didn't get a buffer to process.
@ -591,9 +591,9 @@ void G1DirtyCardQueueSet::record_detached_refinement_stats(G1ConcurrentRefineSta
}
size_t G1DirtyCardQueueSet::mutator_refinement_threshold() const {
return Atomic::load(&_mutator_refinement_threshold);
return AtomicAccess::load(&_mutator_refinement_threshold);
}
void G1DirtyCardQueueSet::set_mutator_refinement_threshold(size_t value) {
Atomic::store(&_mutator_refinement_threshold, value);
AtomicAccess::store(&_mutator_refinement_threshold, value);
}

View File

@ -28,7 +28,7 @@
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/bitMap.inline.hpp"
G1EvacFailureRegions::G1EvacFailureRegions() :
@ -43,7 +43,7 @@ G1EvacFailureRegions::~G1EvacFailureRegions() {
}
void G1EvacFailureRegions::pre_collection(uint max_regions) {
Atomic::store(&_num_regions_evac_failed, 0u);
AtomicAccess::store(&_num_regions_evac_failed, 0u);
_regions_evac_failed.resize(max_regions);
_regions_pinned.resize(max_regions);
_regions_alloc_failed.resize(max_regions);
@ -69,6 +69,6 @@ void G1EvacFailureRegions::par_iterate(G1HeapRegionClosure* closure,
G1CollectedHeap::heap()->par_iterate_regions_array(closure,
hrclaimer,
_evac_failed_regions,
Atomic::load(&_num_regions_evac_failed),
AtomicAccess::load(&_num_regions_evac_failed),
worker_id);
}

View File

@ -29,10 +29,10 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
uint G1EvacFailureRegions::num_regions_evac_failed() const {
return Atomic::load(&_num_regions_evac_failed);
return AtomicAccess::load(&_num_regions_evac_failed);
}
bool G1EvacFailureRegions::has_regions_evac_failed() const {
@ -57,7 +57,7 @@ bool G1EvacFailureRegions::record(uint worker_id, uint region_idx, bool cause_pi
bool success = _regions_evac_failed.par_set_bit(region_idx,
memory_order_relaxed);
if (success) {
size_t offset = Atomic::fetch_then_add(&_num_regions_evac_failed, 1u);
size_t offset = AtomicAccess::fetch_then_add(&_num_regions_evac_failed, 1u);
_evac_failed_regions[offset] = region_idx;
G1CollectedHeap* g1h = G1CollectedHeap::heap();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,28 +27,28 @@
#include "gc/g1/g1EvacStats.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
inline void G1EvacStats::add_direct_allocated(size_t value) {
Atomic::add(&_direct_allocated, value, memory_order_relaxed);
AtomicAccess::add(&_direct_allocated, value, memory_order_relaxed);
}
inline void G1EvacStats::add_num_plab_filled(size_t value) {
Atomic::add(&_num_plab_filled, value, memory_order_relaxed);
AtomicAccess::add(&_num_plab_filled, value, memory_order_relaxed);
}
inline void G1EvacStats::add_num_direct_allocated(size_t value) {
Atomic::add(&_num_direct_allocated, value, memory_order_relaxed);
AtomicAccess::add(&_num_direct_allocated, value, memory_order_relaxed);
}
inline void G1EvacStats::add_region_end_waste(size_t value) {
Atomic::add(&_region_end_waste, value, memory_order_relaxed);
Atomic::inc(&_regions_filled, memory_order_relaxed);
AtomicAccess::add(&_region_end_waste, value, memory_order_relaxed);
AtomicAccess::inc(&_regions_filled, memory_order_relaxed);
}
inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) {
Atomic::add(&_failure_used, used, memory_order_relaxed);
Atomic::add(&_failure_waste, waste, memory_order_relaxed);
AtomicAccess::add(&_failure_used, used, memory_order_relaxed);
AtomicAccess::add(&_failure_waste, waste, memory_order_relaxed);
}
#endif // SHARE_GC_G1_G1EVACSTATS_INLINE_HPP

View File

@ -24,7 +24,7 @@
#include "gc/g1/g1FreeIdSet.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
@ -74,13 +74,13 @@ uint G1FreeIdSet::claim_par_id() {
// Semaphore gate permits passage by no more than the number of
// available ids, so there must be one that we can claim. But there
// may be multiple threads trying to claim ids at the same time.
uintx old_head = Atomic::load(&_head);
uintx old_head = AtomicAccess::load(&_head);
uint index;
while (true) {
index = head_index(old_head);
assert(index < _size, "invariant");
uintx new_head = make_head(_next[index], old_head);
new_head = Atomic::cmpxchg(&_head, old_head, new_head);
new_head = AtomicAccess::cmpxchg(&_head, old_head, new_head);
if (new_head == old_head) break;
old_head = new_head;
}
@ -92,11 +92,11 @@ void G1FreeIdSet::release_par_id(uint id) {
uint index = id - _start;
assert(index < _size, "invalid id %u", id);
assert(_next[index] == Claimed, "precondition");
uintx old_head = Atomic::load(&_head);
uintx old_head = AtomicAccess::load(&_head);
while (true) {
_next[index] = head_index(old_head);
uintx new_head = make_head(index, old_head);
new_head = Atomic::cmpxchg(&_head, old_head, new_head);
new_head = AtomicAccess::cmpxchg(&_head, old_head, new_head);
if (new_head == old_head) break;
old_head = new_head;
}
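Both claim_par_id() and release_par_id() use the same retry shape: load the head, build a replacement that routes through make_head() so a version tag changes on every update (defeating ABA), and compare-and-swap, reusing the value a failed exchange hands back. Below is a minimal sketch of that shape with std::atomic, packing a 32-bit index and a 32-bit version into one 64-bit word; the sizes, names, and the assumption that a free id always exists are illustrative only.

#include <atomic>
#include <cstdint>

// Free list of small ids, popped lock-free. The head word packs the index of
// the first free id in the low 32 bits and a version counter in the high 32
// bits; bumping the version on every change avoids the ABA problem.
struct FreeIdStack {
  static const uint32_t Size = 16;
  uint32_t next[Size];                 // next[i] = id following i on the free list
  std::atomic<uint64_t> head;

  FreeIdStack() {
    for (uint32_t i = 0; i < Size; i++) next[i] = i + 1;   // 0 -> 1 -> 2 -> ...
    head.store(make_head(0, 0), std::memory_order_relaxed);
  }

  static uint64_t make_head(uint32_t index, uint64_t old_head) {
    return (((old_head >> 32) + 1) << 32) | index;
  }
  static uint32_t head_index(uint64_t head) { return static_cast<uint32_t>(head); }

  // Caller must know a free id exists (the original gates this with a semaphore).
  uint32_t claim() {
    uint64_t old_head = head.load(std::memory_order_acquire);
    while (true) {
      uint32_t index = head_index(old_head);
      uint64_t new_head = make_head(next[index], old_head);
      // On failure, compare_exchange_strong writes the current head back into
      // old_head, so the next iteration retries with fresh data.
      if (head.compare_exchange_strong(old_head, new_head,
                                       std::memory_order_acq_rel)) {
        return index;
      }
    }
  }
};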

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
#include "gc/g1/g1FullGCHeapRegionAttr.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
bool G1FullCollector::is_compacting(oop obj) const {
return _region_attr_table.is_compacting(cast_from_oop<HeapWord *>(obj));
@ -63,11 +63,11 @@ void G1FullCollector::update_from_skip_compacting_to_compacting(uint region_idx)
}
void G1FullCollector::set_compaction_top(G1HeapRegion* r, HeapWord* value) {
Atomic::store(&_compaction_tops[r->hrm_index()], value);
AtomicAccess::store(&_compaction_tops[r->hrm_index()], value);
}
HeapWord* G1FullCollector::compaction_top(G1HeapRegion* r) const {
return Atomic::load(&_compaction_tops[r->hrm_index()]);
return AtomicAccess::load(&_compaction_tops[r->hrm_index()]);
}
void G1FullCollector::set_has_compaction_targets() {

View File

@ -37,7 +37,7 @@
#include "gc/shared/weakProcessor.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
class G1AdjustLiveClosure : public StackObj {
G1AdjustClosure* _adjust_closure;

View File

@ -43,7 +43,7 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/powerOfTwo.hpp"
@ -288,7 +288,7 @@ void G1HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
}
void G1HeapRegion::note_self_forward_chunk_done(size_t garbage_bytes) {
Atomic::add(&_garbage_bytes, garbage_bytes, memory_order_relaxed);
AtomicAccess::add(&_garbage_bytes, garbage_bytes, memory_order_relaxed);
}
// Code roots support
@ -441,7 +441,7 @@ void G1HeapRegion::print_on(outputStream* st) const {
st->print("|-");
}
}
st->print("|%3zu", Atomic::load(&_pinned_object_count));
st->print("|%3zu", AtomicAccess::load(&_pinned_object_count));
st->print_cr("");
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -394,7 +394,7 @@ public:
bool is_old_or_humongous() const { return _type.is_old_or_humongous(); }
size_t pinned_count() const { return Atomic::load(&_pinned_object_count); }
size_t pinned_count() const { return AtomicAccess::load(&_pinned_object_count); }
bool has_pinned_objects() const { return pinned_count() > 0; }
void set_free();

View File

@ -35,7 +35,7 @@
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/init.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
@ -194,7 +194,7 @@ inline HeapWord* G1HeapRegion::par_allocate(size_t min_word_size,
size_t want_to_allocate = MIN2(available, desired_word_size);
if (want_to_allocate >= min_word_size) {
HeapWord* new_top = obj + want_to_allocate;
HeapWord* result = Atomic::cmpxchg(&_top, obj, new_top);
HeapWord* result = AtomicAccess::cmpxchg(&_top, obj, new_top);
// result can be one of two:
// the old top value: the exchange succeeded
// otherwise: the new value of the top is returned.
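par_allocate() above is the classic lock-free bump-pointer allocation: compute the new top, try to install it with a compare-and-swap, and retry only if another thread moved top first. A standalone sketch of the same loop with std::atomic (byte-sized allocation over a plain buffer; names are illustrative):

#include <atomic>
#include <cstddef>

struct BumpRegion {
  char buffer[4096];
  char* const end = buffer + sizeof(buffer);
  std::atomic<char*> top{buffer};

  // Returns the start of 'size' bytes, or nullptr if the region is full.
  char* par_allocate(size_t size) {
    char* obj = top.load(std::memory_order_relaxed);
    while (obj + size <= end) {
      char* new_top = obj + size;
      // On failure, 'obj' is refreshed with the current top and the capacity
      // check runs again; on success the range [obj, new_top) is ours.
      if (top.compare_exchange_weak(obj, new_top, std::memory_order_relaxed)) {
        return obj;
      }
    }
    return nullptr;
  }
};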
@ -258,11 +258,11 @@ inline HeapWord* G1HeapRegion::parsable_bottom() const {
}
inline HeapWord* G1HeapRegion::parsable_bottom_acquire() const {
return Atomic::load_acquire(&_parsable_bottom);
return AtomicAccess::load_acquire(&_parsable_bottom);
}
inline void G1HeapRegion::reset_parsable_bottom() {
Atomic::release_store(&_parsable_bottom, bottom());
AtomicAccess::release_store(&_parsable_bottom, bottom());
}
inline void G1HeapRegion::note_end_of_marking(HeapWord* top_at_mark_start, size_t marked_bytes, size_t incoming_refs) {
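parsable_bottom_acquire() and reset_parsable_bottom() above pair a release store with an acquire load, so everything written before the new value is published is visible to any thread that observes it. The same pairing in a self-contained std::atomic sketch (names illustrative):

#include <atomic>

struct Published {
  int payload = 0;                       // written before publication
  std::atomic<int*> ptr{nullptr};

  void publish() {
    payload = 42;                        // plain store...
    ptr.store(&payload, std::memory_order_release);   // ...made visible by release
  }

  int read() {
    int* p = ptr.load(std::memory_order_acquire);     // acquire pairs with release
    return (p != nullptr) ? *p : -1;     // if the pointer is seen, payload == 42 is too
  }
};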
@ -511,7 +511,7 @@ inline void G1HeapRegion::record_surv_words_in_group(size_t words_survived) {
inline void G1HeapRegion::add_pinned_object_count(size_t value) {
assert(value != 0, "wasted effort");
assert(!is_free(), "trying to pin free region %u, adding %zu", hrm_index(), value);
Atomic::add(&_pinned_object_count, value, memory_order_relaxed);
AtomicAccess::add(&_pinned_object_count, value, memory_order_relaxed);
}
inline void G1HeapRegion::install_cset_group(G1CSetCandidateGroup* cset_group) {

View File

@ -34,7 +34,7 @@
#include "jfr/jfrEvents.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/bitMap.inline.hpp"
@ -726,7 +726,7 @@ bool G1HeapRegionClaimer::is_region_claimed(uint region_index) const {
bool G1HeapRegionClaimer::claim_region(uint region_index) {
assert(region_index < _n_regions, "Invalid index.");
uint old_val = Atomic::cmpxchg(&_claims[region_index], Unclaimed, Claimed);
uint old_val = AtomicAccess::cmpxchg(&_claims[region_index], Unclaimed, Claimed);
return old_val == Unclaimed;
}
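claim_region() above lets any number of workers race on a shared claim array; exactly one cmpxchg from Unclaimed to Claimed succeeds per index, and only that caller gets true. A standalone sketch of the same idea with std::atomic (the fixed capacity and names are illustrative):

#include <atomic>
#include <cstddef>

struct RegionClaimer {
  static const unsigned Unclaimed = 0;
  static const unsigned Claimed   = 1;
  static const size_t MaxRegions  = 1024;
  std::atomic<unsigned> claims[MaxRegions];

  RegionClaimer() {
    for (size_t i = 0; i < MaxRegions; i++) {
      claims[i].store(Unclaimed, std::memory_order_relaxed);
    }
  }

  // Returns true for exactly one caller per region index.
  bool claim_region(size_t index) {
    unsigned expected = Unclaimed;
    return claims[index].compare_exchange_strong(expected, Claimed,
                                                 std::memory_order_relaxed);
  }
};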

View File

@ -31,7 +31,7 @@
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"

View File

@ -30,7 +30,7 @@
#include "gc/g1/g1CodeRootSet.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/bitMap.hpp"

View File

@ -30,7 +30,7 @@
#include "gc/g1/g1CardSet.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/bitMap.inline.hpp"
void G1HeapRegionRemSet::set_state_untracked() {

View File

@ -24,7 +24,7 @@
#include "gc/g1/g1MonotonicArena.inline.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/globalCounter.inline.hpp"
@ -61,13 +61,13 @@ void G1MonotonicArena::SegmentFreeList::bulk_add(Segment& first,
size_t num,
size_t mem_size) {
_list.prepend(first, last);
Atomic::add(&_num_segments, num, memory_order_relaxed);
Atomic::add(&_mem_size, mem_size, memory_order_relaxed);
AtomicAccess::add(&_num_segments, num, memory_order_relaxed);
AtomicAccess::add(&_mem_size, mem_size, memory_order_relaxed);
}
void G1MonotonicArena::SegmentFreeList::print_on(outputStream* out, const char* prefix) {
out->print_cr("%s: segments %zu size %zu",
prefix, Atomic::load(&_num_segments), Atomic::load(&_mem_size));
prefix, AtomicAccess::load(&_num_segments), AtomicAccess::load(&_mem_size));
}
G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get_all(size_t& num_segments,
@ -75,12 +75,12 @@ G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get_all(size_t& nu
GlobalCounter::CriticalSection cs(Thread::current());
Segment* result = _list.pop_all();
num_segments = Atomic::load(&_num_segments);
mem_size = Atomic::load(&_mem_size);
num_segments = AtomicAccess::load(&_num_segments);
mem_size = AtomicAccess::load(&_mem_size);
if (result != nullptr) {
Atomic::sub(&_num_segments, num_segments, memory_order_relaxed);
Atomic::sub(&_mem_size, mem_size, memory_order_relaxed);
AtomicAccess::sub(&_num_segments, num_segments, memory_order_relaxed);
AtomicAccess::sub(&_mem_size, mem_size, memory_order_relaxed);
}
return result;
}
@ -96,8 +96,8 @@ void G1MonotonicArena::SegmentFreeList::free_all() {
Segment::delete_segment(cur);
}
Atomic::sub(&_num_segments, num_freed, memory_order_relaxed);
Atomic::sub(&_mem_size, mem_size_freed, memory_order_relaxed);
AtomicAccess::sub(&_num_segments, num_freed, memory_order_relaxed);
AtomicAccess::sub(&_mem_size, mem_size_freed, memory_order_relaxed);
}
G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
@ -115,7 +115,7 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
}
// Install it as current allocation segment.
Segment* old = Atomic::cmpxchg(&_first, prev, next);
Segment* old = AtomicAccess::cmpxchg(&_first, prev, next);
if (old != prev) {
// Somebody else installed the segment, use that one.
Segment::delete_segment(next);
@ -126,9 +126,9 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
_last = next;
}
// Successfully installed the segment into the list.
Atomic::inc(&_num_segments, memory_order_relaxed);
Atomic::add(&_mem_size, next->mem_size(), memory_order_relaxed);
Atomic::add(&_num_total_slots, next->num_slots(), memory_order_relaxed);
AtomicAccess::inc(&_num_segments, memory_order_relaxed);
AtomicAccess::add(&_mem_size, next->mem_size(), memory_order_relaxed);
AtomicAccess::add(&_num_total_slots, next->num_slots(), memory_order_relaxed);
return next;
}
}
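new_segment() above also shows the install-or-adopt idiom for lazily created shared objects: allocate tentatively, try to cmpxchg the new object into place, and if another thread won the race, delete the local copy and use the winner's. A reduced sketch with std::atomic covering only the first-segment case (names are illustrative):

#include <atomic>

struct ArenaSegment { /* payload omitted */ };

struct Arena {
  std::atomic<ArenaSegment*> first{nullptr};

  ArenaSegment* get_or_install_first() {
    ArenaSegment* cur = first.load(std::memory_order_acquire);
    if (cur != nullptr) return cur;
    ArenaSegment* fresh = new ArenaSegment();
    ArenaSegment* expected = nullptr;
    if (first.compare_exchange_strong(expected, fresh,
                                      std::memory_order_acq_rel)) {
      return fresh;                      // we installed it
    }
    delete fresh;                        // somebody beat us; adopt their segment
    return expected;                     // compare_exchange left the winner here
  }
};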
@ -155,7 +155,7 @@ uint G1MonotonicArena::slot_size() const {
}
void G1MonotonicArena::drop_all() {
Segment* cur = Atomic::load_acquire(&_first);
Segment* cur = AtomicAccess::load_acquire(&_first);
if (cur != nullptr) {
assert(_last != nullptr, "If there is at least one segment, there must be a last one.");
@ -193,7 +193,7 @@ void G1MonotonicArena::drop_all() {
void* G1MonotonicArena::allocate() {
assert(slot_size() > 0, "instance size not set.");
Segment* cur = Atomic::load_acquire(&_first);
Segment* cur = AtomicAccess::load_acquire(&_first);
if (cur == nullptr) {
cur = new_segment(cur);
}
@ -201,7 +201,7 @@ void* G1MonotonicArena::allocate() {
while (true) {
void* slot = cur->allocate_slot();
if (slot != nullptr) {
Atomic::inc(&_num_allocated_slots, memory_order_relaxed);
AtomicAccess::inc(&_num_allocated_slots, memory_order_relaxed);
guarantee(is_aligned(slot, _alloc_options->slot_alignment()),
"result " PTR_FORMAT " not aligned at %u", p2i(slot), _alloc_options->slot_alignment());
return slot;
@ -213,7 +213,7 @@ void* G1MonotonicArena::allocate() {
}
uint G1MonotonicArena::num_segments() const {
return Atomic::load(&_num_segments);
return AtomicAccess::load(&_num_segments);
}
#ifdef ASSERT
@ -238,7 +238,7 @@ uint G1MonotonicArena::calculate_length() const {
template <typename SegmentClosure>
void G1MonotonicArena::iterate_segments(SegmentClosure& closure) const {
Segment* cur = Atomic::load_acquire(&_first);
Segment* cur = AtomicAccess::load_acquire(&_first);
assert((cur != nullptr) == (_last != nullptr),
"If there is at least one segment, there must be a last one");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -81,11 +81,11 @@ private:
DEBUG_ONLY(uint calculate_length() const;)
public:
const Segment* first_segment() const { return Atomic::load(&_first); }
const Segment* first_segment() const { return AtomicAccess::load(&_first); }
uint num_total_slots() const { return Atomic::load(&_num_total_slots); }
uint num_total_slots() const { return AtomicAccess::load(&_num_total_slots); }
uint num_allocated_slots() const {
uint allocated = Atomic::load(&_num_allocated_slots);
uint allocated = AtomicAccess::load(&_num_allocated_slots);
assert(calculate_length() == allocated, "Must be");
return allocated;
}
@ -214,8 +214,8 @@ public:
void print_on(outputStream* out, const char* prefix = "");
size_t num_segments() const { return Atomic::load(&_num_segments); }
size_t mem_size() const { return Atomic::load(&_mem_size); }
size_t num_segments() const { return AtomicAccess::load(&_num_segments); }
size_t mem_size() const { return AtomicAccess::load(&_mem_size); }
};
// Configuration for G1MonotonicArena, e.g slot size, slot number of next Segment.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -28,14 +28,14 @@
#include "gc/g1/g1MonotonicArena.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/globalCounter.inline.hpp"
inline void* G1MonotonicArena::Segment::allocate_slot() {
if (_next_allocate >= _num_slots) {
return nullptr;
}
uint result = Atomic::fetch_then_add(&_next_allocate, 1u, memory_order_relaxed);
uint result = AtomicAccess::fetch_then_add(&_next_allocate, 1u, memory_order_relaxed);
if (result >= _num_slots) {
return nullptr;
}
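allocate_slot() above hands out slots with a single fetch_then_add; any index at or past _num_slots means the segment is exhausted, and the relaxed pre-check only avoids bumping the counter further once that has happened. A standalone sketch with std::atomic (slot type, capacity, and names are illustrative):

#include <atomic>
#include <cstdint>

struct SlotSegment {
  static const uint32_t NumSlots = 256;
  std::atomic<uint32_t> next_allocate{0};
  uint64_t slots[NumSlots];

  // Returns a unique slot, or nullptr once the segment is exhausted.
  uint64_t* allocate_slot() {
    if (next_allocate.load(std::memory_order_relaxed) >= NumSlots) {
      return nullptr;                    // cheap early-out, not needed for correctness
    }
    uint32_t index = next_allocate.fetch_add(1, std::memory_order_relaxed);
    if (index >= NumSlots) {
      return nullptr;                    // lost the race for the last slots
    }
    return &slots[index];
  }
};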
@ -48,8 +48,8 @@ inline G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get() {
Segment* result = _list.pop();
if (result != nullptr) {
Atomic::dec(&_num_segments, memory_order_relaxed);
Atomic::sub(&_mem_size, result->mem_size(), memory_order_relaxed);
AtomicAccess::dec(&_num_segments, memory_order_relaxed);
AtomicAccess::sub(&_mem_size, result->mem_size(), memory_order_relaxed);
}
return result;
}

View File

@ -28,7 +28,7 @@
#include "nmt/memTracker.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"

View File

@ -42,7 +42,7 @@
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/globalDefinitions.hpp"

View File

@ -24,7 +24,7 @@
#include "gc/g1/g1ParallelCleaning.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif
@ -35,11 +35,11 @@ JVMCICleaningTask::JVMCICleaningTask() :
}
bool JVMCICleaningTask::claim_cleaning_task() {
if (Atomic::load(&_cleaning_claimed)) {
if (AtomicAccess::load(&_cleaning_claimed)) {
return false;
}
return !Atomic::cmpxchg(&_cleaning_claimed, false, true);
return !AtomicAccess::cmpxchg(&_cleaning_claimed, false, true);
}
void JVMCICleaningTask::work(bool unloading_occurred) {
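claim_cleaning_task() above is a one-shot claim: a relaxed load filters out the common already-claimed case, then a single cmpxchg from false to true picks the winner. Note that AtomicAccess::cmpxchg returns the previous value, hence the negation in the original; std::atomic's compare_exchange_strong reports success directly, as in this illustrative sketch:

#include <atomic>

struct OneShotTask {
  std::atomic<bool> claimed{false};

  // Returns true for exactly one caller over the lifetime of the task.
  bool try_claim() {
    if (claimed.load(std::memory_order_relaxed)) {
      return false;                      // fast path: somebody already owns it
    }
    bool expected = false;
    return claimed.compare_exchange_strong(expected, true,
                                           std::memory_order_relaxed);
  }
};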

View File

@ -204,8 +204,8 @@ void G1Policy::update_young_length_bounds(size_t pending_cards, size_t card_rs_l
// allocation.
// That is "fine" - at most this will schedule a GC (hopefully only a little) too
// early or too late.
Atomic::store(&_young_list_desired_length, new_young_list_desired_length);
Atomic::store(&_young_list_target_length, new_young_list_target_length);
AtomicAccess::store(&_young_list_desired_length, new_young_list_desired_length);
AtomicAccess::store(&_young_list_target_length, new_young_list_target_length);
}
// Calculates desired young gen length. It is calculated from:

Some files were not shown because too many files have changed in this diff