Mirror of https://github.com/openjdk/jdk.git (synced 2026-01-28 03:58:21 +00:00)
8374328: Convert simple AtomicAccess uses in gc/shared to use Atomic<T>
Reviewed-by: dholmes, tschatzl
commit c5159fc9fa
parent 904ba5f5ed
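For context: this change swaps free-function calls on volatile fields (AtomicAccess::load, ::store, ::add, ...) for an Atomic<T> field type with member functions. A minimal standard-C++ sketch of the shape of that member API, using std::atomic as a stand-in (the real HotSpot Atomic<T> is not built on std::atomic, and the memory orders below are illustrative assumptions; AtomicSketch is a made-up name):

// Illustrative stand-in for HotSpot's Atomic<T> member API.
#include <atomic>
#include <cstdint>

template <typename T>
class AtomicSketch {
  std::atomic<T> _value;
public:
  constexpr explicit AtomicSketch(T v = T()) : _value(v) {}

  // Plain accesses: AtomicAccess::load/store become load_relaxed/store_relaxed.
  T load_relaxed() const { return _value.load(std::memory_order_relaxed); }
  void store_relaxed(T v) { _value.store(v, std::memory_order_relaxed); }

  // Ordered accesses keep their names: load_acquire, release_store(_fence).
  T load_acquire() const { return _value.load(std::memory_order_acquire); }
  void release_store(T v) { _value.store(v, std::memory_order_release); }
  void release_store_fence(T v) {
    _value.store(v, std::memory_order_release);
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }

  // RMW ops name their result: *_then_fetch returns the new value,
  // fetch_then_* returns the old value.
  T add_then_fetch(T v) { return _value.fetch_add(v) + v; }
  T sub_then_fetch(T v) { return _value.fetch_sub(v) - v; }
  T fetch_then_add(T v) { return _value.fetch_add(v); }

  // AtomicAccess::cmpxchg becomes compare_exchange; like cmpxchg, it
  // returns the value observed (equal to 'expected' exactly on success).
  T compare_exchange(T expected, T desired) {
    _value.compare_exchange_strong(expected, desired);
    return expected;  // on failure, holds the observed value
  }
};

The renaming convention is visible throughout the diff: AtomicAccess::add/sub (which return the updated value) map to add_then_fetch/sub_then_fetch, while fetch_then_add keeps returning the old value.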
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
 #include "memory/universe.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/method.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/javaThread.hpp"
 #include "runtime/threads.hpp"
@@ -196,8 +197,8 @@ int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
   // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
   // a very rare event.
   if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
-    static volatile uint32_t counter=0;
-    if (AtomicAccess::add(&counter, 1u) % 10 == 0) {
+    static Atomic<uint32_t> counter{0};
+    if (counter.add_then_fetch(1u) % 10 == 0) {
       may_enter = false;
     }
   }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  */

 #include "gc/shared/concurrentGCThread.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/init.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -48,7 +48,7 @@ void ConcurrentGCThread::run() {

   // Signal thread has terminated
   MonitorLocker ml(Terminator_lock);
-  AtomicAccess::release_store(&_has_terminated, true);
+  _has_terminated.release_store(true);
   ml.notify_all();
 }

@@ -57,21 +57,21 @@ void ConcurrentGCThread::stop() {
   assert(!has_terminated(), "Invalid state");

   // Signal thread to terminate
-  AtomicAccess::release_store_fence(&_should_terminate, true);
+  _should_terminate.release_store_fence(true);

   stop_service();

   // Wait for thread to terminate
   MonitorLocker ml(Terminator_lock);
-  while (!_has_terminated) {
+  while (!_has_terminated.load_relaxed()) {
     ml.wait();
   }
 }

 bool ConcurrentGCThread::should_terminate() const {
-  return AtomicAccess::load_acquire(&_should_terminate);
+  return _should_terminate.load_acquire();
 }

 bool ConcurrentGCThread::has_terminated() const {
-  return AtomicAccess::load_acquire(&_has_terminated);
+  return _has_terminated.load_acquire();
 }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,14 +25,15 @@
 #ifndef SHARE_GC_SHARED_CONCURRENTGCTHREAD_HPP
 #define SHARE_GC_SHARED_CONCURRENTGCTHREAD_HPP

+#include "runtime/atomic.hpp"
 #include "runtime/javaThread.hpp"
 #include "runtime/nonJavaThread.hpp"
 #include "utilities/debug.hpp"

 class ConcurrentGCThread: public NamedThread {
 private:
-  volatile bool _should_terminate;
-  volatile bool _has_terminated;
+  Atomic<bool> _should_terminate;
+  Atomic<bool> _has_terminated;

 protected:
   void create_and_start(ThreadPriority prio = NearMaxPriority);

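The two flags above drive a simple termination handshake: stop() publishes _should_terminate, the service thread eventually observes it and finishes, and termination is reported back under the Terminator_lock monitor. A minimal sketch of the same handshake in standard C++, with std::atomic and std::condition_variable standing in for Atomic<bool> and MonitorLocker (SketchGCThread is a made-up name; stop_service() and the trailing fence of release_store_fence are omitted):

#include <atomic>
#include <condition_variable>
#include <mutex>

class SketchGCThread {
  std::atomic<bool> _should_terminate{false};
  std::atomic<bool> _has_terminated{false};
  std::mutex _terminator_lock;
  std::condition_variable _terminator_cv;

public:
  // Body of the concurrent thread: poll the flag, then report termination.
  void run() {
    while (!_should_terminate.load(std::memory_order_acquire)) {
      // ... do one unit of concurrent work ...
    }
    // Signal termination under the lock, then wake any waiter.
    std::lock_guard<std::mutex> guard(_terminator_lock);
    _has_terminated.store(true, std::memory_order_release);
    _terminator_cv.notify_all();
  }

  // Caller side: publish the request, then wait for the thread to finish.
  void stop() {
    _should_terminate.store(true, std::memory_order_release);
    std::unique_lock<std::mutex> lock(_terminator_lock);
    _terminator_cv.wait(lock, [this] {
      return _has_terminated.load(std::memory_order_relaxed);
    });
  }
};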
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/javaThread.inline.hpp"
 #include "runtime/safepoint.hpp"
@@ -60,16 +60,13 @@ public:
 };

 Monitor* GCLocker::_lock;
-volatile bool GCLocker::_is_gc_request_pending;
+Atomic<bool> GCLocker::_is_gc_request_pending{false};

-DEBUG_ONLY(uint64_t GCLocker::_verify_in_cr_count;)
+DEBUG_ONLY(Atomic<uint64_t> GCLocker::_verify_in_cr_count{0};)

 void GCLocker::initialize() {
   assert(JNICritical_lock != nullptr, "inv");
   _lock = JNICritical_lock;
-  _is_gc_request_pending = false;
-
-  DEBUG_ONLY(_verify_in_cr_count = 0;)
 }

 bool GCLocker::is_active() {
@@ -84,11 +81,11 @@ bool GCLocker::is_active() {
 void GCLocker::block() {
   // _lock is held from the beginning of block() to the end of unblock().
   _lock->lock();
-  assert(AtomicAccess::load(&_is_gc_request_pending) == false, "precondition");
+  assert(_is_gc_request_pending.load_relaxed() == false, "precondition");

   GCLockerTimingDebugLogger logger("Thread blocked to start GC.");

-  AtomicAccess::store(&_is_gc_request_pending, true);
+  _is_gc_request_pending.store_relaxed(true);

   // The _is_gc_request_pending and _jni_active_critical (inside
   // in_critical_atomic()) variables form a Dekker duality. On the GC side, the
@@ -112,14 +109,14 @@ void GCLocker::block() {
 #ifdef ASSERT
   // Matching the storestore in GCLocker::exit.
   OrderAccess::loadload();
-  assert(AtomicAccess::load(&_verify_in_cr_count) == 0, "inv");
+  assert(_verify_in_cr_count.load_relaxed() == 0, "inv");
 #endif
 }

 void GCLocker::unblock() {
-  assert(AtomicAccess::load(&_is_gc_request_pending) == true, "precondition");
+  assert(_is_gc_request_pending.load_relaxed() == true, "precondition");

-  AtomicAccess::store(&_is_gc_request_pending, false);
+  _is_gc_request_pending.store_relaxed(false);
   _lock->unlock();
 }

@@ -139,7 +136,7 @@ void GCLocker::enter_slow(JavaThread* current_thread) {
     // Same as fast path.
     OrderAccess::fence();

-    if (!AtomicAccess::load(&_is_gc_request_pending)) {
+    if (!_is_gc_request_pending.load_relaxed()) {
       return;
     }

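The "Dekker duality" comment above is the heart of this file: the GC writes _is_gc_request_pending and then reads the mutators' critical-region state, while each mutator writes its critical-region state and then reads the flag, with a full fence between each side's own store and its load. Whichever order the threads race in, at least one side observes the conflict. A minimal sketch of that handshake in standard C++ (names and the single global counter are illustrative; HotSpot keeps _jni_active_critical per thread):

#include <atomic>

std::atomic<bool> gc_request_pending{false};
std::atomic<int>  in_critical_count{0};  // per-thread in HotSpot; global here

// GC side: publish the request, fence, then check for critical-region threads.
bool gc_try_block() {
  gc_request_pending.store(true, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return in_critical_count.load(std::memory_order_relaxed) == 0;
}

// Mutator side: enter the critical region, fence, then check for a pending GC.
bool mutator_try_enter() {
  in_critical_count.fetch_add(1, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
  if (gc_request_pending.load(std::memory_order_relaxed)) {
    // Back out and take the slow path (enter_slow in the real code).
    in_critical_count.fetch_sub(1, std::memory_order_relaxed);
    return false;
  }
  return true;
}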
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@

 #include "gc/shared/gcCause.hpp"
 #include "memory/allStatic.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutex.hpp"

 // GCLocker provides synchronization between the garbage collector (GC) and
@@ -43,11 +44,11 @@

 class GCLocker: public AllStatic {
   static Monitor* _lock;
-  static volatile bool _is_gc_request_pending;
+  static Atomic<bool> _is_gc_request_pending;

 #ifdef ASSERT
   // Debug-only: to track the number of java threads in critical-region.
-  static uint64_t _verify_in_cr_count;
+  static Atomic<uint64_t> _verify_in_cr_count;
 #endif
   static void enter_slow(JavaThread* current_thread);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,13 +38,13 @@ void GCLocker::enter(JavaThread* current_thread) {
     // Matching the fence in GCLocker::block.
     OrderAccess::fence();

-    if (AtomicAccess::load(&_is_gc_request_pending)) {
+    if (_is_gc_request_pending.load_relaxed()) {
       current_thread->exit_critical();
       // slow-path
       enter_slow(current_thread);
     }

-    DEBUG_ONLY(AtomicAccess::add(&_verify_in_cr_count, (uint64_t)1);)
+    DEBUG_ONLY(_verify_in_cr_count.add_then_fetch(1u);)
   } else {
     current_thread->enter_critical();
   }
@@ -55,7 +55,7 @@ void GCLocker::exit(JavaThread* current_thread) {

 #ifdef ASSERT
   if (current_thread->in_last_critical()) {
-    AtomicAccess::add(&_verify_in_cr_count, (uint64_t)-1);
+    _verify_in_cr_count.sub_then_fetch(1u);
     // Matching the loadload in GCLocker::block.
     OrderAccess::storestore();
   }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #include "gc/shared/gc_globals.hpp"
 #include "gc/shared/pretouchTask.hpp"
 #include "logging/log.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/os.hpp"
 #include "utilities/align.hpp"
@@ -52,11 +52,11 @@ size_t PretouchTask::chunk_size() {

 void PretouchTask::work(uint worker_id) {
   while (true) {
-    char* cur_start = AtomicAccess::load(&_cur_addr);
+    char* cur_start = _cur_addr.load_relaxed();
     char* cur_end = cur_start + MIN2(_chunk_size, pointer_delta(_end_addr, cur_start, 1));
     if (cur_start >= cur_end) {
       break;
-    } else if (cur_start == AtomicAccess::cmpxchg(&_cur_addr, cur_start, cur_end)) {
+    } else if (cur_start == _cur_addr.compare_exchange(cur_start, cur_end)) {
       os::pretouch_memory(cur_start, cur_end, _page_size);
     } // Else attempt to claim chunk failed, so try again.
   }

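PretouchTask::work above is a lock-free chunk-claiming loop: each worker snapshots the shared cursor, computes the end of its prospective chunk, and tries to advance the cursor with a compare-exchange; only the winner touches the claimed range, and losers simply retry. A minimal standard-C++ sketch of the same loop (std::atomic<char*> stands in for Atomic<char*>, and touch_pages is a hypothetical stand-in for os::pretouch_memory):

#include <algorithm>
#include <atomic>
#include <cstddef>

std::atomic<char*> cur_addr;  // shared claim cursor
char* end_addr;               // one past the last byte to pretouch
size_t chunk_size;            // bytes claimed per iteration
size_t page_size;

// Hypothetical stand-in for os::pretouch_memory: touch one byte per page.
void touch_pages(char* start, char* end, size_t page) {
  for (volatile char* p = start; p < end; p += page) {
    *p;  // the read faults the page in; the real pretouch may write instead
  }
}

void pretouch_worker() {
  while (true) {
    char* start = cur_addr.load(std::memory_order_relaxed);
    char* end = start + std::min(chunk_size, size_t(end_addr - start));
    if (start >= end) {
      break;  // everything has been claimed
    }
    // Claim [start, end) by advancing the cursor; on a lost race, retry.
    if (cur_addr.compare_exchange_strong(start, end)) {
      touch_pages(start, end, page_size);
    }
  }
}

The test in the diff, cur_start == _cur_addr.compare_exchange(cur_start, cur_end), is the same check in a different idiom: HotSpot's compare_exchange returns the value it observed, whereas std::atomic reports success through its bool return.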
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,9 +26,11 @@
 #define SHARE_GC_SHARED_PRETOUCH_HPP

 #include "gc/shared/workerThread.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/globalDefinitions.hpp"

 class PretouchTask : public WorkerTask {
-  char* volatile _cur_addr;
+  Atomic<char*> _cur_addr;
   char* const _end_addr;
   size_t _page_size;
   size_t _chunk_size;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@

 uint SuspendibleThreadSet::_nthreads = 0;
 uint SuspendibleThreadSet::_nthreads_stopped = 0;
-volatile bool SuspendibleThreadSet::_suspend_all = false;
+Atomic<bool> SuspendibleThreadSet::_suspend_all{false};
 double SuspendibleThreadSet::_suspend_all_start = 0.0;

 static Semaphore* _synchronize_wakeup = nullptr;
@@ -96,7 +96,7 @@ void SuspendibleThreadSet::synchronize() {
   {
     MonitorLocker ml(STS_lock, Mutex::_no_safepoint_check_flag);
     assert(!should_yield(), "Only one at a time");
-    AtomicAccess::store(&_suspend_all, true);
+    _suspend_all.store_relaxed(true);
     if (is_synchronized()) {
       return;
     }
@@ -127,6 +127,6 @@ void SuspendibleThreadSet::desynchronize() {
   MonitorLocker ml(STS_lock, Mutex::_no_safepoint_check_flag);
   assert(should_yield(), "STS not synchronizing");
   assert(is_synchronized(), "STS not synchronized");
-  AtomicAccess::store(&_suspend_all, false);
+  _suspend_all.store_relaxed(false);
   ml.notify_all();
 }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #define SHARE_GC_SHARED_SUSPENDIBLETHREADSET_HPP

 #include "memory/allocation.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"

 // A SuspendibleThreadSet is a set of threads that can be suspended.
 // A thread can join and later leave the set, and periodically yield.
@@ -43,7 +43,7 @@ class SuspendibleThreadSet : public AllStatic {
 private:
   static uint _nthreads;
   static uint _nthreads_stopped;
-  static volatile bool _suspend_all;
+  static Atomic<bool> _suspend_all;
   static double _suspend_all_start;

   static bool is_synchronized();
@@ -59,7 +59,7 @@ private:

 public:
   // Returns true if a suspension is in progress.
-  static bool should_yield() { return AtomicAccess::load(&_suspend_all); }
+  static bool should_yield() { return _suspend_all.load_relaxed(); }

   // Suspends the current thread if a suspension is in progress.
   static void yield() {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@ WorkerTaskDispatcher::WorkerTaskDispatcher() :
 void WorkerTaskDispatcher::coordinator_distribute_task(WorkerTask* task, uint num_workers) {
   // No workers are allowed to read the state variables until they have been signaled.
   _task = task;
-  _not_finished = num_workers;
+  _not_finished.store_relaxed(num_workers);

   // Dispatch 'num_workers' number of tasks.
   _start_semaphore.signal(num_workers);
@@ -51,9 +51,12 @@ void WorkerTaskDispatcher::coordinator_distribute_task(WorkerTask* task, uint num_workers) {
   _end_semaphore.wait();

   // No workers are allowed to read the state variables after the coordinator has been signaled.
-  assert(_not_finished == 0, "%d not finished workers?", _not_finished);
+#ifdef ASSERT
+  uint not_finished = _not_finished.load_relaxed();
+  assert(not_finished == 0, "%u not finished workers?", not_finished);
+#endif // ASSERT
   _task = nullptr;
-  _started = 0;
+  _started.store_relaxed(0);
 }

 void WorkerTaskDispatcher::worker_run_task() {
@@ -61,7 +64,7 @@ void WorkerTaskDispatcher::worker_run_task() {
   _start_semaphore.wait();

   // Get and set worker id.
-  const uint worker_id = AtomicAccess::fetch_then_add(&_started, 1u);
+  const uint worker_id = _started.fetch_then_add(1u);
   WorkerThread::set_worker_id(worker_id);

   // Run task.
@@ -70,7 +73,7 @@ void WorkerTaskDispatcher::worker_run_task() {

   // Mark that the worker is done with the task.
   // The worker is not allowed to read the state variables after this line.
-  const uint not_finished = AtomicAccess::sub(&_not_finished, 1u);
+  const uint not_finished = _not_finished.sub_then_fetch(1u);

   // The last worker signals to the coordinator that all work is completed.
   if (not_finished == 0) {

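The dispatcher above coordinates one task across N workers with two semaphores and two counters: _started hands out unique worker ids via fetch-then-add, and _not_finished counts down so the last finisher can wake the coordinator. A compilable sketch of the protocol using C++20 std::counting_semaphore and std::atomic in place of HotSpot's Semaphore and Atomic<uint> (SketchDispatcher is a made-up name):

#include <atomic>
#include <semaphore>

struct SketchDispatcher {
  void (*_task)(unsigned worker_id) = nullptr;
  std::atomic<unsigned> _started{0};
  std::atomic<unsigned> _not_finished{0};
  std::counting_semaphore<> _start_semaphore{0};
  std::counting_semaphore<> _end_semaphore{0};

  void coordinator_distribute_task(void (*task)(unsigned), unsigned num_workers) {
    // No worker may read the state variables until it has been signaled.
    _task = task;
    _not_finished.store(num_workers, std::memory_order_relaxed);
    _start_semaphore.release(num_workers);  // dispatch num_workers tasks
    _end_semaphore.acquire();               // wait for the last worker
    // No worker may read the state variables after the coordinator is signaled.
    _task = nullptr;
    _started.store(0, std::memory_order_relaxed);
  }

  void worker_run_task() {
    _start_semaphore.acquire();
    const unsigned worker_id = _started.fetch_add(1);  // claim a unique id
    _task(worker_id);
    // fetch_sub returns the old value: the worker that takes the count from
    // 1 to 0 is the last one, and it signals the coordinator.
    if (_not_finished.fetch_sub(1) == 1) {
      _end_semaphore.release();
    }
  }
};

The diff expresses the same last-worker test with the new-value form: sub_then_fetch(1u) == 0.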
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@

 #include "gc/shared/gcId.hpp"
 #include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/nonJavaThread.hpp"
 #include "runtime/semaphore.hpp"
 #include "utilities/debug.hpp"
@@ -58,8 +59,8 @@ class WorkerTaskDispatcher {
   // The task currently being dispatched to the WorkerThreads.
   WorkerTask* _task;

-  volatile uint _started;
-  volatile uint _not_finished;
+  Atomic<uint> _started;
+  Atomic<uint> _not_finished;

   // Semaphore used to start the WorkerThreads.
   Semaphore _start_semaphore;