Mirror of https://github.com/openjdk/jdk.git, synced 2026-01-28 03:58:21 +00:00
8375093: Convert GlobalCounter to use Atomic<T>
Reviewed-by: dholmes, iwalulya
commit 496af3cf47
parent f2d5290c29
--- a/src/hotspot/share/runtime/thread.cpp
+++ b/src/hotspot/share/runtime/thread.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -36,6 +36,7 @@
 #include "memory/resourceArea.hpp"
 #include "nmt/memTracker.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/atomicAccess.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaThread.inline.hpp"
@@ -82,7 +83,7 @@ Thread::Thread(MemTag mem_tag) {
   _threads_hazard_ptr = nullptr;
   _threads_list_ptr = nullptr;
   _nested_threads_hazard_ptr_cnt = 0;
-  _rcu_counter = 0;
+  _rcu_counter.store_relaxed(0);
 
   // the handle mark links itself to last_handle_mark
   new HandleMark(this);
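The constructor change above is the pattern for the whole conversion: the plain assignment _rcu_counter = 0; on a volatile field becomes an explicit store_relaxed(0), so every access now spells out its memory ordering. Below is a minimal standalone model of such a wrapper, built on std::atomic purely for illustration; HotSpot's real Atomic<T> sits on its own AtomicAccess layer, and only the member names that appear in this diff (store_relaxed, load_relaxed, load_acquire, release_store, release_store_fence, add_then_fetch) are taken from it.

#include <atomic>

// Illustrative stand-in for HotSpot's Atomic<T>, not the real implementation.
template <typename T>
class AtomicModel {
  std::atomic<T> _value{};
 public:
  void store_relaxed(T v)   { _value.store(v, std::memory_order_relaxed); }
  T    load_relaxed() const { return _value.load(std::memory_order_relaxed); }
  T    load_acquire() const { return _value.load(std::memory_order_acquire); }
  void release_store(T v)   { _value.store(v, std::memory_order_release); }
  // Release store followed by a full fence: the store is also ordered
  // against subsequent loads (StoreLoad).
  void release_store_fence(T v) {
    _value.store(v, std::memory_order_release);
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }
  // Fully-fenced read-modify-write returning the updated value
  // (only meaningful for integral T).
  T add_then_fetch(T i) { return _value.fetch_add(i, std::memory_order_seq_cst) + i; }
};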
--- a/src/hotspot/share/runtime/thread.hpp
+++ b/src/hotspot/share/runtime/thread.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -30,6 +30,7 @@
 #include "gc/shared/threadLocalAllocBuffer.hpp"
 #include "jni.h"
 #include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/atomicAccess.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/os.hpp"
@@ -238,9 +239,9 @@ class Thread: public ThreadShadow {
 
   // Support for GlobalCounter
  private:
-  volatile uintx _rcu_counter;
+  Atomic<uintx> _rcu_counter;
  public:
-  volatile uintx* get_rcu_counter() {
+  Atomic<uintx>* get_rcu_counter() {
     return &_rcu_counter;
   }
 
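With the field typed as Atomic<uintx>, the getter now returns Atomic<uintx>* and call sites move from AtomicAccess free functions over a raw volatile pointer to member calls. A sketch of the call-site shape, reusing the AtomicModel stand-in from above (ThreadSketch is invented for the example):

#include <cstdint>

typedef std::uintptr_t uintx;  // HotSpot's uintx is a word-sized unsigned integer

struct ThreadSketch {                // invented stand-in for class Thread
  AtomicModel<uintx> _rcu_counter;   // was: volatile uintx _rcu_counter;
  AtomicModel<uintx>* get_rcu_counter() { return &_rcu_counter; }
};

uintx read_counter(ThreadSketch* thread) {
  // Before: AtomicAccess::load_acquire(thread->get_rcu_counter());
  return thread->get_rcu_counter()->load_acquire();
}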
--- a/src/hotspot/share/utilities/globalCounter.cpp
+++ b/src/hotspot/share/utilities/globalCounter.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  */
 
 #include "memory/iterator.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/javaThread.hpp"
 #include "runtime/threadSMR.inline.hpp"
 #include "runtime/vmThread.hpp"
@@ -41,7 +41,7 @@ class GlobalCounter::CounterThreadCheck : public ThreadClosure {
     SpinYield yield;
     // Loops on this thread until it has exited the critical read section.
     while(true) {
-      uintx cnt = AtomicAccess::load_acquire(thread->get_rcu_counter());
+      uintx cnt = thread->get_rcu_counter()->load_acquire();
       // This checks if the thread's counter is active. And if so is the counter
       // for a pre-existing reader (belongs to this grace period). A pre-existing
       // reader will have a lower counter than the global counter version for this
@@ -57,9 +57,9 @@ class GlobalCounter::CounterThreadCheck : public ThreadClosure {
 };
 
 void GlobalCounter::write_synchronize() {
-  assert((*Thread::current()->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "must be outside a critcal section");
-  // AtomicAccess::add must provide fence since we have storeload dependency.
-  uintx gbl_cnt = AtomicAccess::add(&_global_counter._counter, COUNTER_INCREMENT);
+  assert((Thread::current()->get_rcu_counter()->load_relaxed() & COUNTER_ACTIVE) == 0x0, "must be outside a critcal section");
+  // Atomic add must provide fence since we have storeload dependency.
+  uintx gbl_cnt = _global_counter._counter.add_then_fetch(COUNTER_INCREMENT);
 
   // Do all RCU threads.
   CounterThreadCheck ctc(gbl_cnt);
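write_synchronize bumps the global version with a fully-fenced add_then_fetch and then spins, per thread, until no pre-existing reader remains. A hedged model of the test described by the comments in the hunk above; the bit value of COUNTER_ACTIVE and the plain < comparison are assumptions for illustration, and the real check must also tolerate counter wrap-around:

#include <cstdint>

typedef std::uintptr_t uintx;

const uintx COUNTER_ACTIVE = 1;  // assumed: low bit marks an active reader

// True if cnt belongs to a reader that entered its critical section before
// the global counter was bumped to gbl_cnt, i.e. a reader this grace period
// must wait for.
bool is_pre_existing_reader(uintx cnt, uintx gbl_cnt) {
  if ((cnt & COUNTER_ACTIVE) == 0) return false;  // not in a read section
  return (cnt & ~COUNTER_ACTIVE) < (gbl_cnt & ~COUNTER_ACTIVE);
}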
--- a/src/hotspot/share/utilities/globalCounter.hpp
+++ b/src/hotspot/share/utilities/globalCounter.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 #include "memory/allStatic.hpp"
 #include "memory/padded.hpp"
+#include "runtime/atomic.hpp"
 
 class Thread;
 
@@ -47,7 +48,7 @@ class GlobalCounter : public AllStatic {
   // counter is on a separate cacheline.
   struct PaddedCounter {
     DEFINE_PAD_MINUS_SIZE(0, DEFAULT_PADDING_SIZE, 0);
-    volatile uintx _counter;
+    Atomic<uintx> _counter;
     DEFINE_PAD_MINUS_SIZE(1, DEFAULT_PADDING_SIZE, sizeof(volatile uintx));
   };
 
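Swapping volatile uintx for Atomic<uintx> inside PaddedCounter leaves the trailing pad computed from sizeof(volatile uintx), which stays correct only while the wrapper is layout-identical to the raw field it replaced. A sketch of that layout assumption, using the AtomicModel stand-in from above and an assumed 64-byte padding (HotSpot's DEFAULT_PADDING_SIZE is platform-dependent):

#include <cstddef>
#include <cstdint>

const std::size_t kPadding = 64;  // assumed cache-line-sized padding

struct PaddedCounterModel {
  char _pad0[kPadding];                            // keep the counter off the
  AtomicModel<std::uintptr_t> _counter;            // preceding cache line...
  char _pad1[kPadding - sizeof(std::uintptr_t)];   // ...and off the next one
};

// The pad arithmetic relies on the wrapper adding no size; this holds for
// std::atomic on mainstream platforms and is assumed for Atomic<T> here.
static_assert(sizeof(AtomicModel<std::uintptr_t>) == sizeof(std::uintptr_t),
              "atomic wrapper must be layout-compatible with the raw field");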
--- a/src/hotspot/share/utilities/globalCounter.inline.hpp
+++ b/src/hotspot/share/utilities/globalCounter.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,30 +27,29 @@
 
 #include "utilities/globalCounter.hpp"
 
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/javaThread.hpp"
 
 inline GlobalCounter::CSContext
 GlobalCounter::critical_section_begin(Thread *thread) {
   assert(thread == Thread::current(), "must be current thread");
-  uintx old_cnt = AtomicAccess::load(thread->get_rcu_counter());
+  uintx old_cnt = thread->get_rcu_counter()->load_relaxed();
   // Retain the old counter value if already active, e.g. nested.
   // Otherwise, set the counter to the current version + active bit.
   uintx new_cnt = old_cnt;
   if ((new_cnt & COUNTER_ACTIVE) == 0) {
-    new_cnt = AtomicAccess::load(&_global_counter._counter) | COUNTER_ACTIVE;
+    new_cnt = _global_counter._counter.load_relaxed() | COUNTER_ACTIVE;
   }
-  AtomicAccess::release_store_fence(thread->get_rcu_counter(), new_cnt);
+  thread->get_rcu_counter()->release_store_fence(new_cnt);
   return static_cast<CSContext>(old_cnt);
 }
 
 inline void
 GlobalCounter::critical_section_end(Thread *thread, CSContext context) {
   assert(thread == Thread::current(), "must be current thread");
-  assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section");
+  assert((thread->get_rcu_counter()->load_relaxed() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section");
   // Restore the counter value from before the associated begin.
-  AtomicAccess::release_store(thread->get_rcu_counter(),
-                              static_cast<uintx>(context));
+  thread->get_rcu_counter()->release_store(static_cast<uintx>(context));
 }
 
 class GlobalCounter::CriticalSection {
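The CriticalSection class declared at the end of this hunk is the RAII wrapper around critical_section_begin/critical_section_end. A usage sketch of the overall protocol; the GlobalCounter calls are the real API, while Item, _head, unlink, and use are invented for the example:

struct Item { int payload; };  // invented payload type
extern Item* _head;            // invented RCU-protected pointer
Item* unlink(Item* head);      // invented: detach a node so no new reader finds it
void use(Item* item);          // invented reader work

void reader_example() {
  // RAII enter/exit of the read-side critical section.
  GlobalCounter::CriticalSection cs(Thread::current());
  Item* item = _head;          // read of RCU-protected data
  use(item);                   // safe until cs leaves scope
}

void writer_example() {
  Item* detached = unlink(_head);       // 1. make the node unreachable
  GlobalCounter::write_synchronize();   // 2. wait out all pre-existing readers
  delete detached;                      // 3. no reader can still hold it
}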