8374350: Convert hotspot gtests to use Atomic<T>
Reviewed-by: aboldtch, iwalulya
parent df5b49e604
commit e27309f10d
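The conversion applied throughout the hunks below is mechanical: a field declared volatile and accessed through the free AtomicAccess functions becomes an Atomic<T> whose operations are member calls with the memory ordering spelled out in the method name. A minimal before/after sketch of the pattern, using a hypothetical counter rather than code from any of the touched files (the member names are the ones that appear in this commit):

    // Before: volatile field, free-function atomics.
    volatile int _count;
    AtomicAccess::add(&_count, 1);
    int v = AtomicAccess::load(&_count);

    // After: Atomic<T> wrapper, ordering explicit in the member name.
    Atomic<int> _count{0};
    _count.add_then_fetch(1);
    int v = _count.load_relaxed();
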
@@ -1,4 +1,5 @@
 /*
  * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -23,20 +24,21 @@
  */

 #include "cds/archiveUtils.hpp"
+#include "runtime/atomic.hpp"
 #include "unittest.hpp"

 class TestArchiveWorkerTask : public ArchiveWorkerTask {
 private:
-  volatile int _sum;
-  int _max;
+  Atomic<int> _sum;
+  Atomic<int> _max;
 public:
   TestArchiveWorkerTask() : ArchiveWorkerTask("Test"), _sum(0), _max(0) {}
   void work(int chunk, int max_chunks) override {
-    AtomicAccess::add(&_sum, chunk);
-    AtomicAccess::store(&_max, max_chunks);
+    _sum.add_then_fetch(chunk);
+    _max.store_relaxed(max_chunks);
   }
-  int sum() { return AtomicAccess::load(&_sum); }
-  int max() { return AtomicAccess::load(&_max); }
+  int sum() { return _sum.load_relaxed(); }
+  int max() { return _max.load_relaxed(); }
 };

 // Test a repeated cycle of workers init/shutdown without task works.

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,9 +22,10 @@
  *
  */

+#include "cppstdlib/new.hpp"
 #include "gc/g1/g1BatchedTask.hpp"
 #include "gc/shared/workerThread.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "unittest.hpp"

 class G1BatchedTaskWorkers : AllStatic {
@@ -49,26 +50,26 @@ WorkerThreads* G1BatchedTaskWorkers::_workers = nullptr;

 class G1TestSubTask : public G1AbstractSubTask {
   mutable uint _phase;
-  volatile uint _num_do_work; // Amount of do_work() has been called.
+  Atomic<uint> _num_do_work; // Amount of do_work() has been called.

   void check_and_inc_phase(uint expected) const {
     ASSERT_EQ(_phase, expected);
     _phase++;
   }

-  bool volatile* _do_work_called_by;
+  Atomic<bool>* _do_work_called_by;

 protected:
   uint _max_workers;

   void do_work_called(uint worker_id) {
-    AtomicAccess::inc(&_num_do_work);
-    bool orig_value = AtomicAccess::cmpxchg(&_do_work_called_by[worker_id], false, true);
+    _num_do_work.add_then_fetch(1u);
+    bool orig_value = _do_work_called_by[worker_id].compare_exchange(false, true);
     ASSERT_EQ(orig_value, false);
   }

   void verify_do_work_called_by(uint num_workers) {
-    ASSERT_EQ(AtomicAccess::load(&_num_do_work), num_workers);
+    ASSERT_EQ(_num_do_work.load_relaxed(), num_workers);
     // Do not need to check the _do_work_called_by array. The count is already verified
     // by above statement, and we already check that a given flag is only set once.
   }
@@ -86,7 +87,7 @@ public:

   ~G1TestSubTask() {
     check_and_inc_phase(3);
-    FREE_C_HEAP_ARRAY(bool, _do_work_called_by);
+    FREE_C_HEAP_ARRAY(Atomic<bool>, _do_work_called_by);
   }

   double worker_cost() const override {
@@ -100,9 +101,9 @@ public:
     assert(max_workers >= 1, "must be");
     check_and_inc_phase(2);

-    _do_work_called_by = NEW_C_HEAP_ARRAY(bool, max_workers, mtInternal);
+    _do_work_called_by = NEW_C_HEAP_ARRAY(Atomic<bool>, max_workers, mtInternal);
     for (uint i = 0; i < max_workers; i++) {
-      _do_work_called_by[i] = false;
+      ::new (&_do_work_called_by[i]) Atomic<bool>{false};
     }
     _max_workers = max_workers;
   }

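One part of the hunk above is more than a type rename: NEW_C_HEAP_ARRAY hands back raw, uninitialized C-heap storage, so the Atomic<bool> elements are now constructed in place with placement new instead of being plain-assigned, which is also why cppstdlib/new.hpp is included. A small sketch of the same idea with hypothetical names, assuming the HotSpot allocation macros used in the hunk:

    // Raw storage from the C heap; no element constructors have run yet.
    Atomic<bool>* flags = NEW_C_HEAP_ARRAY(Atomic<bool>, n, mtInternal);
    for (uint i = 0; i < n; i++) {
      ::new (&flags[i]) Atomic<bool>{false};  // construct each slot in place
    }
    // ... use flags[i].compare_exchange(false, true), flags[i].load_relaxed(), ...
    FREE_C_HEAP_ARRAY(Atomic<bool>, flags);
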
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
 #include "gc/shared/workerThread.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
 #include "unittest.hpp"
 #include "utilities/powerOfTwo.hpp"

@@ -385,8 +386,8 @@ void G1CardSetTest::cardset_basic_test() {
 class G1CardSetMtTestTask : public WorkerTask {
   G1CardSet* _card_set;

-  size_t _added;
-  size_t _found;
+  Atomic<size_t> _added;
+  Atomic<size_t> _found;

 public:
   G1CardSetMtTestTask(G1CardSet* card_set) :
@@ -413,12 +414,12 @@ public:
         found++;
       }
     }
-    AtomicAccess::add(&_added, added);
-    AtomicAccess::add(&_found, found);
+    _added.add_then_fetch(added);
+    _found.add_then_fetch(found);
   }

-  size_t added() const { return _added; }
-  size_t found() const { return _found; }
+  size_t added() const { return _added.load_relaxed(); }
+  size_t found() const { return _found.load_relaxed(); }
 };

 void G1CardSetTest::cardset_mt_test() {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "gc/shared/workerThread.hpp"
 #include "memory/memoryReserver.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "unittest.hpp"

@@ -51,7 +51,7 @@ WorkerThreads* G1MapperWorkers::_workers = nullptr;

 class G1TestCommitUncommit : public WorkerTask {
   G1RegionToSpaceMapper* _mapper;
-  uint _claim_id;
+  Atomic<uint> _claim_id;
 public:
   G1TestCommitUncommit(G1RegionToSpaceMapper* mapper) :
     WorkerTask("Stress mapper"),
@@ -59,7 +59,7 @@ public:
     _claim_id(0) { }

   void work(uint worker_id) {
-    uint index = AtomicAccess::fetch_then_add(&_claim_id, 1u);
+    uint index = _claim_id.fetch_then_add(1u);

     for (int i = 0; i < 100000; i++) {
       // Stress commit and uncommit of a single region. The same

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@

 #include "gc/shared/bufferNode.hpp"
 #include "memory/allocation.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/semaphore.inline.hpp"
 #include "runtime/thread.hpp"
@@ -116,16 +116,16 @@ public:
 class BufferNode::TestSupport::AllocatorThread : public JavaTestThread {
   BufferNode::Allocator* _allocator;
   CompletedList* _cbl;
-  volatile size_t* _total_allocations;
-  volatile bool* _continue_running;
+  Atomic<size_t>* _total_allocations;
+  Atomic<bool>* _continue_running;
   size_t _allocations;

 public:
   AllocatorThread(Semaphore* post,
                   BufferNode::Allocator* allocator,
                   CompletedList* cbl,
-                  volatile size_t* total_allocations,
-                  volatile bool* continue_running) :
+                  Atomic<size_t>* total_allocations,
+                  Atomic<bool>* continue_running) :
     JavaTestThread(post),
     _allocator(allocator),
     _cbl(cbl),
@@ -135,14 +135,14 @@ public:
   {}

   virtual void main_run() {
-    while (AtomicAccess::load_acquire(_continue_running)) {
+    while (_continue_running->load_acquire()) {
       BufferNode* node = _allocator->allocate();
       _cbl->push(node);
       ++_allocations;
       ThreadBlockInVM tbiv(this); // Safepoint check.
     }
     tty->print_cr("allocations: %zu", _allocations);
-    AtomicAccess::add(_total_allocations, _allocations);
+    _total_allocations->add_then_fetch(_allocations);
   }
 };

@@ -151,13 +151,13 @@ public:
 class BufferNode::TestSupport::ProcessorThread : public JavaTestThread {
   BufferNode::Allocator* _allocator;
   CompletedList* _cbl;
-  volatile bool* _continue_running;
+  Atomic<bool>* _continue_running;

 public:
   ProcessorThread(Semaphore* post,
                   BufferNode::Allocator* allocator,
                   CompletedList* cbl,
-                  volatile bool* continue_running) :
+                  Atomic<bool>* continue_running) :
     JavaTestThread(post),
     _allocator(allocator),
     _cbl(cbl),
@@ -172,7 +172,7 @@ public:
         _allocator->release(node);
       } else if (shutdown_requested) {
         return;
-      } else if (!AtomicAccess::load_acquire(_continue_running)) {
+      } else if (!_continue_running->load_acquire()) {
         // To avoid a race that could leave buffers in the list after this
         // thread has shut down, continue processing until the list is empty
         // *after* the shut down request has been received.
@@ -193,9 +193,9 @@ static void run_test(BufferNode::Allocator* allocator, CompletedList* cbl) {
   constexpr uint milliseconds_to_run = 1000;

   Semaphore post;
-  volatile size_t total_allocations = 0;
-  volatile bool allocator_running = true;
-  volatile bool processor_running = true;
+  Atomic<size_t> total_allocations{0};
+  Atomic<bool> allocator_running{true};
+  Atomic<bool> processor_running{true};

   ProcessorThread* proc_threads[num_processor_threads] = {};
   for (uint i = 0; i < num_processor_threads; ++i) {
@@ -222,18 +222,18 @@ static void run_test(BufferNode::Allocator* allocator, CompletedList* cbl) {
     ThreadInVMfromNative invm(this_thread);
     this_thread->sleep(milliseconds_to_run);
   }
-  AtomicAccess::release_store(&allocator_running, false);
+  allocator_running.release_store(false);
   for (uint i = 0; i < num_allocator_threads; ++i) {
     ThreadInVMfromNative invm(this_thread);
     post.wait_with_safepoint_check(this_thread);
   }
-  AtomicAccess::release_store(&processor_running, false);
+  processor_running.release_store(false);
   for (uint i = 0; i < num_processor_threads; ++i) {
     ThreadInVMfromNative invm(this_thread);
     post.wait_with_safepoint_check(this_thread);
   }
   ASSERT_TRUE(BufferNode::TestSupport::try_transfer_pending(allocator));
-  tty->print_cr("total allocations: %zu", total_allocations);
+  tty->print_cr("total allocations: %zu", total_allocations.load_relaxed());
   tty->print_cr("allocator free count: %zu", allocator->free_count());
 }

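Note that the conversion above preserves the original orderings one-for-one: the shutdown flags that were written with AtomicAccess::release_store and read with AtomicAccess::load_acquire become release_store and load_acquire member calls on Atomic<bool>. A hedged sketch of that stop-flag shape, with hypothetical names:

    // Controller side: publish the shutdown request.
    Atomic<bool> running{true};
    running.release_store(false);

    // Worker side: poll until the request is observed.
    while (running.load_acquire()) {
      // ... do one unit of work ...
    }
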
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
  */

 #include "gc/shared/workerThread.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/os.hpp"
 #include "runtime/semaphore.hpp"
@@ -1169,12 +1170,12 @@ TEST_VM(ConcurrentHashTable, concurrent_mt_bulk_delete) {
 class CHTParallelScanTask: public WorkerTask {
   TestTable* _cht;
   TestTable::ScanTask* _scan_task;
-  size_t *_total_scanned;
+  Atomic<size_t>* _total_scanned;

 public:
   CHTParallelScanTask(TestTable* cht,
                       TestTable::ScanTask* bc,
-                      size_t *total_scanned) :
+                      Atomic<size_t>* total_scanned) :
     WorkerTask("CHT Parallel Scan"),
     _cht(cht),
     _scan_task(bc),
@@ -1184,7 +1185,7 @@ public:
   void work(uint worker_id) {
     ChtCountScan par_scan;
     _scan_task->do_safepoint_scan(par_scan);
-    AtomicAccess::add(_total_scanned, par_scan._count);
+    _total_scanned->add_then_fetch(par_scan._count);
   }
 };

@@ -1217,13 +1218,14 @@ public:
   {}

   void doit() {
-    size_t total_scanned = 0;
+    Atomic<size_t> total_scanned{0};
     TestTable::ScanTask scan_task(_cht, 64);

     CHTParallelScanTask task(_cht, &scan_task, &total_scanned);
     CHTWorkers::run_task(&task);

-    EXPECT_TRUE(total_scanned == (size_t)_num_items) << " Should scan all inserted items: " << total_scanned;
+    EXPECT_TRUE(total_scanned.load_relaxed() == (size_t)_num_items)
+        << " Should scan all inserted items: " << total_scanned.load_relaxed();
   }
 };

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -21,7 +21,7 @@
  * questions.
  */

-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "utilities/globalCounter.inline.hpp"
 #include "utilities/spinYield.hpp"
@@ -39,8 +39,8 @@ enum NestedTestState {
 };

 class RCUNestedThread : public JavaTestThread {
-  volatile NestedTestState _state;
-  volatile bool _proceed;
+  Atomic<NestedTestState> _state;
+  Atomic<bool> _proceed;

 protected:
   RCUNestedThread(Semaphore* post) :
@@ -52,21 +52,21 @@ protected:
   ~RCUNestedThread() {}

   void set_state(NestedTestState new_state) {
-    AtomicAccess::release_store(&_state, new_state);
+    _state.release_store(new_state);
   }

   void wait_with_state(NestedTestState new_state) {
     SpinYield spinner;
-    AtomicAccess::release_store(&_state, new_state);
-    while (!AtomicAccess::load_acquire(&_proceed)) {
+    _state.release_store(new_state);
+    while (!_proceed.load_acquire()) {
       spinner.wait();
     }
-    AtomicAccess::release_store(&_proceed, false);
+    _proceed.release_store(false);
   }

 public:
   NestedTestState state() const {
-    return AtomicAccess::load_acquire(&_state);
+    return _state.load_acquire();
   }

   void wait_for_state(NestedTestState goal) {
@@ -77,7 +77,7 @@ public:
   }

   void proceed() {
-    AtomicAccess::release_store(&_proceed, true);
+    _proceed.release_store(true);
   }
 };

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,7 +22,7 @@
  *
  */

-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.hpp"
@@ -36,16 +36,16 @@

 class SingleWriterSynchronizerTestReader : public JavaTestThread {
   SingleWriterSynchronizer* _synchronizer;
-  volatile uintx* _synchronized_value;
-  volatile int* _continue_running;
+  Atomic<uintx>* _synchronized_value;
+  Atomic<int>* _continue_running;

   static const uint reader_iterations = 10;

 public:
   SingleWriterSynchronizerTestReader(Semaphore* post,
                                      SingleWriterSynchronizer* synchronizer,
-                                     volatile uintx* synchronized_value,
-                                     volatile int* continue_running) :
+                                     Atomic<uintx>* synchronized_value,
+                                     Atomic<int>* continue_running) :
     JavaTestThread(post),
     _synchronizer(synchronizer),
     _synchronized_value(synchronized_value),
@@ -55,14 +55,14 @@ public:
   virtual void main_run() {
     size_t iterations = 0;
     size_t values_changed = 0;
-    while (AtomicAccess::load_acquire(_continue_running) != 0) {
+    while (_continue_running->load_acquire() != 0) {
       { ThreadBlockInVM tbiv(this); } // Safepoint check outside critical section.
       ++iterations;
       SingleWriterSynchronizer::CriticalSection cs(_synchronizer);
-      uintx value = AtomicAccess::load_acquire(_synchronized_value);
+      uintx value = _synchronized_value->load_acquire();
       uintx new_value = value;
       for (uint i = 0; i < reader_iterations; ++i) {
-        new_value = AtomicAccess::load_acquire(_synchronized_value);
+        new_value = _synchronized_value->load_acquire();
         // A reader can see either the value it first read after
         // entering the critical section, or that value + 1. No other
         // values are possible.
@@ -81,14 +81,14 @@ public:

 class SingleWriterSynchronizerTestWriter : public JavaTestThread {
   SingleWriterSynchronizer* _synchronizer;
-  volatile uintx* _synchronized_value;
-  volatile int* _continue_running;
+  Atomic<uintx>* _synchronized_value;
+  Atomic<int>* _continue_running;

 public:
   SingleWriterSynchronizerTestWriter(Semaphore* post,
                                      SingleWriterSynchronizer* synchronizer,
-                                     volatile uintx* synchronized_value,
-                                     volatile int* continue_running) :
+                                     Atomic<uintx>* synchronized_value,
+                                     Atomic<int>* continue_running) :
     JavaTestThread(post),
     _synchronizer(synchronizer),
     _synchronized_value(synchronized_value),
@@ -96,12 +96,12 @@ public:
   {}

   virtual void main_run() {
-    while (AtomicAccess::load_acquire(_continue_running) != 0) {
-      ++*_synchronized_value;
+    while (_continue_running->load_acquire() != 0) {
+      _synchronized_value->add_then_fetch(1u, memory_order_relaxed);
      _synchronizer->synchronize();
       { ThreadBlockInVM tbiv(this); } // Safepoint check.
     }
-    tty->print_cr("writer iterations: %zu", *_synchronized_value);
+    tty->print_cr("writer iterations: %zu", _synchronized_value->load_relaxed());
   }
 };

@@ -111,8 +111,8 @@ const uint milliseconds_to_run = 1000;
 TEST_VM(TestSingleWriterSynchronizer, stress) {
   Semaphore post;
   SingleWriterSynchronizer synchronizer;
-  volatile uintx synchronized_value = 0;
-  volatile int continue_running = 1;
+  Atomic<uintx> synchronized_value{0};
+  Atomic<int> continue_running{1};

   JavaTestThread* readers[nreaders] = {};
   for (uint i = 0; i < nreaders; ++i) {
@@ -137,7 +137,7 @@ TEST_VM(TestSingleWriterSynchronizer, stress) {
     ThreadInVMfromNative invm(cur);
     cur->sleep(milliseconds_to_run);
   }
-  continue_running = 0;
+  continue_running.store_relaxed(0);
   for (uint i = 0; i < nreaders + 1; ++i) {
     post.wait();
   }

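The writer loop above is the one place where the rewrite changes more than spelling: the old code bumped the shared value with a plain ++ on a volatile, while the new code makes the read-modify-write explicit as add_then_fetch(1u, memory_order_relaxed) on the Atomic<uintx>. A hedged sketch of the difference, with a hypothetical variable:

    volatile uintx value = 0;        // before: ++value compiles to a separate load, add, and store
    ++value;

    Atomic<uintx> value{0};          // after: an explicit relaxed atomic increment
    value.add_then_fetch(1u, memory_order_relaxed);
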
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -21,23 +21,23 @@
  * questions.
  */

-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "utilities/spinYield.hpp"
 #include "utilities/waitBarrier.hpp"
 #include "threadHelper.inline.hpp"

-static volatile int wait_tag = 0;
-static volatile int valid_value = 0;
+static Atomic<int> wait_tag{0};
+static Atomic<int> valid_value{0};

 template <typename WaitBarrierImpl>
 class WBThread : public JavaTestThread {
 public:
-  static volatile bool _exit;
+  static Atomic<bool> _exit;
   WaitBarrierType<WaitBarrierImpl>* _wait_barrier;
   Semaphore* _wrt_start;
-  volatile int _on_barrier;
+  Atomic<int> _on_barrier;

   WBThread(Semaphore* post, WaitBarrierType<WaitBarrierImpl>* wb, Semaphore* wrt_start)
     : JavaTestThread(post), _wait_barrier(wb), _wrt_start(wrt_start) {};
@@ -46,12 +46,12 @@ public:
     _wrt_start->signal();
     int vv, tag;
     // Similar to how a JavaThread would stop in a safepoint.
-    while (!_exit) {
+    while (!_exit.load_relaxed()) {
       // Load the published tag.
-      tag = AtomicAccess::load_acquire(&wait_tag);
+      tag = wait_tag.load_acquire();
       // Publish the tag this thread is going to wait for.
-      AtomicAccess::release_store(&_on_barrier, tag);
-      if (_on_barrier == 0) {
+      _on_barrier.release_store(tag);
+      if (_on_barrier.load_relaxed() == 0) {
         SpinPause();
         continue;
       }
@@ -59,15 +59,15 @@ public:
       // Wait until we are woken.
       _wait_barrier->wait(tag);
       // Verify that we do not see an invalid value.
-      vv = AtomicAccess::load_acquire(&valid_value);
+      vv = valid_value.load_acquire();
       ASSERT_EQ((vv & 0x1), 0);
-      AtomicAccess::release_store(&_on_barrier, 0);
+      _on_barrier.release_store(0);
     }
   }
 };

 template <typename WaitBarrierImpl>
-volatile bool WBThread<WaitBarrierImpl>::_exit = false;
+Atomic<bool> WBThread<WaitBarrierImpl>::_exit{false};

 template <typename WaitBarrierImpl>
 class WBArmerThread : public JavaTestThread {
@@ -103,35 +103,35 @@ public:
       // Arm next tag.
       wb.arm(next_tag);
       // Publish tag.
-      AtomicAccess::release_store_fence(&wait_tag, next_tag);
+      wait_tag.release_store_fence(next_tag);

       // Wait until threads picked up new tag.
-      while (reader1->_on_barrier != wait_tag ||
-             reader2->_on_barrier != wait_tag ||
-             reader3->_on_barrier != wait_tag ||
-             reader4->_on_barrier != wait_tag) {
+      while (reader1->_on_barrier.load_relaxed() != wait_tag.load_relaxed() ||
+             reader2->_on_barrier.load_relaxed() != wait_tag.load_relaxed() ||
+             reader3->_on_barrier.load_relaxed() != wait_tag.load_relaxed() ||
+             reader4->_on_barrier.load_relaxed() != wait_tag.load_relaxed()) {
         SpinPause();
       }

       // Set an invalid value.
-      AtomicAccess::release_store(&valid_value, valid_value + 1); // odd
+      valid_value.release_store(valid_value.load_relaxed() + 1); // odd
       os::naked_yield();
       // Set a valid value.
-      AtomicAccess::release_store(&valid_value, valid_value + 1); // even
+      valid_value.release_store(valid_value.load_relaxed() + 1); // even
       // Publish inactive tag.
-      AtomicAccess::release_store_fence(&wait_tag, 0); // Stores in WB must not float up.
+      wait_tag.release_store_fence(0); // Stores in WB must not float up.
       wb.disarm();

       // Wait until threads done valid_value verification.
-      while (reader1->_on_barrier != 0 ||
-             reader2->_on_barrier != 0 ||
-             reader3->_on_barrier != 0 ||
-             reader4->_on_barrier != 0) {
+      while (reader1->_on_barrier.load_relaxed() != 0 ||
+             reader2->_on_barrier.load_relaxed() != 0 ||
+             reader3->_on_barrier.load_relaxed() != 0 ||
+             reader4->_on_barrier.load_relaxed() != 0) {
         SpinPause();
       }
       ++next_tag;
     }
-    WBThread<WaitBarrierImpl>::_exit = true;
+    WBThread<WaitBarrierImpl>::_exit.store_relaxed(true);
     for (int i = 0; i < NUMBER_OF_READERS; i++) {
       post.wait();
     }
@@ -139,13 +139,13 @@ public:
 };

 TEST_VM(WaitBarrier, default_wb) {
-  WBThread<WaitBarrierDefault>::_exit = false;
+  WBThread<WaitBarrierDefault>::_exit.store_relaxed(false);
   mt_test_doer<WBArmerThread<WaitBarrierDefault> >();
 }

 #if defined(LINUX)
 TEST_VM(WaitBarrier, generic_wb) {
-  WBThread<GenericWaitBarrier>::_exit = false;
+  WBThread<GenericWaitBarrier>::_exit.store_relaxed(false);
   mt_test_doer<WBArmerThread<GenericWaitBarrier> >();
 }
 #endif