From bcc72bdb73ef63a5101c9dd40e234e80c27fb9b7 Mon Sep 17 00:00:00 2001
From: Thomas Schatzl
Date: Thu, 22 Jan 2026 15:58:44 +0100
Subject: [PATCH] 8376115

Hi all,

  please review this change that converts the members of `G1CMRootMemRegions`
from `volatile` fields accessed via `AtomicAccess` to the `Atomic` wrapper
class.

Testing: gha

Thanks,
  Thomas
---
 src/hotspot/share/gc/g1/g1ConcurrentMark.cpp | 30 ++++++++++----------
 src/hotspot/share/gc/g1/g1ConcurrentMark.hpp | 12 ++++----
 2 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index 52591f7ce5f..06d45ac8f59 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -382,12 +382,12 @@ G1CMRootMemRegions::~G1CMRootMemRegions() {
 }
 
 void G1CMRootMemRegions::reset() {
-  _num_root_regions = 0;
+  _num_root_regions.store_relaxed(0);
 }
 
 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
   assert_at_safepoint();
-  size_t idx = AtomicAccess::fetch_then_add(&_num_root_regions, 1u);
+  size_t idx = _num_root_regions.fetch_then_add(1u);
   assert(idx < _max_regions, "Trying to add more root MemRegions than there is space %zu", _max_regions);
   assert(start != nullptr && end != nullptr && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
          "end (" PTR_FORMAT ")", p2i(start), p2i(end));
@@ -398,36 +398,36 @@ void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
 void G1CMRootMemRegions::prepare_for_scan() {
   assert(!scan_in_progress(), "pre-condition");
 
-  _scan_in_progress = _num_root_regions > 0;
+  _scan_in_progress.store_relaxed(num_root_regions() > 0);
 
-  _claimed_root_regions = 0;
-  _should_abort = false;
+  _claimed_root_regions.store_relaxed(0);
+  _should_abort.store_relaxed(false);
 }
 
 const MemRegion* G1CMRootMemRegions::claim_next() {
-  if (_should_abort) {
+  if (_should_abort.load_relaxed()) {
     // If someone has set the should_abort flag, we return null to
     // force the caller to bail out of their loop.
     return nullptr;
   }
 
-  if (_claimed_root_regions >= _num_root_regions) {
+  if (_claimed_root_regions.load_relaxed() >= num_root_regions()) {
     return nullptr;
   }
 
-  size_t claimed_index = AtomicAccess::fetch_then_add(&_claimed_root_regions, 1u);
-  if (claimed_index < _num_root_regions) {
+  size_t claimed_index = _claimed_root_regions.fetch_then_add(1u);
+  if (claimed_index < num_root_regions()) {
     return &_root_regions[claimed_index];
   }
   return nullptr;
 }
 
 uint G1CMRootMemRegions::num_root_regions() const {
-  return (uint)_num_root_regions;
+  return (uint)_num_root_regions.load_relaxed();
 }
 
 bool G1CMRootMemRegions::contains(const MemRegion mr) const {
-  for (uint i = 0; i < _num_root_regions; i++) {
+  for (uint i = 0; i < num_root_regions(); i++) {
     if (_root_regions[i].equals(mr)) {
       return true;
     }
@@ -437,7 +437,7 @@ bool G1CMRootMemRegions::contains(const MemRegion mr) const {
 
 void G1CMRootMemRegions::notify_scan_done() {
   MutexLocker x(G1RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
-  _scan_in_progress = false;
+  _scan_in_progress.store_relaxed(false);
   G1RootRegionScan_lock->notify_all();
 }
 
@@ -448,10 +448,10 @@ void G1CMRootMemRegions::cancel_scan() {
 void G1CMRootMemRegions::scan_finished() {
   assert(scan_in_progress(), "pre-condition");
 
-  if (!_should_abort) {
-    assert(_claimed_root_regions >= num_root_regions(),
+  if (!_should_abort.load_relaxed()) {
+    assert(_claimed_root_regions.load_relaxed() >= num_root_regions(),
            "we should have claimed all root regions, claimed %zu, length = %u",
-           _claimed_root_regions, num_root_regions());
+           _claimed_root_regions.load_relaxed(), num_root_regions());
   }
 
   notify_scan_done();
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
index 52a1b133439..8be724a84dd 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
@@ -290,12 +290,12 @@ class G1CMRootMemRegions {
   MemRegion* _root_regions;
   size_t const _max_regions;
 
-  volatile size_t _num_root_regions; // Actual number of root regions.
+  Atomic<size_t> _num_root_regions; // Actual number of root regions.
 
-  volatile size_t _claimed_root_regions; // Number of root regions currently claimed.
+  Atomic<size_t> _claimed_root_regions; // Number of root regions currently claimed.
 
-  volatile bool _scan_in_progress;
-  volatile bool _should_abort;
+  Atomic<bool> _scan_in_progress;
+  Atomic<bool> _should_abort;
 
   void notify_scan_done();
 
@@ -312,11 +312,11 @@ public:
   void prepare_for_scan();
 
   // Forces get_next() to return null so that the iteration aborts early.
-  void abort() { _should_abort.store_relaxed(true); }
+  void abort() { _should_abort.store_relaxed(true); }
 
   // Return true if the CM thread are actively scanning root regions,
   // false otherwise.
-  bool scan_in_progress() { return _scan_in_progress; }
+  bool scan_in_progress() { return _scan_in_progress.load_relaxed(); }
 
   // Claim the next root MemRegion to scan atomically, or return null if
   // all have been claimed.
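
P.S.: for reviewers who have not looked at the `Atomic` wrapper yet, below is a
minimal stand-alone sketch of the pattern this patch moves to. It is illustrative
only and not part of the change; the stand-in `Atomic` class here is an assumption
modelled on `std::atomic`, not the actual HotSpot implementation, and only the
relaxed operations used in this patch (`store_relaxed()`, `load_relaxed()`,
`fetch_then_add()`) are shown:

    // Illustrative stand-in for the Atomic<T> wrapper used in this patch.
    // Only the relaxed operations exercised by G1CMRootMemRegions are modeled.
    #include <atomic>
    #include <cassert>
    #include <cstddef>

    template <typename T>
    class Atomic {
      std::atomic<T> _value;
    public:
      Atomic() : _value(T()) {}
      T load_relaxed() const  { return _value.load(std::memory_order_relaxed); }
      void store_relaxed(T v) { _value.store(v, std::memory_order_relaxed); }
      // Returns the value before the addition, like AtomicAccess::fetch_then_add().
      T fetch_then_add(T add) { return _value.fetch_add(add, std::memory_order_relaxed); }
    };

    int main() {
      Atomic<size_t> num_root_regions;      // mirrors _num_root_regions
      Atomic<size_t> claimed_root_regions;  // mirrors _claimed_root_regions

      // reset() / add(): publish one root region.
      num_root_regions.store_relaxed(0);
      size_t idx = num_root_regions.fetch_then_add(1u);
      assert(idx == 0);

      // claim_next(): a claim is only valid while the returned index stays
      // below the number of published regions.
      claimed_root_regions.store_relaxed(0);
      size_t claimed_index = claimed_root_regions.fetch_then_add(1u);
      return (claimed_index < num_root_regions.load_relaxed()) ? 0 : 1;
    }

As in `claim_next()`, `fetch_then_add()` yields the index before the increment, so
a claim is only valid while it is below the published region count.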