From 3e181485709d108ef3d1e6b595fbd95ecc8ef74a Mon Sep 17 00:00:00 2001
From: Thomas Schatzl
Date: Mon, 19 Jan 2026 09:02:33 +0000
Subject: [PATCH] 8375439: G1: Convert G1MonotonicArena class to use Atomic

Reviewed-by: stefank, iwalulya
---
 src/hotspot/share/gc/g1/g1MonotonicArena.cpp  | 55 +++++++++----------
 src/hotspot/share/gc/g1/g1MonotonicArena.hpp  | 47 ++++++++--------
 .../share/gc/g1/g1MonotonicArena.inline.hpp   | 11 ++--
 3 files changed, 56 insertions(+), 57 deletions(-)

diff --git a/src/hotspot/share/gc/g1/g1MonotonicArena.cpp b/src/hotspot/share/gc/g1/g1MonotonicArena.cpp
index a9c6462680f..3f97870a67f 100644
--- a/src/hotspot/share/gc/g1/g1MonotonicArena.cpp
+++ b/src/hotspot/share/gc/g1/g1MonotonicArena.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,6 @@
 
 #include "gc/g1/g1MonotonicArena.inline.hpp"
 
 #include "memory/allocation.hpp"
-#include "runtime/atomicAccess.hpp"
 #include "runtime/vmOperations.hpp"
 #include "utilities/globalCounter.inline.hpp"
 
@@ -61,13 +60,13 @@ void G1MonotonicArena::SegmentFreeList::bulk_add(Segment& first,
                                                  size_t num,
                                                  size_t mem_size) {
   _list.prepend(first, last);
-  AtomicAccess::add(&_num_segments, num, memory_order_relaxed);
-  AtomicAccess::add(&_mem_size, mem_size, memory_order_relaxed);
+  _num_segments.add_then_fetch(num, memory_order_relaxed);
+  _mem_size.add_then_fetch(mem_size, memory_order_relaxed);
 }
 
 void G1MonotonicArena::SegmentFreeList::print_on(outputStream* out, const char* prefix) {
   out->print_cr("%s: segments %zu size %zu",
-                prefix, AtomicAccess::load(&_num_segments), AtomicAccess::load(&_mem_size));
+                prefix, _num_segments.load_relaxed(), _mem_size.load_relaxed());
 }
 
 G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get_all(size_t& num_segments,
@@ -75,12 +74,12 @@ G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get_all(size_t& nu
   GlobalCounter::CriticalSection cs(Thread::current());
 
   Segment* result = _list.pop_all();
-  num_segments = AtomicAccess::load(&_num_segments);
-  mem_size = AtomicAccess::load(&_mem_size);
+  num_segments = _num_segments.load_relaxed();
+  mem_size = _mem_size.load_relaxed();
 
   if (result != nullptr) {
-    AtomicAccess::sub(&_num_segments, num_segments, memory_order_relaxed);
-    AtomicAccess::sub(&_mem_size, mem_size, memory_order_relaxed);
+    _num_segments.sub_then_fetch(num_segments, memory_order_relaxed);
+    _mem_size.sub_then_fetch(mem_size, memory_order_relaxed);
   }
   return result;
 }
@@ -96,8 +95,8 @@ void G1MonotonicArena::SegmentFreeList::free_all() {
     Segment::delete_segment(cur);
   }
 
-  AtomicAccess::sub(&_num_segments, num_freed, memory_order_relaxed);
-  AtomicAccess::sub(&_mem_size, mem_size_freed, memory_order_relaxed);
+  _num_segments.sub_then_fetch(num_freed, memory_order_relaxed);
+  _mem_size.sub_then_fetch(mem_size_freed, memory_order_relaxed);
 }
 
 G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
@@ -115,7 +114,7 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
   }
 
   // Install it as current allocation segment.
-  Segment* old = AtomicAccess::cmpxchg(&_first, prev, next);
+  Segment* old = _first.compare_exchange(prev, next);
   if (old != prev) {
     // Somebody else installed the segment, use that one.
     Segment::delete_segment(next);
@@ -126,9 +125,9 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
       _last = next;
     }
     // Successfully installed the segment into the list.
-    AtomicAccess::inc(&_num_segments, memory_order_relaxed);
-    AtomicAccess::add(&_mem_size, next->mem_size(), memory_order_relaxed);
-    AtomicAccess::add(&_num_total_slots, next->num_slots(), memory_order_relaxed);
+    _num_segments.add_then_fetch(1u, memory_order_relaxed);
+    _mem_size.add_then_fetch(next->mem_size(), memory_order_relaxed);
+    _num_total_slots.add_then_fetch(next->num_slots(), memory_order_relaxed);
     return next;
   }
 }
@@ -155,7 +154,7 @@ uint G1MonotonicArena::slot_size() const {
 }
 
 void G1MonotonicArena::drop_all() {
-  Segment* cur = AtomicAccess::load_acquire(&_first);
+  Segment* cur = _first.load_acquire();
 
   if (cur != nullptr) {
     assert(_last != nullptr, "If there is at least one segment, there must be a last one.");
@@ -175,25 +174,25 @@ void G1MonotonicArena::drop_all() {
       cur = next;
     }
 #endif
-    assert(num_segments == _num_segments, "Segment count inconsistent %u %u", num_segments, _num_segments);
-    assert(mem_size == _mem_size, "Memory size inconsistent");
+    assert(num_segments == _num_segments.load_relaxed(), "Segment count inconsistent %u %u", num_segments, _num_segments.load_relaxed());
+    assert(mem_size == _mem_size.load_relaxed(), "Memory size inconsistent");
     assert(last == _last, "Inconsistent last segment");
 
-    _segment_free_list->bulk_add(*first, *_last, _num_segments, _mem_size);
+    _segment_free_list->bulk_add(*first, *_last, _num_segments.load_relaxed(), _mem_size.load_relaxed());
   }
 
-  _first = nullptr;
+  _first.store_relaxed(nullptr);
   _last = nullptr;
-  _num_segments = 0;
-  _mem_size = 0;
-  _num_total_slots = 0;
-  _num_allocated_slots = 0;
+  _num_segments.store_relaxed(0);
+  _mem_size.store_relaxed(0);
+  _num_total_slots.store_relaxed(0);
+  _num_allocated_slots.store_relaxed(0);
 }
 
 void* G1MonotonicArena::allocate() {
   assert(slot_size() > 0, "instance size not set.");
 
-  Segment* cur = AtomicAccess::load_acquire(&_first);
+  Segment* cur = _first.load_acquire();
   if (cur == nullptr) {
     cur = new_segment(cur);
   }
@@ -201,7 +200,7 @@
   while (true) {
     void* slot = cur->allocate_slot();
     if (slot != nullptr) {
-      AtomicAccess::inc(&_num_allocated_slots, memory_order_relaxed);
+      _num_allocated_slots.add_then_fetch(1u, memory_order_relaxed);
       guarantee(is_aligned(slot, _alloc_options->slot_alignment()),
                 "result " PTR_FORMAT " not aligned at %u", p2i(slot), _alloc_options->slot_alignment());
       return slot;
@@ -213,7 +212,7 @@
 }
 
 uint G1MonotonicArena::num_segments() const {
-  return AtomicAccess::load(&_num_segments);
+  return _num_segments.load_relaxed();
 }
 
 #ifdef ASSERT
@@ -238,7 +237,7 @@ uint G1MonotonicArena::calculate_length() const {
 
 template <typename SegmentClosure>
 void G1MonotonicArena::iterate_segments(SegmentClosure& closure) const {
-  Segment* cur = AtomicAccess::load_acquire(&_first);
+  Segment* cur = _first.load_acquire();
 
   assert((cur != nullptr) == (_last != nullptr),
          "If there is at least one segment, there must be a last one");
diff --git a/src/hotspot/share/gc/g1/g1MonotonicArena.hpp b/src/hotspot/share/gc/g1/g1MonotonicArena.hpp
index 211820c5254..d8e658b5a64 100644
--- a/src/hotspot/share/gc/g1/g1MonotonicArena.hpp
+++ b/src/hotspot/share/gc/g1/g1MonotonicArena.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -28,6 +28,7 @@
 
 #include "gc/shared/freeListAllocator.hpp"
 #include "nmt/memTag.hpp"
+#include "runtime/atomic.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/lockFreeStack.hpp"
 
@@ -65,27 +66,27 @@ private:
   // AllocOptions provides parameters for Segment sizing and expansion.
   const AllocOptions* _alloc_options;
 
-  Segment* volatile _first;    // The (start of the) list of all segments.
-  Segment* _last;              // The last segment of the list of all segments.
-  volatile uint _num_segments; // Number of assigned segments to this allocator.
-  volatile size_t _mem_size;   // Memory used by all segments.
+  Atomic<Segment*> _first;     // The (start of the) list of all segments.
+  Segment* _last;              // The last segment of the list of all segments.
+  Atomic<uint> _num_segments;  // Number of assigned segments to this allocator.
+  Atomic<size_t> _mem_size;    // Memory used by all segments.
 
   SegmentFreeList* _segment_free_list; // The global free segment list to preferentially
                                        // get new segments from.
 
-  volatile uint _num_total_slots;     // Number of slots available in all segments (allocated + not yet used).
-  volatile uint _num_allocated_slots; // Number of total slots allocated ever (including free and pending).
+  Atomic<uint> _num_total_slots;      // Number of slots available in all segments (allocated + not yet used).
+  Atomic<uint> _num_allocated_slots;  // Number of total slots allocated ever (including free and pending).
 
   inline Segment* new_segment(Segment* const prev);
 
   DEBUG_ONLY(uint calculate_length() const;)
 
 public:
-  const Segment* first_segment() const { return AtomicAccess::load(&_first); }
+  const Segment* first_segment() const { return _first.load_relaxed(); }
 
-  uint num_total_slots() const { return AtomicAccess::load(&_num_total_slots); }
+  uint num_total_slots() const { return _num_total_slots.load_relaxed(); }
 
   uint num_allocated_slots() const {
-    uint allocated = AtomicAccess::load(&_num_allocated_slots);
+    uint allocated = _num_allocated_slots.load_relaxed();
     assert(calculate_length() == allocated, "Must be");
     return allocated;
   }
@@ -116,11 +117,11 @@ static constexpr uint SegmentPayloadMaxAlignment = 8;
 class alignas(SegmentPayloadMaxAlignment) G1MonotonicArena::Segment {
   const uint _slot_size;
   const uint _num_slots;
-  Segment* volatile _next;
+  Atomic<Segment*> _next;
   // Index into the next free slot to allocate into. Full if equal (or larger)
   // to _num_slots (can be larger because we atomically increment this value and
   // check only afterwards if the allocation has been successful).
-  uint volatile _next_allocate;
+  Atomic<uint> _next_allocate;
   const MemTag _mem_tag;
 
   static size_t header_size() { return align_up(sizeof(Segment), SegmentPayloadMaxAlignment); }
@@ -139,21 +140,21 @@ class alignas(SegmentPayloadMaxAlignment) G1MonotonicArena::Segment {
   Segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag);
   ~Segment() = default;
 public:
-  Segment* volatile* next_addr() { return &_next; }
+  Atomic<Segment*>* next_addr() { return &_next; }
 
   void* allocate_slot();
 
   uint num_slots() const { return _num_slots; }
 
-  Segment* next() const { return _next; }
+  Segment* next() const { return _next.load_relaxed(); }
 
   void set_next(Segment* next) {
     assert(next != this, " loop condition");
-    _next = next;
+    _next.store_relaxed(next);
   }
 
   void reset(Segment* next) {
-    _next_allocate = 0;
+    _next_allocate.store_relaxed(0);
     assert(next != this, " loop condition");
     set_next(next);
     memset(payload(0), 0, payload_size());
@@ -166,7 +167,7 @@ public:
   uint length() const {
     // _next_allocate might grow larger than _num_slots in multi-thread environments
     // due to races.
-    return MIN2(_next_allocate, _num_slots);
+    return MIN2(_next_allocate.load_relaxed(), _num_slots);
   }
 
   static size_t size_in_bytes(uint slot_size, uint num_slots) {
@@ -176,7 +177,7 @@ public:
   static Segment* create_segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag);
   static void delete_segment(Segment* segment);
-  bool is_full() const { return _next_allocate >= _num_slots; }
+  bool is_full() const { return _next_allocate.load_relaxed() >= _num_slots; }
 };
 
 static_assert(alignof(G1MonotonicArena::Segment) >= SegmentPayloadMaxAlignment,
               "assert alignment of Segment (and indirectly its payload)");
@@ -186,15 +187,15 @@ static_assert(alignof(G1MonotonicArena::Segment) >= SegmentPayloadMaxAlignment,
 // performed by multiple threads concurrently.
 // Counts and memory usage are current on a best-effort basis if accessed concurrently.
 class G1MonotonicArena::SegmentFreeList {
-  static Segment* volatile* next_ptr(Segment& segment) {
+  static Atomic<Segment*>* next_ptr(Segment& segment) {
     return segment.next_addr();
   }
 
   using SegmentStack = LockFreeStack<Segment, &next_ptr>;
   SegmentStack _list;
 
-  volatile size_t _num_segments;
-  volatile size_t _mem_size;
+  Atomic<size_t> _num_segments;
+  Atomic<size_t> _mem_size;
 
 public:
   SegmentFreeList() : _list(), _num_segments(0), _mem_size(0) { }
@@ -210,8 +211,8 @@ public:
 
   void print_on(outputStream* out, const char* prefix = "");
 
-  size_t num_segments() const { return AtomicAccess::load(&_num_segments); }
-  size_t mem_size() const { return AtomicAccess::load(&_mem_size); }
+  size_t num_segments() const { return _num_segments.load_relaxed(); }
+  size_t mem_size() const { return _mem_size.load_relaxed(); }
 };
 
 // Configuration for G1MonotonicArena, e.g slot size, slot number of next Segment.
diff --git a/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp b/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp
index dd9ccae1849..cf1b35ccead 100644
--- a/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -28,14 +28,13 @@
 
 #include "gc/g1/g1MonotonicArena.hpp"
 
-#include "runtime/atomicAccess.hpp"
 #include "utilities/globalCounter.inline.hpp"
 
 inline void* G1MonotonicArena::Segment::allocate_slot() {
-  if (_next_allocate >= _num_slots) {
+  if (_next_allocate.load_relaxed() >= _num_slots) {
     return nullptr;
   }
-  uint result = AtomicAccess::fetch_then_add(&_next_allocate, 1u, memory_order_relaxed);
+  uint result = _next_allocate.fetch_then_add(1u, memory_order_relaxed);
   if (result >= _num_slots) {
     return nullptr;
   }
@@ -48,8 +47,8 @@ inline G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get() {
 
   Segment* result = _list.pop();
   if (result != nullptr) {
-    AtomicAccess::dec(&_num_segments, memory_order_relaxed);
-    AtomicAccess::sub(&_mem_size, result->mem_size(), memory_order_relaxed);
+    _num_segments.sub_then_fetch(1u, memory_order_relaxed);
+    _mem_size.sub_then_fetch(result->mem_size(), memory_order_relaxed);
  }
  return result;
}
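
Note (not part of the patch): the conversion above preserves two lock-free
idioms that are easier to study in isolation. Below is a minimal, standalone
C++ sketch using std::atomic rather than HotSpot's Atomic<T> wrapper;
BumpSegment and try_install are invented names for this illustration only.
It shows the claim-then-recheck slot allocation of Segment::allocate_slot()
(a relaxed fetch-add may push the counter past capacity, so each thread must
validate the index it claimed) and the install-or-discard head update of
new_segment() (exactly one CAS from the observed head succeeds).

  #include <atomic>
  #include <cstdio>

  class BumpSegment {
    const unsigned _num_slots;
    std::atomic<unsigned> _next_allocate{0};  // index of the next free slot

  public:
    explicit BumpSegment(unsigned num_slots) : _num_slots(num_slots) {}

    // Claim-then-recheck: the relaxed fetch_add hands out indices, and racing
    // threads may bump the counter beyond _num_slots, so the claimed index is
    // checked afterwards, mirroring Segment::allocate_slot() above.
    int allocate_slot() {
      if (_next_allocate.load(std::memory_order_relaxed) >= _num_slots) {
        return -1;  // cheap reject without an atomic read-modify-write
      }
      unsigned result = _next_allocate.fetch_add(1u, std::memory_order_relaxed);
      if (result >= _num_slots) {
        return -1;  // lost the race; the segment filled up meanwhile
      }
      return (int)result;  // exclusive ownership of this slot index
    }
  };

  // Install-or-discard, analogous to new_segment(): one CAS from the observed
  // head wins; a loser would delete its speculative segment and use the
  // winner's instead.
  bool try_install(std::atomic<BumpSegment*>& first,
                   BumpSegment* expected,
                   BumpSegment* next) {
    return first.compare_exchange_strong(expected, next);
  }

  int main() {
    BumpSegment seg(2);
    int a = seg.allocate_slot();
    int b = seg.allocate_slot();
    int c = seg.allocate_slot();
    std::printf("%d %d %d\n", a, b, c);  // prints "0 1 -1"

    std::atomic<BumpSegment*> head{nullptr};
    BumpSegment next(4);
    bool installed = try_install(head, nullptr, &next);  // true: head was null
    bool lost = try_install(head, nullptr, &next);       // false: already set
    std::printf("%d %d\n", (int)installed, (int)lost);   // prints "1 0"
    return 0;
  }

One deliberate simplification: this sketch uses relaxed ordering throughout,
whereas the patch loads the list head with load_acquire() so that a thread
observing a newly installed segment also observes its initialized contents.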