From 475bdac7cda7b8210dc9192fbc4a994a0afeb93d Mon Sep 17 00:00:00 2001 From: Xiaolong Peng Date: Tue, 13 Jan 2026 14:55:31 -0800 Subject: [PATCH] While eagerly refreshing alloc regions, the thread should not yield to a safepoint because it is holding an uninitialized new object --- .../gc/shenandoah/shenandoahAllocator.cpp | 8 +++--- .../gc/shenandoah/shenandoahAllocator.hpp | 25 ++++++++++++------- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAllocator.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAllocator.cpp index 8e3e1ba32e0..397577cb284 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAllocator.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAllocator.cpp @@ -141,10 +141,12 @@ HeapWord* ShenandoahAllocator::attempt_allocation(ShenandoahAll // Slow path under heap lock obj = attempt_allocation_slow(req, in_new_region, regions_ready_for_refresh, old_epoch_id); } else { - // Eagerly refresh alloc regions if there are 50% or more of alloc regions ready for retire - ShenandoahHeapLocker locker(ShenandoahHeap::heap()->lock(), _yield_to_safepoint); + // Eagerly refresh alloc regions if there are 50% or more of alloc regions ready for retire. + // While holding an uninitialized new object, the thread MUST NOT yield to safepoint.
+ ShenandoahHeapLocker locker(ShenandoahHeap::heap()->lock(), false); + ShenandoahHeapAccountingUpdater accounting_updater(_free_set, ALLOC_PARTITION); if (_epoch_id == old_epoch_id) { - reserve_alloc_regions(); + accounting_updater._need_update = refresh_alloc_regions() > 0; } } return obj; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAllocator.hpp b/src/hotspot/share/gc/shenandoah/shenandoahAllocator.hpp index ca5981305e8..5d776387e37 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAllocator.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAllocator.hpp @@ -64,14 +64,15 @@ protected: // for non GC worker, the value is calculated with random like: abs(os::random()) % _alloc_region_count. uint alloc_start_index(); - // Attempt to allocate memory to satisfy alloc request. - // If _alloc_region_count is not 0, it will try to allocate in shared alloc regions first with atomic operations w/o - // the need of global heap lock(fast path); when fast path fails, it will call attempt_allocation_slow which takes - // global heap lock and try to refresh shared alloc regions if they are not refreshed by other mutator thread. - // If _alloc_region_count is 0, no shared alloc region will be reserved, allocation is always done with global heap lock held. + // Attempt to allocate memory to satisfy a non-humongous allocation request. + // The function is the main entry point of non-humongous allocation work; it tries fast-path allocation by calling + // attempt_allocation_in_alloc_regions to directly allocate from shared alloc regions. + // When fast-path allocation fails, it will call attempt_allocation_slow which acquires heap lock. + // When fast-path allocation succeeds and it determines that >=50% of alloc regions are ready to retire, it calls + // refresh_alloc_regions to eagerly retire and refill those alloc regions. HeapWord* attempt_allocation(ShenandoahAllocRequest& req, bool& in_new_region); - // Slow path of allocation attempt. 
When fast path trying to allocate in shared alloc regions fails attempt_allocation_slow will + // Slow path of allocation work. When the fast path trying to allocate in shared alloc regions fails, attempt_allocation_slow will // be called to refresh shared alloc regions and allocate memory for the alloc request. HeapWord* attempt_allocation_slow(ShenandoahAllocRequest& req, bool& in_new_region, uint regions_ready_for_refresh, uint32_t old_epoch_id); @@ -137,14 +138,17 @@ public: virtual void reserve_alloc_regions(); }; -/* - * Allocator impl for mutator - */ +// Allocator impl for mutator: +// 1. _yield_to_safepoint is set to true, +// 2. _alloc_region_count is configured by flag ShenandoahMutatorAllocRegions class ShenandoahMutatorAllocator : public ShenandoahAllocator { public: ShenandoahMutatorAllocator(ShenandoahFreeSet* free_set); }; +// Allocator impl for collector: +// 1. _yield_to_safepoint is set to false, +// 2. _alloc_region_count is configured by flag ShenandoahCollectorAllocRegions class ShenandoahCollectorAllocator : public ShenandoahAllocator { public: ShenandoahCollectorAllocator(ShenandoahFreeSet* free_set); @@ -156,6 +160,9 @@ public: class ShenandoahOldCollectorAllocator : public ShenandoahAllocator { public: ShenandoahOldCollectorAllocator(ShenandoahFreeSet* free_set); + // Overrides ShenandoahAllocator::allocate function for the OldCollector partition. + // It delegates allocation work to ShenandoahFreeSet::allocate_for_collector; + // in addition, after allocation it handles PLAB and remembered set related work which is needed only for old gen. HeapWord* allocate(ShenandoahAllocRequest& req, bool& in_new_region) override; };