diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAllocator.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAllocator.cpp index 1ef146f83b6..4a7818924b6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAllocator.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAllocator.cpp @@ -123,7 +123,7 @@ HeapWord* ShenandoahAllocator::attempt_allocation_slow(Shenando uint regions_ready_for_refresh = 0u; // Attempt to allocate in shared alloc regions after taking heap lock, // because other mutator may have refreshed shared alloc regions - HeapWord* obj = attempt_allocation_in_alloc_regions(req, in_new_region, alloc_start_index(), regions_ready_for_refresh); + HeapWord* obj = attempt_allocation_in_alloc_regions<true>(req, in_new_region, alloc_start_index(), regions_ready_for_refresh); if (obj != nullptr) { return obj; } @@ -182,6 +182,7 @@ HeapWord* ShenandoahAllocator::attempt_allocation_from_free_set } template +template <bool HOLDING_HEAP_LOCK> HeapWord* ShenandoahAllocator::attempt_allocation_in_alloc_regions(ShenandoahAllocRequest &req, bool &in_new_region, uint const alloc_start_index, @@ -189,7 +190,7 @@ HeapWord* ShenandoahAllocator::attempt_allocation_in_alloc_regi assert(regions_ready_for_refresh == 0u && in_new_region == false && alloc_start_index < _alloc_region_count, "Sanity check"); uint i = alloc_start_index; do { - if (ShenandoahHeapRegion* r = nullptr; (r = AtomicAccess::load(&_alloc_regions[i].address)) != nullptr) { + if (ShenandoahHeapRegion* r = nullptr; (r = HOLDING_HEAP_LOCK ? 
_alloc_regions[i].address : AtomicAccess::load(&_alloc_regions[i].address)) != nullptr) { bool ready_for_retire = false; HeapWord* obj = allocate_in(r, true, req, in_new_region, ready_for_retire); if (ready_for_retire) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAllocator.hpp b/src/hotspot/share/gc/shenandoah/shenandoahAllocator.hpp index 18e5f2878e8..8fa3b107941 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAllocator.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAllocator.hpp @@ -71,6 +71,7 @@ protected: // Attempt to allocate in a shared alloc region using atomic operation without holding the heap lock. // Returns nullptr and overwrites regions_ready_for_refresh with the number of shared alloc regions that are ready // to be retired if it is unable to satisfy the allocation request from the existing shared alloc regions. + template <bool HOLDING_HEAP_LOCK> HeapWord* attempt_allocation_in_alloc_regions(ShenandoahAllocRequest& req, bool& in_new_region, uint const alloc_start_index, uint &regions_ready_for_refresh); // Allocate in a region, use atomic operations if template parameter ATOMIC is true.