From 5e080fb5229094e79d8c3206023ac3f9dae171e1 Mon Sep 17 00:00:00 2001
From: Xiaolong Peng
Date: Fri, 16 Jan 2026 16:24:22 -0800
Subject: [PATCH] Enforce memory ordering by adding fences

---
 .../share/gc/shenandoah/shenandoahAllocator.cpp    |  5 +++++
 .../share/gc/shenandoah/shenandoahHeapRegion.hpp   | 14 ++++++++++----
 .../gc/shenandoah/shenandoahHeapRegion.inline.hpp  |  5 +++--
 3 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAllocator.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAllocator.cpp
index ad7d7026803..aa4f29cc93c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahAllocator.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAllocator.cpp
@@ -256,6 +256,7 @@ HeapWord* ShenandoahAllocator::allocate_in(ShenandoahHeapRegion
   assert(ready_for_retire == false, "Sanity check");
   if (!IS_SHARED_ALLOC_REGION) {
     shenandoah_assert_heaplocked();
+    assert(!region->is_active_alloc_region(), "Must not");
   }
   HeapWord* obj = nullptr;
   size_t actual_size = req.size();
@@ -337,8 +338,12 @@ int ShenandoahAllocator::refresh_alloc_regions(ShenandoahAllocR
       *obj = allocate_in(reserved[i], true, *req, *in_new_region, ready_for_retire);
       assert(*obj != nullptr, "Should always succeed");
       satisfy_alloc_req_first = false;
+      // Enforce ordering here:
+      // allocate_in must complete before the region is set as the active alloc region.
+      OrderAccess::fence();
     }
     reserved[i]->set_active_alloc_region();
+    OrderAccess::fence();
     log_debug(gc, alloc)("%sAllocator: Storing heap region %li to alloc region %i",
                          _alloc_partition_name, reserved[i]->index(), refreshable[i]->alloc_region_index);
     AtomicAccess::store(&refreshable[i]->address, reserved[i]);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
index 9a6e33377c7..106fdecb8c7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
@@ -462,10 +462,15 @@ public:
     return AtomicAccess::load(&_atomic_top);
   }
 
+  template <bool CHECK_ATOMIC_TOP>
   HeapWord* top() const {
-    HeapWord* v_top = atomic_top();
-    return v_top == nullptr ? AtomicAccess::load(&_top) : v_top;
+    if (CHECK_ATOMIC_TOP) {
+      HeapWord* v_top = atomic_top();
+      return v_top == nullptr ? AtomicAccess::load(&_top) : v_top;
+    }
+    return AtomicAccess::load(&_top);
   }
+
   void set_top(HeapWord* v) {
     AtomicAccess::store(&_top, v);
   }
@@ -558,7 +563,7 @@ public:
     assert(_active_alloc_region.is_unset(), "Must be");
     // Sync _top to _atomic_top before setting _active_alloc_region flag to prepare for CAS allocation
     assert(atomic_top() == nullptr, "Must be");
-    AtomicAccess::store(&_atomic_top, _top);
+    AtomicAccess::store(&_atomic_top, top<false>());
     _active_alloc_region.set();
   }
 
@@ -577,12 +582,13 @@ public:
       previous_atomic_top = atomic_top();
       assert(previous_atomic_top != nullptr, "Must not");
       set_top(previous_atomic_top); // Sync current _atomic_top back to _top
+      OrderAccess::fence();
       if (AtomicAccess::cmpxchg(&_atomic_top, previous_atomic_top, (HeapWord*) nullptr) == previous_atomic_top) {
         // break when successfully exchange _atomic_top to nullptr
         break;
       }
     }
-    assert(_top == previous_atomic_top, "Must be");
+    assert(top<false>() == previous_atomic_top, "Must be");
     _active_alloc_region.unset();
   }
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
index c6879cc716f..d554e7f2da7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
@@ -107,9 +107,10 @@ HeapWord* ShenandoahHeapRegion::allocate_fill(size_t size) {
 
 HeapWord* ShenandoahHeapRegion::allocate(size_t size, const ShenandoahAllocRequest& req) {
   shenandoah_assert_heaplocked_or_safepoint();
+  assert(!is_active_alloc_region(), "Must not");
   assert(is_object_aligned(size), "alloc size breaks alignment: %zu", size);
 
-  HeapWord* obj = top();
+  HeapWord* obj = top<false>();
   if (pointer_delta(end(), obj) >= size) {
     make_regular_allocation(req.affiliation());
     adjust_alloc_metadata(req, size);
@@ -133,7 +134,7 @@ HeapWord* ShenandoahHeapRegion::allocate_lab(const ShenandoahAllocRequest& req,
 
   size_t adjusted_size = req.size();
   HeapWord* obj = nullptr;
-  HeapWord* old_top = top();
+  HeapWord* old_top = top<false>();
   size_t free_words = align_down(byte_size(old_top, end()) >> LogHeapWordSize, MinObjAlignment);
   if (adjusted_size > free_words) {
     adjusted_size = free_words;
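
Note on the pattern the patch enforces: the two fences in refresh_alloc_regions order (1) the initializing stores done by allocate_in before the region is flagged active, and (2) the flag store before the region pointer is published to the alloc-region slot, so a mutator that observes the published pointer also observes a fully initialized region. The sketch below restates that publication ordering as standalone C++ with std::atomic; all names (Region, publish_region, try_allocate, g_alloc_region) are illustrative stand-ins rather than HotSpot APIs, and seq_cst fences stand in for OrderAccess::fence().

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    struct Region {
      uintptr_t top = 0;                     // plain top, owner-synchronized (cf. _top)
      std::atomic<uintptr_t> atomic_top{0};  // CAS allocation pointer (cf. _atomic_top)
      std::atomic<bool> active{false};       // cf. _active_alloc_region
      uintptr_t end = 0;
    };

    std::atomic<Region*> g_alloc_region{nullptr};  // cf. the alloc region slot

    // Refresher side (cf. refresh_alloc_regions): initialize fully, then publish.
    void publish_region(Region* r, uintptr_t bottom, uintptr_t limit) {
      r->top = bottom;                                      // allocate_in side effects
      r->end = limit;
      std::atomic_thread_fence(std::memory_order_seq_cst);  // first fence in the patch
      r->atomic_top.store(bottom, std::memory_order_relaxed);
      r->active.store(true, std::memory_order_relaxed);     // set_active_alloc_region()
      std::atomic_thread_fence(std::memory_order_seq_cst);  // second fence in the patch
      g_alloc_region.store(r, std::memory_order_relaxed);   // publish to the slot
    }

    // Mutator side: a thread that sees the published pointer must also see the
    // initialized region state, so the CAS below never races with initialization.
    uintptr_t try_allocate(size_t size_bytes) {
      Region* r = g_alloc_region.load(std::memory_order_acquire);
      if (r == nullptr) return 0;
      uintptr_t old_top = r->atomic_top.load(std::memory_order_relaxed);
      while (old_top + size_bytes <= r->end) {
        // compare_exchange_weak reloads old_top on failure, so the bound is rechecked.
        if (r->atomic_top.compare_exchange_weak(old_top, old_top + size_bytes)) {
          return old_top;
        }
      }
      return 0;  // region exhausted; caller would refresh the alloc regions
    }

    int main() {
      static uintptr_t backing[1024];
      Region r;
      publish_region(&r, reinterpret_cast<uintptr_t>(backing),
                     reinterpret_cast<uintptr_t>(backing + 1024));
      return try_allocate(64) != 0 ? 0 : 1;
    }

In the C++ memory model, a release store of the region pointer paired with the acquire load would express the same guarantee; OrderAccess::fence() is a stronger full two-way fence, which matches how the surrounding HotSpot code already expresses ordering.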