Enforce memory order by adding fence

Xiaolong Peng 2026-01-16 16:24:22 -08:00
parent e6192824da
commit 5e080fb522
3 changed files with 18 additions and 6 deletions


@@ -256,6 +256,7 @@ HeapWord* ShenandoahAllocator<ALLOC_PARTITION>::allocate_in(ShenandoahHeapRegion
assert(ready_for_retire == false, "Sanity check");
if (!IS_SHARED_ALLOC_REGION) {
shenandoah_assert_heaplocked();
assert(!region->is_active_alloc_region(), "Must not");
}
HeapWord* obj = nullptr;
size_t actual_size = req.size();
@@ -337,8 +338,12 @@ int ShenandoahAllocator<ALLOC_PARTITION>::refresh_alloc_regions(ShenandoahAllocR
*obj = allocate_in<false>(reserved[i], true, *req, *in_new_region, ready_for_retire);
assert(*obj != nullptr, "Should always succeed");
satisfy_alloc_req_first = false;
// Enforce ordering here:
// allocate_in must complete before the region is marked as the active alloc region.
OrderAccess::fence();
}
reserved[i]->set_active_alloc_region();
OrderAccess::fence();
log_debug(gc, alloc)("%sAllocator: Storing heap region %li to alloc region %i",
_alloc_partition_name, reserved[i]->index(), refreshable[i]->alloc_region_index);
AtomicAccess::store(&refreshable[i]->address, reserved[i]);
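
The two fences above order the region's setup (allocate_in, then set_active_alloc_region) before the AtomicAccess::store that publishes the region pointer to other allocating threads. Below is a minimal standalone sketch of that publish-after-initialize pattern, using std::atomic and release fences in place of HotSpot's AtomicAccess and OrderAccess::fence() (which is a stronger full fence); the Region and published names are illustrative, not the JVM's:

#include <atomic>
#include <cassert>
#include <cstddef>
#include <thread>

// Minimal stand-ins; not HotSpot types.
struct Region {
  size_t used = 0;
  bool active = false;
};

std::atomic<Region*> published{nullptr};               // plays the role of refreshable[i]->address

void writer(Region* r) {
  r->used = 64;                                        // "allocate_in": mutate region state
  std::atomic_thread_fence(std::memory_order_release); // mirrors the first fence in the hunk
  r->active = true;                                    // "set_active_alloc_region"
  std::atomic_thread_fence(std::memory_order_release); // mirrors the second fence, before publishing
  published.store(r, std::memory_order_relaxed);       // publish the pointer to other threads
}

void reader() {
  Region* r = published.load(std::memory_order_acquire);
  if (r != nullptr) {
    // The acquire load pairs with the release fence + store: both earlier writes are visible.
    assert(r->active && r->used == 64);
  }
}

int main() {
  Region region;
  std::thread t1(writer, &region);
  std::thread t2(reader);
  t1.join();
  t2.join();
  return 0;
}

Without a fence (or release semantics) before the publishing store, a reader that observes the non-null pointer could still see stale region state on weakly ordered hardware, which is the window this commit closes.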


@@ -462,10 +462,15 @@ public:
return AtomicAccess::load(&_atomic_top);
}
template<bool CHECK_ATOMIC_TOP = true>
HeapWord* top() const {
HeapWord* v_top = atomic_top();
return v_top == nullptr ? AtomicAccess::load(&_top) : v_top;
if (CHECK_ATOMIC_TOP) {
HeapWord* v_top = atomic_top();
return v_top == nullptr ? AtomicAccess::load(&_top) : v_top;
}
return AtomicAccess::load(&_top);
}
void set_top(HeapWord* v) {
AtomicAccess::store(&_top, v);
}
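
The new CHECK_ATOMIC_TOP template parameter lets callers that hold the heap lock skip the _atomic_top check at compile time; they call top<false>(), as in the ShenandoahHeapRegion changes further down. A small self-contained sketch of the same idiom, with illustrative field names and uintptr_t standing in for HeapWord*:

#include <atomic>
#include <cstdint>

struct Tops {
  uintptr_t top_word = 0;                    // updated only under a lock
  std::atomic<uintptr_t> atomic_top{0};      // non-zero only while CAS allocation is enabled

  // Default instantiation consults atomic_top, since a concurrent CAS allocator
  // may be bumping it. top<false>() is for paths that hold the lock and know
  // atomic_top is unset, so the extra load and branch fold away at compile time.
  template<bool CHECK_ATOMIC_TOP = true>
  uintptr_t top() const {
    if (CHECK_ATOMIC_TOP) {
      uintptr_t v = atomic_top.load(std::memory_order_acquire);
      return v != 0 ? v : top_word;
    }
    return top_word;
  }
};

int main() {
  Tops t;
  t.top_word = 0x1000;
  return (t.top() == t.top<false>()) ? 0 : 1;  // both read 0x1000 while atomic_top is unset
}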
@@ -558,7 +563,7 @@ public:
assert(_active_alloc_region.is_unset(), "Must be");
// Sync _top to _atomic_top before setting _active_alloc_region flag to prepare for CAS allocation
assert(atomic_top() == nullptr, "Must be");
AtomicAccess::store(&_atomic_top, _top);
AtomicAccess::store(&_atomic_top, top<false>());
_active_alloc_region.set();
}
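
Activation now seeds _atomic_top from top<false>() instead of reading _top directly, so lock-free CAS allocation resumes exactly where lock-protected allocation left off. A simplified standalone sketch of that handoff, assuming a single top word and using plain std::atomic in place of AtomicAccess (the names and the cas_allocate helper are illustrative, not the JVM's):

#include <atomic>
#include <cassert>
#include <cstdint>

struct Tops {
  uintptr_t top_word = 0x1000;               // lock-protected top
  std::atomic<uintptr_t> atomic_top{0};      // 0 means "not an active alloc region"
  std::atomic<bool> active{false};

  // Analogue of the activation step: copy the current top into atomic_top
  // before flipping the active flag.
  void make_active_alloc_region() {
    assert(!active.load() && atomic_top.load() == 0);  // same preconditions as the asserts above
    atomic_top.store(top_word, std::memory_order_release);
    active.store(true, std::memory_order_release);
  }

  // Lock-free bump allocation against atomic_top while the region is active.
  uintptr_t cas_allocate(uintptr_t bytes, uintptr_t end) {
    uintptr_t cur = atomic_top.load(std::memory_order_acquire);
    while (cur != 0 && cur + bytes <= end) {
      if (atomic_top.compare_exchange_weak(cur, cur + bytes,
                                           std::memory_order_acq_rel)) {
        return cur;                          // old top is the allocation address
      }
    }
    return 0;                                // region retired or full
  }
};

int main() {
  Tops t;
  t.make_active_alloc_region();
  uintptr_t obj = t.cas_allocate(64, 0x2000);
  assert(obj == 0x1000 && t.atomic_top.load() == 0x1040);
  return 0;
}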
@@ -577,12 +582,13 @@ public:
previous_atomic_top = atomic_top();
assert(previous_atomic_top != nullptr, "Must not");
set_top(previous_atomic_top); // Sync current _atomic_top back to _top
OrderAccess::fence();
if (AtomicAccess::cmpxchg(&_atomic_top, previous_atomic_top, (HeapWord*) nullptr) == previous_atomic_top) {
// Break once _atomic_top was successfully exchanged to nullptr
break;
}
}
assert(_top == previous_atomic_top, "Must be");
assert(top<false>() == previous_atomic_top, "Must be");
_active_alloc_region.unset();
}
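
Retirement reverses the handoff: read _atomic_top, write it back to _top, fence, then try to CAS _atomic_top to nullptr; if a concurrent allocation bumped _atomic_top in the meantime, the CAS fails and the sync is redone with the new value. A simplified standalone sketch of that loop, with a seq_cst fence standing in for the OrderAccess::fence() added by this commit (names are illustrative, not the JVM's):

#include <atomic>
#include <cassert>
#include <cstdint>

struct Tops {
  uintptr_t top_word = 0;                    // lock-protected top
  std::atomic<uintptr_t> atomic_top{0x1040}; // region is currently the active alloc region

  // Analogue of the retirement loop in the hunk above.
  void retire_active_alloc_region() {
    uintptr_t prev;
    while (true) {
      prev = atomic_top.load(std::memory_order_acquire);
      top_word = prev;                       // sync atomic_top back to the locked top
      // Full fence: publish the top_word update before atomic_top is cleared.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      if (atomic_top.compare_exchange_strong(prev, 0)) {
        break;                               // nobody advanced atomic_top since it was read
      }
      // A concurrent CAS allocation advanced atomic_top; redo the sync with the new value.
    }
    assert(top_word == prev);
  }
};

int main() {
  Tops t;
  t.retire_active_alloc_region();
  assert(t.atomic_top.load() == 0 && t.top_word == 0x1040);
  return 0;
}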


@@ -107,9 +107,10 @@ HeapWord* ShenandoahHeapRegion::allocate_fill(size_t size) {
HeapWord* ShenandoahHeapRegion::allocate(size_t size, const ShenandoahAllocRequest& req) {
shenandoah_assert_heaplocked_or_safepoint();
assert(!is_active_alloc_region(), "Must not");
assert(is_object_aligned(size), "alloc size breaks alignment: %zu", size);
HeapWord* obj = top();
HeapWord* obj = top<false>();
if (pointer_delta(end(), obj) >= size) {
make_regular_allocation(req.affiliation());
adjust_alloc_metadata(req, size);
@@ -133,7 +134,7 @@ HeapWord* ShenandoahHeapRegion::allocate_lab(const ShenandoahAllocRequest& req,
size_t adjusted_size = req.size();
HeapWord* obj = nullptr;
HeapWord* old_top = top();
HeapWord* old_top = top<false>();
size_t free_words = align_down(byte_size(old_top, end()) >> LogHeapWordSize, MinObjAlignment);
if (adjusted_size > free_words) {
adjusted_size = free_words;