Enforce memory ordering for reads and writes of _atomic_top to fix a crash on macOS

Xiaolong Peng 2026-01-16 16:24:22 -08:00
parent e6192824da
commit 2d606a6215
5 changed files with 39 additions and 15 deletions
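macOS on Apple Silicon runs on AArch64, whose weak memory model exposes reorderings that x86's stronger TSO model tends to hide, which is presumably why the missing ordering around _atomic_top only surfaced as a crash there. The sketch below is not HotSpot code: it uses std::atomic and invented names to illustrate the message-passing pattern that the switch to load_acquire/release_store (and the added fences) enforces.

// Illustrative sketch only (std::atomic stand-ins, invented names).
// With plain accesses, the reader may observe 'published' as non-null yet still
// read a stale 'payload' on a weakly ordered CPU; the release/acquire pair
// forbids that, which is the ordering this commit adds around _atomic_top.
#include <atomic>

int payload = 0;                         // stands in for region state written before publication
std::atomic<int*> published{nullptr};    // stands in for _atomic_top

void writer() {
  payload = 42;                                          // prepare state under the heap lock
  published.store(&payload, std::memory_order_release);  // like AtomicAccess::release_store(&_atomic_top, ...)
}

int reader() {
  int* p = published.load(std::memory_order_acquire);    // like AtomicAccess::load_acquire(&_atomic_top)
  return p != nullptr ? *p : -1;                          // non-null implies payload == 42 is visible
}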


@@ -256,6 +256,7 @@ HeapWord* ShenandoahAllocator<ALLOC_PARTITION>::allocate_in(ShenandoahHeapRegion
assert(ready_for_retire == false, "Sanity check");
if (!IS_SHARED_ALLOC_REGION) {
shenandoah_assert_heaplocked();
assert(!region->is_active_alloc_region(), "Must not");
}
HeapWord* obj = nullptr;
size_t actual_size = req.size();
@@ -339,6 +340,9 @@ int ShenandoahAllocator<ALLOC_PARTITION>::refresh_alloc_regions(ShenandoahAllocR
satisfy_alloc_req_first = false;
}
reserved[i]->set_active_alloc_region();
// Enforce ordering here:
// set_active_alloc_region() must complete before the region is stored to the shared address
OrderAccess::fence();
log_debug(gc, alloc)("%sAllocator: Storing heap region %li to alloc region %i",
_alloc_partition_name, reserved[i]->index(), refreshable[i]->alloc_region_index);
AtomicAccess::store(&refreshable[i]->address, reserved[i]);
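The comment and fence above order region activation before the store that makes the region reachable through the shared alloc-region slot: a lock-free allocator that loads the slot must never see a region whose _atomic_top has not been seeded yet. A minimal sketch of that publication pattern, assuming std::atomic semantics and invented names rather than the allocator's real types:

// Sketch only: prepare the region first, then publish its pointer.
// A release store on the shared slot gives the ordering the explicit fence
// provides, as long as readers load the slot with acquire semantics.
#include <atomic>

struct RegionSketch {
  std::atomic<char*> atomic_top{nullptr};
  char* top = nullptr;
  void set_active_alloc_region() { atomic_top.store(top, std::memory_order_release); }
};

void publish(std::atomic<RegionSketch*>& shared_slot, RegionSketch* r) {
  r->set_active_alloc_region();                     // must complete before the region becomes visible
  shared_slot.store(r, std::memory_order_release);  // publish to the lock-free allocation path
}

RegionSketch* load_alloc_region(std::atomic<RegionSketch*>& shared_slot) {
  return shared_slot.load(std::memory_order_acquire);  // pairs with the release store in publish()
}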


@@ -3101,6 +3101,7 @@ int ShenandoahFreeSet::reserve_alloc_regions_internal(Iter iterator, int const r
for (idx_t idx = iterator.current(); iterator.has_next() && reserved_regions_count < regions_to_reserve; idx = iterator.next()) {
ShenandoahHeapRegion* r = _heap->get_region(idx);
assert(r->get_top_before_promote() == nullptr, "Must not be PIP region");
if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
continue;
}
@@ -3234,6 +3235,7 @@ ShenandoahHeapRegion* ShenandoahFreeSet::find_heap_region_for_allocation_interna
bool const use_affiliated_region_first = ALLOC_PARTITION != ShenandoahFreeSetPartitionId::Mutator;
for (idx_t idx = iterator.current(); iterator.has_next(); idx = iterator.next()) {
ShenandoahHeapRegion* r = _heap->get_region(idx);
assert(r->get_top_before_promote() == nullptr, "Must not be PIP region");
if (r->is_trash() && _heap->is_concurrent_weak_root_in_progress()) {
continue;
}


@@ -186,6 +186,7 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion
assert(region->is_regular(), "Use different service to promote humongous regions");
assert(_heap->is_tenurable(region), "Only promote regions that are sufficiently aged");
assert(region->get_top_before_promote() == tams, "Region %zu has been used for allocations before promotion", region->index());
assert(!region->is_active_alloc_region(), "Must not be atomic alloc region");
}
ShenandoahOldGeneration* const old_gen = _heap->old_generation();


@@ -459,14 +459,21 @@ public:
ShenandoahHeapRegion* humongous_start_region() const;
HeapWord* atomic_top() const {
return AtomicAccess::load(&_atomic_top);
return AtomicAccess::load_acquire(&_atomic_top);
}
template<bool CHECK_ATOMIC_TOP = true>
HeapWord* top() const {
HeapWord* v_top = atomic_top();
return v_top == nullptr ? AtomicAccess::load(&_top) : v_top;
if (CHECK_ATOMIC_TOP) {
HeapWord* v_top = atomic_top();
return v_top == nullptr ? AtomicAccess::load(&_top) : v_top;
}
assert(atomic_top() == nullptr, "Must be");
return AtomicAccess::load(&_top);
}
void set_top(HeapWord* v) {
assert(!is_active_alloc_region(), "Sanity check");
AtomicAccess::store(&_top, v);
}
@@ -555,10 +562,11 @@ public:
}
inline void set_active_alloc_region() {
shenandoah_assert_heaplocked();
assert(_active_alloc_region.is_unset(), "Must be");
// Sync _top to _atomic_top before setting _active_alloc_region flag to prepare for CAS allocation
assert(atomic_top() == nullptr, "Must be");
AtomicAccess::store(&_atomic_top, _top);
AtomicAccess::release_store(&_atomic_top, top<false>());
_active_alloc_region.set();
}
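As the comment above says, activation hands the region's top from the lock-protected _top over to the lock-free _atomic_top before the flag is raised, so any thread that sees the region as an active alloc region also sees a fully seeded top. A rough equivalent of that handoff, sketched with std::atomic and invented names:

// Sketch only (invented names): seed the lock-free top with release semantics,
// then advertise the region as active. nullptr is the "not active" sentinel.
#include <atomic>
#include <cassert>

struct ActivationSketch {
  std::atomic<char*> atomic_top{nullptr};  // lock-free top; nullptr means "not an active alloc region"
  std::atomic<bool>  active{false};
  char* top = nullptr;                     // only written while the region is inactive

  void set_active_alloc_region() {
    assert(!active.load() && atomic_top.load() == nullptr);
    atomic_top.store(top, std::memory_order_release);  // prepare for CAS allocation
    active.store(true, std::memory_order_release);     // only now raise the flag
  }
};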
@@ -572,17 +580,21 @@ public:
// Before unsetting the _active_alloc_region flag, _atomic_top needs to be reset to the sentinel value (nullptr) using AtomicAccess::cmpxchg;
// this avoids a race with the lock-free allocation path when the alloc region is removed from the allocator's alloc regions array;
// meanwhile, the latest value of _atomic_top needs to be synced back to _top.
HeapWord* previous_atomic_top = nullptr;
HeapWord* prior_atomic_top = nullptr;
HeapWord* current_atomic_top = atomic_top();
while (true /* the loop always exits via break */) {
previous_atomic_top = atomic_top();
assert(previous_atomic_top != nullptr, "Must not");
set_top(previous_atomic_top); // Sync current _atomic_top back to _top
if (AtomicAccess::cmpxchg(&_atomic_top, previous_atomic_top, (HeapWord*) nullptr) == previous_atomic_top) {
// break when successfully exchange _atomic_top to nullptr
assert(current_atomic_top != nullptr, "Must not");
AtomicAccess::store(&_top, current_atomic_top); // Sync current _atomic_top back to _top
OrderAccess::fence();
prior_atomic_top = AtomicAccess::cmpxchg(&_atomic_top, current_atomic_top, (HeapWord*) nullptr, memory_order_release);
if (prior_atomic_top == current_atomic_top) {
// break out of the loop once _atomic_top has been successfully exchanged to nullptr
break;
}
current_atomic_top = prior_atomic_top;
}
assert(_top == previous_atomic_top, "Must be");
OrderAccess::fence();
assert(top<false>() == current_atomic_top, "Value of _atomic_top must have been synced to _top");
_active_alloc_region.unset();
}
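Retirement runs the handoff in reverse: the latest CAS-allocated top is copied back to the lock-protected field, and _atomic_top is swapped back to the nullptr sentinel so racing lock-free allocations either land before the swap or see the sentinel and bail out. A rough sketch of that loop with std::atomic and invented names (the function name below is hypothetical, and a single acq_rel CAS stands in for the patch's release cmpxchg plus fences):

// Sketch only (invented names): retire the region from lock-free allocation.
#include <atomic>
#include <cassert>

struct RetireSketch {
  std::atomic<char*> atomic_top{nullptr};
  std::atomic<bool>  active{true};
  char* top = nullptr;

  void retire_alloc_region() {                       // hypothetical name
    char* current = atomic_top.load(std::memory_order_acquire);
    while (true) {
      assert(current != nullptr);
      top = current;                                 // sync the lock-free top back to _top
      char* expected = current;
      if (atomic_top.compare_exchange_strong(expected, nullptr,
                                             std::memory_order_acq_rel,
                                             std::memory_order_acquire)) {
        break;                                       // sentinel restored; no further CAS allocation succeeds
      }
      current = expected;                            // a racing allocation bumped the top; resync and retry
    }
    assert(top == current);
    active.store(false, std::memory_order_release);
  }
};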


@@ -107,9 +107,10 @@ HeapWord* ShenandoahHeapRegion::allocate_fill(size_t size) {
HeapWord* ShenandoahHeapRegion::allocate(size_t size, const ShenandoahAllocRequest& req) {
shenandoah_assert_heaplocked_or_safepoint();
assert(!is_active_alloc_region(), "Must not");
assert(is_object_aligned(size), "alloc size breaks alignment: %zu", size);
HeapWord* obj = top();
HeapWord* obj = top<false>();
if (pointer_delta(end(), obj) >= size) {
make_regular_allocation(req.affiliation());
adjust_alloc_metadata(req, size);
@@ -133,7 +134,7 @@ HeapWord* ShenandoahHeapRegion::allocate_lab(const ShenandoahAllocRequest& req,
size_t adjusted_size = req.size();
HeapWord* obj = nullptr;
HeapWord* old_top = top();
HeapWord* old_top = top<false>();
size_t free_words = align_down(byte_size(old_top, end()) >> LogHeapWordSize, MinObjAlignment);
if (adjusted_size > free_words) {
adjusted_size = free_words;
@@ -238,7 +239,7 @@ HeapWord* ShenandoahHeapRegion::allocate_lab_atomic(const ShenandoahAllocRequest
bool ShenandoahHeapRegion::try_allocate(HeapWord* const obj, size_t const size, HeapWord* &prior_atomic_top) {
HeapWord* new_top = obj + size;
if ((prior_atomic_top = AtomicAccess::cmpxchg(&_atomic_top, obj, new_top)) == obj) {
if ((prior_atomic_top = AtomicAccess::cmpxchg(&_atomic_top, obj, new_top, memory_order_release)) == obj) {
assert(is_object_aligned(new_top), "new top breaks alignment: " PTR_FORMAT, p2i(new_top));
assert(is_object_aligned(obj), "obj is not aligned: " PTR_FORMAT, p2i(obj));
return true;
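try_allocate() above is the lock-free fast path all of this ordering protects: it bumps the region's top with a CAS on _atomic_top, and the nullptr sentinel makes the CAS fail once the region has been retired. A simplified, self-contained variant with std::atomic and invented names (the retry loop here is folded into the helper for illustration; in the patch try_allocate does a single CAS and hands the prior value back to its caller):

// Sketch only (invented names): bump-pointer allocation against an atomic top.
#include <atomic>
#include <cstddef>

bool try_allocate_sketch(std::atomic<char*>& atomic_top, std::size_t size_in_bytes,
                         char* region_end, char*& out_obj) {
  char* obj = atomic_top.load(std::memory_order_acquire);
  while (obj != nullptr && obj + size_in_bytes <= region_end) {
    // On failure the CAS refreshes 'obj' with the latest top and the loop re-checks it.
    if (atomic_top.compare_exchange_weak(obj, obj + size_in_bytes,
                                         std::memory_order_acq_rel,
                                         std::memory_order_acquire)) {
      out_obj = obj;      // allocation starts at the old top
      return true;
    }
  }
  return false;           // region retired (nullptr sentinel) or not enough space left
}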
@@ -357,10 +358,14 @@ inline bool ShenandoahHeapRegion::is_affiliated() const {
}
inline void ShenandoahHeapRegion::save_top_before_promote() {
_top_before_promoted = top();
assert(!is_active_alloc_region(), "Must not");
assert(atomic_top() == nullptr, "Must be");
_top_before_promoted = top<false>();
}
inline void ShenandoahHeapRegion::restore_top_before_promote() {
assert(!is_active_alloc_region(), "Must not");
assert(atomic_top() == nullptr, "Must be");
_top = _top_before_promoted;
_top_before_promoted = nullptr;
}