Add _volatile_top for atomic allocation in heap regions without the heap lock, to address a potential race condition when refreshing alloc regions

Xiaolong Peng 2026-01-15 16:52:20 -08:00
parent 42770e08c5
commit 3fe4192ae6
6 changed files with 57 additions and 29 deletions
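
For context on the race being addressed: threads bump a shared alloc region's top with CAS and no heap lock, while refresh_alloc_regions() concurrently retires and replaces regions under the heap lock. With a single _top field serving both worlds, a lock-free allocator could still bump _top after its region had been retired and handed back to the free set. The fix introduces a dedicated _volatile_top that lock-free allocators CAS, and has retirement pin _volatile_top to the region end so that every in-flight CAS must fail. A minimal standalone sketch of the scheme, using std::atomic in place of HotSpot's AtomicAccess (the types and names here are illustrative, not the JDK code):

#include <atomic>
#include <cstddef>

// Illustrative stand-in for a heap region; not the HotSpot class.
struct Region {
  char* _bottom;
  char* _end;
  std::atomic<char*> _volatile_top;  // CASed by lock-free allocators
  char* _top;                        // plain field; written under the heap lock
  Region(char* b, char* e) : _bottom(b), _end(e), _volatile_top(b), _top(b) {}

  // Lock-free bump-pointer allocation: succeeds only if nobody moved the top.
  char* allocate_atomic(size_t bytes) {
    char* old_top = _volatile_top.load();
    while (old_top + bytes <= _end) {
      if (_volatile_top.compare_exchange_weak(old_top, old_top + bytes)) {
        return old_top;              // won the race; the memory is ours
      }
      // old_top was refreshed by the failed CAS; re-check the bound and retry
    }
    return nullptr;                  // region is full, or pinned by retirement
  }

  // Called under the heap lock when the region leaves the alloc-region array.
  void retire() {
    char* top = _volatile_top.load();
    // Pin _volatile_top to _end so every concurrent CAS fails from here on.
    while (top != _end && !_volatile_top.compare_exchange_weak(top, _end)) {}
    _top = top;                      // publish the final top to the locked world
  }
};

int main() {
  char backing[1024];
  Region r(backing, backing + sizeof backing);
  char* p = r.allocate_atomic(64);   // lock-free path
  r.retire();                        // heap-lock path
  return (p == backing && r._top == backing + 64) ? 0 : 1;
}

The diffs below add exactly these two pieces: CAS allocation against _volatile_top, and a retirement path that pins _volatile_top to end() before the region leaves the alloc-region array.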

View File

@@ -274,7 +274,7 @@ HeapWord* ShenandoahAllocator<ALLOC_PARTITION>::allocate_in(ShenandoahHeapRegion
     // For GC allocations, we advance update_watermark because the objects relocated into this memory during
     // evacuation are not updated during evacuation. For both young and old regions r, it is essential that all
     // PLABs be made parsable at the end of evacuation. This is enabled by retiring all plabs at end of evacuation.
-    region->concurrent_set_update_watermark(region->top());
+    region->concurrent_set_update_watermark(IS_SHARED_ALLOC_REGION ? region->volatile_top() : region->top());
   }
   if (!IS_SHARED_ALLOC_REGION && region->free_words() < PLAB::min_size()) {
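
A note on the new ternary: IS_SHARED_ALLOC_REGION is a template parameter of allocate_in (the refresh path below instantiates allocate_in<false>), so the choice between volatile_top() and top() is resolved at compile time rather than by testing the active-alloc-region flag on every call. A minimal sketch of that compile-time dispatch, with illustrative names rather than the JDK code:

#include <atomic>

std::atomic<int*> g_volatile_top{nullptr};  // stand-in for the CASed _volatile_top
int* g_top = nullptr;                       // stand-in for the plain _top

// IS_SHARED is resolved at compile time: the untaken branch is discarded, so the
// shared path always pays for the atomic load and the private path never does.
template <bool IS_SHARED>
int* current_top() {
  if constexpr (IS_SHARED) {
    return g_volatile_top.load();  // lock-free allocators may be racing with us
  } else {
    return g_top;                  // single-owner region: a plain read is safe
  }
}

int main() {
  int word = 0;
  g_top = &word;
  g_volatile_top.store(&word);
  return (current_top<true>() == &word && current_top<false>() == &word) ? 0 : 1;
}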
@@ -301,7 +301,7 @@ int ShenandoahAllocator<ALLOC_PARTITION>::refresh_alloc_regions(ShenandoahAllocR
   for (uint i = 0; i < _alloc_region_count; i++) {
     ShenandoahAllocRegion* alloc_region = &_alloc_regions[i];
     ShenandoahHeapRegion* region = alloc_region->address;
-    const size_t free_bytes = region == nullptr ? 0 : region->free();
+    const size_t free_bytes = region == nullptr ? 0 : region->free_bytes_for_atomic_alloc();
     if (region == nullptr || free_bytes / HeapWordSize < PLAB::min_size()) {
       if (region != nullptr) {
         region->unset_active_alloc_region();
@@ -334,13 +334,8 @@ int ShenandoahAllocator<ALLOC_PARTITION>::refresh_alloc_regions(ShenandoahAllocR
     *obj = allocate_in<false>(reserved[i], true, *req, *in_new_region, ready_for_retire);
     assert(*obj != nullptr, "Should always succeed");
     satisfy_alloc_req_first = false;
-    if (ready_for_retire) {
-      log_debug(gc, alloc)("%sAllocator: heap region %li has no space left after satisfying alloc req.",
-                           _alloc_partition_name, reserved[i]->index());
-      reserved[i]->unset_active_alloc_region();
-      continue;
-    }
   }
+  reserved[i]->set_active_alloc_region();
   log_debug(gc, alloc)("%sAllocator: Storing heap region %li to alloc region %i",
                        _alloc_partition_name, reserved[i]->index(), refreshable[i]->alloc_region_index);
   AtomicAccess::store(&refreshable[i]->address, reserved[i]);
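
Ordering matters in this refresh path: the pending request is first satisfied via allocate_in<false> while the heap lock is held (a plain _top bump), then set_active_alloc_region() syncs _top into _volatile_top — a call that moved here from ShenandoahFreeSet::reserve_alloc_regions_internal (next file) — and only then is the region pointer published with AtomicAccess::store into the slot that lock-free allocators read. A standalone sketch of that publish protocol; the std::atomic stand-ins and explicit memory orders are illustrative, not taken from the JDK code:

#include <atomic>

// Minimal stand-ins; illustrative, not the HotSpot types.
struct Region {
  char* _top = nullptr;                       // plain top; heap lock held for writes
  std::atomic<char*> _volatile_top{nullptr};  // CASed by lock-free allocators
};

std::atomic<Region*> g_alloc_slot{nullptr};   // stand-in for ShenandoahAllocRegion::address

// Writer (heap lock held): the region must be CAS-ready *before* it is published,
// which is why set_active_alloc_region() now runs here, after the lock-held bump,
// rather than back in the free set at reserve time.
void publish(Region* r) {
  r->_volatile_top.store(r->_top);                   // set_active_alloc_region(): sync _top
  g_alloc_slot.store(r, std::memory_order_release);  // AtomicAccess::store(&slot->address, r)
}

// Reader (no lock): any region observed through the slot has a valid _volatile_top.
Region* current_alloc_region() {
  return g_alloc_slot.load(std::memory_order_acquire);
}

int main() {
  static char backing[1024];
  static Region r;
  r._top = backing + 128;  // pretend the pending request consumed 128 bytes under the lock
  publish(&r);
  return current_alloc_region()->_volatile_top.load() == backing + 128 ? 0 : 1;
}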

View File

@@ -3175,7 +3175,6 @@ int ShenandoahFreeSet::reserve_alloc_regions_internal(Iter iterator, int const r
     if (ALLOC_PARTITION == ShenandoahFreeSetPartitionId::Mutator) {
       increase_bytes_allocated(reserved_bytes);
     }
-    r->set_active_alloc_region();
   }
   return reserved_regions_count;

View File

@@ -69,6 +69,7 @@ ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool c
   _empty_time(os::elapsedTime()),
   _top_before_promoted(nullptr),
   _state(committed ? _empty_committed : _empty_uncommitted),
+  _volatile_top(start + RegionSizeWords),
   _top(start),
   _tlab_allocs(0),
   _gclab_allocs(0),
@@ -572,6 +573,8 @@ ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
 void ShenandoahHeapRegion::recycle_internal() {
   assert(_recycling.is_set() && is_trash(), "Wrong state");
+  assert(!is_active_alloc_region(), "Must not be active alloc region");
+  assert(volatile_top() == end(), "Must be");
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   set_top(bottom());
View File

@@ -250,7 +250,8 @@ private:
   HeapWord* _coalesce_and_fill_boundary; // for old regions not selected as collection set candidates.
   // Frequently updated fields
-  HeapWord* volatile _top;
+  HeapWord* volatile _volatile_top;
+  HeapWord* _top;
   size_t volatile _tlab_allocs;
   size_t volatile _gclab_allocs;
@@ -455,11 +456,15 @@ public:
   // Find humongous start region that this region belongs to
   ShenandoahHeapRegion* humongous_start_region() const;
+  HeapWord* volatile_top() const {
+    return AtomicAccess::load(&_volatile_top);
+  }
   HeapWord* top() const {
-    return AtomicAccess::load(&_top);
+    return is_active_alloc_region() ? volatile_top() : _top;
   }
   void set_top(HeapWord* v) {
-    AtomicAccess::store(&_top, v);
+    _top = v;
   }
   HeapWord* new_top() const { return _new_top; }
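
This accessor split is the heart of the header change. While the region is an active alloc region, _top may lag behind the CAS-updated _volatile_top, so top() routes through volatile_top(); once the region is inactive, _top is authoritative again, and set_top() drops its atomic store because _top is then only written under the heap lock. A condensed sketch of the routing (std::atomic in place of AtomicAccess; illustrative, not the JDK code):

#include <atomic>

struct Region {
  std::atomic<bool> _active{false};           // stand-in for the _active_alloc_region flag
  std::atomic<char*> _volatile_top{nullptr};  // CASed by lock-free allocators
  char* _top = nullptr;                       // plain field; heap lock held for writes

  // While lock-free allocation is possible, _top may be stale: route reads through
  // _volatile_top. Once inactive, the plain field is authoritative.
  char* top() const {
    return _active.load() ? _volatile_top.load() : _top;
  }
};

int main() {
  char word;
  Region r;
  r._top = &word;
  r._volatile_top.store(&word);
  r._active.store(true);
  return r.top() == &word ? 0 : 1;
}

This is only safe because unset_active_alloc_region() (further down in this file) writes the final _volatile_top back into _top before clearing the flag, so both reads agree across the transition.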
@@ -473,7 +478,10 @@ public:
   size_t used_before_promote() const { return byte_size(bottom(), get_top_before_promote()); }
   size_t free() const { return byte_size(top(), end()); }
   size_t free_words() const { return pointer_delta(end(), top()); }
+  size_t free_bytes_for_atomic_alloc() const {
+    assert(is_active_alloc_region(), "Must be");
+    return byte_size(volatile_top(), end());
+  }
   // Does this region contain this address?
   bool contains(HeapWord* p) const {
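
For an active region, free() would yield the same number, since top() now routes through volatile_top(); the dedicated accessor makes the lock-free intent explicit and asserts its precondition. What it computes, in a tiny runnable stand-in (this byte_size is a hypothetical equivalent of the HotSpot helper):

#include <cassert>
#include <cstddef>

// byte_size(p, q) in HotSpot is the byte distance between two heap pointers.
size_t byte_size(const char* from, const char* to) {
  return static_cast<size_t>(to - from);
}

int main() {
  char region[4096];
  char* volatile_top = region + 1024;  // 1024 bytes already handed out via CAS
  char* end = region + 4096;
  assert(byte_size(volatile_top, end) == 3072);  // bytes still available for CAS allocation
}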
@@ -545,10 +553,28 @@ public:
   inline void set_active_alloc_region() {
     assert(_active_alloc_region.is_unset(), "Must be");
+    // Sync _top into _volatile_top before setting the _active_alloc_region flag, to prepare for CAS allocation.
+    AtomicAccess::store(&_volatile_top, _top);
     _active_alloc_region.set();
   }
+  // Unset a heap region as the active alloc region. This should only be called from
+  // ShenandoahAllocator::refresh_alloc_regions or ShenandoahAllocator::release_alloc_regions,
+  // when the region is removed from the alloc region array in ShenandoahAllocator.
   inline void unset_active_alloc_region() {
     shenandoah_assert_heaplocked();
+    // Before unsetting the _active_alloc_region flag, _volatile_top must be set to the end of the
+    // region; this avoids racing with lock-free allocation once the region has been removed from
+    // the allocator's alloc region array.
+    HeapWord* current_volatile_top = volatile_top();
+    while (current_volatile_top != end() &&
+           AtomicAccess::cmpxchg(&_volatile_top, current_volatile_top, end()) != current_volatile_top) {
+      // Failed to exchange _volatile_top with end(); reload _volatile_top and retry.
+      current_volatile_top = volatile_top();
+    }
+    set_top(current_volatile_top); // Sync the final _volatile_top back to _top.
     assert(is_active_alloc_region(), "Must be");
     _active_alloc_region.unset();
   }
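
The CAS loop above is the retire handshake. An allocator racing with retirement either wins its bump before _volatile_top is pinned to end() — in which case the final value synced back into _top accounts for its allocation — or loses the CAS, reloads end(), and bails out. A standalone two-thread model of the handshake (std::atomic stand-ins; illustrative, not the JDK code):

#include <atomic>
#include <cassert>
#include <cstddef>
#include <thread>

struct Region {
  char* _bottom;
  char* _end;
  std::atomic<char*> _volatile_top;
  char* _top = nullptr;
  Region(char* b, char* e) : _bottom(b), _end(e), _volatile_top(b) {}

  char* allocate_atomic(size_t bytes) {
    char* old_top = _volatile_top.load();
    while (old_top != _end && old_top + bytes <= _end) {
      if (_volatile_top.compare_exchange_weak(old_top, old_top + bytes)) return old_top;
    }
    return nullptr;  // full, or pinned to _end by a concurrent retire
  }

  void unset_active_alloc_region() {
    char* cur = _volatile_top.load();
    // Pin to _end; every CAS that loses against this sees a "full" region.
    while (cur != _end && !_volatile_top.compare_exchange_weak(cur, _end)) {}
    _top = cur;      // allocations that won before the pin are all accounted for
  }
};

int main() {
  static char mem[1 << 16];
  Region r(mem, mem + sizeof mem);
  std::thread allocator([&] { while (r.allocate_atomic(64) != nullptr) {} });
  r.unset_active_alloc_region();
  allocator.join();
  // Whatever the interleaving, _top covers exactly the memory that was handed out.
  assert(r._top >= r._bottom && r._top <= r._end);
}

Pinning to end() rather than merely clearing a flag is what makes the slot swap in refresh_alloc_regions safe: a thread still holding a stale region pointer simply observes a full region.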

View File

@@ -171,7 +171,7 @@ HeapWord* ShenandoahHeapRegion::allocate_atomic(size_t size, const ShenandoahAll
   ShenandoahHeapRegionReadyForRetireChecker retire_checker(ready_for_retire);
   for (;/*Always return in the loop*/;) {
-    HeapWord* obj = top();
+    HeapWord* obj = volatile_top();
     size_t free_words = pointer_delta(end(), obj);
     if (free_words >= size) {
       if (try_allocate(obj, size)) {
@@ -195,7 +195,7 @@ HeapWord* ShenandoahHeapRegion::allocate_lab_atomic(const ShenandoahAllocRequest
   ShenandoahHeapRegionReadyForRetireChecker retire_checker(ready_for_retire);
   for (;/*Always return in the loop*/;) {
     size_t adjusted_size = req.size();
-    HeapWord* obj = top();
+    HeapWord* obj = volatile_top();
     size_t free_words = pointer_delta(end(), obj);
     size_t aligned_free_words = align_down((free_words * HeapWordSize) >> LogHeapWordSize, MinObjAlignment);
     if (adjusted_size > aligned_free_words) {
@@ -220,7 +220,7 @@ HeapWord* ShenandoahHeapRegion::allocate_lab_atomic(const ShenandoahAllocRequest
 bool ShenandoahHeapRegion::try_allocate(HeapWord* const obj, size_t const size) {
   HeapWord* new_top = obj + size;
-  if (AtomicAccess::cmpxchg(&_top, obj, new_top) == obj) {
+  if (AtomicAccess::cmpxchg(&_volatile_top, obj, new_top) == obj) {
     assert(is_object_aligned(new_top), "new top breaks alignment: " PTR_FORMAT, p2i(new_top));
     assert(is_object_aligned(obj), "obj is not aligned: " PTR_FORMAT, p2i(obj));
     return true;
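
A note on the idiom above: HotSpot's cmpxchg returns the value it witnessed at the address, so comparing the result against obj tells the caller whether the exchange took effect, and the loops in allocate_atomic/allocate_lab_atomic reload volatile_top() and retry on failure. The std::atomic equivalent reports the same fact through a bool plus an updated expected value:

#include <atomic>
#include <cassert>

int main() {
  std::atomic<int> top{0};
  int expected = 0;
  bool won = top.compare_exchange_strong(expected, 8);
  assert(won && top.load() == 8);  // witnessed 0 == expected: the CAS took effect
  expected = 0;
  won = top.compare_exchange_strong(expected, 16);
  assert(!won && expected == 8);   // witnessed 8 != expected: CAS failed, expected refreshed
}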

View File

@@ -31,19 +31,24 @@
 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
 #define VM_STRUCTS_SHENANDOAH(nonstatic_field, volatile_nonstatic_field, static_field) \
-  nonstatic_field(ShenandoahHeap, _num_regions, size_t) \
-  nonstatic_field(ShenandoahHeap, _regions, ShenandoahHeapRegion**) \
-  nonstatic_field(ShenandoahHeap, _log_min_obj_alignment_in_bytes, int) \
-  nonstatic_field(ShenandoahHeap, _free_set, ShenandoahFreeSet*) \
-  volatile_nonstatic_field(ShenandoahHeap, _committed, size_t) \
-  static_field(ShenandoahHeapRegion, RegionSizeBytes, size_t) \
-  static_field(ShenandoahHeapRegion, RegionSizeBytesShift, size_t) \
-  volatile_nonstatic_field(ShenandoahHeapRegion, _state, ShenandoahHeapRegion::RegionState) \
-  nonstatic_field(ShenandoahHeapRegion, _index, size_t const) \
-  nonstatic_field(ShenandoahHeapRegion, _bottom, HeapWord* const) \
-  volatile_nonstatic_field(ShenandoahHeapRegion, _top, HeapWord*) \
-  nonstatic_field(ShenandoahHeapRegion, _end, HeapWord* const) \
-  nonstatic_field(ShenandoahFreeSet, _total_global_used, size_t) \
+  nonstatic_field(ShenandoahHeap, _num_regions, size_t) \
+  nonstatic_field(ShenandoahHeap, _regions, ShenandoahHeapRegion**) \
+  nonstatic_field(ShenandoahHeap, _log_min_obj_alignment_in_bytes, int) \
+  nonstatic_field(ShenandoahHeap, _free_set, ShenandoahFreeSet*) \
+  volatile_nonstatic_field(ShenandoahHeap, _committed, size_t) \
+  static_field(ShenandoahHeapRegion, RegionSizeBytes, size_t) \
+  static_field(ShenandoahHeapRegion, RegionSizeBytesShift, size_t) \
+  volatile_nonstatic_field(ShenandoahHeapRegion, _state, ShenandoahHeapRegion::RegionState) \
+  nonstatic_field(ShenandoahHeapRegion, _index, size_t const) \
+  nonstatic_field(ShenandoahHeapRegion, _bottom, HeapWord* const) \
+  volatile_nonstatic_field(ShenandoahHeapRegion, _volatile_top, HeapWord*) \
+  volatile_nonstatic_field(ShenandoahHeapRegion, _tlab_allocs, size_t) \
+  volatile_nonstatic_field(ShenandoahHeapRegion, _gclab_allocs, size_t) \
+  volatile_nonstatic_field(ShenandoahHeapRegion, _plab_allocs, size_t) \
+  volatile_nonstatic_field(ShenandoahHeapRegion, _age, uint) \
+  CENSUS_NOISE(volatile_nonstatic_field(ShenandoahHeapRegion, _youth, uint)) \
+  nonstatic_field(ShenandoahHeapRegion, _end, HeapWord* const) \
+  nonstatic_field(ShenandoahFreeSet, _total_global_used, size_t) \
 #define VM_INT_CONSTANTS_SHENANDOAH(declare_constant, declare_constant_with_value) \
   declare_constant(ShenandoahHeapRegion::_empty_uncommitted) \
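
These vmStructs entries keep the Serviceability Agent and debuggers in step with the new field layout: the exported volatile top is now _volatile_top, and the frequently updated allocation counters and age fields are exported alongside it. The list is an X-macro that callers instantiate with generator macros; a minimal sketch of the pattern, with illustrative generator and struct names rather than HotSpot's:

#include <cstddef>
#include <cstdio>

struct Point { int x; volatile int y; };

// The field list, parameterized over what to do with each entry.
#define MY_STRUCTS(nonstatic_field, volatile_nonstatic_field) \
  nonstatic_field(Point, x, int)                              \
  volatile_nonstatic_field(Point, y, int)

// One possible generator: print each field's offset, as a table builder would record it.
#define PRINT_FIELD(klass, field, type) \
  printf(#klass "::" #field " (" #type ") at offset %zu\n", offsetof(klass, field));

int main() {
  MY_STRUCTS(PRINT_FIELD, PRINT_FIELD)  // expands to one printf per field entry
}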