Mirror of https://github.com/openjdk/jdk.git (synced 2026-01-28 03:58:21 +00:00)

Rename function is_active_alloc_region to is_atomic_alloc_region

parent 27dafac753
commit bacdb925c0
@@ -77,7 +77,7 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio

   for (size_t i = 0; i < num_regions; i++) {
     ShenandoahHeapRegion* region = heap->get_region(i);
-    assert(!region->is_active_alloc_region(), "There should be no active alloc regions when choosing collection set");
+    assert(!region->is_atomic_alloc_region(), "There should be no active alloc regions when choosing collection set");
     if (!_generation->contains(region)) {
       continue;
     }
@@ -99,7 +99,7 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec

   for (size_t i = 0; i < num_regions; i++) {
     ShenandoahHeapRegion* region = heap->get_region(i);
-    assert(!region->is_active_alloc_region(), "There should be no active alloc regions when rebuilding free set");
+    assert(!region->is_atomic_alloc_region(), "There should be no active alloc regions when rebuilding free set");

     size_t garbage = region->garbage();
     total_garbage += garbage;
@@ -276,9 +276,9 @@ HeapWord* ShenandoahAllocator<ALLOC_PARTITION>::allocate_in(ShenandoahHeapRegion
   // evacuation are not updated during evacuation. For both young and old regions r, it is essential that all
   // PLABs be made parsable at the end of evacuation. This is enabled by retiring all plabs at end of evacuation.
   if (IS_SHARED_ALLOC_REGION) {
-    region->concurrent_set_update_watermark(region->top());
+    region->concurrent_set_update_watermark(region->top<true>());
   } else {
-    region->set_update_watermark(region->top());
+    region->set_update_watermark(region->top<false>());
   }
 }
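Why the explicit template arguments matter at this call site, as a minimal standalone sketch (hypothetical names, std::atomic in place of HotSpot's AtomicAccess): while a region is serving lock-free allocation, concurrent threads bump its atomic top and the plain _top field lags behind, so a watermark computed from the plain field could miss the most recent allocations.

#include <atomic>
#include <cassert>

// Hypothetical two-field model: allocators bump 'atomic_top' with CAS; the
// plain 'top' field is only synced when the region is retired from service.
struct TwoTops {
  std::atomic<char*> atomic_top{nullptr};
  char* top{nullptr};
};

// Mirrors what a top<true>()-style reader does: prefer the atomic top.
char* freshest_top(const TwoTops& r) {
  char* at = r.atomic_top.load(std::memory_order_acquire);
  return at != nullptr ? at : r.top;
}

int main() {
  static char region[64];
  TwoTops r;
  r.top = region;                                              // stale value from the last sync
  r.atomic_top.store(region + 32, std::memory_order_release);  // concurrent bumps happened
  assert(freshest_top(r) == region + 32);  // the plain 'top' would under-report by 32 bytes
  return 0;
}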
@@ -91,7 +91,7 @@ void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
   assert(Thread::current()->is_VM_thread(), "Must be VMThread");
   assert(!is_in(r), "Already in collection set");
   assert(!r->is_humongous(), "Only add regular regions to the collection set");
-  assert(!r->is_active_alloc_region(), "Active alloc region can't be added to collection set");
+  assert(!r->is_atomic_alloc_region(), "Atomic alloc region can't be added to collection set");

   _cset_map[r->index()] = 1;
   size_t live = r->get_live_data_bytes();
@@ -907,7 +907,7 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
     switch (partition) {
       case ShenandoahFreeSetPartitionId::NotFree:
       {
-        assert(!validate_totals || r->is_active_alloc_region() || (capacity != _region_size_bytes), "Should not be retired if empty");
+        assert(!validate_totals || r->is_atomic_alloc_region() || (capacity != _region_size_bytes), "Should not be retired if empty");
         if (r->is_humongous()) {
           if (r->is_old()) {
             regions[int(ShenandoahFreeSetPartitionId::OldCollector)]++;
@@ -923,7 +923,7 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
             young_humongous_waste += capacity;
           }
         } else {
-          assert(r->is_cset() || r->is_active_alloc_region() || (capacity < PLAB::min_size() * HeapWordSize),
+          assert(r->is_cset() || r->is_atomic_alloc_region() || (capacity < PLAB::min_size() * HeapWordSize),
                  "Expect retired remnant size to be smaller than min plab size");
          // This region has been retired already or it is in the cset. In either case, we set capacity to zero
          // so that the entire region will be counted as used. We count young cset regions as "retired".
@@ -1633,7 +1633,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo
     // If region is not completely free, the current [beg; end] is useless, and we may fast-forward. If we can extend
     // the existing range, we can exploit that certain regions are already known to be in the Mutator free set.
     while (!can_allocate_from(_heap->get_region(end))) {
-      assert(!_heap->get_region(end)->is_active_alloc_region(), "Must not");
+      assert(!_heap->get_region(end)->is_atomic_alloc_region(), "Must not be atomic alloc region");
       // region[end] is not empty, so we restart our search after region[end]
       idx_t slide_delta = end + 1 - beg;
       if (beg + slide_delta > last_possible_start) {
@@ -1959,7 +1959,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
   size_t num_regions = _heap->num_regions();
   for (size_t idx = 0; idx < num_regions; idx++) {
     ShenandoahHeapRegion* region = _heap->get_region(idx);
-    assert(!region->is_active_alloc_region(), "There should be no active alloc regions when choosing collection set");
+    assert(!region->is_atomic_alloc_region(), "There should be no atomic alloc regions when choosing collection set");
     if (region->is_trash()) {
       // Trashed regions represent immediate garbage identified by final mark and regions that had been in the collection
       // partition but have not yet been "cleaned up" following update refs.
@@ -576,7 +576,7 @@ size_t ShenandoahGeneration::select_aged_regions(const size_t old_promotion_rese
       // We prefer to promote this region in place because it has a small amount of garbage and a large usage.
       HeapWord* tams = ctx->top_at_mark_start(r);
       HeapWord* original_top = r->top();
-      if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top && !r->is_active_alloc_region()) {
+      if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top && !r->is_atomic_alloc_region()) {
         // No allocations from this region have been made during concurrent mark. It meets all the criteria
         // for in-place-promotion. Though we only need the value of top when we fill the end of the region,
         // we use this field to indicate that this region should be promoted in place during the evacuation
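The guard above combines three independent conditions; a hedged restatement with hypothetical signatures (the real code reads these off the heap and the region):

// Hypothetical predicate: a region qualifies for in-place promotion only if
// old marking is not running, nothing was allocated in it since mark start
// (top_at_mark_start == top), and no lock-free allocator still targets it.
inline bool can_promote_in_place(bool old_mark_in_progress,
                                 const void* top_at_mark_start,
                                 const void* top,
                                 bool atomic_alloc_region) {
  return !old_mark_in_progress
      && top_at_mark_start == top
      && !atomic_alloc_region;
}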
@@ -138,7 +138,7 @@ void ShenandoahGenerationalEvacuationTask::evacuate_and_promote_regions() {


 void ShenandoahGenerationalEvacuationTask::maybe_promote_region(ShenandoahHeapRegion* r) {
-  if (r->is_young() && r->is_active() && _heap->is_tenurable(r) && !r->is_active_alloc_region()) {
+  if (r->is_young() && r->is_active() && _heap->is_tenurable(r) && !r->is_atomic_alloc_region()) {
     if (r->is_humongous_start()) {
       // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
       // doing this work during a safepoint. We cannot put humongous regions into the collection set because that
@@ -186,7 +186,7 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion
     assert(region->is_regular(), "Use different service to promote humongous regions");
     assert(_heap->is_tenurable(region), "Only promote regions that are sufficiently aged");
     assert(region->get_top_before_promote() == tams, "Region %zu has been used for allocations before promotion", region->index());
-    assert(!region->is_active_alloc_region(), "Must not be atomic alloc region");
+    assert(!region->is_atomic_alloc_region(), "Must not be atomic alloc region");
   }

   ShenandoahOldGeneration* const old_gen = _heap->old_generation();
@@ -572,7 +572,7 @@ ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {

 void ShenandoahHeapRegion::recycle_internal() {
   assert(_recycling.is_set() && is_trash(), "Wrong state");
-  assert(!is_active_alloc_region(), "Must not be active alloc region");
+  assert(!is_atomic_alloc_region(), "Must not be atomic alloc region");
   assert(atomic_top() == nullptr, "Must be");
   ShenandoahHeap* heap = ShenandoahHeap::heap();

@@ -460,18 +460,20 @@ public:
     return AtomicAccess::load_acquire(&_atomic_top);
   }

   // The field _top can be stale when the region is an atomic alloc region, therefore,
   // it always checks the atomic top first if CHECK_ATOMIC_TOP is not overridden.
   template<bool CHECK_ATOMIC_TOP = true>
   HeapWord* top() const {
     if (CHECK_ATOMIC_TOP) {
-      HeapWord* v_top = atomic_top();
-      return v_top == nullptr ? AtomicAccess::load(&_top) : v_top;
+      HeapWord* at = atomic_top();
+      return at == nullptr ? AtomicAccess::load(&_top) : at;
     }
-    assert(atomic_top() == nullptr, "Must be");
+    assert(!is_atomic_alloc_region(), "Must not be an atomic alloc region");
     return AtomicAccess::load(&_top);
   }

   void set_top(HeapWord* v) {
-    assert(!is_active_alloc_region(), "Sanity check");
+    assert(!is_atomic_alloc_region(), "Must not be an atomic alloc region");
     AtomicAccess::store(&_top, v);
   }
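A self-contained sketch of the two-top scheme this hunk templates, assuming hypothetical names and plain std::atomic in place of HotSpot's AtomicAccess (this is a model, not the real ShenandoahHeapRegion):

#include <atomic>
#include <cassert>

using HeapWordPtr = char*;  // stand-in for HeapWord*

class RegionModel {
  std::atomic<HeapWordPtr> _atomic_top{nullptr};
  std::atomic<HeapWordPtr> _top{nullptr};
public:
  HeapWordPtr atomic_top() const {
    return _atomic_top.load(std::memory_order_acquire);
  }

  // Derived state, as in the patch: the region is an atomic alloc region
  // exactly while an atomic top is published.
  bool is_atomic_alloc_region() const { return atomic_top() != nullptr; }

  // Mirrors the patched top<CHECK_ATOMIC_TOP>(): concurrent readers take the
  // default and prefer the atomic top; callers that hold the heap lock and
  // know the region is not an atomic alloc region may pass false to skip it.
  template<bool CHECK_ATOMIC_TOP = true>
  HeapWordPtr top() const {
    if (CHECK_ATOMIC_TOP) {
      HeapWordPtr at = atomic_top();
      return at == nullptr ? _top.load(std::memory_order_relaxed) : at;
    }
    assert(!is_atomic_alloc_region());
    return _top.load(std::memory_order_relaxed);
  }

  void set_top(HeapWordPtr v) {
    assert(!is_atomic_alloc_region());
    _top.store(v, std::memory_order_relaxed);
  }
};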
@@ -571,7 +573,7 @@ public:
   // when the region is removed from the alloc region array in ShenandoahAllocator.
   inline void unset_active_alloc_region() {
     shenandoah_assert_heaplocked();
-    assert(is_active_alloc_region(), "Must be");
+    assert(is_atomic_alloc_region(), "Must be");

     // Before unset _active_alloc_region flag, _atomic_top needs to be set to sentinel value using AtomicAccess::cmpxchg,
     // this avoids race condition when the alloc region removed from the alloc regions array used by lock-free allocation in allocator;
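A hedged sketch of the retire protocol that comment describes (hypothetical sentinel and helper; the real code runs under the heap lock and uses AtomicAccess::cmpxchg): CAS-ing the atomic top to a sentinel makes every racing lock-free bump CAS fail, so allocators back off before the region leaves the alloc region array.

#include <atomic>
#include <cstdint>

using HeapWordPtr = char*;  // stand-in for HeapWord*

// Hypothetical sentinel: an address no allocation can ever return.
static HeapWordPtr const kRetiredSentinel =
    reinterpret_cast<HeapWordPtr>(~static_cast<uintptr_t>(0));

// Sketch of unset_active_alloc_region(): swing _atomic_top to the sentinel so
// concurrent bump-pointer CASes fail, sync the final top into _top, then
// clear _atomic_top so is_atomic_alloc_region() turns false.
void retire_atomic_alloc_region(std::atomic<HeapWordPtr>& atomic_top,
                                std::atomic<HeapWordPtr>& top) {
  HeapWordPtr cur = atomic_top.load(std::memory_order_acquire);
  while (!atomic_top.compare_exchange_weak(cur, kRetiredSentinel,
                                           std::memory_order_acq_rel)) {
    // 'cur' was refreshed by the failed CAS; retry with the newest value.
  }
  top.store(cur, std::memory_order_release);             // _atomic_top synced to _top
  atomic_top.store(nullptr, std::memory_order_release);  // no longer an atomic alloc region
}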
@@ -592,7 +594,7 @@ public:
     assert(top<false>() == current_atomic_top, "Value of _atomic_top must have synced to _top");
   }

-  inline bool is_active_alloc_region() const {
+  inline bool is_atomic_alloc_region() const {
     // region is an active atomic alloc region if the atomic top is set
     return atomic_top() != nullptr;
   }
@@ -107,7 +107,7 @@ HeapWord* ShenandoahHeapRegion::allocate_fill(size_t size) {

 HeapWord* ShenandoahHeapRegion::allocate(size_t size, const ShenandoahAllocRequest& req) {
   shenandoah_assert_heaplocked_or_safepoint();
-  assert(!is_active_alloc_region(), "Must not");
+  assert(!is_atomic_alloc_region(), "Must not");
   assert(is_object_aligned(size), "alloc size breaks alignment: %zu", size);

   HeapWord* obj = top<false>();
@@ -358,13 +358,13 @@ inline bool ShenandoahHeapRegion::is_affiliated() const {
 }

 inline void ShenandoahHeapRegion::save_top_before_promote() {
-  assert(!is_active_alloc_region(), "Must not");
+  assert(!is_atomic_alloc_region(), "Must not");
   assert(atomic_top() == nullptr, "Must be");
   _top_before_promoted = top<false>();
 }

 inline void ShenandoahHeapRegion::restore_top_before_promote() {
-  assert(!is_active_alloc_region(), "Must not");
+  assert(!is_atomic_alloc_region(), "Must not");
   assert(atomic_top() == nullptr, "Must be");
   _top = _top_before_promoted;
   _top_before_promoted = nullptr;
@@ -377,7 +377,7 @@ public:
   };

   void heap_region_do(ShenandoahHeapRegion* r) override {
-    if (r->is_cset() || r->is_trash() || r->is_active_alloc_region()) {
+    if (r->is_cset() || r->is_trash() || r->is_atomic_alloc_region()) {
       // Count the entire cset, trashed (formerly cset) or alloc reserved region as used
       // Note: Immediate garbage trash regions were never in the cset.
       _used += _region_size_bytes;
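The counting rule above in isolation, as a hedged sketch (hypothetical helper; the real visitor accumulates into _used):

#include <cstddef>

// Hypothetical restatement: cset, trash, and atomic alloc regions are charged
// as fully used; any other region is charged by its actual usage.
inline size_t charged_bytes(bool is_cset, bool is_trash, bool is_atomic_alloc,
                            size_t used_bytes, size_t region_size_bytes) {
  return (is_cset || is_trash || is_atomic_alloc) ? region_size_bytes : used_bytes;
}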