Fix crash after merging from tip (due to the new free-set rebuild calls)

parent 03c87a5ea9
commit 9ba19e4226
@@ -373,7 +373,6 @@ HeapWord* ShenandoahAllocator<ALLOC_PARTITION>::allocate(ShenandoahAllocRequest
 template <ShenandoahFreeSetPartitionId ALLOC_PARTITION>
 void ShenandoahAllocator<ALLOC_PARTITION>::release_alloc_regions(bool should_update_accounting) {
-  assert_at_safepoint();
   shenandoah_assert_heaplocked();

   log_debug(gc, alloc)("%sAllocator: Releasing all alloc regions", _alloc_partition_name);

@@ -538,7 +538,7 @@ void ShenandoahConcurrentGC::entry_cleanup_early() {
     // This is an abbreviated cycle. Rebuild the freeset in order to establish reserves for the next GC cycle. Doing
     // the rebuild ASAP also expedites availability of immediate trash, reducing the likelihood that we will degenerate
     // during promote-in-place processing.
-    heap->rebuild_free_set(true /*concurrent*/);
+    heap->rebuild_free_set(true /*concurrent*/, true /*release alloc regions before rebuilding*/);
  }
}

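A minimal standalone sketch (not JDK code; every type and function below is an illustrative stand-in, not Shenandoah's real API) of the ordering this commit introduces: when the new flag is true, the alloc regions are handed back to the free set and the rebuild then runs, both under a single heap-lock acquisition rather than relying on a separate safepoint-time release:

// Sketch only: models "release alloc regions, then rebuild, under one lock".
#include <cstdio>
#include <mutex>
#include <vector>

struct FreeSetSketch {
  std::vector<int> alloc_regions;   // stand-in for per-partition alloc regions

  void release_alloc_regions() {
    // Caller holds the heap lock; return regions to the free set.
    std::printf("released %zu alloc regions\n", alloc_regions.size());
    alloc_regions.clear();
  }
  void rebuild() { std::printf("free set rebuilt\n"); }
};

struct HeapSketch {
  std::mutex heap_lock;             // stand-in for the Shenandoah heap lock
  FreeSetSketch free_set;

  // Mirrors the new two-argument shape: callers opt in to releasing the
  // alloc regions immediately before the rebuild.
  void rebuild_free_set(bool release_alloc_regions_first) {
    std::lock_guard<std::mutex> locker(heap_lock);
    if (release_alloc_regions_first) {
      free_set.release_alloc_regions();
    }
    free_set.rebuild();
  }
};

int main() {
  HeapSketch heap;
  heap.free_set.alloc_regions = {1, 2, 3};
  heap.rebuild_free_set(/*release_alloc_regions_first=*/true);
}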
@@ -745,9 +745,6 @@ void ShenandoahConcurrentGC::op_final_mark() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

-  // Release all alloc regions at the beginning of final mark.
-  heap->free_set()->release_alloc_regions_under_lock();
-
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded(_generation);
  }

@@ -759,6 +756,9 @@ void ShenandoahConcurrentGC::op_final_mark() {
  // Notify JVMTI that the tagmap table will need cleaning.
  JvmtiTagMap::set_needs_cleaning();

+  // Release all alloc regions before choosing cset.
+  heap->free_set()->release_alloc_regions_under_lock();
+
  // The collection set is chosen by prepare_regions_and_collection_set(). Additionally, certain parameters have been
  // established to govern the evacuation efforts that are about to begin. Refer to comments on reserve members in
  // ShenandoahGeneration and ShenandoahOldGeneration for more detail.

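Taken together with the previous hunk, this moves the release of alloc regions from the start of final mark to immediately before collection-set selection, presumably so that the collection set is chosen against up-to-date free-set accounting; the reservations later in final mark and in op_evacuate (below) then restore the regions released here.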
@@ -801,10 +801,7 @@ void ShenandoahConcurrentGC::op_final_mark() {

    {
      ShenandoahHeapLocker locker(heap->lock());
-      if (heap->is_evacuation_in_progress()) {
-        // Reserve alloc regions for evacuation.
-        heap->free_set()->collector_allocator()->reserve_alloc_regions();
-      }
+      // Free set has been re-built, reserve alloc regions for mutator
      heap->free_set()->mutator_allocator()->reserve_alloc_regions();
    }
  }

@@ -1103,6 +1100,12 @@ void ShenandoahConcurrentGC::op_cleanup_early() {
}

void ShenandoahConcurrentGC::op_evacuate() {
+  {
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    ShenandoahHeapLocker locker(heap->lock());
+    // Reserve alloc regions for evacuation.
+    heap->free_set()->collector_allocator()->reserve_alloc_regions();
+  }
  ShenandoahHeap::heap()->evacuate_collection_set(_generation, true /*concurrent*/);
}

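The collector-side reservation that the previous hunk drops from op_final_mark now happens here, immediately before evacuation. Note the extra brace scope: the heap lock is held only for the reservation, not across the long-running concurrent evacuation. A standalone sketch of that RAII lock-scoping pattern (stand-in names, not Shenandoah's real API):

#include <cstdio>
#include <mutex>

std::mutex heap_lock;   // stand-in for the Shenandoah heap lock

void reserve_alloc_regions()    { std::printf("collector regions reserved\n"); }
void evacuate_collection_set()  { std::printf("evacuating...\n"); }

void op_evacuate_sketch() {
  {
    // lock_guard releases the lock when this inner scope closes
    std::lock_guard<std::mutex> locker(heap_lock);
    reserve_alloc_regions();
  }
  evacuate_collection_set();  // runs without the heap lock held
}

int main() { op_evacuate_sketch(); }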
@@ -2484,8 +2484,11 @@ void ShenandoahHeap::final_update_refs_update_region_states() {
  parallel_heap_region_iterate(&cl);
}

-void ShenandoahHeap::rebuild_free_set_within_phase() {
+void ShenandoahHeap::rebuild_free_set_within_phase(const bool release_atomic_alloc_regions_first) {
  ShenandoahHeapLocker locker(lock());
+  if (release_atomic_alloc_regions_first) {
+    _free_set->release_alloc_regions();
+  }
  size_t young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count;
  _free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count);
  // If there are no old regions, first_old_region will be greater than last_old_region

@@ -2522,11 +2525,11 @@ void ShenandoahHeap::rebuild_free_set_within_phase() {
  }
}

-void ShenandoahHeap::rebuild_free_set(bool concurrent) {
+void ShenandoahHeap::rebuild_free_set(bool concurrent, bool release_atomic_alloc_regions_first) {
  ShenandoahGCPhase phase(concurrent ?
                          ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
                          ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
-  rebuild_free_set_within_phase();
+  rebuild_free_set_within_phase(release_atomic_alloc_regions_first);
}

bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {

@@ -482,8 +482,8 @@ private:
  void recycle_trash();
public:
  // The following two functions rebuild the free set at the end of GC, in preparation for an idle phase.
-  void rebuild_free_set(bool concurrent);
-  void rebuild_free_set_within_phase();
+  void rebuild_free_set(bool concurrent, bool release_atomic_alloc_regions_first = false);
+  void rebuild_free_set_within_phase(bool release_atomic_alloc_regions_first = false);
  void notify_gc_progress();
  void notify_gc_no_progress();
  size_t get_gc_no_progress_count() const;

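The defaulted flag keeps pre-existing call sites source-compatible; only the callers that need the release (entry_cleanup_early and ShenandoahOldGC::collect in this commit) pass true explicitly. A standalone sketch of the pattern, using hypothetical names:

#include <cstdio>

// A defaulted trailing parameter lets new behavior be opt-in while every
// existing one-argument call compiles unchanged.
void rebuild(bool concurrent, bool release_first = false) {
  std::printf("concurrent=%d release_first=%d\n", concurrent, release_first);
}

int main() {
  rebuild(true);        // pre-existing call sites: old behavior
  rebuild(true, true);  // updated callers opt in explicitly
}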
@@ -167,8 +167,6 @@ public:

HeapWord* ShenandoahHeapRegion::allocate_atomic(size_t size, const ShenandoahAllocRequest& req, bool &ready_for_retire) {
  assert(is_object_aligned(size), "alloc size breaks alignment: %zu", size);
-  assert(this->affiliation() == req.affiliation(), "Region affiliation should already be established");
-  assert(this->is_regular() || this->is_regular_pinned(), "must be a regular region");

  ShenandoahHeapRegionReadyForRetireChecker retire_checker(ready_for_retire);
  HeapWord* obj = atomic_top();

@@ -199,8 +197,6 @@ HeapWord* ShenandoahHeapRegion::allocate_atomic(size_t size, const ShenandoahAll

HeapWord* ShenandoahHeapRegion::allocate_lab_atomic(const ShenandoahAllocRequest& req, size_t &actual_size, bool &ready_for_retire) {
  assert(req.is_lab_alloc() && req.type() != ShenandoahAllocRequest::_alloc_plab, "Only tlab/gclab alloc");
-  assert(this->affiliation() == req.affiliation(), "Region affiliation should already be established");
-  assert(this->is_regular() || this->is_regular_pinned(), "must be a regular region");

  ShenandoahHeapRegionReadyForRetireChecker retire_checker(ready_for_retire);
  HeapWord* obj = atomic_top();

@@ -147,7 +147,7 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) {
  // After concurrent old marking finishes, we reclaim immediate garbage. Further, we may also want to expand OLD in order
  // to make room for anticipated promotions and/or for mixed evacuations. Mixed evacuations are especially likely to
  // follow the end of OLD marking.
-  heap->rebuild_free_set_within_phase();
+  heap->rebuild_free_set_within_phase(true/*release_atomic_alloc_regions_first*/);
  heap->free_set()->log_status_under_lock();
  return true;
}