8357471: GenShen: Share collector reserves between young and old

Reviewed-by: wkemper
Kelvin Nilsen 2026-01-22 21:28:57 +00:00
parent f3121d1023
commit d6ebcf8a4f
41 changed files with 1228 additions and 714 deletions

View File

@ -68,9 +68,9 @@ ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics(ShenandoahSpaceInfo*
ShenandoahAdaptiveHeuristics::~ShenandoahAdaptiveHeuristics() {}
void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
size_t ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
// The logic for cset selection in adaptive is as follows:
@ -124,6 +124,7 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand
cur_garbage = new_garbage;
}
}
return 0;
}
void ShenandoahAdaptiveHeuristics::record_cycle_start() {
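The central signature change of this patch is visible above: choose_collection_set_from_regiondata() now returns a size_t instead of void. Non-generational heuristics (adaptive, aggressive, compact, static, passive) simply return 0; generational heuristics return the number of regions that should be transferred from the young collector reserve to the old collector reserve. A minimal sketch of that contract, using hypothetical stand-in classes rather than the real Shenandoah types:

#include <cstddef>

// Hypothetical stand-ins illustrating only the void -> size_t contract.
struct DemoHeuristics {
  // Returns the number of regions to move from the young collector reserve to
  // the old collector reserve; non-generational heuristics return 0.
  virtual size_t choose_collection_set_from_regiondata() = 0;
  virtual ~DemoHeuristics() = default;
};

struct DemoAdaptiveHeuristics : DemoHeuristics {
  size_t choose_collection_set_from_regiondata() override {
    // ... admit regions whose garbage exceeds the garbage threshold ...
    return 0;                           // single-generation mode: nothing to rebalance
  }
};

struct DemoGlobalHeuristics : DemoHeuristics {
  size_t choose_collection_set_from_regiondata() override {
    size_t regions_to_move_to_old = 2;  // assumed outcome of cset selection
    return regions_to_move_to_old;      // caller rebalances the reserves accordingly
  }
};

int main() {
  DemoAdaptiveHeuristics adaptive;
  DemoGlobalHeuristics global;
  return (int) (adaptive.choose_collection_set_from_regiondata() +
                global.choose_collection_set_from_regiondata());   // 0 + 2
}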

View File

@ -108,9 +108,9 @@ public:
virtual ~ShenandoahAdaptiveHeuristics();
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
virtual void record_cycle_start() override;
virtual void record_success_concurrent() override;

View File

@ -39,15 +39,16 @@ ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics(ShenandoahSpaceIn
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahEvacReserveOverflow);
}
void ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free) {
size_t ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free) {
for (size_t idx = 0; idx < size; idx++) {
ShenandoahHeapRegion* r = data[idx].get_region();
if (r->garbage() > 0) {
cset->add_region(r);
}
}
return 0;
}
bool ShenandoahAggressiveHeuristics::should_start_gc() {

View File

@ -35,9 +35,9 @@ class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics {
public:
ShenandoahAggressiveHeuristics(ShenandoahSpaceInfo* space_info);
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free);
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free);
virtual bool should_start_gc();

View File

@ -76,9 +76,9 @@ bool ShenandoahCompactHeuristics::should_start_gc() {
return ShenandoahHeuristics::should_start_gc();
}
void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
size_t ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
// Do not select too large CSet that would overflow the available free space
size_t max_cset = actual_free * 3 / 4;
@ -97,4 +97,5 @@ void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(Shenando
cset->add_region(r);
}
}
return 0;
}

View File

@ -37,9 +37,9 @@ public:
virtual bool should_start_gc();
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free);
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free);
virtual const char* name() { return "Compact"; }
virtual bool is_diagnostic() { return false; }

View File

@ -37,7 +37,7 @@ ShenandoahGenerationalHeuristics::ShenandoahGenerationalHeuristics(ShenandoahGen
: ShenandoahAdaptiveHeuristics(generation), _generation(generation) {
}
void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
size_t ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
assert(collection_set->is_empty(), "Must be empty");
auto heap = ShenandoahGenerationalHeap::heap();
@ -168,16 +168,12 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio
byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage));
size_t immediate_percent = (total_garbage == 0) ? 0 : (immediate_garbage * 100 / total_garbage);
bool doing_promote_in_place = (humongous_regions_promoted + regular_regions_promoted_in_place > 0);
if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) {
// Only young collections need to prime the collection set.
if (_generation->is_young()) {
heap->old_generation()->heuristics()->prime_collection_set(collection_set);
}
size_t add_regions_to_old = 0;
if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) {
// Call the subclasses to add young-gen regions into the collection set.
choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
add_regions_to_old = choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
}
if (collection_set->has_old_regions()) {
@ -194,6 +190,7 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio
regular_regions_promoted_free,
immediate_regions,
immediate_garbage);
return add_regions_to_old;
}
@ -210,13 +207,6 @@ size_t ShenandoahGenerationalHeuristics::add_preselected_regions_to_collection_s
assert(ShenandoahGenerationalHeap::heap()->is_tenurable(r), "Preselected regions must have tenure age");
// Entire region will be promoted. This region does not impact young-gen or old-gen evacuation reserve.
// This region has been pre-selected and its impact on promotion reserve is already accounted for.
// r->used() is r->garbage() + r->get_live_data_bytes()
// Since all live data in this region is being evacuated from young-gen, it is as if this memory
// is garbage insofar as young-gen is concerned. Counting this as garbage reduces the need to
// reclaim highly utilized young-gen regions just for the sake of finding min_garbage to reclaim
// within young-gen memory.
cur_young_garbage += r->garbage();
cset->add_region(r);
}

View File

@ -44,7 +44,7 @@ class ShenandoahGenerationalHeuristics : public ShenandoahAdaptiveHeuristics {
public:
explicit ShenandoahGenerationalHeuristics(ShenandoahGeneration* generation);
void choose_collection_set(ShenandoahCollectionSet* collection_set) override;
size_t choose_collection_set(ShenandoahCollectionSet* collection_set) override;
protected:
ShenandoahGeneration* _generation;

View File

@ -24,6 +24,7 @@
*/
#include "gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
@ -35,13 +36,14 @@ ShenandoahGlobalHeuristics::ShenandoahGlobalHeuristics(ShenandoahGlobalGeneratio
}
void ShenandoahGlobalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
size_t ShenandoahGlobalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
// Better select garbage-first regions
QuickSort::sort<RegionData>(data, (int) size, compare_by_garbage);
choose_global_collection_set(cset, data, size, actual_free, 0 /* cur_young_garbage */);
return 0;
}
@ -49,94 +51,212 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti
const ShenandoahHeuristics::RegionData* data,
size_t size, size_t actual_free,
size_t cur_young_garbage) const {
shenandoah_assert_heaplocked_or_safepoint();
auto heap = ShenandoahGenerationalHeap::heap();
auto free_set = heap->free_set();
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
size_t capacity = heap->soft_max_capacity();
size_t garbage_threshold = region_size_bytes * ShenandoahGarbageThreshold / 100;
size_t ignore_threshold = region_size_bytes * ShenandoahIgnoreGarbageThreshold / 100;
size_t young_evac_reserve = heap->young_generation()->get_evacuation_reserve();
size_t original_young_evac_reserve = young_evac_reserve;
size_t old_evac_reserve = heap->old_generation()->get_evacuation_reserve();
size_t max_young_cset = (size_t) (young_evac_reserve / ShenandoahEvacWaste);
size_t young_cur_cset = 0;
size_t max_old_cset = (size_t) (old_evac_reserve / ShenandoahOldEvacWaste);
size_t old_cur_cset = 0;
size_t old_promo_reserve = heap->old_generation()->get_promoted_reserve();
// Figure out how many unaffiliated young regions are dedicated to mutator and to evacuator. Allow the young
// collector's unaffiliated regions to be transferred to old-gen if old-gen has more easily reclaimed garbage
// than young-gen. At the end of this cycle, any excess regions remaining in old-gen will be transferred back
// to young. Do not transfer the mutator's unaffiliated regions to old-gen. Those must remain available
// to the mutator as it needs to be able to consume this memory during concurrent GC.
size_t unaffiliated_young_regions = heap->young_generation()->free_unaffiliated_regions();
size_t unaffiliated_young_regions = free_set->collector_unaffiliated_regions();
size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
size_t unaffiliated_old_regions = free_set->old_collector_unaffiliated_regions();
size_t unaffiliated_old_memory = unaffiliated_old_regions * region_size_bytes;
if (unaffiliated_young_memory > max_young_cset) {
size_t unaffiliated_mutator_memory = unaffiliated_young_memory - max_young_cset;
unaffiliated_young_memory -= unaffiliated_mutator_memory;
unaffiliated_young_regions = unaffiliated_young_memory / region_size_bytes; // round down
unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
// Figure out how many unaffiliated regions are dedicated to Collector and OldCollector reserves. Let these
// be shuffled between young and old generations in order to expedite evacuation of whichever regions have the
// most garbage, regardless of whether these garbage-first regions reside in young or old generation.
// Excess reserves will be transferred back to the mutator after the collection set has been chosen. At the end
// of evacuation, any reserves not consumed by evacuation will also be transferred to the mutator free set.
// Truncate reserves to target only unaffiliated memory.
size_t shared_reserve_regions = 0;
if (young_evac_reserve > unaffiliated_young_memory) {
shared_reserve_regions += unaffiliated_young_regions;
} else {
size_t delta_regions = young_evac_reserve / region_size_bytes;
shared_reserve_regions += delta_regions;
}
young_evac_reserve = 0;
size_t total_old_reserve = old_evac_reserve + old_promo_reserve;
if (total_old_reserve > unaffiliated_old_memory) {
// Give all the unaffiliated memory to the shared reserves. Leave the rest for promo reserve.
shared_reserve_regions += unaffiliated_old_regions;
old_promo_reserve = total_old_reserve - unaffiliated_old_memory;
} else {
size_t delta_regions = old_evac_reserve / region_size_bytes;
shared_reserve_regions += delta_regions;
}
old_evac_reserve = 0;
assert(shared_reserve_regions <=
(heap->young_generation()->free_unaffiliated_regions() + heap->old_generation()->free_unaffiliated_regions()),
"simple math");
// We'll affiliate these unaffiliated regions with either old or young, depending on need.
max_young_cset -= unaffiliated_young_memory;
size_t shared_reserves = shared_reserve_regions * region_size_bytes;
size_t committed_from_shared_reserves = 0;
// Keep track of how many regions we plan to transfer from young to old.
size_t regions_transferred_to_old = 0;
size_t promo_bytes = 0;
size_t old_evac_bytes = 0;
size_t young_evac_bytes = 0;
size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_young_cset;
size_t consumed_by_promo = 0; // promo_bytes * ShenandoahPromoEvacWaste
size_t consumed_by_old_evac = 0; // old_evac_bytes * ShenandoahOldEvacWaste
size_t consumed_by_young_evac = 0; // young_evac_bytes * ShenandoahEvacWaste
// Of the memory reclaimed by GC, some of this will need to be reserved for the next GC collection. Use the current
// young reserve as an approximation of the future Collector reserve requirement. Try to end with at least
// (capacity * ShenandoahMinFreeThreshold) / 100 bytes available to the mutator.
size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + original_young_evac_reserve;
size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0;
log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Max Young Evacuation: %zu"
"%s, Max Old Evacuation: %zu%s, Max Either Evacuation: %zu%s, Actual Free: %zu%s.",
byte_size_in_proper_unit(max_young_cset), proper_unit_for_byte_size(max_young_cset),
byte_size_in_proper_unit(max_old_cset), proper_unit_for_byte_size(max_old_cset),
byte_size_in_proper_unit(unaffiliated_young_memory), proper_unit_for_byte_size(unaffiliated_young_memory),
byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free));
size_t aged_regions_promoted = 0;
size_t young_regions_evacuated = 0;
size_t old_regions_evacuated = 0;
log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Discretionary evacuation budget (for either old or young): %zu%s"
", Actual Free: %zu%s.",
byte_size_in_proper_unit(shared_reserves), proper_unit_for_byte_size(shared_reserves),
byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free));
size_t cur_garbage = cur_young_garbage;
for (size_t idx = 0; idx < size; idx++) {
ShenandoahHeapRegion* r = data[idx].get_region();
assert(!cset->is_preselected(r->index()), "There should be no preselected regions during GLOBAL GC");
bool add_region = false;
if (r->is_old() || heap->is_tenurable(r)) {
size_t new_cset = old_cur_cset + r->get_live_data_bytes();
if ((r->garbage() > garbage_threshold)) {
while ((new_cset > max_old_cset) && (unaffiliated_young_regions > 0)) {
unaffiliated_young_regions--;
regions_transferred_to_old++;
max_old_cset += region_size_bytes / ShenandoahOldEvacWaste;
size_t region_garbage = r->garbage();
size_t new_garbage = cur_garbage + region_garbage;
bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage);
size_t live_bytes = r->get_live_data_bytes();
if (add_regardless || (region_garbage >= garbage_threshold)) {
if (r->is_old()) {
size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahOldEvacWaste);
size_t new_old_consumption = consumed_by_old_evac + anticipated_consumption;
size_t new_old_evac_reserve = old_evac_reserve;
size_t proposed_old_region_expansion = 0;
while ((new_old_consumption > new_old_evac_reserve) && (committed_from_shared_reserves < shared_reserves)) {
committed_from_shared_reserves += region_size_bytes;
proposed_old_region_expansion++;
new_old_evac_reserve += region_size_bytes;
}
}
if ((new_cset <= max_old_cset) && (r->garbage() > garbage_threshold)) {
add_region = true;
old_cur_cset = new_cset;
}
} else {
assert(r->is_young() && !heap->is_tenurable(r), "DeMorgan's law (assuming r->is_affiliated)");
size_t new_cset = young_cur_cset + r->get_live_data_bytes();
size_t region_garbage = r->garbage();
size_t new_garbage = cur_young_garbage + region_garbage;
bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage);
if (add_regardless || (r->garbage() > garbage_threshold)) {
while ((new_cset > max_young_cset) && (unaffiliated_young_regions > 0)) {
unaffiliated_young_regions--;
max_young_cset += region_size_bytes / ShenandoahEvacWaste;
// If this region has free memory and we choose to place it in the collection set, its free memory is no longer
// available to hold promotion results. So we behave as if its free memory is consumed within the promotion reserve.
size_t anticipated_loss_from_promo_reserve = r->free();
size_t new_promo_consumption = consumed_by_promo + anticipated_loss_from_promo_reserve;
size_t new_promo_reserve = old_promo_reserve;
while ((new_promo_consumption > new_promo_reserve) && (committed_from_shared_reserves < shared_reserves)) {
committed_from_shared_reserves += region_size_bytes;
proposed_old_region_expansion++;
new_promo_reserve += region_size_bytes;
}
if ((new_old_consumption <= new_old_evac_reserve) && (new_promo_consumption <= new_promo_reserve)) {
add_region = true;
old_evac_reserve = new_old_evac_reserve;
old_promo_reserve = new_promo_reserve;
old_evac_bytes += live_bytes;
consumed_by_old_evac = new_old_consumption;
consumed_by_promo = new_promo_consumption;
cur_garbage = new_garbage;
old_regions_evacuated++;
} else {
// We failed to sufficiently expand old so unwind proposed expansion
committed_from_shared_reserves -= proposed_old_region_expansion * region_size_bytes;
}
} else if (heap->is_tenurable(r)) {
size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahPromoEvacWaste);
size_t new_promo_consumption = consumed_by_promo + anticipated_consumption;
size_t new_promo_reserve = old_promo_reserve;
size_t proposed_old_region_expansion = 0;
while ((new_promo_consumption > new_promo_reserve) && (committed_from_shared_reserves < shared_reserves)) {
committed_from_shared_reserves += region_size_bytes;
proposed_old_region_expansion++;
new_promo_reserve += region_size_bytes;
}
if (new_promo_consumption <= new_promo_reserve) {
add_region = true;
old_promo_reserve = new_promo_reserve;
promo_bytes += live_bytes;
consumed_by_promo = new_promo_consumption;
cur_garbage = new_garbage;
aged_regions_promoted++;
} else {
// We failed to sufficiently expand old so unwind proposed expansion
committed_from_shared_reserves -= proposed_old_region_expansion * region_size_bytes;
}
} else {
assert(r->is_young() && !heap->is_tenurable(r), "DeMorgan's law (assuming r->is_affiliated)");
size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahEvacWaste);
size_t new_young_evac_consumption = consumed_by_young_evac + anticipated_consumption;
size_t new_young_evac_reserve = young_evac_reserve;
size_t proposed_young_region_expansion = 0;
while ((new_young_evac_consumption > new_young_evac_reserve) && (committed_from_shared_reserves < shared_reserves)) {
committed_from_shared_reserves += region_size_bytes;
proposed_young_region_expansion++;
new_young_evac_reserve += region_size_bytes;
}
if (new_young_evac_consumption <= new_young_evac_reserve) {
add_region = true;
young_evac_reserve = new_young_evac_reserve;
young_evac_bytes += live_bytes;
consumed_by_young_evac = new_young_evac_consumption;
cur_garbage = new_garbage;
young_regions_evacuated++;
} else {
// We failed to sufficiently expand young, so unwind the proposed expansion
committed_from_shared_reserves -= proposed_young_region_expansion * region_size_bytes;
}
}
if ((new_cset <= max_young_cset) && (add_regardless || (region_garbage > garbage_threshold))) {
add_region = true;
young_cur_cset = new_cset;
cur_young_garbage = new_garbage;
}
}
if (add_region) {
cset->add_region(r);
}
}
if (regions_transferred_to_old > 0) {
assert(young_evac_reserve > regions_transferred_to_old * region_size_bytes, "young reserve cannot be negative");
heap->young_generation()->set_evacuation_reserve(young_evac_reserve - regions_transferred_to_old * region_size_bytes);
heap->old_generation()->set_evacuation_reserve(old_evac_reserve + regions_transferred_to_old * region_size_bytes);
if (committed_from_shared_reserves < shared_reserves) {
// Give all the rest to promotion
old_promo_reserve += (shared_reserves - committed_from_shared_reserves);
// dead code: committed_from_shared_reserves = shared_reserves;
}
// Consider the effects of round-off:
// 1. We know that the sum over each evacuation multiplied by Evacuation Waste is <= total evacuation reserve
// 2. However, the reserve for each individual evacuation may be rounded down. In the worst case, we will be over budget
// by the number of regions evacuated, since each region's reserve might be under-estimated by at most 1
// 3. Likewise, if we take the sum of bytes evacuated and multiply this by the Evacuation Waste and then round down
// to nearest integer, the calculated reserve will underestimate the true reserve needs by at most 1.
// 4. This explains the adjustments to subtotals in the assert statements below.
assert(young_evac_bytes * ShenandoahEvacWaste <= young_evac_reserve + young_regions_evacuated,
"budget: %zu <= %zu", (size_t) (young_evac_bytes * ShenandoahEvacWaste), young_evac_reserve);
assert(old_evac_bytes * ShenandoahOldEvacWaste <= old_evac_reserve + old_regions_evacuated,
"budget: %zu <= %zu", (size_t) (old_evac_bytes * ShenandoahOldEvacWaste), old_evac_reserve);
assert(promo_bytes * ShenandoahPromoEvacWaste <= old_promo_reserve + aged_regions_promoted,
"budget: %zu <= %zu", (size_t) (promo_bytes * ShenandoahPromoEvacWaste), old_promo_reserve);
assert(young_evac_reserve + old_evac_reserve + old_promo_reserve <=
heap->young_generation()->get_evacuation_reserve() + heap->old_generation()->get_evacuation_reserve() +
heap->old_generation()->get_promoted_reserve(), "Exceeded budget");
if (heap->young_generation()->get_evacuation_reserve() < young_evac_reserve) {
size_t delta_bytes = young_evac_reserve - heap->young_generation()->get_evacuation_reserve();
size_t delta_regions = delta_bytes / region_size_bytes;
size_t regions_to_transfer = MIN2(unaffiliated_old_regions, delta_regions);
log_info(gc)("Global GC moves %zu unaffiliated regions from old collector to young collector reserves", regions_to_transfer);
ssize_t negated_regions = -regions_to_transfer;
heap->free_set()->move_unaffiliated_regions_from_collector_to_old_collector(negated_regions);
} else if (heap->young_generation()->get_evacuation_reserve() > young_evac_reserve) {
size_t delta_bytes = heap->young_generation()->get_evacuation_reserve() - young_evac_reserve;
size_t delta_regions = delta_bytes / region_size_bytes;
size_t regions_to_transfer = MIN2(unaffiliated_young_regions, delta_regions);
log_info(gc)("Global GC moves %zu unaffiliated regions from young collector to old collector reserves", regions_to_transfer);
heap->free_set()->move_unaffiliated_regions_from_collector_to_old_collector(regions_to_transfer);
}
heap->young_generation()->set_evacuation_reserve(young_evac_reserve);
heap->old_generation()->set_evacuation_reserve(old_evac_reserve);
heap->old_generation()->set_promoted_reserve(old_promo_reserve);
}
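The loop above is the core of the global-GC change: instead of fixed per-generation budgets, the unaffiliated portions of the Collector and OldCollector reserves are pooled, and each candidate region draws regions from that pool to grow whichever budget it needs (young evacuation, old evacuation, or promotion), unwinding the draw if the region still does not fit. A worked sketch of one admission step; the region size and waste factor are illustrative assumptions, not values taken from the patch:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t region_size_bytes = 4u * 1024 * 1024;        // assumed 4 MB regions
  const double evac_waste = 1.2;                             // assumed young evacuation waste factor

  // Assumed pool: 6 unaffiliated young-reserve regions + 4 unaffiliated old-reserve regions.
  size_t shared_reserve_regions = 6 + 4;
  size_t shared_reserves = shared_reserve_regions * region_size_bytes;
  size_t committed_from_shared_reserves = 0;

  size_t young_evac_reserve = 0;                             // budgets start empty and grow on demand
  size_t consumed_by_young_evac = 0;

  // Try to admit one young region holding an assumed 3 MB of live data.
  size_t live_bytes = 3u * 1024 * 1024;
  size_t anticipated = (size_t) (live_bytes * evac_waste);   // ~3.6 MB including waste
  size_t proposed_expansion = 0;
  while (consumed_by_young_evac + anticipated > young_evac_reserve &&
         committed_from_shared_reserves < shared_reserves) {
    committed_from_shared_reserves += region_size_bytes;     // draw one region from the shared pool
    proposed_expansion++;
    young_evac_reserve += region_size_bytes;
  }
  if (consumed_by_young_evac + anticipated <= young_evac_reserve) {
    consumed_by_young_evac += anticipated;                   // region is added to the cset
  } else {
    committed_from_shared_reserves -= proposed_expansion * region_size_bytes;  // unwind the draw
  }
  printf("young reserve grew by %zu region(s); pool used: %zu of %zu bytes\n",
         proposed_expansion, committed_from_shared_reserves, shared_reserves);
  return 0;
}

Any pooled regions that are never committed are handed to the promotion reserve at the end, matching the "give all the rest to promotion" step above.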

View File

@ -39,9 +39,9 @@ class ShenandoahGlobalHeuristics : public ShenandoahGenerationalHeuristics {
public:
ShenandoahGlobalHeuristics(ShenandoahGlobalGeneration* generation);
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
private:
void choose_global_collection_set(ShenandoahCollectionSet* cset,

View File

@ -72,7 +72,7 @@ ShenandoahHeuristics::~ShenandoahHeuristics() {
FREE_C_HEAP_ARRAY(RegionGarbage, _region_data);
}
void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
size_t ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
assert(collection_set->is_empty(), "Must be empty");
@ -153,8 +153,8 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec
if (immediate_percent <= ShenandoahImmediateThreshold) {
choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
}
collection_set->summarize(total_garbage, immediate_garbage, immediate_regions);
return 0;
}
void ShenandoahHeuristics::record_cycle_start() {

View File

@ -183,9 +183,12 @@ protected:
static int compare_by_garbage(RegionData a, RegionData b);
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
RegionData* data, size_t data_size,
size_t free) = 0;
// This is a helper function to choose_collection_set(), returning the number of regions that need to be transferred to
// the old reserve from the young reserve in order to effectively evacuate the chosen collection set. In non-generational
// mode, the return value is 0.
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
RegionData* data, size_t data_size,
size_t free) = 0;
void adjust_penalty(intx step);
@ -233,7 +236,9 @@ public:
virtual void record_requested_gc();
virtual void choose_collection_set(ShenandoahCollectionSet* collection_set);
// Choose the collection set, returning the number of regions that need to be transferred to the old reserve from the young
// reserve in order to effectively evacuate the chosen collection set. In non-generational mode, the return value is 0.
virtual size_t choose_collection_set(ShenandoahCollectionSet* collection_set);
virtual bool can_unload_classes();

View File

@ -26,9 +26,11 @@
#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
#include "utilities/quickSort.hpp"
@ -77,15 +79,17 @@ ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* genera
}
bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
if (unprocessed_old_collection_candidates() == 0) {
return false;
}
_mixed_evac_cset = collection_set;
_included_old_regions = 0;
_evacuated_old_bytes = 0;
_collected_old_bytes = 0;
if (_old_generation->is_preparing_for_mark()) {
// We have unprocessed old collection candidates, but the heuristic has given up on evacuating them.
// This is most likely because they were _all_ pinned at the time of the last mixed evacuation (and
// this in turn is most likely because there are just one or two candidate regions remaining).
log_info(gc, ergo)("Remaining " UINT32_FORMAT " old regions are being coalesced and filled", unprocessed_old_collection_candidates());
log_info(gc, ergo)("Remaining " UINT32_FORMAT
" old regions are being coalesced and filled", unprocessed_old_collection_candidates());
return false;
}
@ -111,150 +115,44 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
// of memory that can still be evacuated. We address this by reducing the evacuation budget by the amount
// of live memory in that region and by the amount of unallocated memory in that region if the evacuation
// budget is constrained by availability of free memory.
const size_t old_evacuation_reserve = _old_generation->get_evacuation_reserve();
const size_t old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste);
size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
size_t fragmented_available;
size_t excess_fragmented_available;
_old_evacuation_reserve = _old_generation->get_evacuation_reserve();
_old_evacuation_budget = (size_t) ((double) _old_evacuation_reserve / ShenandoahOldEvacWaste);
if (unfragmented_available > old_evacuation_budget) {
unfragmented_available = old_evacuation_budget;
fragmented_available = 0;
excess_fragmented_available = 0;
// fragmented_available is the amount of memory within partially consumed old regions that may be required to
// hold the results of old evacuations. If all of the memory required by the old evacuation reserve is available
// in unfragmented regions (unaffiliated old regions), then fragmented_available is zero because we do not need
// to evacuate into the existing partially consumed old regions.
// If fragmented_available is non-zero, excess_fragmented_old_budget represents the amount of fragmented memory
// that is available within old, but is not required to hold the results of old evacuation. As old-gen regions
// are added into the collection set, their free memory is subtracted from excess_fragmented_old_budget until the
// excess is exhausted. For old-gen regions subsequently added to the collection set, their free memory is
// subtracted from fragmented_available and from the old_evacuation_budget (since the budget decreases when this
// fragmented_available memory decreases). After fragmented_available has been exhausted, any further old regions
// selected for the cset do not further decrease the old_evacuation_budget because all further evacuation is targeted
// to unfragmented regions.
size_t unaffiliated_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
if (unaffiliated_available > _old_evacuation_reserve) {
_unspent_unfragmented_old_budget = _old_evacuation_budget;
_unspent_fragmented_old_budget = 0;
_excess_fragmented_old_budget = 0;
} else {
assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available");
fragmented_available = _old_generation->available() - unfragmented_available;
assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up");
if (fragmented_available + unfragmented_available > old_evacuation_budget) {
excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget;
fragmented_available -= excess_fragmented_available;
assert(_old_generation->available() >= _old_evacuation_reserve, "Cannot reserve more than is available");
size_t affiliated_available = _old_generation->available() - unaffiliated_available;
assert(affiliated_available + unaffiliated_available >= _old_evacuation_reserve, "Budgets do not add up");
if (affiliated_available + unaffiliated_available > _old_evacuation_reserve) {
_excess_fragmented_old_budget = (affiliated_available + unaffiliated_available) - _old_evacuation_reserve;
affiliated_available -= _excess_fragmented_old_budget;
}
_unspent_fragmented_old_budget = (size_t) ((double) affiliated_available / ShenandoahOldEvacWaste);
_unspent_unfragmented_old_budget = (size_t) ((double) unaffiliated_available / ShenandoahOldEvacWaste);
}
size_t remaining_old_evacuation_budget = old_evacuation_budget;
log_debug(gc)("Choose old regions for mixed collection: old evacuation budget: %zu%s, candidates: %u",
byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
log_debug(gc)("Choose old regions for mixed collection: old evacuation budget: " PROPERFMT ", candidates: %u",
PROPERFMTARGS(_old_evacuation_budget),
unprocessed_old_collection_candidates());
size_t lost_evacuation_capacity = 0;
// The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
// concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
// Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to
// evacuate region N, then there is no need to even consider evacuating region N+1.
while (unprocessed_old_collection_candidates() > 0) {
// Old collection candidates are sorted in order of decreasing garbage contained therein.
ShenandoahHeapRegion* r = next_old_collection_candidate();
if (r == nullptr) {
break;
}
assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");
// If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
// to decrease the capacity of the fragmented memory by the scaled loss.
const size_t live_data_for_evacuation = r->get_live_data_bytes();
size_t lost_available = r->free();
if ((lost_available > 0) && (excess_fragmented_available > 0)) {
if (lost_available < excess_fragmented_available) {
excess_fragmented_available -= lost_available;
lost_evacuation_capacity -= lost_available;
lost_available = 0;
} else {
lost_available -= excess_fragmented_available;
lost_evacuation_capacity -= excess_fragmented_available;
excess_fragmented_available = 0;
}
}
size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste);
if ((lost_available > 0) && (fragmented_available > 0)) {
if (scaled_loss + live_data_for_evacuation < fragmented_available) {
fragmented_available -= scaled_loss;
scaled_loss = 0;
} else {
// We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother
// to decrement scaled_loss
}
}
if (scaled_loss > 0) {
// We were not able to account for the lost free memory within fragmented memory, so we need to take this
// allocation out of unfragmented memory. Unfragmented memory does not need to account for loss of free.
if (live_data_for_evacuation > unfragmented_available) {
// There is no room to evacuate this region or any that come after it within the candidates array.
log_debug(gc, cset)("Not enough unfragmented memory (%zu) to hold evacuees (%zu) from region: (%zu)",
unfragmented_available, live_data_for_evacuation, r->index());
break;
} else {
unfragmented_available -= live_data_for_evacuation;
}
} else {
// Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either
// fragmented or unfragmented available memory. Use up the fragmented memory budget first.
size_t evacuation_need = live_data_for_evacuation;
if (evacuation_need > fragmented_available) {
evacuation_need -= fragmented_available;
fragmented_available = 0;
} else {
fragmented_available -= evacuation_need;
evacuation_need = 0;
}
if (evacuation_need > unfragmented_available) {
// There is no room to evacuate this region or any that come after it within the candidates array.
log_debug(gc, cset)("Not enough unfragmented memory (%zu) to hold evacuees (%zu) from region: (%zu)",
unfragmented_available, live_data_for_evacuation, r->index());
break;
} else {
unfragmented_available -= evacuation_need;
// dead code: evacuation_need == 0;
}
}
collection_set->add_region(r);
included_old_regions++;
evacuated_old_bytes += live_data_for_evacuation;
collected_old_bytes += r->garbage();
consume_old_collection_candidate();
}
if (_first_pinned_candidate != NOT_FOUND) {
// Need to deal with pinned regions
slide_pinned_regions_to_front();
}
decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes);
if (included_old_regions > 0) {
log_info(gc, ergo)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " PROPERFMT ", reclaiming: " PROPERFMT ")",
included_old_regions, PROPERFMTARGS(evacuated_old_bytes), PROPERFMTARGS(collected_old_bytes));
}
if (unprocessed_old_collection_candidates() == 0) {
// We have added the last of our collection candidates to a mixed collection.
// Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate.
clear_triggers();
_old_generation->complete_mixed_evacuations();
} else if (included_old_regions == 0) {
// We have candidates, but none were included for evacuation - are they all pinned?
// or did we just not have enough room for any of them in this collection set?
// We don't want a region with a stuck pin to prevent subsequent old collections, so
// if they are all pinned we transition to a state that will allow us to make these uncollected
// (pinned) regions parsable.
if (all_candidates_are_pinned()) {
log_info(gc, ergo)("All candidate regions " UINT32_FORMAT " are pinned", unprocessed_old_collection_candidates());
_old_generation->abandon_mixed_evacuations();
} else {
log_info(gc, ergo)("No regions selected for mixed collection. "
"Old evacuation budget: " PROPERFMT ", Remaining evacuation budget: " PROPERFMT
", Lost capacity: " PROPERFMT
", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
PROPERFMTARGS(old_evacuation_reserve),
PROPERFMTARGS(remaining_old_evacuation_budget),
PROPERFMTARGS(lost_evacuation_capacity),
_next_old_collection_candidate, _last_old_collection_candidate);
}
}
return (included_old_regions > 0);
return add_old_regions_to_cset();
}
bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
@ -328,6 +226,187 @@ void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
_next_old_collection_candidate = write_index + 1;
}
bool ShenandoahOldHeuristics::add_old_regions_to_cset() {
if (unprocessed_old_collection_candidates() == 0) {
return false;
}
_first_pinned_candidate = NOT_FOUND;
// The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
// concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
// Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to
// evacuate region N, then there is no need to even consider evacuating region N+1.
while (unprocessed_old_collection_candidates() > 0) {
// Old collection candidates are sorted in order of decreasing garbage contained therein.
ShenandoahHeapRegion* r = next_old_collection_candidate();
if (r == nullptr) {
break;
}
assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");
// If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
// to decrease the capacity of the fragmented memory by the scaled loss.
const size_t live_data_for_evacuation = r->get_live_data_bytes();
size_t lost_available = r->free();
ssize_t fragmented_delta = 0;
ssize_t unfragmented_delta = 0;
ssize_t excess_delta = 0;
// We must decrease our mixed-evacuation budgets proportional to the lost available memory. This memory that is no
// longer available was likely "promised" to promotions, so we must decrease our mixed evacuations now.
// (e.g. if we lose 14 bytes of available old memory, we must decrease the evacuation budget by 10 bytes.)
size_t scaled_loss = (size_t) (((double) lost_available) / ShenandoahOldEvacWaste);
if (lost_available > 0) {
// We need to subtract lost_available from our working evacuation budgets
if (scaled_loss < _excess_fragmented_old_budget) {
excess_delta -= scaled_loss;
_excess_fragmented_old_budget -= scaled_loss;
} else {
excess_delta -= _excess_fragmented_old_budget;
_excess_fragmented_old_budget = 0;
}
if (scaled_loss < _unspent_fragmented_old_budget) {
_unspent_fragmented_old_budget -= scaled_loss;
fragmented_delta = -scaled_loss;
scaled_loss = 0;
} else {
scaled_loss -= _unspent_fragmented_old_budget;
fragmented_delta = -_unspent_fragmented_old_budget;
_unspent_fragmented_old_budget = 0;
}
if (scaled_loss < _unspent_unfragmented_old_budget) {
_unspent_unfragmented_old_budget -= scaled_loss;
unfragmented_delta = -scaled_loss;
scaled_loss = 0;
} else {
scaled_loss -= _unspent_unfragmented_old_budget;
unfragmented_delta = -_unspent_unfragmented_old_budget;
_unspent_unfragmented_old_budget = 0;
}
}
// Allocate replica from unfragmented memory if that exists
size_t evacuation_need = live_data_for_evacuation;
if (evacuation_need < _unspent_unfragmented_old_budget) {
_unspent_unfragmented_old_budget -= evacuation_need;
} else {
if (_unspent_unfragmented_old_budget > 0) {
evacuation_need -= _unspent_unfragmented_old_budget;
unfragmented_delta -= _unspent_unfragmented_old_budget;
_unspent_unfragmented_old_budget = 0;
}
// Take the remaining allocation out of fragmented available
if (_unspent_fragmented_old_budget > evacuation_need) {
_unspent_fragmented_old_budget -= evacuation_need;
} else {
// We cannot add this region into the collection set. We're done. Undo the adjustments to available.
_unspent_fragmented_old_budget -= fragmented_delta;
_unspent_unfragmented_old_budget -= unfragmented_delta;
_excess_fragmented_old_budget -= excess_delta;
break;
}
}
_mixed_evac_cset->add_region(r);
_included_old_regions++;
_evacuated_old_bytes += live_data_for_evacuation;
_collected_old_bytes += r->garbage();
consume_old_collection_candidate();
}
return true;
}
bool ShenandoahOldHeuristics::finalize_mixed_evacs() {
if (_first_pinned_candidate != NOT_FOUND) {
// Need to deal with pinned regions
slide_pinned_regions_to_front();
}
decrease_unprocessed_old_collection_candidates_live_memory(_evacuated_old_bytes);
if (_included_old_regions > 0) {
log_info(gc)("Old-gen mixed evac (%zu regions, evacuating %zu%s, reclaiming: %zu%s)",
_included_old_regions,
byte_size_in_proper_unit(_evacuated_old_bytes), proper_unit_for_byte_size(_evacuated_old_bytes),
byte_size_in_proper_unit(_collected_old_bytes), proper_unit_for_byte_size(_collected_old_bytes));
}
if (unprocessed_old_collection_candidates() == 0) {
// We have added the last of our collection candidates to a mixed collection.
// Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate.
clear_triggers();
_old_generation->complete_mixed_evacuations();
} else if (_included_old_regions == 0) {
// We have candidates, but none were included for evacuation - are they all pinned?
// or did we just not have enough room for any of them in this collection set?
// We don't want a region with a stuck pin to prevent subsequent old collections, so
// if they are all pinned we transition to a state that will allow us to make these uncollected
// (pinned) regions parsable.
if (all_candidates_are_pinned()) {
log_info(gc)("All candidate regions " UINT32_FORMAT " are pinned", unprocessed_old_collection_candidates());
_old_generation->abandon_mixed_evacuations();
} else {
log_info(gc)("No regions selected for mixed collection. "
"Old evacuation budget: " PROPERFMT ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
PROPERFMTARGS(_old_evacuation_reserve),
_next_old_collection_candidate, _last_old_collection_candidate);
}
}
return (_included_old_regions > 0);
}
bool ShenandoahOldHeuristics::top_off_collection_set(size_t &add_regions_to_old) {
if (unprocessed_old_collection_candidates() == 0) {
add_regions_to_old = 0;
return false;
} else {
ShenandoahYoungGeneration* young_generation = _heap->young_generation();
size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();
size_t max_young_cset = young_generation->get_evacuation_reserve();
// We have budgeted to assure the live_bytes_in_tenurable_regions() get evacuated into old generation. The young
// reserve is only for untenurable region evacuations.
size_t planned_young_evac = _mixed_evac_cset->get_live_bytes_in_untenurable_regions();
size_t consumed_from_young_cset = (size_t) (planned_young_evac * ShenandoahEvacWaste);
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
size_t regions_required_for_collector_reserve = (consumed_from_young_cset + region_size_bytes - 1) / region_size_bytes;
assert(consumed_from_young_cset <= max_young_cset, "sanity");
assert(max_young_cset <= young_unaffiliated_regions * region_size_bytes, "sanity");
size_t regions_for_old_expansion;
if (consumed_from_young_cset < max_young_cset) {
size_t excess_young_reserves = max_young_cset - consumed_from_young_cset;
// We can only transfer empty regions from young to old. Furthermore, we must be careful to assure that the young
// Collector reserve that remains after transfer is comprised entirely of empty (unaffiliated) regions.
size_t consumed_unaffiliated_regions = (consumed_from_young_cset + region_size_bytes - 1) / region_size_bytes;
size_t available_unaffiliated_regions = ((young_unaffiliated_regions > consumed_unaffiliated_regions)?
young_unaffiliated_regions - consumed_unaffiliated_regions: 0);
regions_for_old_expansion = MIN2(available_unaffiliated_regions, excess_young_reserves / region_size_bytes);
} else {
regions_for_old_expansion = 0;
}
if (regions_for_old_expansion > 0) {
log_info(gc)("Augmenting old-gen evacuation budget from unexpended young-generation reserve by %zu regions",
regions_for_old_expansion);
add_regions_to_old = regions_for_old_expansion;
size_t budget_supplement = region_size_bytes * regions_for_old_expansion;
size_t supplement_without_waste = (size_t) (((double) budget_supplement) / ShenandoahOldEvacWaste);
_old_evacuation_budget += supplement_without_waste;
_unspent_unfragmented_old_budget += supplement_without_waste;
_old_generation->augment_evacuation_reserve(budget_supplement);
young_generation->set_evacuation_reserve(max_young_cset - budget_supplement);
return add_old_regions_to_cset();
} else {
add_regions_to_old = 0;
return false;
}
}
}
void ShenandoahOldHeuristics::prepare_for_old_collections() {
ShenandoahHeap* heap = ShenandoahHeap::heap();
@ -336,7 +415,6 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
size_t immediate_garbage = 0;
size_t immediate_regions = 0;
size_t live_data = 0;
RegionData* candidates = _region_data;
for (size_t i = 0; i < num_regions; i++) {
ShenandoahHeapRegion* region = heap->get_region(i);
@ -355,10 +433,10 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
// else, regions that were promoted in place had 0 old live data at mark start
if (region->is_regular() || region->is_regular_pinned()) {
// Only place regular or pinned regions with live data into the candidate set.
// Pinned regions cannot be evacuated, but we are not actually choosing candidates
// for the collection set here. That happens later during the next young GC cycle,
// by which time, the pinned region may no longer be pinned.
if (!region->has_live()) {
assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
region->make_trash_immediate();
@ -561,6 +639,7 @@ unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(Shenandoa
void ShenandoahOldHeuristics::abandon_collection_candidates() {
_last_old_collection_candidate = 0;
_next_old_collection_candidate = 0;
_live_bytes_in_unprocessed_candidates = 0;
_last_old_region = 0;
}
@ -805,8 +884,9 @@ bool ShenandoahOldHeuristics::is_experimental() {
return true;
}
void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
ShenandoahHeuristics::RegionData* data,
size_t data_size, size_t free) {
size_t ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
ShenandoahHeuristics::RegionData* data,
size_t data_size, size_t free) {
ShouldNotReachHere();
return 0;
}
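top_off_collection_set() above converts young reserve that went unused during young cset selection into additional old evacuation budget, but only in units of whole unaffiliated regions so that the remaining young reserve stays backed by empty regions. A worked sketch of that arithmetic with assumed sizes and waste factors:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t region_size_bytes = 4u * 1024 * 1024;   // assumed 4 MB regions
  const double old_evac_waste = 1.4;                    // assumed ShenandoahOldEvacWaste-like factor

  // Assumed state after young cset selection.
  size_t young_unaffiliated_regions = 8;
  size_t max_young_cset = 8u * region_size_bytes;                        // young evacuation reserve: 32 MB
  size_t planned_young_evac = 10u * 1024 * 1024;                         // live bytes chosen for young evac
  size_t consumed_from_young_cset = (size_t) (planned_young_evac * 1.2); // with an assumed young waste factor

  size_t excess_young_reserves = max_young_cset - consumed_from_young_cset;
  // Only whole, empty (unaffiliated) regions can migrate to the old reserve.
  size_t consumed_unaffiliated = (consumed_from_young_cset + region_size_bytes - 1) / region_size_bytes;
  size_t available_unaffiliated = young_unaffiliated_regions > consumed_unaffiliated
                                  ? young_unaffiliated_regions - consumed_unaffiliated : 0;
  size_t regions_for_old_expansion = std::min(available_unaffiliated,
                                              excess_young_reserves / region_size_bytes);

  size_t budget_supplement = regions_for_old_expansion * region_size_bytes;
  size_t supplement_without_waste = (size_t) (budget_supplement / old_evac_waste);
  printf("transfer %zu region(s) to old: +%zu bytes of reserve, +%zu bytes of evacuable live data\n",
         regions_for_old_expansion, budget_supplement, supplement_without_waste);
  return 0;
}

The resulting region count becomes add_regions_to_old in ShenandoahYoungHeuristics, which passes it up to its caller.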

View File

@ -102,6 +102,30 @@ private:
size_t _fragmentation_first_old_region;
size_t _fragmentation_last_old_region;
// State variables involved in construction of a mixed-evacuation collection set. These variables are initialized
// when client code invokes prime_collection_set(). They are consulted, and sometimes modified, when client code
// calls top_off_collection_set() to possibly expand the number of old-gen regions in a mixed evacuation cset, and by
// finalize_mixed_evacs(), which prepares the way for mixed evacuations to begin.
ShenandoahCollectionSet* _mixed_evac_cset;
size_t _evacuated_old_bytes;
size_t _collected_old_bytes;
size_t _included_old_regions;
size_t _old_evacuation_reserve;
size_t _old_evacuation_budget;
// This represents the amount of memory that can be evacuated from old into initially empty regions during a mixed evacuation.
// This is the total amount of unfragmented free memory in old divided by ShenandoahOldEvacWaste.
size_t _unspent_unfragmented_old_budget;
// This represents the amount of memory that can be evacuated from old into initially non-empty regions during a mixed
// evacuation. This is the total amount of initially fragmented free memory in old divided by ShenandoahOldEvacWaste.
size_t _unspent_fragmented_old_budget;
// If there is more available memory in old than is required by the intended mixed evacuation, the amount of excess
// memory is represented by _excess_fragmented_old_budget. To convert this value into a promotion budget, multiply by
// ShenandoahOldEvacWaste and divide by ShenandoahPromoEvacWaste.
size_t _excess_fragmented_old_budget;
// The value of command-line argument ShenandoahOldGarbageThreshold represents the percent of garbage that must
// be present within an old-generation region before that region is considered a good candidate for inclusion in
// the collection set under normal circumstances. For our purposes, normal circumstances are when the memory consumed
@ -131,7 +155,15 @@ private:
void set_trigger_if_old_is_overgrown();
protected:
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override;
size_t
choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override;
// This internal helper routine adds as many mixed evacuation candidate regions as fit within the old-gen evacuation budget
// to the collection set. This may be called twice to prepare for any given mixed evacuation cycle, the first time with
// a conservative old evacuation budget, and the second time with a larger, more aggressive old evacuation budget. Returns
// true iff we need to finalize mixed evacs. (If no regions are added to the collection set, there is no need to finalize
// mixed evacuations.)
bool add_old_regions_to_cset();
public:
explicit ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap);
@ -139,8 +171,22 @@ public:
// Prepare for evacuation of old-gen regions by capturing the mark results of a recently completed concurrent mark pass.
void prepare_for_old_collections();
// Return true iff the collection set is primed with at least one old-gen region.
bool prime_collection_set(ShenandoahCollectionSet* set);
// Initializes instance variables to support the preparation of a mixed-evacuation collection set. Adds as many
// old candidate regions into the collection set as can fit within the initial, conservative old evacuation budget.
// Returns true iff we need to finalize mixed evacs.
bool prime_collection_set(ShenandoahCollectionSet* collection_set);
// If young evacuation did not consume all of its available evacuation reserve, add as many additional mixed-
// evacuation candidate regions into the collection set as will fit within this repurposed excess reserve.
// Returns true iff we need to finalize mixed evacs. Upon return, the out parameter add_regions_to_old holds the
// number of regions to transfer from young to old.
bool top_off_collection_set(size_t &add_regions_to_old);
// Having added all eligible mixed-evacuation candidates to the collection set, this function updates the total count
// of how much old-gen memory remains to be evacuated and adjusts the representation of old-gen regions that remain to
// be evacuated, giving special attention to regions that are currently pinned. It outputs relevant log messages and
// returns true iff the collection set holds at least one unpinned mixed evacuation candidate.
bool finalize_mixed_evacs();
// How many old-collection candidates have not yet been processed?
uint unprocessed_old_collection_candidates() const;
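The three budget fields documented above are carved out of the old generation's available memory when the collection set is primed: completely empty (unaffiliated) regions back the unfragmented budget, free space inside partially used regions backs the fragmented budget, and anything beyond the evacuation reserve becomes the excess-fragmented budget. A worked sketch of the split, using assumed sizes and an assumed ShenandoahOldEvacWaste-like factor:

#include <cstddef>
#include <cstdio>

int main() {
  const double old_evac_waste = 1.4;                       // assumed waste factor
  const size_t region_size_bytes = 4u * 1024 * 1024;       // assumed 4 MB regions

  size_t old_evacuation_reserve = 6u * region_size_bytes;  // assumed 24 MB reserve
  size_t unaffiliated_available = 4u * region_size_bytes;  // 16 MB in empty old regions
  size_t old_available = 9u * region_size_bytes;           // 36 MB available in old overall

  size_t unspent_unfragmented = 0, unspent_fragmented = 0, excess_fragmented = 0;
  if (unaffiliated_available > old_evacuation_reserve) {
    // The whole reserve fits in empty regions; fragmented space is not needed.
    unspent_unfragmented = (size_t) (old_evacuation_reserve / old_evac_waste);
  } else {
    size_t affiliated_available = old_available - unaffiliated_available;                 // 20 MB
    if (affiliated_available + unaffiliated_available > old_evacuation_reserve) {
      excess_fragmented = (affiliated_available + unaffiliated_available) - old_evacuation_reserve;  // 12 MB
      affiliated_available -= excess_fragmented;                                          // 8 MB kept for evacuation
    }
    unspent_fragmented   = (size_t) (affiliated_available / old_evac_waste);    // ~5.7 MB of evacuable live data
    unspent_unfragmented = (size_t) (unaffiliated_available / old_evac_waste);  // ~11.4 MB of evacuable live data
  }
  printf("unfragmented: %zu, fragmented: %zu, excess: %zu bytes\n",
         unspent_unfragmented, unspent_fragmented, excess_fragmented);
  return 0;
}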

View File

@ -50,9 +50,9 @@ bool ShenandoahPassiveHeuristics::should_degenerate_cycle() {
return ShenandoahDegeneratedGC;
}
void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
size_t ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
assert(ShenandoahDegeneratedGC, "This path is only taken for Degenerated GC");
// Do not select too large CSet that would overflow the available free space.
@ -76,4 +76,5 @@ void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(Shenando
cset->add_region(r);
}
}
return 0;
}

View File

@ -46,9 +46,9 @@ public:
virtual bool should_degenerate_cycle();
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
RegionData* data, size_t data_size,
size_t free);
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
RegionData* data, size_t data_size,
size_t free);
virtual const char* name() { return "Passive"; }
virtual bool is_diagnostic() { return true; }

View File

@ -59,9 +59,9 @@ bool ShenandoahStaticHeuristics::should_start_gc() {
return ShenandoahHeuristics::should_start_gc();
}
void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free) {
size_t ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free) {
size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
for (size_t idx = 0; idx < size; idx++) {
@ -70,4 +70,5 @@ void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(Shenandoa
cset->add_region(r);
}
}
return 0;
}

View File

@ -40,9 +40,9 @@ public:
virtual bool should_start_gc();
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free);
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free);
virtual const char* name() { return "Static"; }
virtual bool is_diagnostic() { return false; }

View File

@ -33,11 +33,11 @@
#include "utilities/quickSort.hpp"
ShenandoahYoungHeuristics::ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation)
: ShenandoahGenerationalHeuristics(generation) {
: ShenandoahGenerationalHeuristics(generation) {
}
void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
size_t ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
// See comments in ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata():
@ -48,6 +48,8 @@ void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenandoah
// array before younger regions that typically contain more garbage. This is one reason why,
// for example, we continue examining regions even after rejecting a region that has
// more live data than we can evacuate.
ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
bool need_to_finalize_mixed = heap->old_generation()->heuristics()->prime_collection_set(cset);
// Better select garbage-first regions
QuickSort::sort<RegionData>(data, (int) size, compare_by_garbage);
@ -55,6 +57,17 @@ void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenandoah
size_t cur_young_garbage = add_preselected_regions_to_collection_set(cset, data, size);
choose_young_collection_set(cset, data, size, actual_free, cur_young_garbage);
// Especially when young-gen trigger is expedited in order to finish mixed evacuations, there may not be
// enough consolidated garbage to make effective use of young-gen evacuation reserve. If there is still
// young-gen reserve available following selection of the young-gen collection set, see if we can use
// this memory to expand the old-gen evacuation collection set.
size_t add_regions_to_old;
need_to_finalize_mixed |= heap->old_generation()->heuristics()->top_off_collection_set(add_regions_to_old);
if (need_to_finalize_mixed) {
heap->old_generation()->heuristics()->finalize_mixed_evacs();
}
return add_regions_to_old;
}
void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollectionSet* cset,
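Young collection-set selection above now drives the whole mixed-evacuation protocol: prime the cset from old candidates, pick the young portion, top off old from any leftover young reserve, then finalize. A compact sketch of that ordering; OldHeur and Cset are hypothetical stand-ins for ShenandoahOldHeuristics and ShenandoahCollectionSet:

#include <cstddef>

struct Cset {};
struct OldHeur {
  bool prime_collection_set(Cset*)             { return true; }  // seed cset under the conservative budget
  bool top_off_collection_set(size_t& to_old)  { to_old = 1; return true; }
  bool finalize_mixed_evacs()                  { return true; }
};

size_t choose_young_cset(OldHeur& old_heur, Cset* cset) {
  bool need_to_finalize_mixed = old_heur.prime_collection_set(cset);
  // ... sort young candidates by garbage and choose the young portion of the cset ...
  size_t add_regions_to_old = 0;
  need_to_finalize_mixed |= old_heur.top_off_collection_set(add_regions_to_old);
  if (need_to_finalize_mixed) {
    old_heur.finalize_mixed_evacs();
  }
  return add_regions_to_old;   // regions the caller moves from the young to the old reserve
}

int main() {
  OldHeur old_heur;
  Cset cset;
  return (int) choose_young_cset(old_heur, &cset);  // 1 in this toy setup
}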

View File

@ -38,9 +38,9 @@ public:
explicit ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation);
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
bool should_start_gc() override;

View File

@ -50,6 +50,8 @@ ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedS
_region_count(0),
_old_garbage(0),
_preselected_regions(nullptr),
_young_available_bytes_collected(0),
_old_available_bytes_collected(0),
_current_index(0) {
// The collection set map is reserved to cover the entire heap *and* zero addresses.
@ -104,6 +106,7 @@ void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
}
} else if (r->is_old()) {
_old_bytes_to_evacuate += live;
_old_available_bytes_collected += free;
_old_garbage += garbage;
}
@ -140,6 +143,7 @@ void ShenandoahCollectionSet::clear() {
_old_bytes_to_evacuate = 0;
_young_available_bytes_collected = 0;
_old_available_bytes_collected = 0;
_has_old_regions = false;
}

View File

@ -75,6 +75,10 @@ private:
// should be subtracted from what's available.
size_t _young_available_bytes_collected;
// When a region having memory available to be allocated is added to the collection set, the region's available memory
// should be subtracted from what's available.
size_t _old_available_bytes_collected;
shenandoah_padding(0);
volatile size_t _current_index;
shenandoah_padding(1);
@ -121,6 +125,9 @@ public:
// Returns the amount of free bytes in young regions in the collection set.
size_t get_young_available_bytes_collected() const { return _young_available_bytes_collected; }
// Returns the amount of free bytes in old regions in the collection set.
size_t get_old_available_bytes_collected() const { return _old_available_bytes_collected; }
// Returns the amount of garbage in old regions in the collection set.
inline size_t get_old_garbage() const;

View File

@ -204,9 +204,8 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
return false;
}
entry_concurrent_update_refs_prepare(heap);
// Perform update-refs phase.
entry_concurrent_update_refs_prepare(heap);
if (ShenandoahVerify) {
vmop_entry_init_update_refs();
}
@ -227,6 +226,7 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
// Update references freed up collection set, kick the cleanup to reclaim the space.
entry_cleanup_complete();
} else {
_abbreviated = true;
if (!entry_final_roots()) {
assert(_degen_point != _degenerated_unset, "Need to know where to start degenerated cycle");
return false;
@ -235,7 +235,6 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
if (VerifyAfterGC) {
vmop_entry_verify_final_roots();
}
_abbreviated = true;
}
// We defer generation resizing actions until after cset regions have been recycled. We do this even following an
@ -282,7 +281,6 @@ bool ShenandoahConcurrentGC::complete_abbreviated_cycle() {
return true;
}
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
ShenandoahHeap* const heap = ShenandoahHeap::heap();
TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
@ -536,6 +534,12 @@ void ShenandoahConcurrentGC::entry_cleanup_early() {
// This phase does not use workers, no need for setup
heap->try_inject_alloc_failure();
op_cleanup_early();
if (!heap->is_evacuation_in_progress()) {
// This is an abbreviated cycle. Rebuild the freeset in order to establish reserves for the next GC cycle. Doing
// the rebuild ASAP also expedites availability of immediate trash, reducing the likelihood that we will degenerate
// during promote-in-place processing.
heap->rebuild_free_set(true /*concurrent*/);
}
}
void ShenandoahConcurrentGC::entry_evacuate() {

View File

@ -326,7 +326,7 @@ void ShenandoahRegionPartitions::initialize_old_collector() {
}
void ShenandoahRegionPartitions::make_all_regions_unavailable() {
shenandoah_assert_heaplocked();
shenandoah_assert_heaplocked_or_safepoint();
for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
_membership[partition_id].clear_all();
_leftmosts[partition_id] = _max;
@ -439,6 +439,13 @@ void ShenandoahRegionPartitions::set_capacity_of(ShenandoahFreeSetPartitionId wh
_available[int(which_partition)] = value - _used[int(which_partition)];
}
void ShenandoahRegionPartitions::set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
shenandoah_assert_heaplocked();
assert (which_partition < NumPartitions, "selected free set must be valid");
_used[int(which_partition)] = value;
_available[int(which_partition)] = _capacity[int(which_partition)] - value;
}
void ShenandoahRegionPartitions::increase_capacity(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
shenandoah_assert_heaplocked();
@ -900,7 +907,7 @@ idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId w
#ifdef ASSERT
void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
void ShenandoahRegionPartitions::assert_bounds() {
size_t capacities[UIntNumPartitions];
size_t used[UIntNumPartitions];
@ -936,7 +943,7 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
switch (partition) {
case ShenandoahFreeSetPartitionId::NotFree:
{
assert(!validate_totals || (capacity != _region_size_bytes), "Should not be retired if empty");
assert(capacity != _region_size_bytes, "Should not be retired if empty");
ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i);
if (r->is_humongous()) {
if (r->is_old()) {
@ -976,12 +983,12 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
case ShenandoahFreeSetPartitionId::Collector:
case ShenandoahFreeSetPartitionId::OldCollector:
{
ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i);
assert(capacity > 0, "free regions must have allocation capacity");
bool is_empty = (capacity == _region_size_bytes);
regions[int(partition)]++;
used[int(partition)] += _region_size_bytes - capacity;
capacities[int(partition)] += _region_size_bytes;
if (i < leftmosts[int(partition)]) {
leftmosts[int(partition)] = i;
}
@ -1020,20 +1027,20 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
idx_t beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
idx_t end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Mutator),
"Mutator free regions before the leftmost: %zd, bound %zd",
"Mutator free region before the leftmost: %zd, bound %zd",
beg_off, leftmost(ShenandoahFreeSetPartitionId::Mutator));
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Mutator),
"Mutator free regions past the rightmost: %zd, bound %zd",
"Mutator free region past the rightmost: %zd, bound %zd",
end_off, rightmost(ShenandoahFreeSetPartitionId::Mutator));
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
assert (beg_off >= leftmost_empty(ShenandoahFreeSetPartitionId::Mutator),
"Mutator free empty regions before the leftmost: %zd, bound %zd",
beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Mutator));
assert (end_off <= rightmost_empty(ShenandoahFreeSetPartitionId::Mutator),
"Mutator free empty regions past the rightmost: %zd, bound %zd",
end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
"free empty region (%zd) before the leftmost bound %zd",
beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)]);
assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
"free empty region (%zd) past the rightmost bound %zd",
end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)]);
// Performance invariants. Failing these would not break the free partition, but performance would suffer.
assert (leftmost(ShenandoahFreeSetPartitionId::Collector) <= _max, "leftmost in bounds: %zd < %zd",
@ -1053,20 +1060,20 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
"Collector free regions before the leftmost: %zd, bound %zd",
"Collector free region before the leftmost: %zd, bound %zd",
beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
"Collector free regions past the rightmost: %zd, bound %zd",
"Collector free region past the rightmost: %zd, bound %zd",
end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector free empty regions before the leftmost: %zd, bound %zd",
beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
"Collector free empty region before the leftmost: %zd, bound %zd",
beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);
assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector free empty regions past the rightmost: %zd, bound %zd",
end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
"Collector free empty region past the rightmost: %zd, bound %zd",
end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);
// Performance invariants. Failing these would not break the free partition, but performance would suffer.
assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "OldCollector leftmost in bounds: %zd < %zd",
@ -1083,106 +1090,109 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
ShenandoahFreeSetPartitionId::OldCollector),
"OldCollector rightmost region should be free: %zd", rightmost(ShenandoahFreeSetPartitionId::OldCollector));
// Concurrent recycling of trash recycles a region (changing its state from is_trash to is_empty without the heap lock),
// If OldCollector partition is empty, leftmosts will both equal max, rightmosts will both equal zero.
// Likewise for empty region partitions.
beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
end_off = rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector),
"OldCollector free regions before the leftmost: %zd, bound %zd",
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector), "free regions before the leftmost: %zd, bound %zd",
beg_off, leftmost(ShenandoahFreeSetPartitionId::OldCollector));
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector),
"OldCollector free regions past the rightmost: %zd, bound %zd",
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector), "free regions past the rightmost: %zd, bound %zd",
end_off, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
"OldCollector free empty regions before the leftmost: %zd, bound %zd",
beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
"free empty region (%zd) before the leftmost bound %zd, region %s trash",
beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
((beg_off >= _max)? "out of bounds is not":
(ShenandoahHeap::heap()->get_region(_leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)])->is_trash()?
"is": "is not")));
assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
"OldCollector free empty regions past the rightmost: %zd, bound %zd",
end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
"free empty region (%zd) past the rightmost bound %zd, region %s trash",
end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
((end_off < 0)? "out of bounds is not" :
(ShenandoahHeap::heap()->get_region(_rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)])->is_trash()?
"is": "is not")));
if (validate_totals) {
// young_retired_regions need to be added to either Mutator or Collector partitions, 100% used.
// Give enough of young_retired_regions, young_retired_capacity, young_retired_user
// to the Mutator partition to top it off so that it matches the running totals.
//
// Give any remnants to the Collector partition. After topping off the Collector partition, its values
// should also match running totals.
// young_retired_regions need to be added to either Mutator or Collector partitions, 100% used.
// Give enough of young_retired_regions, young_retired_capacity, young_retired_user
// to the Mutator partition to top it off so that it matches the running totals.
//
// Give any remnants to the Collector partition. After topping off the Collector partition, its values
// should also match running totals.
assert(young_retired_regions * _region_size_bytes == young_retired_capacity, "sanity");
assert(young_retired_capacity == young_retired_used, "sanity");
assert(young_retired_regions * _region_size_bytes == young_retired_capacity, "sanity");
assert(young_retired_capacity == young_retired_used, "sanity");
assert(capacities[int(ShenandoahFreeSetPartitionId::OldCollector)]
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector capacities must match (%zu != %zu)",
capacities[int(ShenandoahFreeSetPartitionId::OldCollector)],
_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]);
assert(used[int(ShenandoahFreeSetPartitionId::OldCollector)]
== _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector used must match");
assert(regions[int(ShenandoahFreeSetPartitionId::OldCollector)]
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] / _region_size_bytes, "Old collector regions must match");
assert(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]
>= _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector capacity must be >= used");
assert(_available[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]),
"Old Collector available must equal capacity minus used");
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector humongous waste must match");
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= capacities[int(ShenandoahFreeSetPartitionId::Mutator)],
"Capacity total must be >= counted tally");
size_t mutator_capacity_shortfall =
_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - capacities[int(ShenandoahFreeSetPartitionId::Mutator)];
assert(mutator_capacity_shortfall <= young_retired_capacity, "sanity");
capacities[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_capacity_shortfall;
young_retired_capacity -= mutator_capacity_shortfall;
capacities[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_capacity;
assert(capacities[int(ShenandoahFreeSetPartitionId::OldCollector)]
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector capacities must match");
assert(used[int(ShenandoahFreeSetPartitionId::OldCollector)]
== _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector used must match");
assert(regions[int(ShenandoahFreeSetPartitionId::OldCollector)]
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] / _region_size_bytes, "Old collector regions must match");
assert(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]
>= _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector capacity must be >= used");
assert(_available[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]),
"Old Collector available must equal capacity minus used");
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector humongous waste must match");
assert(_used[int(ShenandoahFreeSetPartitionId::Mutator)] >= used[int(ShenandoahFreeSetPartitionId::Mutator)],
"Used total must be >= counted tally");
size_t mutator_used_shortfall =
_used[int(ShenandoahFreeSetPartitionId::Mutator)] - used[int(ShenandoahFreeSetPartitionId::Mutator)];
assert(mutator_used_shortfall <= young_retired_used, "sanity");
used[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_used_shortfall;
young_retired_used -= mutator_used_shortfall;
used[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_used;
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= capacities[int(ShenandoahFreeSetPartitionId::Mutator)],
"Capacity total must be >= counted tally");
size_t mutator_capacity_shortfall =
_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - capacities[int(ShenandoahFreeSetPartitionId::Mutator)];
assert(mutator_capacity_shortfall <= young_retired_capacity, "sanity");
capacities[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_capacity_shortfall;
young_retired_capacity -= mutator_capacity_shortfall;
capacities[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_capacity;
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
>= regions[int(ShenandoahFreeSetPartitionId::Mutator)], "Region total must be >= counted tally");
size_t mutator_regions_shortfall = (_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
- regions[int(ShenandoahFreeSetPartitionId::Mutator)]);
assert(mutator_regions_shortfall <= young_retired_regions, "sanity");
regions[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_regions_shortfall;
young_retired_regions -= mutator_regions_shortfall;
regions[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_regions;
assert(capacities[int(ShenandoahFreeSetPartitionId::Collector)] == _capacity[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector capacities must match");
assert(used[int(ShenandoahFreeSetPartitionId::Collector)] == _used[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector used must match");
assert(regions[int(ShenandoahFreeSetPartitionId::Collector)]
== _capacity[int(ShenandoahFreeSetPartitionId::Collector)] / _region_size_bytes, "Collector regions must match");
assert(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] >= _used[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector Capacity must be >= used");
assert(_available[int(ShenandoahFreeSetPartitionId::Collector)] ==
(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] - _used[int(ShenandoahFreeSetPartitionId::Collector)]),
"Collector Available must equal capacity minus used");
assert(_used[int(ShenandoahFreeSetPartitionId::Mutator)] >= used[int(ShenandoahFreeSetPartitionId::Mutator)],
"Used total must be >= counted tally");
size_t mutator_used_shortfall =
_used[int(ShenandoahFreeSetPartitionId::Mutator)] - used[int(ShenandoahFreeSetPartitionId::Mutator)];
assert(mutator_used_shortfall <= young_retired_used, "sanity");
used[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_used_shortfall;
young_retired_used -= mutator_used_shortfall;
used[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_used;
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
>= regions[int(ShenandoahFreeSetPartitionId::Mutator)], "Region total must be >= counted tally");
size_t mutator_regions_shortfall = (_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
- regions[int(ShenandoahFreeSetPartitionId::Mutator)]);
assert(mutator_regions_shortfall <= young_retired_regions, "sanity");
regions[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_regions_shortfall;
young_retired_regions -= mutator_regions_shortfall;
regions[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_regions;
assert(capacities[int(ShenandoahFreeSetPartitionId::Collector)] == _capacity[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector capacities must match");
assert(used[int(ShenandoahFreeSetPartitionId::Collector)] == _used[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector used must match");
assert(regions[int(ShenandoahFreeSetPartitionId::Collector)]
== _capacity[int(ShenandoahFreeSetPartitionId::Collector)] / _region_size_bytes, "Collector regions must match");
assert(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] >= _used[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector Capacity must be >= used");
assert(_available[int(ShenandoahFreeSetPartitionId::Collector)] ==
(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] - _used[int(ShenandoahFreeSetPartitionId::Collector)]),
"Collector Available must equal capacity minus used");
assert(capacities[int(ShenandoahFreeSetPartitionId::Mutator)] == _capacity[int(ShenandoahFreeSetPartitionId::Mutator)],
"Mutator capacities must match");
assert(used[int(ShenandoahFreeSetPartitionId::Mutator)] == _used[int(ShenandoahFreeSetPartitionId::Mutator)],
"Mutator used must match");
assert(regions[int(ShenandoahFreeSetPartitionId::Mutator)]
== _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes, "Mutator regions must match");
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= _used[int(ShenandoahFreeSetPartitionId::Mutator)],
"Mutator capacity must be >= used");
assert(_available[int(ShenandoahFreeSetPartitionId::Mutator)] ==
(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]),
"Mutator available must equal capacity minus used");
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] == young_humongous_waste,
"Mutator humongous waste must match");
}
assert(capacities[int(ShenandoahFreeSetPartitionId::Mutator)] == _capacity[int(ShenandoahFreeSetPartitionId::Mutator)],
"Mutator capacities must match");
assert(used[int(ShenandoahFreeSetPartitionId::Mutator)] == _used[int(ShenandoahFreeSetPartitionId::Mutator)],
"Mutator used must match");
assert(regions[int(ShenandoahFreeSetPartitionId::Mutator)]
== _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes, "Mutator regions must match");
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= _used[int(ShenandoahFreeSetPartitionId::Mutator)],
"Mutator capacity must be >= used");
assert(_available[int(ShenandoahFreeSetPartitionId::Mutator)] ==
(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]),
"Mutator available must equal capacity minus used");
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] == young_humongous_waste,
"Mutator humongous waste must match");
}
#endif
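A worked example of the retired-region redistribution described in the comments above (hypothetical counts): suppose six young regions were retired fully used, and the Mutator running totals record 104 regions of capacity while the per-region walk counted only 100.

    Mutator shortfall         = 104 - 100 = 4 regions  -> credit 4 retired regions (capacity and used) to Mutator
    remaining retired regions = 6 - 4 = 2 regions      -> credit them to Collector

After the redistribution, the counted capacity, used, and region tallies for both Mutator and Collector must match their running totals exactly, which is what the asserts above verify.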
@ -1206,6 +1216,36 @@ ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
clear_internal();
}
void ShenandoahFreeSet::move_unaffiliated_regions_from_collector_to_old_collector(ssize_t count) {
shenandoah_assert_heaplocked();
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
size_t old_capacity = _partitions.get_capacity(ShenandoahFreeSetPartitionId::OldCollector);
size_t collector_capacity = _partitions.get_capacity(ShenandoahFreeSetPartitionId::Collector);
if (count > 0) {
size_t ucount = count;
size_t bytes_moved = ucount * region_size_bytes;
assert(collector_capacity >= bytes_moved, "Cannot transfer");
assert(_partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector) >= ucount,
"Cannot transfer %zu of %zu", ucount, _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector));
_partitions.decrease_empty_region_counts(ShenandoahFreeSetPartitionId::Collector, ucount);
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::Collector, collector_capacity - bytes_moved);
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::OldCollector, old_capacity + bytes_moved);
_partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector, ucount);
} else if (count < 0) {
size_t ucount = -count;
size_t bytes_moved = ucount * region_size_bytes;
assert(old_capacity >= bytes_moved, "Cannot transfer");
assert(_partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector) >= ucount,
"Cannot transfer %zu of %zu", ucount, _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector));
_partitions.decrease_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector, ucount);
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::OldCollector, old_capacity - bytes_moved);
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::Collector, collector_capacity + bytes_moved);
_partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Collector, ucount);
}
// else, do nothing
}
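A minimal usage sketch for this transfer helper, assuming the caller already holds the heap lock and that free_set points at the heap's ShenandoahFreeSet (the counts below are hypothetical; the real call sites in this change derive them from the heuristics):

    // Move three empty (unaffiliated) regions from the young Collector reserve to the OldCollector reserve.
    free_set->move_unaffiliated_regions_from_collector_to_old_collector(3);
    // A negative count moves regions the other way, from OldCollector back to Collector.
    free_set->move_unaffiliated_regions_from_collector_to_old_collector(-1);
    // A zero count is a no-op. Either transfer leaves the combined Collector + OldCollector capacity unchanged;
    // only the per-partition capacities and empty-region counts move.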
// was pip_pad_bytes
void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) {
shenandoah_assert_heaplocked();
@ -1261,7 +1301,7 @@ void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(Shenandoah
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
}
template<typename Iter>
@ -1496,9 +1536,12 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
return nullptr;
}
HeapWord* result = nullptr;
// We must call try_recycle_under_lock() even if !r->is_trash(). The reason is that if r is being recycled at this
// moment by a GC worker thread, it may appear to be not trash even though it has not yet been fully recycled. If
// we proceed without waiting for the worker to finish recycling the region, the worker thread may overwrite the
// region's affiliation with FREE after we set the region's affiliation to req.affiliation() below.

r->try_recycle_under_lock();
in_new_region = r->is_empty();
if (in_new_region) {
log_debug(gc, free)("Using new region (%zu) for %s (" PTR_FORMAT ").",
r->index(), req.type_string(), p2i(&req));
@ -1668,7 +1711,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
default:
assert(false, "won't happen");
}
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return result;
}
@ -1799,6 +1842,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo
increase_bytes_allocated(waste_bytes);
}
}
_partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_used);
increase_bytes_allocated(total_used);
req.set_actual_size(words_size);
@ -1819,14 +1863,16 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ false,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return _heap->get_region(beg)->bottom();
}
class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionClosure {
public:
void heap_region_do(ShenandoahHeapRegion* r) {
r->try_recycle();
if (r->is_trash()) {
r->try_recycle();
}
}
bool is_thread_safe() {
@ -1861,7 +1907,7 @@ bool ShenandoahFreeSet::transfer_one_region_from_mutator_to_old_collector(size_t
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return true;
} else {
return false;
@ -1914,7 +1960,7 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
// 4. Do not adjust capacities for generations, we just swapped the regions that have already
// been accounted for. However, we should adjust the evacuation reserves as those may have changed.
shenandoah_assert_heaplocked();
@ -1945,7 +1991,7 @@ void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ false,
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
// We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
// to recycle trash before attempting to allocate anything in the region.
}
@ -2025,16 +2071,23 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
for (size_t idx = 0; idx < num_regions; idx++) {
ShenandoahHeapRegion* region = _heap->get_region(idx);
if (region->is_trash()) {
// Trashed regions represent immediate garbage identified by final mark and regions that had been in the collection
// partition but have not yet been "cleaned up" following update refs.
// Trashed regions represent regions that had been in the collection set (or may have been identified as immediate garbage)
// but have not yet been "cleaned up". The cset regions are not "trashed" until we have finished update refs.
if (region->is_old()) {
// We're going to place this region into the Mutator set. We increment old_trashed_regions because this count represents
// regions that the old generation is entitled to without any transfer from young. We do not place this region into
// the OldCollector partition at this time. Instead, we let reserve_regions() decide whether to place this region
// into the OldCollector partition. Deferring the decision allows reserve_regions() to more effectively pack the
// OldCollector regions into high-address memory. We do not adjust capacities of old and young generations at this
// time. At the end of finish_rebuild(), the capacities are adjusted based on the results of reserve_regions().
old_trashed_regions++;
} else {
assert(region->is_young(), "Trashed region should be old or young");
young_trashed_regions++;
}
} else if (region->is_old()) {
// count both humongous and regular regions, but don't count trash (cset) regions.
// We count humongous and regular regions as "old regions". We do not count trashed regions that are old. Those
// are counted (above) as old_trashed_regions.
old_region_count++;
if (first_old_region > idx) {
first_old_region = idx;
@ -2048,7 +2101,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
size_t ac = alloc_capacity(region);
if (ac >= PLAB::min_size() * HeapWordSize) {
if (region->is_trash() || !region->is_old()) {
// Both young and old collected regions (trashed) are placed into the Mutator set
// Both young and old (possibly immediately) collected regions (trashed) are placed into the Mutator set
_partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
if (idx < mutator_leftmost) {
mutator_leftmost = idx;
@ -2111,10 +2164,19 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Region should have been retired");
size_t humongous_waste_bytes = 0;
if (region->is_humongous_start()) {
oop obj = cast_to_oop(region->bottom());
size_t byte_size = obj->size() * HeapWordSize;
size_t region_span = ShenandoahHeapRegion::required_regions(byte_size);
humongous_waste_bytes = region_span * ShenandoahHeapRegion::region_size_bytes() - byte_size;
// Since rebuild does not necessarily happen at a safepoint, a newly allocated humongous object may not have been
// fully initialized. Therefore, we cannot safely consult its header.
ShenandoahHeapRegion* last_of_humongous_continuation = region;
size_t next_idx;
for (next_idx = idx + 1; next_idx < num_regions; next_idx++) {
ShenandoahHeapRegion* humongous_cont_candidate = _heap->get_region(next_idx);
if (!humongous_cont_candidate->is_humongous_continuation()) {
break;
}
last_of_humongous_continuation = humongous_cont_candidate;
}
// For humongous regions, used() is established while holding the global heap lock so it is reliable here
humongous_waste_bytes = ShenandoahHeapRegion::region_size_bytes() - last_of_humongous_continuation->used();
}
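A worked check that the new computation matches the removed header-based formula when the header is readable (illustrative numbers, 4 MB regions): a 13 MB humongous object spans four regions, and only 1 MB of it lands in the last continuation region.

    header-based (removed) waste   = 4 * 4 MB - 13 MB         = 3 MB
    continuation-based (new) waste = 4 MB - used(last region) = 4 MB - 1 MB = 3 MB

The new form relies only on used(), which is maintained under the heap lock, so it stays correct even when the object header has not yet been initialized.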
if (region->is_old()) {
old_collector_used += region_size_bytes;
@ -2183,7 +2245,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
#ifdef ASSERT
if (_heap->mode()->is_generational()) {
assert(young_affiliated_regions() == _heap->young_generation()->get_affiliated_region_count(), "sanity");
@ -2221,7 +2283,7 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
// global_used is unaffected by this transfer
// No need to adjust ranges because humongous regions are not allocatable
@ -2303,7 +2365,7 @@ void ShenandoahFreeSet::transfer_empty_regions_from_to(ShenandoahFreeSetPartitio
/* UnaffiliatedChangesAreYoungNeutral */ true>();
}
}
_partitions.assert_bounds(true);
_partitions.assert_bounds();
}
// Returns number of regions transferred, adds transferred bytes to var argument bytes_transferred
@ -2370,7 +2432,7 @@ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_s
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
}
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return transferred_regions;
}
@ -2445,7 +2507,7 @@ transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPa
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
}
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return transferred_regions;
}
@ -2507,14 +2569,13 @@ void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_trashed_regions, size_t
first_old_region, last_old_region, old_region_count);
}
void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_trashed_regions, size_t old_region_count,
bool have_evacuation_reserves) {
void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count) {
shenandoah_assert_heaplocked();
size_t young_reserve(0), old_reserve(0);
if (_heap->mode()->is_generational()) {
compute_young_and_old_reserves(young_trashed_regions, old_trashed_regions, have_evacuation_reserves,
young_reserve, old_reserve);
compute_young_and_old_reserves(young_cset_regions, old_cset_regions, young_reserve, old_reserve);
} else {
young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
old_reserve = 0;
@ -2531,8 +2592,41 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_
// Release the rebuild lock now. What remains in this function is read-only
rebuild_lock()->unlock();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
log_status();
if (_heap->mode()->is_generational()) {
// Clear the region balance until it is adjusted in preparation for a subsequent GC cycle.
_heap->old_generation()->set_region_balance(0);
}
}
// Reduce old reserve (when there are insufficient resources to satisfy the original request).
void ShenandoahFreeSet::reduce_old_reserve(size_t adjusted_old_reserve, size_t requested_old_reserve) {
ShenandoahOldGeneration* const old_generation = _heap->old_generation();
size_t requested_promoted_reserve = old_generation->get_promoted_reserve();
size_t requested_old_evac_reserve = old_generation->get_evacuation_reserve();
assert(adjusted_old_reserve < requested_old_reserve, "Only allow reduction");
assert(requested_promoted_reserve + requested_old_evac_reserve >= adjusted_old_reserve, "Sanity");
size_t delta = requested_old_reserve - adjusted_old_reserve;
if (requested_promoted_reserve >= delta) {
requested_promoted_reserve -= delta;
old_generation->set_promoted_reserve(requested_promoted_reserve);
} else {
delta -= requested_promoted_reserve;
requested_promoted_reserve = 0;
requested_old_evac_reserve -= delta;
old_generation->set_promoted_reserve(requested_promoted_reserve);
old_generation->set_evacuation_reserve(requested_old_evac_reserve);
}
}
// Reduce young reserve (when there are insufficient resources to satisfy the original request).
void ShenandoahFreeSet::reduce_young_reserve(size_t adjusted_young_reserve, size_t requested_young_reserve) {
ShenandoahYoungGeneration* const young_generation = _heap->young_generation();
assert(adjusted_young_reserve < requested_young_reserve, "Only allow reduction");
young_generation->set_evacuation_reserve(adjusted_young_reserve);
}
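A worked example of how the reduction is applied (hypothetical numbers): suppose the requested old reserve of 40 MB was budgeted as 24 MB of promotion reserve plus 16 MB of old evacuation reserve.

    adjusted old reserve = 30 MB: delta = 10 MB <= 24 MB, so promotion reserve -> 14 MB, evacuation reserve stays 16 MB
    adjusted old reserve = 12 MB: delta = 28 MB >  24 MB, so promotion reserve -> 0 MB, evacuation reserve -> 16 MB - 4 MB = 12 MB

The promotion reserve absorbs the shortfall first; reduce_young_reserve() is simpler and just lowers the young generation's evacuation reserve to the adjusted value.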
/**
@ -2549,7 +2643,6 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_
* this value should be computed by ShenandoahGenerationalHeap::compute_old_generation_balance().
*/
void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regions, size_t old_trashed_regions,
bool have_evacuation_reserves,
size_t& young_reserve_result, size_t& old_reserve_result) const {
shenandoah_assert_generational();
shenandoah_assert_heaplocked();
@ -2566,6 +2659,15 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
old_available += old_trashed_regions * region_size_bytes;
young_unaffiliated_regions += young_trashed_regions;
assert(young_capacity >= young_generation->used(),
"Young capacity (%zu) must exceed used (%zu)", young_capacity, young_generation->used());
size_t young_available = young_capacity - young_generation->used();
young_available += young_trashed_regions * region_size_bytes;
assert(young_available >= young_unaffiliated_regions * region_size_bytes, "sanity");
assert(old_available >= old_unaffiliated_regions * region_size_bytes, "sanity");
// Consult old-region balance to make adjustments to current generation capacities and availability.
// The generation region transfers take place after we rebuild. old_region_balance represents number of regions
// to transfer from old to young.
@ -2585,6 +2687,7 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
ssize_t xfer_bytes = old_region_balance * checked_cast<ssize_t>(region_size_bytes);
old_available -= xfer_bytes;
old_unaffiliated_regions -= old_region_balance;
young_available += xfer_bytes;
young_capacity += xfer_bytes;
young_unaffiliated_regions += old_region_balance;
}
@ -2593,41 +2696,22 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
// promotions and evacuations. The partition between which old memory is reserved for evacuation and
// which is reserved for promotion is enforced using thread-local variables that prescribe intentions for
// each PLAB's available memory.
if (have_evacuation_reserves) {
// We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
const size_t promoted_reserve = old_generation->get_promoted_reserve();
const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
young_reserve_result = young_generation->get_evacuation_reserve();
old_reserve_result = promoted_reserve + old_evac_reserve;
if (old_reserve_result > old_available) {
// Try to transfer memory from young to old.
size_t old_deficit = old_reserve_result - old_available;
size_t old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
if (young_unaffiliated_regions < old_region_deficit) {
old_region_deficit = young_unaffiliated_regions;
}
young_unaffiliated_regions -= old_region_deficit;
old_unaffiliated_regions += old_region_deficit;
old_region_balance -= old_region_deficit;
old_generation->set_region_balance(old_region_balance);
}
} else {
// We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100;
// The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
// Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of
// unaffiliated regions.
old_reserve_result = old_available;
}
const size_t promoted_reserve = old_generation->get_promoted_reserve();
const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
young_reserve_result = young_generation->get_evacuation_reserve();
old_reserve_result = promoted_reserve + old_evac_reserve;
assert(old_reserve_result + young_reserve_result <= old_available + young_available,
"Cannot reserve (%zu + %zu + %zu) more than is available: %zu + %zu",
promoted_reserve, old_evac_reserve, young_reserve_result, old_available, young_available);
// Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
// free set. Because of this, old_available may not have enough memory to represent the intended reserve. Adjust
// the reserve downward to account for this possibility. This loss is part of the reason why the original budget
// was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
if (old_reserve_result >
_partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
_partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
old_reserve_result =
_partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
_partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
}
if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
@ -2791,19 +2875,17 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
ShenandoahFreeSetPartitionId p = _partitions.membership(idx);
size_t ac = alloc_capacity(r);
assert(ac != region_size_bytes, "Empty regions should be in Mutator partion at entry to reserve_regions");
if (p == ShenandoahFreeSetPartitionId::Collector) {
if (ac != region_size_bytes) {
young_used_regions++;
young_used_bytes = region_size_bytes - ac;
}
// else, unaffiliated region has no used
} else if (p == ShenandoahFreeSetPartitionId::OldCollector) {
if (ac != region_size_bytes) {
old_used_regions++;
old_used_bytes = region_size_bytes - ac;
}
// else, unaffiliated region has no used
} else if (p == ShenandoahFreeSetPartitionId::NotFree) {
assert(p != ShenandoahFreeSetPartitionId::Collector, "Collector regions must be converted from Mutator regions");
if (p == ShenandoahFreeSetPartitionId::OldCollector) {
assert(!r->is_empty(), "Empty regions should be in Mutator partition at entry to reserve_regions");
old_used_regions++;
old_used_bytes = region_size_bytes - ac;
// This region is within the range for OldCollector partition, as established by find_regions_with_alloc_capacity()
assert((_partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= idx) &&
(_partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector) >= idx),
"find_regions_with_alloc_capacity() should have established this is in range");
} else {
assert(p == ShenandoahFreeSetPartitionId::NotFree, "sanity");
// This region has been retired
if (r->is_old()) {
old_used_regions++;
@ -2813,21 +2895,6 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
young_used_regions++;
young_used_bytes += region_size_bytes - ac;
}
} else {
assert(p == ShenandoahFreeSetPartitionId::OldCollector, "Not mutator and not NotFree, so must be OldCollector");
assert(!r->is_empty(), "Empty regions should be in Mutator partition at entry to reserve_regions");
if (idx < old_collector_low_idx) {
old_collector_low_idx = idx;
}
if (idx > old_collector_high_idx) {
old_collector_high_idx = idx;
}
if (idx < old_collector_empty_low_idx) {
old_collector_empty_low_idx = idx;
}
if (idx > old_collector_empty_high_idx) {
old_collector_empty_high_idx = idx;
}
}
}
}
@ -2856,14 +2923,14 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
_partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, used_to_old_collector);
}
_partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Collector,
collector_low_idx, collector_high_idx,
collector_empty_low_idx, collector_empty_high_idx);
_partitions.establish_interval(ShenandoahFreeSetPartitionId::Mutator,
mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx);
_partitions.establish_interval(ShenandoahFreeSetPartitionId::Collector,
collector_low_idx, collector_high_idx, collector_empty_low_idx, collector_empty_high_idx);
_partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::OldCollector,
old_collector_low_idx, old_collector_high_idx,
old_collector_empty_low_idx, old_collector_empty_high_idx);
_partitions.establish_interval(ShenandoahFreeSetPartitionId::Mutator,
mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx);
recompute_total_used</* UsedByMutatorChanged */ true,
/* UsedByCollectorChanged */ true, /* UsedByOldCollectorChanged */ true>();
@ -2872,17 +2939,22 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
if (LogTarget(Info, gc, free)::is_enabled()) {
size_t old_reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);
if (old_reserve < to_reserve_old) {
log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
assert(_heap->mode()->is_generational(), "to_old_reserve > 0 implies generational mode");
reduce_old_reserve(old_reserve, to_reserve_old);
}
size_t reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector);
if (reserve < to_reserve) {
if (_heap->mode()->is_generational()) {
reduce_young_reserve(reserve, to_reserve);
}
log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
}
}
}
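To tie the pieces together, a hypothetical shortfall at the end of reserve_regions(): if 64 MB was wanted for the young reserve but only 48 MB could be set aside in the Collector partition, then

    reserve (48 MB) < to_reserve (64 MB)  ->  reduce_young_reserve(48 MB, 64 MB)

which caps the young evacuation reserve at 48 MB so the upcoming evacuation is not planned against memory that was never reserved; an old-reserve shortfall is trimmed analogously via reduce_old_reserve().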

View File

@ -224,6 +224,10 @@ public:
void transfer_used_capacity_from_to(ShenandoahFreeSetPartitionId from_partition, ShenandoahFreeSetPartitionId to_partition,
size_t regions);
// For recycled region r in the OldCollector partition but possibly not within the interval for empty OldCollector regions,
// expand the empty interval to include this region.
inline void adjust_interval_for_recycled_old_region_under_lock(ShenandoahHeapRegion* r);
const char* partition_membership_name(idx_t idx) const;
// Return the index of the next available region >= start_index, or maximum_regions if not found.
@ -373,12 +377,7 @@ public:
inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value);
inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
shenandoah_assert_heaplocked();
assert (which_partition < NumPartitions, "selected free set must be valid");
_used[int(which_partition)] = value;
_available[int(which_partition)] = _capacity[int(which_partition)] - value;
}
inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value);
inline size_t count(ShenandoahFreeSetPartitionId which_partition) const { return _region_counts[int(which_partition)]; }
@ -402,7 +401,7 @@ public:
// idx >= leftmost &&
// idx <= rightmost
// }
void assert_bounds(bool validate_totals) NOT_DEBUG_RETURN;
void assert_bounds() NOT_DEBUG_RETURN;
};
// Publicly, ShenandoahFreeSet represents memory that is available to mutator threads. The public capacity(), used(),
@ -634,7 +633,11 @@ private:
void establish_old_collector_alloc_bias();
size_t get_usable_free_words(size_t free_bytes) const;
void reduce_young_reserve(size_t adjusted_young_reserve, size_t requested_young_reserve);
void reduce_old_reserve(size_t adjusted_old_reserve, size_t requested_old_reserve);
void log_freeset_stats(ShenandoahFreeSetPartitionId partition_id, LogStream& ls);
// log status, assuming lock has already been acquired by the caller.
void log_status();
@ -685,35 +688,46 @@ public:
return _total_global_used;
}
size_t global_unaffiliated_regions() {
// A negative argument results in moving from old_collector to collector
void move_unaffiliated_regions_from_collector_to_old_collector(ssize_t regions);
inline size_t global_unaffiliated_regions() {
return _global_unaffiliated_regions;
}
size_t young_unaffiliated_regions() {
inline size_t young_unaffiliated_regions() {
return _young_unaffiliated_regions;
}
size_t old_unaffiliated_regions() {
inline size_t collector_unaffiliated_regions() {
return _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector);
}
inline size_t old_collector_unaffiliated_regions() {
return _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector);
}
size_t young_affiliated_regions() {
inline size_t old_unaffiliated_regions() {
return _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector);
}
inline size_t young_affiliated_regions() {
return _young_affiliated_regions;
}
size_t old_affiliated_regions() {
inline size_t old_affiliated_regions() {
return _old_affiliated_regions;
}
size_t global_affiliated_regions() {
inline size_t global_affiliated_regions() {
return _global_affiliated_regions;
}
size_t total_young_regions() {
inline size_t total_young_regions() {
return _total_young_regions;
}
size_t total_old_regions() {
inline size_t total_old_regions() {
return _partitions.get_capacity(ShenandoahFreeSetPartitionId::OldCollector) / ShenandoahHeapRegion::region_size_bytes();
}
@ -725,36 +739,27 @@ public:
// Examine the existing free set representation, capturing the current state into var arguments:
//
// young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
// old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
// young_trashed_regions is the number of trashed regions (immediate garbage at final mark, cset regions after update refs)
// old_trashed_regions is the number of trashed regions
// (immediate garbage at final old mark, cset regions after update refs for mixed evac)
// first_old_region is the index of the first region that is part of the OldCollector set
// last_old_region is the index of the last region that is part of the OldCollector set
// old_region_count is the number of regions in the OldCollector set that have memory available to be allocated
void prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
void prepare_to_rebuild(size_t &young_trashed_regions, size_t &old_trashed_regions,
size_t &first_old_region, size_t &last_old_region, size_t &old_region_count);
// At the end of final mark, but before we begin evacuating, heuristics calculate how much memory is required to
// hold the results of evacuating to young-gen and to old-gen, and have_evacuation_reserves should be true.
// These quantities, stored as reserves for their respective generations, are consulted prior to rebuilding
// the free set (ShenandoahFreeSet) in preparation for evacuation. When the free set is rebuilt, we make sure
// to reserve sufficient memory in the collector and old_collector sets to hold evacuations.
// hold the results of evacuating to young-gen and to old-gen. These quantities, stored in reserves for their
// respective generations, are consulted prior to rebuilding the free set (ShenandoahFreeSet) in preparation for
// evacuation. When the free set is rebuilt, we make sure to reserve sufficient memory in the collector and
// old_collector sets to hold evacuations. Likewise, at the end of update refs, we rebuild the free set in order
// to set aside reserves to be consumed during the next GC cycle.
//
// We also rebuild the free set at the end of GC, as we prepare to idle GC until the next trigger. In this case,
// have_evacuation_reserves is false because we don't yet know how much memory will need to be evacuated in the
// next GC cycle. When have_evacuation_reserves is false, the free set rebuild operation reserves for the collector
// and old_collector sets based on alternative mechanisms, such as ShenandoahEvacReserve, ShenandoahOldEvacReserve, and
// ShenandoahOldCompactionReserve. In a future planned enhancement, the reserve for old_collector set when the
// evacuation reserves are unknown, is based in part on anticipated promotion as determined by analysis of live data
// found during the previous GC pass which is one less than the current tenure age.
//
// young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
// old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
// young_trashed_regions is the number of trashed regions (immediate garbage at final mark, cset regions after update refs)
// old_trashed_regions is the number of trashed regions
// (immediate garbage at final old mark, cset regions after update refs for mixed evac)
// num_old_regions is the number of old-gen regions that have available memory for further allocations (excluding old cset)
// have_evacuation_reserves is true iff the desired values of young-gen and old-gen evacuation reserves and old-gen
// promotion reserve have been precomputed (and can be obtained by invoking
// <generation>->get_evacuation_reserve() or old_gen->get_promoted_reserve()
void finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t num_old_regions,
bool have_evacuation_reserves = false);
void finish_rebuild(size_t young_trashed_regions, size_t old_trashed_regions, size_t num_old_regions);
// When a region is promoted in place, we add the region's available memory if it is greater than plab_min_size()
// into the old collector partition by invoking this method.
@ -806,9 +811,18 @@ public:
return _partitions.available_in_locked_for_rebuild(ShenandoahFreeSetPartitionId::Mutator);
}
// Use this version of available() if the heap lock is held.
inline size_t available_locked() const {
return _partitions.available_in(ShenandoahFreeSetPartitionId::Mutator);
}
inline size_t total_humongous_waste() const { return _total_humongous_waste; }
inline size_t humongous_waste_in_mutator() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator); }
inline size_t humongous_waste_in_old() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::OldCollector); }
inline size_t humongous_waste_in_mutator() const {
return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator);
}
inline size_t humongous_waste_in_old() const {
return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::OldCollector);
}
void decrease_humongous_waste_for_regular_bypass(ShenandoahHeapRegion* r, size_t waste);
@ -874,7 +888,7 @@ public:
// Reserve space for evacuations, with regions reserved for old evacuations placed to the right
// of regions reserved of young evacuations.
void compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves,
void compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions,
size_t &young_reserve_result, size_t &old_reserve_result) const;
};

View File

@ -522,6 +522,7 @@ public:
void heap_region_do(ShenandoahHeapRegion* r) override {
if (r->is_trash()) {
r->try_recycle_under_lock();
// No need to adjust_interval_for_recycled_old_region. That will be taken care of during freeset rebuild.
}
if (r->is_cset()) {
// Leave affiliation unchanged
@ -966,6 +967,7 @@ public:
if (r->is_trash()) {
live = 0;
r->try_recycle_under_lock();
// No need to adjust_interval_for_recycled_old_region. That will be taken care of during freeset rebuild.
} else {
if (r->is_old()) {
ShenandoahGenerationalFullGC::account_for_region(r, _old_regions, _old_usage, _old_humongous_waste);
@ -1113,16 +1115,16 @@ void ShenandoahFullGC::phase5_epilog() {
ShenandoahPostCompactClosure post_compact;
heap->heap_region_iterate(&post_compact);
heap->collection_set()->clear();
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
ShenandoahFreeSet* free_set = heap->free_set();
{
free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
ShenandoahFreeSet* free_set = heap->free_set();
size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old;
free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old);
// We also do not expand old generation size following Full GC because we have scrambled age populations and
// no longer have objects separated by age into distinct regions.
if (heap->mode()->is_generational()) {
ShenandoahGenerationalFullGC::compute_balances();
}
free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old);
}
// Set mark incomplete because the marking bitmaps have been reset except pinned regions.
_generation->set_mark_incomplete();

View File

@ -250,6 +250,7 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
ShenandoahOldGeneration* const old_generation = heap->old_generation();
ShenandoahYoungGeneration* const young_generation = heap->young_generation();
const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
// During initialization and phase changes, it is more likely that fewer objects die young and old-gen
// memory is not yet full (or is in the process of being replaced). During these times especially, it
@ -263,15 +264,15 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// First priority is to reclaim the easy garbage out of young-gen.
// maximum_young_evacuation_reserve is upper bound on memory to be evacuated out of young
const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve());
// maximum_young_evacuation_reserve is the upper bound on memory to be evacuated into the young Collector Reserve. This
// bound was established at the end of the previous GC cycle, based on available memory and the balance of evacuation to old and young.
size_t maximum_young_evacuation_reserve = young_generation->get_evacuation_reserve();
// maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted),
// clamped by the old generation space available.
//
// Here's the algebra.
// Let SOEP = ShenandoahOldEvacRatioPercent,
// Let SOEP = ShenandoahOldEvacPercent,
// OE = old evac,
// YE = young evac, and
// TE = total evac = OE + YE
@ -283,12 +284,14 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// => OE = YE*SOEP/(100-SOEP)
// We have to be careful in the event that SOEP is set to 100 by the user.
assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
assert(ShenandoahOldEvacPercent <= 100, "Error");
const size_t old_available = old_generation->available();
const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacRatioPercent == 100) ?
old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent),
const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacPercent == 100) ?
old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacPercent) / (100 - ShenandoahOldEvacPercent),
old_available);
// In some cases, maximum_old_evacuation_reserve < old_available (when limited by ShenandoahOldEvacPercent).
// This limit affects mixed evacuations, but does not affect promotions.
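As a quick illustration of the algebra above, here is a standalone sketch (hypothetical names, not HotSpot code) of the old reserve derived from the young reserve and ShenandoahOldEvacPercent, clamped by old_available:

#include <algorithm>
#include <cstddef>

// Sketch of: OE = YE * SOEP / (100 - SOEP), clamped by old_available.
// All identifiers are local to this example.
static size_t sketch_max_old_evac_reserve(size_t young_evac_reserve,   // YE
                                          unsigned old_evac_percent,   // SOEP in [0, 100]
                                          size_t old_available) {
  if (old_evac_percent == 100) {
    return old_available;   // old may consume the entire evacuation budget
  }
  const size_t derived = (young_evac_reserve * old_evac_percent) / (100 - old_evac_percent);
  return std::min(derived, old_available);
}

// Example: YE = 96 MB and SOEP = 75 give OE <= 96 * 75 / 25 = 288 MB, unless old_available is smaller.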
// Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates. Third priority
// is to promote as much as we have room to promote. However, if old-gen memory is in short supply, this means young
@ -305,10 +308,8 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// evacuation and update-refs, we give emphasis to reclaiming garbage first, wherever that garbage is found.
// Global GC will adjust generation sizes to accommodate the collection set it chooses.
// Set old_promo_reserve to enforce that no regions are preselected for promotion. Such regions typically
// have relatively high memory utilization. We still call select_aged_regions() because this will prepare for
// promotions in place, if relevant.
old_promo_reserve = 0;
// Use remnant of old_available to hold promotions.
old_promo_reserve = old_available - maximum_old_evacuation_reserve;
// Dedicate all available old memory to old_evacuation reserve. This may be small, because old-gen is only
// expanded based on an existing mixed evacuation workload at the end of the previous GC cycle. We'll expand
@ -319,43 +320,48 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote. Prioritize compaction
// over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
old_evacuation_reserve = maximum_old_evacuation_reserve;
old_promo_reserve = 0;
old_promo_reserve = old_available - maximum_old_evacuation_reserve;
} else {
// Make all old-evacuation memory available for promotion, but if we can't use it all for promotion, we'll allow some evacuation.
old_evacuation_reserve = 0;
old_evacuation_reserve = old_available - maximum_old_evacuation_reserve;
old_promo_reserve = maximum_old_evacuation_reserve;
}
assert(old_evacuation_reserve <= old_available, "Error");
// We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
// So we limit the old-evacuation reserve to unfragmented memory. Even so, old-evacuation is free to fill in nooks and
// crannies within existing partially used regions and it generally tries to do so.
const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * region_size_bytes;
if (old_evacuation_reserve > old_free_unfragmented) {
const size_t delta = old_evacuation_reserve - old_free_unfragmented;
old_evacuation_reserve -= delta;
// Let promo consume fragments of old-gen memory if not global
if (!is_global()) {
old_promo_reserve += delta;
}
// Let promo consume fragments of old-gen memory
old_promo_reserve += delta;
}
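The clamp above can be viewed in isolation as follows; a hedged standalone sketch (hypothetical helper name) of the idea that evacuations should target only empty old regions while promotions may fill fragmented ones:

#include <cstddef>

// If the old evacuation reserve exceeds the unfragmented (fully empty) old memory,
// shrink it and let the promotion reserve absorb the fragmented remainder.
static void sketch_clamp_old_evac_to_unfragmented(size_t& old_evac_reserve,
                                                  size_t& old_promo_reserve,
                                                  size_t old_free_unfragmented) {
  if (old_evac_reserve > old_free_unfragmented) {
    const size_t delta = old_evac_reserve - old_free_unfragmented;
    old_evac_reserve -= delta;    // evacuate only into unfragmented old regions
    old_promo_reserve += delta;   // promotions may fill nooks and crannies
  }
}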
// Preselect regions for promotion by evacuation (obtaining the live data to seed promoted_reserve),
// and identify regions that will promote in place. These use the tenuring threshold.
const size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve);
assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");
// If is_global(), we let the garbage-first heuristic determine cset membership. Otherwise, we give priority
// to tenurable regions by preselecting regions for promotion by evacuation (obtaining the live data to seed promoted_reserve).
// This also identifies regions that will be promoted in place. Both use the tenuring threshold.
const size_t consumed_by_advance_promotion = select_aged_regions(is_global()? 0: old_promo_reserve);
assert(consumed_by_advance_promotion <= old_promo_reserve, "Do not promote more than budgeted");
// The young evacuation reserve can be no larger than young_unaffiliated. Planning to evacuate into partially consumed
// young regions is doomed to failure if any of those partially consumed regions is selected for the collection set.
size_t young_unaffiliated = young_generation->free_unaffiliated_regions() * region_size_bytes;
// If any regions have been selected for promotion in place, this has the effect of decreasing available within mutator
// and collector partitions, due to padding of remnant memory within each promoted in place region. This will affect
// young_evacuation_reserve but not old_evacuation_reserve or consumed_by_advance_promotion. So recompute.
young_evacuation_reserve = MIN2(young_evacuation_reserve, young_generation->available_with_reserve());
size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_unaffiliated);
// Note that unused old_promo_reserve might not be entirely consumed_by_advance_promotion. Do not transfer this
// to old_evacuation_reserve because this memory is likely very fragmented, and we do not want to increase the likelihood
// of old evacuation failure.
// of old evacuation failure. Leave this memory in the promoted reserve as it may be targeted by opportunistic
// promotions (found during evacuation of young regions).
young_generation->set_evacuation_reserve(young_evacuation_reserve);
old_generation->set_evacuation_reserve(old_evacuation_reserve);
old_generation->set_promoted_reserve(consumed_by_advance_promotion);
old_generation->set_promoted_reserve(old_promo_reserve);
// There is no need to expand OLD because all memory used here was set aside at end of previous GC, except in the
// case of a GLOBAL gc. During choose_collection_set() of GLOBAL, old will be expanded on demand.
@ -363,8 +369,8 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// Having chosen the collection set, adjust the budgets for generational mode based on its composition. Note
// that young_generation->available() now knows about recently discovered immediate garbage.
//
void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set) {
void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
ShenandoahCollectionSet* const collection_set, size_t add_regions_to_old) {
shenandoah_assert_generational();
// We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may
// be able to increase regions_available_to_loan
@ -398,7 +404,8 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
// Leave old_evac_reserve as previously configured
} else if (old_evacuated_committed < old_evacuation_reserve) {
// This happens if the old-gen collection consumes less than full budget.
log_debug(gc, cset)("Shrinking old evac reserve to match old_evac_commited: " PROPERFMT, PROPERFMTARGS(old_evacuated_committed));
log_debug(gc, cset)("Shrinking old evac reserve to match old_evac_commited: " PROPERFMT,
PROPERFMTARGS(old_evacuated_committed));
old_evacuation_reserve = old_evacuated_committed;
old_generation->set_evacuation_reserve(old_evacuation_reserve);
}
@ -409,11 +416,17 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
size_t young_evacuated = collection_set->get_live_bytes_in_untenurable_regions();
size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * double(young_evacuated));
size_t total_young_available = young_generation->available_with_reserve();
assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young");
size_t total_young_available = young_generation->available_with_reserve() - add_regions_to_old * region_size_bytes;
assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate (%zu) more than is available in young (%zu)",
young_evacuated_reserve_used, total_young_available);
young_generation->set_evacuation_reserve(young_evacuated_reserve_used);
size_t old_available = old_generation->available();
// We have not yet rebuilt the free set. Some of the memory that is thought to be available within old may no
// longer be available if that memory had been free within regions that were selected for the collection set.
// Make the necessary adjustments to old_available.
size_t old_available =
old_generation->available() + add_regions_to_old * region_size_bytes - collection_set->get_old_available_bytes_collected();
// Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
// and promotion reserves. Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
// evac and update phases.
@ -422,21 +435,27 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
if (old_available < old_consumed) {
// This can happen due to round-off errors when adding the results of truncated integer arithmetic.
// We've already truncated old_evacuated_committed. Truncate young_advance_promoted_reserve_used here.
assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32,
"Round-off errors should be less than 3.125%%, committed: %zu, reserved: %zu",
young_advance_promoted_reserve_used, old_available - old_evacuated_committed);
young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
if (old_available > old_evacuated_committed) {
young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
} else {
young_advance_promoted_reserve_used = 0;
old_evacuated_committed = old_available;
}
// TODO: reserve for full promotion reserve, not just for advance (preselected) promotion
old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
}
assert(old_available >= old_consumed, "Cannot consume (%zu) more than is available (%zu)",
old_consumed, old_available);
size_t excess_old = old_available - old_consumed;
size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions() + add_regions_to_old;
size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
assert(old_available >= unaffiliated_old,
"Unaffiliated old (%zu is %zu * %zu) is a subset of old available (%zu)",
unaffiliated_old, unaffiliated_old_regions, region_size_bytes, old_available);
assert(unaffiliated_old >= old_evacuated_committed, "Do not evacuate (%zu) more than unaffiliated old (%zu)",
old_evacuated_committed, unaffiliated_old);
// Make sure old_evac_committed is unaffiliated
if (old_evacuated_committed > 0) {
@ -454,20 +473,22 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
}
// If we find that OLD has excess regions, give them back to YOUNG now to reduce likelihood we run out of allocation
// runway during evacuation and update-refs.
size_t regions_to_xfer = 0;
// runway during evacuation and update-refs. We may make further adjustments to balance.
ssize_t add_regions_to_young = 0;
if (excess_old > unaffiliated_old) {
// we can give back unaffiliated_old (all of unaffiliated is excess)
if (unaffiliated_old_regions > 0) {
regions_to_xfer = unaffiliated_old_regions;
add_regions_to_young = unaffiliated_old_regions;
}
} else if (unaffiliated_old_regions > 0) {
// excess_old < unaffiliated old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
size_t excess_regions = excess_old / region_size_bytes;
regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
add_regions_to_young = MIN2(excess_regions, unaffiliated_old_regions);
}
if (regions_to_xfer > 0) {
excess_old -= regions_to_xfer * region_size_bytes;
if (add_regions_to_young > 0) {
assert(excess_old >= add_regions_to_young * region_size_bytes, "Cannot xfer more than excess old");
excess_old -= add_regions_to_young * region_size_bytes;
log_debug(gc, ergo)("Before start of evacuation, total_promotion reserve is young_advance_promoted_reserve: %zu "
"plus excess: old: %zu", young_advance_promoted_reserve_used, excess_old);
}
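The region transfer decided above boils down to the following arithmetic; a minimal sketch under the assumption that only whole, unaffiliated old regions can be returned to young (hypothetical helper, not the HotSpot routine):

#include <algorithm>
#include <cstddef>

// How many unaffiliated old regions can be handed back to young, given the
// excess_old bytes that old does not need for its evacuation and promotion reserves.
static size_t sketch_regions_to_return_to_young(size_t excess_old_bytes,
                                                size_t unaffiliated_old_regions,
                                                size_t region_size_bytes) {
  if (excess_old_bytes > unaffiliated_old_regions * region_size_bytes) {
    return unaffiliated_old_regions;                        // all unaffiliated regions are excess
  }
  return std::min(excess_old_bytes / region_size_bytes,     // whole regions only, rounded down
                  unaffiliated_old_regions);
}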
@ -475,6 +496,7 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
// Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated
// promotions than fit in reserved memory, they will be deferred until a future GC pass.
size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
old_generation->set_promoted_reserve(total_promotion_reserve);
old_generation->reset_promoted_expended();
}
@ -782,17 +804,13 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
ShenandoahCollectionSetPreselector preselector(collection_set, heap->num_regions());
// Find the amount that will be promoted, regions that will be promoted in
// place, and preselect older regions that will be promoted by evacuation.
// place, and preselected older regions that will be promoted by evacuation.
compute_evacuation_budgets(heap);
// Choose the collection set, including the regions preselected above for
// promotion into the old generation.
_heuristics->choose_collection_set(collection_set);
if (!collection_set->is_empty()) {
// only make use of evacuation budgets when we are evacuating
adjust_evacuation_budgets(heap, collection_set);
}
// Choose the collection set, including the regions preselected above for promotion into the old generation.
size_t add_regions_to_old = _heuristics->choose_collection_set(collection_set);
// Even if collection_set->is_empty(), we want to adjust budgets, making reserves available to mutator.
adjust_evacuation_budgets(heap, collection_set, add_regions_to_old);
if (is_global()) {
// We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so
// the remembered set scan can use that to avoid walking into garbage. When the next old mark begins, we will
@ -816,17 +834,16 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
ShenandoahHeapLocker locker(heap->lock());
// We are preparing for evacuation. At this time, we ignore cset region tallies.
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
_free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
// We are preparing for evacuation.
size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old;
_free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old);
if (heap->mode()->is_generational()) {
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
gen_heap->compute_old_generation_balance(young_cset_regions, old_cset_regions);
size_t allocation_runway =
gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trashed_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
}
// Free set construction uses reserve quantities, because they are known to be valid here
_free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true);
_free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old);
}
}

View File

@ -63,9 +63,10 @@ private:
// Compute evacuation budgets prior to choosing collection set.
void compute_evacuation_budgets(ShenandoahHeap* heap);
// Adjust evacuation budgets after choosing collection set.
// Adjust evacuation budgets after choosing the collection set. The argument regions_to_xfer represents regions to be
// transferred to old based on decisions made in top_off_collection_set().
void adjust_evacuation_budgets(ShenandoahHeap* heap,
ShenandoahCollectionSet* collection_set);
ShenandoahCollectionSet* collection_set, size_t regions_to_xfer);
// Preselect for possible inclusion into the collection set exactly the most
// garbage-dense regions, including those that satisfy criteria 1 & 2 below,
@ -144,6 +145,22 @@ private:
virtual void prepare_gc();
// Called during final mark, chooses collection set, rebuilds free set.
// Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
// evacuation efforts that are about to begin. In particular:
//
// old_generation->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
// been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage
// of the live young-gen memory within the collection set. If there is more data ready to be promoted than
// can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
// pass.
//
// old_generation->get_evacuation_reserve() represents the amount of memory within old-gen's available memory that has been
// set aside to hold objects evacuated from the old-gen collection set.
//
// young_generation->get_evacuation_reserve() represents the amount of memory within young-gen's available memory that has
// been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value
// equals the entire amount of live young-gen memory within the collection set, even though some of this memory
// will likely be promoted.
virtual void prepare_regions_and_collection_set(bool concurrent);
// Cancel marking (used by Full collect and when cancelling cycle).

View File

@ -55,9 +55,6 @@ void ShenandoahGenerationalFullGC::prepare() {
// Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
heap->set_active_generation(heap->global_generation());
// No need for old_gen->increase_used() as this was done when plabs were allocated.
heap->reset_generation_reserves();
// Full GC supersedes any marking or coalescing in old generation.
heap->old_generation()->cancel_gc();
}
@ -156,8 +153,11 @@ void ShenandoahGenerationalFullGC::compute_balances() {
// In case this Full GC resulted from degeneration, clear the tally on anticipated promotion.
heap->old_generation()->set_promotion_potential(0);
// Invoke this in case we are able to transfer memory from OLD to YOUNG.
heap->compute_old_generation_balance(0, 0);
// Invoke this in case we are able to transfer memory from OLD to YOUNG
size_t allocation_runway =
heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0L);
heap->compute_old_generation_balance(allocation_runway, 0, 0);
}
ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks,

View File

@ -299,9 +299,9 @@ oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, uint
alloc_from_lab = false;
}
// else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
// We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
// We choose not to promote objects smaller than size_threshold by way of shared allocations as this is too
// costly. Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
// evacuation pass. This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
// evacuation pass. This condition is denoted by: is_promotion && has_plab && (size <= size_threshold).
}
#ifdef ASSERT
}
@ -576,19 +576,18 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {
// Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
// and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
// xfer_limit, and any surplus is transferred to the young generation.
//
// xfer_limit is the maximum we're able to transfer from young to old based on either:
// 1. an assumption that we will be able to replenish memory "borrowed" from young at the end of collection, or
// 2. there is sufficient excess in the allocation runway during GC idle cycles
void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) {
// mutator_xfer_limit, and any surplus is transferred to the young generation. mutator_xfer_limit is
// the maximum we're able to transfer from young to old. This is called at the end of GC, as we prepare
// for the idle span that precedes the next GC.
void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_xfer_limit,
size_t old_trashed_regions, size_t young_trashed_regions) {
shenandoah_assert_heaplocked();
// We can limit the old reserve to the size of anticipated promotions:
// max_old_reserve is an upper bound on memory evacuated from old and promoted to old,
// clamped by the old generation space available.
//
// Here's the algebra.
// Let SOEP = ShenandoahOldEvacRatioPercent,
// Let SOEP = ShenandoahOldEvacPercent,
// OE = old evac,
// YE = young evac, and
// TE = total evac = OE + YE
@ -600,81 +599,171 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_
// => OE = YE*SOEP/(100-SOEP)
// We have to be careful in the event that SOEP is set to 100 by the user.
assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
const size_t old_available = old_generation()->available();
// The free set will reserve this amount of memory to hold young evacuations
const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;
// In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit.
const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve;
const double max_old_reserve = ((ShenandoahOldEvacRatioPercent == 100)? bound_on_old_reserve:
MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent)
/ double(100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve));
assert(ShenandoahOldEvacPercent <= 100, "Error");
const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
ShenandoahOldGeneration* old_gen = old_generation();
size_t old_capacity = old_gen->max_capacity();
size_t old_usage = old_gen->used(); // includes humongous waste
size_t old_available = ((old_capacity >= old_usage)? old_capacity - old_usage: 0) + old_trashed_regions * region_size_bytes;
ShenandoahYoungGeneration* young_gen = young_generation();
size_t young_capacity = young_gen->max_capacity();
size_t young_usage = young_gen->used(); // includes humongous waste
size_t young_available = ((young_capacity >= young_usage)? young_capacity - young_usage: 0);
size_t freeset_available = free_set()->available_locked();
if (young_available > freeset_available) {
young_available = freeset_available;
}
young_available += young_trashed_regions * region_size_bytes;
// The free set will reserve this amount of memory to hold young evacuations (initialized to the ideal reserve)
size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;
// If ShenandoahOldEvacPercent equals 100, max_old_reserve is limited only by mutator_xfer_limit and young_reserve
const size_t bound_on_old_reserve = ((old_available + mutator_xfer_limit + young_reserve) * ShenandoahOldEvacPercent) / 100;
size_t proposed_max_old = ((ShenandoahOldEvacPercent == 100)?
bound_on_old_reserve:
MIN2((young_reserve * ShenandoahOldEvacPercent) / (100 - ShenandoahOldEvacPercent),
bound_on_old_reserve));
if (young_reserve > young_available) {
young_reserve = young_available;
}
// Decide how much old space we should reserve for a mixed collection
double reserve_for_mixed = 0;
if (old_generation()->has_unprocessed_collection_candidates()) {
size_t reserve_for_mixed = 0;
const size_t old_fragmented_available =
old_available - (old_generation()->free_unaffiliated_regions() + old_trashed_regions) * region_size_bytes;
if (old_fragmented_available > proposed_max_old) {
// After we've promoted regions in place, there may be an abundance of old-fragmented available memory,
// even more than the desired percentage for old reserve. We cannot transfer these fragmented regions back
// to young. Instead we make the best of the situation by using this fragmented memory for both promotions
// and evacuations.
proposed_max_old = old_fragmented_available;
}
size_t reserve_for_promo = old_fragmented_available;
const size_t max_old_reserve = proposed_max_old;
const size_t mixed_candidate_live_memory = old_generation()->unprocessed_collection_candidates_live_memory();
const bool doing_mixed = (mixed_candidate_live_memory > 0);
if (doing_mixed) {
// We want this much memory to be unfragmented in order to reliably evacuate old. This is conservative because we
// may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
const double max_evac_need =
(double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste);
const size_t max_evac_need = (size_t) (mixed_candidate_live_memory * ShenandoahOldEvacWaste);
assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
"Unaffiliated available must be less than total available");
const double old_fragmented_available =
double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes);
reserve_for_mixed = max_evac_need + old_fragmented_available;
if (reserve_for_mixed > max_old_reserve) {
reserve_for_mixed = max_old_reserve;
// We prefer to evacuate all of mixed into unfragmented memory, and will expand old in order to do so, unless
// we already have too much fragmented available memory in old.
reserve_for_mixed = max_evac_need;
if (reserve_for_mixed + reserve_for_promo > max_old_reserve) {
// In this case, we'll allow old-evac to target some of the fragmented old memory.
size_t excess_reserves = (reserve_for_mixed + reserve_for_promo) - max_old_reserve;
if (reserve_for_promo > excess_reserves) {
reserve_for_promo -= excess_reserves;
} else {
excess_reserves -= reserve_for_promo;
reserve_for_promo = 0;
reserve_for_mixed -= excess_reserves;
}
}
}
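When the mixed-evacuation and promotion reserves together exceed the cap, the excess is charged to the promotion reserve first, as sketched below (standalone illustration with hypothetical names; it mirrors the priority described in the comments, not the exact HotSpot code):

#include <cstddef>

// Shrink the two old reserves so their sum does not exceed max_old_reserve,
// charging the excess to the promotion reserve before the mixed-evacuation reserve.
static void sketch_trim_old_reserves(size_t& reserve_for_mixed,
                                     size_t& reserve_for_promo,
                                     size_t max_old_reserve) {
  const size_t total = reserve_for_mixed + reserve_for_promo;
  if (total <= max_old_reserve) {
    return;                                  // nothing to trim
  }
  size_t excess = total - max_old_reserve;
  if (reserve_for_promo > excess) {
    reserve_for_promo -= excess;             // promotion absorbs the entire overrun
  } else {
    excess -= reserve_for_promo;
    reserve_for_promo = 0;
    reserve_for_mixed -= excess;             // remainder comes out of mixed evacuation
  }
}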
// Decide how much space we should reserve for promotions from young
size_t reserve_for_promo = 0;
// Decide how much additional space we should reserve for promotions from young. We give priority to mixed evacuations
// over promotions.
const size_t promo_load = old_generation()->get_promotion_potential();
const bool doing_promotions = promo_load > 0;
if (doing_promotions) {
// We're promoting and have a bound on the maximum amount that can be promoted
assert(max_old_reserve >= reserve_for_mixed, "Sanity");
const size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions);
// We've already set aside all of the fragmented available memory within old-gen to hold objects promoted
// from the young generation. promo_load represents the memory that we anticipate will be promoted from
// regions that have reached the tenure age. Ideally, we will always use fragmented old-gen memory to hold
// individually promoted objects and will use unfragmented old-gen memory for the old-gen
// evacuation workload.
// We're promoting and have an estimate of memory to be promoted from aged regions
assert(max_old_reserve >= (reserve_for_mixed + reserve_for_promo), "Sanity");
const size_t available_for_additional_promotions = max_old_reserve - (reserve_for_mixed + reserve_for_promo);
size_t promo_need = (size_t)(promo_load * ShenandoahPromoEvacWaste);
if (promo_need > reserve_for_promo) {
reserve_for_promo += MIN2(promo_need - reserve_for_promo, available_for_additional_promotions);
}
// We've already reserved all the memory required for the promo_load, and possibly more. The excess
// can be consumed by objects promoted from regions that have not yet reached tenure age.
}
// This is the total old we want to ideally reserve
const size_t old_reserve = reserve_for_mixed + reserve_for_promo;
assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");
// This is the total old we want to reserve (initialized to the ideal reserve)
size_t old_reserve = reserve_for_mixed + reserve_for_promo;
// We now check if the old generation is running a surplus or a deficit.
const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
if (max_old_available >= old_reserve) {
// We are running a surplus, so the old region surplus can go to young
const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes;
const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions);
old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
} else {
// We are running a deficit which we'd like to fill from young.
// Ignore that this will directly impact young_generation()->max_capacity(),
// indirectly impacting young_reserve and old_reserve. These computations are conservative.
// Note that deficit is rounded up by one region.
const size_t old_need = (old_reserve - max_old_available + region_size_bytes - 1) / region_size_bytes;
const size_t max_old_region_xfer = old_xfer_limit / region_size_bytes;
size_t old_region_deficit = 0;
size_t old_region_surplus = 0;
// Round down the regions we can transfer from young to old. If we're running short
// on young-gen memory, we restrict the xfer. Old-gen collection activities will be
// curtailed if the budget is restricted.
const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer);
size_t mutator_region_xfer_limit = mutator_xfer_limit / region_size_bytes;
// align the mutator_xfer_limit on region size
mutator_xfer_limit = mutator_region_xfer_limit * region_size_bytes;
if (old_available >= old_reserve) {
// We are running a surplus, so the old region surplus can go to young
const size_t old_surplus = old_available - old_reserve;
old_region_surplus = old_surplus / region_size_bytes;
const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_trashed_regions;
old_region_surplus = MIN2(old_region_surplus, unaffiliated_old_regions);
old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
} else if (old_available + mutator_xfer_limit >= old_reserve) {
// Mutator's xfer limit is sufficient to satisfy our need: transfer all memory from there
size_t old_deficit = old_reserve - old_available;
old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
} else {
// We'll try to xfer from both mutator excess and from young collector reserve
size_t available_reserves = old_available + young_reserve + mutator_xfer_limit;
size_t old_entitlement = (available_reserves * ShenandoahOldEvacPercent) / 100;
// Round old_entitlement down to nearest multiple of regions to be transferred to old
size_t entitled_xfer = old_entitlement - old_available;
entitled_xfer = region_size_bytes * (entitled_xfer / region_size_bytes);
size_t unaffiliated_young_regions = young_generation()->free_unaffiliated_regions();
size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
if (entitled_xfer > unaffiliated_young_memory) {
entitled_xfer = unaffiliated_young_memory;
}
old_entitlement = old_available + entitled_xfer;
if (old_entitlement < old_reserve) {
// There's not enough memory to satisfy our desire. Scale back our old-gen intentions.
size_t budget_overrun = old_reserve - old_entitlement;
if (reserve_for_promo > budget_overrun) {
reserve_for_promo -= budget_overrun;
old_reserve -= budget_overrun;
} else {
budget_overrun -= reserve_for_promo;
reserve_for_promo = 0;
reserve_for_mixed = (reserve_for_mixed > budget_overrun)? reserve_for_mixed - budget_overrun: 0;
old_reserve = reserve_for_promo + reserve_for_mixed;
}
}
// Because of the adjustments above, old_reserve may be smaller now than it was when we tested the branch
// condition above: "(old_available + mutator_xfer_limit >= old_reserve)".
// Therefore, we do NOT know that: mutator_xfer_limit < old_reserve - old_available.
size_t old_deficit = old_reserve - old_available;
old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
// Shrink young_reserve to account for loan to old reserve
const size_t reserve_xfer_regions = old_region_deficit - mutator_region_xfer_limit;
young_reserve -= reserve_xfer_regions * region_size_bytes;
old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
}
}
void ShenandoahGenerationalHeap::reset_generation_reserves() {
ShenandoahHeapLocker locker(lock());
young_generation()->set_evacuation_reserve(0);
old_generation()->set_evacuation_reserve(0);
old_generation()->set_promoted_reserve(0);
assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both");
assert(young_reserve + reserve_for_mixed + reserve_for_promo <= old_available + young_available,
"Cannot reserve more memory than is available: %zu + %zu + %zu <= %zu + %zu",
young_reserve, reserve_for_mixed, reserve_for_promo, old_available, young_available);
// deficit/surplus adjustments to generation sizes will precede rebuild
young_generation()->set_evacuation_reserve(young_reserve);
old_generation()->set_evacuation_reserve(reserve_for_mixed);
old_generation()->set_promoted_reserve(reserve_for_promo);
}
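The region balance published at the end of this routine always falls into one of three cases: a surplus returned to young, a deficit covered entirely by the mutator transfer limit, or a deficit that also draws on the young collector reserve (possibly after scaling back the old reserves). A condensed standalone sketch of the first two cases, with hypothetical names and the third case only noted in a comment:

#include <algorithm>
#include <cstddef>

// Positive result: whole regions old can return to young (surplus).
// Negative result: whole regions young must lend to old (deficit), rounded up.
static long sketch_old_region_balance(size_t old_available, size_t old_reserve,
                                      size_t unaffiliated_old_regions,
                                      size_t region_size_bytes) {
  if (old_available >= old_reserve) {
    const size_t surplus_regions = (old_available - old_reserve) / region_size_bytes;
    return (long) std::min(surplus_regions, unaffiliated_old_regions);
  }
  const size_t deficit = old_reserve - old_available;
  const size_t deficit_regions = (deficit + region_size_bytes - 1) / region_size_bytes;
  // If the mutator transfer limit cannot cover deficit_regions, the caller must also
  // shrink reserve_for_promo / reserve_for_mixed and borrow from the young collector
  // reserve, as done in the routine above (not shown in this sketch).
  return -(long) deficit_regions;
}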
void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {
@ -1015,10 +1104,6 @@ void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {
void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
shenandoah_assert_heaplocked_or_safepoint();
// In case degeneration interrupted concurrent evacuation or update references, we need to clean up
// transient state. Otherwise, these actions have no effect.
reset_generation_reserves();
if (!old_generation()->is_parsable()) {
ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
coalesce_and_fill_old_regions(false);
@ -1036,7 +1121,6 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
// throw off the heuristics.
entry_global_coalesce_and_fill();
}
reset_generation_reserves();
}
void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {

View File

@ -136,7 +136,7 @@ public:
void reset_generation_reserves();
// Computes the optimal size for the old generation, represented as a surplus or deficit of old regions
void compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions);
void compute_old_generation_balance(size_t mutator_xfer_limit, size_t old_trashed_regions, size_t young_trashed_regions);
// Balances generations, coalesces and fills old regions if necessary
void complete_degenerated_cycle();

View File

@ -425,20 +425,29 @@ jint ShenandoahHeap::initialize() {
_affiliations[i] = ShenandoahAffiliation::FREE;
}
if (mode()->is_generational()) {
size_t young_reserve = (soft_max_capacity() * ShenandoahEvacReserve) / 100;
young_generation()->set_evacuation_reserve(young_reserve);
old_generation()->set_evacuation_reserve((size_t) 0);
old_generation()->set_promoted_reserve((size_t) 0);
}
_free_set = new ShenandoahFreeSet(this, _num_regions);
post_initialize_heuristics();
// We are initializing free set. We ignore cset region tallies.
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
_free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old;
_free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old);
if (mode()->is_generational()) {
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
// We cannot call
// gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trashed_regions)
// until after the heap is fully initialized, so we make up a safe value here.
size_t allocation_runway = InitialHeapSize / 2;
gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
}
_free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
_free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old);
}
if (AlwaysPreTouch) {
@ -2521,13 +2530,10 @@ void ShenandoahHeap::final_update_refs_update_region_states() {
parallel_heap_region_iterate(&cl);
}
void ShenandoahHeap::rebuild_free_set(bool concurrent) {
ShenandoahGCPhase phase(concurrent ?
ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
void ShenandoahHeap::rebuild_free_set_within_phase() {
ShenandoahHeapLocker locker(lock());
size_t young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count;
_free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
size_t young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count;
_free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count);
// If there are no old regions, first_old_region will be greater than last_old_region
assert((first_old_region > last_old_region) ||
((last_old_region + 1 - first_old_region >= old_region_count) &&
@ -2546,19 +2552,11 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
// available for transfer to old. Note that transfer of humongous regions does not impact available.
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
size_t allocation_runway =
gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
// Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available
// memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular
// regions in place when many of these regular regions have an abundant amount of available memory within them.
// Fragmentation will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
//
// We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
// within partially consumed regions of memory.
gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trashed_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
}
// Rebuild free set based on adjusted generation sizes.
_free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
_free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, old_region_count);
if (mode()->is_generational()) {
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
@ -2567,6 +2565,13 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
}
}
void ShenandoahHeap::rebuild_free_set(bool concurrent) {
ShenandoahGCPhase phase(concurrent ?
ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
rebuild_free_set_within_phase();
}
bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
size_t slice = r->index() / _bitmap_regions_per_slice;

View File

@ -481,7 +481,9 @@ private:
void rendezvous_threads(const char* name);
void recycle_trash();
public:
// The following two functions rebuild the free set at the end of GC, in preparation for an idle phase.
void rebuild_free_set(bool concurrent);
void rebuild_free_set_within_phase();
void notify_gc_progress();
void notify_gc_no_progress();
size_t get_gc_no_progress_count() const;

View File

@ -595,6 +595,8 @@ void ShenandoahHeapRegion::try_recycle_under_lock() {
_recycling.unset();
} else {
// Ensure recycling is unset before returning to mutator to continue memory allocation.
// Otherwise, the mutator might see the region as fully recycled and might change its affiliation only to have
// the racing GC worker thread overwrite its affiliation to FREE.
while (_recycling.is_set()) {
if (os::is_MP()) {
SpinPause();
@ -605,6 +607,8 @@ void ShenandoahHeapRegion::try_recycle_under_lock() {
}
}
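The comments in this file describe a claim-then-spin protocol: one thread claims the recycling work with a try-set, other threads spin until the flag is cleared, and only the claimant clears it. A generic standalone sketch of that pattern using std::atomic_flag (C++20; an analogy, not the ShenandoahHeapRegion code):

#include <atomic>
#include <thread>

std::atomic_flag recycling;   // default-initialized to the clear state in C++20

// A worker claims the recycle work; returning true means "claimed", not "already done".
bool try_claim_recycle() {
  return !recycling.test_and_set(std::memory_order_acquire);
}

// A thread that needs the region fully recycled waits for the claimant to finish.
void wait_until_recycled() {
  while (recycling.test(std::memory_order_acquire)) {   // std::atomic_flag::test is C++20
    std::this_thread::yield();                          // plays the role of SpinPause()
  }
}

// The claimant clears the flag once the recycle work has actually completed.
void finish_recycle() {
  recycling.clear(std::memory_order_release);
}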
// Note that a return from try_recycle() does not mean the region has been recycled. It only means that
// some GC worker thread has taken responsibility for recycling the region, eventually.
void ShenandoahHeapRegion::try_recycle() {
shenandoah_assert_not_heaplocked();
if (is_trash() && _recycling.try_set()) {

View File

@ -128,8 +128,6 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) {
// the space. This would be the last action if there is nothing to evacuate.
entry_cleanup_early();
heap->free_set()->log_status_under_lock();
assert(!heap->is_concurrent_strong_root_in_progress(), "No evacuations during old gc.");
// We must execute this vm operation if we completed final mark. We cannot
@ -138,7 +136,10 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) {
// collection.
heap->concurrent_final_roots();
size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0);
heap->compute_old_generation_balance(allocation_runway, 0);
// After concurrent old marking finishes, we reclaim immediate garbage. Further, we may also want to expand OLD in order
// to make room for anticipated promotions and/or for mixed evacuations. Mixed evacuations are especially likely to
// follow the end of OLD marking.
heap->rebuild_free_set_within_phase();
heap->free_set()->log_status_under_lock();
return true;
}

View File

@ -427,8 +427,7 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
size_t allocation_runway =
gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trash_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_trash_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_trash_regions, young_trash_regions);
heap->free_set()->finish_rebuild(young_trash_regions, old_trash_regions, num_old);
}
}

View File

@ -66,8 +66,8 @@ private:
// remaining in a PLAB when it is retired.
size_t _promoted_expended;
// Represents the quantity of live bytes we expect to promote during the next evacuation
// cycle. This value is used by the young heuristic to trigger mixed collections.
// Represents the quantity of live bytes we expect to promote during the next GC cycle, either by
// evacuation or by promote-in-place. This value is used by the young heuristic to trigger mixed collections.
// It is also used when computing the optimum size for the old generation.
size_t _promotion_potential;

View File

@ -243,8 +243,7 @@ HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, con
#ifdef ASSERT
assert(ShenandoahHeap::heap()->mode()->is_generational(), "Do not use in non-generational mode");
assert(region->is_old(), "Do not use for young regions");
// For HumongousRegion:s it's more efficient to jump directly to the
// start region.
// For humongous regions it's more efficient to jump directly to the start region.
assert(!region->is_humongous(), "Use region->humongous_start_region() instead");
#endif

View File

@ -420,7 +420,14 @@ public:
// span is the total memory affiliated with these stats (some of which is in use and other is available)
size_t span() const { return _regions * ShenandoahHeapRegion::region_size_bytes(); }
size_t non_trashed_span() const { return (_regions - _trashed_regions) * ShenandoahHeapRegion::region_size_bytes(); }
size_t non_trashed_span() const {
assert(_regions >= _trashed_regions, "sanity");
return (_regions - _trashed_regions) * ShenandoahHeapRegion::region_size_bytes();
}
size_t non_trashed_committed() const {
assert(_committed >= _trashed_regions * ShenandoahHeapRegion::region_size_bytes(), "sanity");
return _committed - (_trashed_regions * ShenandoahHeapRegion::region_size_bytes());
}
};
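For reference, the two derived quantities above reduce to simple guarded arithmetic over the tallied fields; a standalone illustration with a hypothetical struct standing in for the closure's accumulators:

#include <cassert>
#include <cstddef>

struct SketchRegionStats {
  size_t regions;           // total regions covered by these stats
  size_t trashed_regions;   // regions whose contents are garbage awaiting recycle
  size_t committed;         // committed bytes across all covered regions
};

static size_t sketch_non_trashed_span(const SketchRegionStats& s, size_t region_size_bytes) {
  assert(s.regions >= s.trashed_regions);
  return (s.regions - s.trashed_regions) * region_size_bytes;
}

static size_t sketch_non_trashed_committed(const SketchRegionStats& s, size_t region_size_bytes) {
  assert(s.committed >= s.trashed_regions * region_size_bytes);
  return s.committed - s.trashed_regions * region_size_bytes;
}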
class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure {

View File

@ -400,27 +400,20 @@
"reserve/waste is incorrect, at the risk that application " \
"runs out of memory too early.") \
\
product(uintx, ShenandoahOldEvacRatioPercent, 75, EXPERIMENTAL, \
"The maximum proportion of evacuation from old-gen memory, " \
"expressed as a percentage. The default value 75 denotes that " \
"no more than 75% of the collection set evacuation workload may " \
"be towards evacuation of old-gen heap regions. This limits both "\
"the promotion of aged regions and the compaction of existing " \
"old regions. A value of 75 denotes that the total evacuation " \
"work may increase to up to four times the young gen evacuation " \
"work. A larger value allows quicker promotion and allows " \
"a smaller number of mixed evacuations to process " \
"the entire list of old-gen collection candidates at the cost " \
"of an increased disruption of the normal cadence of young-gen " \
"collections. A value of 100 allows a mixed evacuation to " \
"focus entirely on old-gen memory, allowing no young-gen " \
"regions to be collected, likely resulting in subsequent " \
"allocation failures because the allocation pool is not " \
"replenished. A value of 0 allows a mixed evacuation to " \
"focus entirely on young-gen memory, allowing no old-gen " \
"regions to be collected, likely resulting in subsequent " \
"promotion failures and triggering of stop-the-world full GC " \
"events.") \
product(uintx, ShenandoahOldEvacPercent, 75, EXPERIMENTAL, \
"The maximum evacuation to old-gen expressed as a percent of " \
"the total live memory within the collection set. With the " \
"default setting, if collection set evacuates X, no more than " \
"75% of X may hold objects evacuated from old or promoted to " \
"old from young. A value of 100 allows the entire collection " \
"set to be comprised of old-gen regions and young regions that " \
"have reached the tenure age. Larger values allow fewer mixed " \
"evacuations to reclaim all the garbage from old. Smaller " \
"values result in less variation in GC cycle times between " \
"young vs. mixed cycles. A value of 0 prevents mixed " \
"evacations from running and blocks promotion of aged regions " \
"by evacuation. Setting the value to 0 does not prevent " \
"regions from being promoted in place.") \
range(0,100) \
\
product(bool, ShenandoahEvacTracking, false, DIAGNOSTIC, \

View File

@ -201,7 +201,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, prime_one_old_region) {
size_t garbage = make_garbage_above_collection_threshold(10);
_heuristics->prepare_for_old_collections();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(10UL));
EXPECT_EQ(garbage, _collection_set->get_old_garbage());
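Since the prime-then-finalize pattern now repeats across these tests, a small helper could factor it out; a hypothetical addition (not part of the patch), assuming the bool return and the finalize_mixed_evacs() call shown in the diff:

// Hypothetical test helper: prime the collection set and, when any mixed-evacuation
// candidates were added, finalize the mixed-evacuation bookkeeping in one call.
static void prime_and_finalize(ShenandoahOldHeuristics* heuristics,
                               ShenandoahCollectionSet* cset) {
  if (heuristics->prime_collection_set(cset)) {
    heuristics->finalize_mixed_evacs();
  }
}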
@ -214,7 +216,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, prime_many_old_regions) {
size_t g1 = make_garbage_above_collection_threshold(100);
size_t g2 = make_garbage_above_collection_threshold(101);
_heuristics->prepare_for_old_collections();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(100UL, 101UL));
EXPECT_EQ(g1 + g2, _collection_set->get_old_garbage());
@ -226,7 +230,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, require_multiple_mixed_evacuations) {
size_t garbage = create_too_much_garbage_for_one_mixed_evacuation();
_heuristics->prepare_for_old_collections();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_LT(_collection_set->get_old_garbage(), garbage);
EXPECT_GT(_heuristics->unprocessed_old_collection_candidates(), 0UL);
@ -248,7 +254,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, skip_pinned_regions) {
ASSERT_EQ(3UL, _heuristics->unprocessed_old_collection_candidates());
// Here the region is still pinned, so it cannot be added to the collection set.
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
// The two unpinned regions should be added to the collection set and the pinned
// region should be retained at the front of the list of candidates as it would be
@ -261,7 +269,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, skip_pinned_regions) {
// the now unpinned region should be added to the collection set.
make_unpinned(1);
_collection_set->clear();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_EQ(_collection_set->get_old_garbage(), g2);
EXPECT_TRUE(collection_set_is(1UL));
@ -278,14 +288,18 @@ TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_first) {
make_pinned(0);
_heuristics->prepare_for_old_collections();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(1UL, 2UL));
EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 1UL);
make_unpinned(0);
_collection_set->clear();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(0UL));
EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 0UL);
@ -301,7 +315,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_last) {
make_pinned(2);
_heuristics->prepare_for_old_collections();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(0UL, 1UL));
EXPECT_EQ(_collection_set->get_old_garbage(), g1 + g2);
@ -309,7 +325,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_last) {
make_unpinned(2);
_collection_set->clear();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(2UL));
EXPECT_EQ(_collection_set->get_old_garbage(), g3);
@ -327,7 +345,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, unpinned_region_is_middle) {
make_pinned(0);
make_pinned(2);
_heuristics->prepare_for_old_collections();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(1UL));
EXPECT_EQ(_collection_set->get_old_garbage(), g2);
@ -336,7 +356,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, unpinned_region_is_middle) {
make_unpinned(0);
make_unpinned(2);
_collection_set->clear();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(0UL, 2UL));
EXPECT_EQ(_collection_set->get_old_garbage(), g1 + g3);
@ -354,7 +376,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, all_candidates_are_pinned) {
make_pinned(1);
make_pinned(2);
_heuristics->prepare_for_old_collections();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
// In the case when all candidates are pinned, we want to abandon
// this set of mixed collection candidates so that another old collection