8376756: GenShen: Improve encapsulation of generational collection set choosing

Reviewed-by: shade, kdnilsen
This commit is contained in:
William Kemper 2026-02-04 19:33:10 +00:00
parent 792291937f
commit 949370ab0e
22 changed files with 662 additions and 661 deletions

View File

@ -68,9 +68,9 @@ ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics(ShenandoahSpaceInfo*
ShenandoahAdaptiveHeuristics::~ShenandoahAdaptiveHeuristics() {}
size_t ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
// The logic for cset selection in adaptive is as follows:
@ -124,7 +124,6 @@ size_t ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shena
cur_garbage = new_garbage;
}
}
return 0;
}
void ShenandoahAdaptiveHeuristics::record_cycle_start() {

View File

@ -33,7 +33,7 @@
#include "utilities/numberSeq.hpp"
/**
* ShenanoahAllocationRate maintains a truncated history of recently sampled allocation rates for the purpose of providing
* ShenandoahAllocationRate maintains a truncated history of recently sampled allocation rates for the purpose of providing
* informed estimates of current and future allocation rates based on weighted averages and standard deviations of the
* truncated history. More recently sampled allocations are weighted more heavily than older samples when computing
* averages and standard deviations.
@ -108,20 +108,20 @@ public:
virtual ~ShenandoahAdaptiveHeuristics();
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
virtual void record_cycle_start() override;
virtual void record_success_concurrent() override;
virtual void record_degenerated() override;
virtual void record_success_full() override;
void record_cycle_start() override;
void record_success_concurrent() override;
void record_degenerated() override;
void record_success_full() override;
virtual bool should_start_gc() override;
bool should_start_gc() override;
virtual const char* name() override { return "Adaptive"; }
virtual bool is_diagnostic() override { return false; }
virtual bool is_experimental() override { return false; }
const char* name() override { return "Adaptive"; }
bool is_diagnostic() override { return false; }
bool is_experimental() override { return false; }
private:
// These are used to adjust the margin of error and the spike threshold
@ -185,7 +185,7 @@ protected:
// in the generational case. Controlled by global flag ShenandoahMinFreeThreshold.
size_t min_free_threshold();
inline void accept_trigger_with_type(Trigger trigger_type) {
void accept_trigger_with_type(Trigger trigger_type) {
_last_trigger = trigger_type;
ShenandoahHeuristics::accept_trigger();
}
@ -193,7 +193,7 @@ protected:
public:
// Sample the allocation rate at GC trigger time if possible. Return the number of allocated bytes that were
// not accounted for in the sample. This must be called before resetting bytes allocated since gc start.
virtual size_t force_alloc_rate_sample(size_t bytes_allocated) override {
size_t force_alloc_rate_sample(size_t bytes_allocated) override {
size_t unaccounted_bytes;
_allocation_rate.force_sample(bytes_allocated, unaccounted_bytes);
return unaccounted_bytes;

View File

@ -39,16 +39,15 @@ ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics(ShenandoahSpaceIn
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahEvacReserveOverflow);
}
size_t ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free) {
void ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free) {
for (size_t idx = 0; idx < size; idx++) {
ShenandoahHeapRegion* r = data[idx].get_region();
if (r->garbage() > 0) {
cset->add_region(r);
}
}
return 0;
}
bool ShenandoahAggressiveHeuristics::should_start_gc() {

View File

@ -35,17 +35,17 @@ class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics {
public:
ShenandoahAggressiveHeuristics(ShenandoahSpaceInfo* space_info);
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free);
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free) override;
virtual bool should_start_gc();
bool should_start_gc() override;
virtual bool should_unload_classes();
bool should_unload_classes() override;
virtual const char* name() { return "Aggressive"; }
virtual bool is_diagnostic() { return true; }
virtual bool is_experimental() { return false; }
const char* name() override { return "Aggressive"; }
bool is_diagnostic() override { return true; }
bool is_experimental() override { return false; }
};
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHAGGRESSIVEHEURISTICS_HPP

View File

@ -76,9 +76,9 @@ bool ShenandoahCompactHeuristics::should_start_gc() {
return ShenandoahHeuristics::should_start_gc();
}
size_t ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
// Do not select too large CSet that would overflow the available free space
size_t max_cset = actual_free * 3 / 4;
@ -97,5 +97,4 @@ size_t ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(Shenan
cset->add_region(r);
}
}
return 0;
}

View File

@ -33,17 +33,17 @@
*/
class ShenandoahCompactHeuristics : public ShenandoahHeuristics {
public:
ShenandoahCompactHeuristics(ShenandoahSpaceInfo* space_info);
explicit ShenandoahCompactHeuristics(ShenandoahSpaceInfo* space_info);
virtual bool should_start_gc();
bool should_start_gc() override;
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free);
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
virtual const char* name() { return "Compact"; }
virtual bool is_diagnostic() { return false; }
virtual bool is_experimental() { return false; }
const char* name() override { return "Compact"; }
bool is_diagnostic() override { return false; }
bool is_experimental() override { return false; }
};
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHCOMPACTHEURISTICS_HPP

View File

@ -25,19 +25,205 @@
#include "gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSetPreselector.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahTrace.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
#include "utilities/quickSort.hpp"
ShenandoahGenerationalHeuristics::ShenandoahGenerationalHeuristics(ShenandoahGeneration* generation)
: ShenandoahAdaptiveHeuristics(generation), _generation(generation) {
using idx_t = ShenandoahSimpleBitMap::idx_t;
// Pairs a tenure-eligible region with its live-data byte count, so promotion
// candidates can be sorted by evacuation effort (see compare_by_aged_live).
// Plain C++ struct rather than C-style `typedef struct { ... } Name;` — the
// named form is idiomatic and allows forward declaration if ever needed.
struct AgedRegionData {
  ShenandoahHeapRegion* _region;
  size_t _live_data;
};
// Three-way comparator for QuickSort: orders AgedRegionData entries by
// ascending live-data bytes (fewest live bytes — cheapest to evacuate — first).
static int compare_by_aged_live(AgedRegionData a, AgedRegionData b) {
  if (a._live_data == b._live_data) {
    return 0;
  }
  return (a._live_data < b._live_data) ? -1 : 1;
}
size_t ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
// Debug-only sanity check: walks every heap region and asserts that none still
// carries a saved top-before-promote value, i.e. no region is left staged for
// in-place promotion from a previous cycle when a new collection set is chosen.
// Compiles to nothing in product builds.
inline void assert_no_in_place_promotions() {
#ifdef ASSERT
  // Closure applied to each region; fires the assert on any region already
  // marked (via save_top_before_promote) for in-place promotion.
  class ShenandoahNoInPlacePromotions : public ShenandoahHeapRegionClosure {
  public:
    void heap_region_do(ShenandoahHeapRegion *r) override {
      assert(r->get_top_before_promote() == nullptr,
             "Region %zu should not be ready for in-place promotion", r->index());
    }
  } cl;
  ShenandoahHeap::heap()->heap_region_iterate(&cl);
#endif
}
// Generational heuristics build on the adaptive heuristics for the given
// generation. _add_regions_to_old starts at zero; it is reset again at the top
// of each choose_collection_set() pass.
ShenandoahGenerationalHeuristics::ShenandoahGenerationalHeuristics(ShenandoahGeneration* generation)
  : ShenandoahAdaptiveHeuristics(generation), _generation(generation), _add_regions_to_old(0) {
}
// Top-level collection-set selection for generational mode. Runs three phases
// in order: (1) compute evacuation/promotion budgets, which also preselects
// tenure-eligible regions, (2) filter regions into the collection set honoring
// those preselections, and (3) adjust the budgets to match the set actually
// chosen. For a global cycle it additionally prepares old-gen for subsequent
// mixed collections.
void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  _add_regions_to_old = 0;
  // Seed the collection set with resource area-allocated
  // preselected regions, which are removed when we exit this scope.
  ShenandoahCollectionSetPreselector preselector(collection_set, heap->num_regions());
  // Find the amount that will be promoted, regions that will be promoted in
  // place, and preselected older regions that will be promoted by evacuation.
  compute_evacuation_budgets(heap);
  // Choose the collection set, including the regions preselected above for promotion into the old generation.
  filter_regions(collection_set);
  // Even if collection_set->is_empty(), we want to adjust budgets, making reserves available to mutator.
  adjust_evacuation_budgets(heap, collection_set);
  if (_generation->is_global()) {
    // We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so
    // the remembered set scan can use that to avoid walking into garbage. When the next old mark begins, we will
    // use the mark bitmap to make the old regions parsable by coalescing and filling any unmarked objects. Thus,
    // we prepare for old collections by remembering which regions are old at this time. Note that any objects
    // promoted into old regions will be above TAMS, and so will be considered marked. However, free regions that
    // become old after this point will not be covered correctly by the mark bitmap, so we must be careful not to
    // coalesce those regions. Only the old regions which are not part of the collection set at this point are
    // eligible for coalescing. As implemented now, this has the side effect of possibly initiating mixed-evacuations
    // after a global cycle for old regions that were not included in this collection set.
    heap->old_generation()->prepare_for_mixed_collections_after_global_gc();
  }
}
// Partition old-gen's available memory into an old-evacuation reserve (for
// mixed-evacuation of old regions) and a promotion reserve (for young objects
// tenuring into old), then size the young evacuation reserve against
// unaffiliated young regions. Also preselects aged regions for promotion by
// copy (via select_aged_regions). Publishes the results with the generations'
// set_evacuation_reserve()/set_promoted_reserve() setters.
void ShenandoahGenerationalHeuristics::compute_evacuation_budgets(ShenandoahHeap* const heap) {
  shenandoah_assert_generational();
  ShenandoahOldGeneration* const old_generation = heap->old_generation();
  ShenandoahYoungGeneration* const young_generation = heap->young_generation();
  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  // During initialization and phase changes, it is more likely that fewer objects die young and old-gen
  // memory is not yet full (or is in the process of being replaced). During these times especially, it
  // is beneficial to loan memory from old-gen to young-gen during the evacuation and update-refs phases
  // of execution.
  // Calculate EvacuationReserve before PromotionReserve. Evacuation is more critical than promotion.
  // If we cannot evacuate old-gen, we will not be able to reclaim old-gen memory. Promotions are less
  // critical. If we cannot promote, there may be degradation of young-gen memory because old objects
  // accumulate there until they can be promoted. This increases the young-gen marking and evacuation work.
  // First priority is to reclaim the easy garbage out of young-gen.
  // maximum_young_evacuation_reserve is upper bound on memory to be evacuated into young Collector Reserve. This is
  // bounded at the end of previous GC cycle, based on available memory and balancing of evacuation to old and young.
  size_t maximum_young_evacuation_reserve = young_generation->get_evacuation_reserve();
  // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted),
  // clamped by the old generation space available.
  //
  // Here's the algebra.
  // Let SOEP = ShenandoahOldEvacPercent,
  //     OE = old evac,
  //     YE = young evac, and
  //     TE = total evac = OE + YE
  // By definition:
  //            SOEP/100 = OE/TE
  //                     = OE/(OE+YE)
  //  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)      // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
  //                     = OE/YE
  //  =>              OE = YE*SOEP/(100-SOEP)
  // We have to be careful in the event that SOEP is set to 100 by the user.
  assert(ShenandoahOldEvacPercent <= 100, "Error");
  const size_t old_available = old_generation->available();
  const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacPercent == 100) ?
    old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacPercent) / (100 - ShenandoahOldEvacPercent),
                         old_available);
  // In some cases, maximum_old_reserve < old_available (when limited by ShenandoahOldEvacPercent)
  // This limit affects mixed evacuations, but does not affect promotions.
  // Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates. Third priority
  // is to promote as much as we have room to promote. However, if old-gen memory is in short supply, this means young
  // GC is operating under "duress" and was unable to transfer the memory that we would normally expect. In this case,
  // old-gen will refrain from compacting itself in order to allow a quicker young-gen cycle (by avoiding the update-refs
  // through ALL of old-gen). If there is some memory available in old-gen, we will use this for promotions as promotions
  // do not add to the update-refs burden of GC.
  size_t old_evacuation_reserve, old_promo_reserve;
  if (_generation->is_global()) {
    // Global GC is typically triggered by user invocation of System.gc(), and typically indicates that there is lots
    // of garbage to be reclaimed because we are starting a new phase of execution. Marking for global GC may take
    // significantly longer than typical young marking because we must mark through all old objects. To expedite
    // evacuation and update-refs, we give emphasis to reclaiming garbage first, wherever that garbage is found.
    // Global GC will adjust generation sizes to accommodate the collection set it chooses.
    // Use remnant of old_available to hold promotions.
    old_promo_reserve = old_available - maximum_old_evacuation_reserve;
    // Dedicate all available old memory to old_evacuation reserve. This may be small, because old-gen is only
    // expanded based on an existing mixed evacuation workload at the end of the previous GC cycle. We'll expand
    // the budget for evacuation of old during GLOBAL cset selection.
    old_evacuation_reserve = maximum_old_evacuation_reserve;
  } else if (old_generation->has_unprocessed_collection_candidates()) {
    // We reserved all old-gen memory at end of previous GC to hold anticipated evacuations to old-gen. If this is
    // mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote. Prioritize compaction
    // over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
    old_evacuation_reserve = maximum_old_evacuation_reserve;
    old_promo_reserve = old_available - maximum_old_evacuation_reserve;
  } else {
    // Make all old-evacuation memory for promotion, but if we can't use it all for promotion, we'll allow some evacuation.
    // NOTE(review): here the evacuation reserve receives the remnant (old_available - maximum) while promotion receives
    // the maximum — the inverse of the branch above; confirm this matches the intended priority described in the comment.
    old_evacuation_reserve = old_available - maximum_old_evacuation_reserve;
    old_promo_reserve = maximum_old_evacuation_reserve;
  }
  assert(old_evacuation_reserve <= old_available, "Error");
  // We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
  // So we limit the old-evacuation reserve to unfragmented memory. Even so, old-evacuation is free to fill in nooks and
  // crannies within existing partially used regions and it generally tries to do so.
  const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * region_size_bytes;
  if (old_evacuation_reserve > old_free_unfragmented) {
    const size_t delta = old_evacuation_reserve - old_free_unfragmented;
    old_evacuation_reserve -= delta;
    // Let promo consume fragments of old-gen memory
    old_promo_reserve += delta;
  }
  // If is_global(), we let garbage-first heuristic determine cset membership. Otherwise, we give priority
  // to tenurable regions by preselecting regions for promotion by evacuation (obtaining the live data to seed promoted_reserve).
  // This also identifies regions that will be promoted in place. These use the tenuring threshold.
  const size_t consumed_by_advance_promotion = select_aged_regions(_generation->is_global()? 0: old_promo_reserve);
  assert(consumed_by_advance_promotion <= old_promo_reserve, "Do not promote more than budgeted");
  // The young evacuation reserve can be no larger than young_unaffiliated. Planning to evacuate into partially consumed
  // young regions is doomed to failure if any of those partially consumed regions is selected for the collection set.
  size_t young_unaffiliated = young_generation->free_unaffiliated_regions() * region_size_bytes;
  // If any regions have been selected for promotion in place, this has the effect of decreasing available within mutator
  // and collector partitions, due to padding of remnant memory within each promoted in place region. This will affect
  // young_evacuation_reserve but not old_evacuation_reserve or consumed_by_advance_promotion. So recompute.
  size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_unaffiliated);
  // Note that unused old_promo_reserve might not be entirely consumed_by_advance_promotion. Do not transfer this
  // to old_evacuation_reserve because this memory is likely very fragmented, and we do not want to increase the likelihood
  // of old evacuation failure. Leave this memory in the promoted reserve as it may be targeted by opportunistic
  // promotions (found during evacuation of young regions).
  young_generation->set_evacuation_reserve(young_evacuation_reserve);
  old_generation->set_evacuation_reserve(old_evacuation_reserve);
  old_generation->set_promoted_reserve(old_promo_reserve);
  // There is no need to expand OLD because all memory used here was set aside at end of previous GC, except in the
  // case of a GLOBAL gc. During choose_collection_set() of GLOBAL, old will be expanded on demand.
}
void ShenandoahGenerationalHeuristics::filter_regions(ShenandoahCollectionSet* collection_set) {
assert(collection_set->is_empty(), "Must be empty");
auto heap = ShenandoahGenerationalHeap::heap();
@ -170,10 +356,9 @@ size_t ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollect
size_t immediate_percent = (total_garbage == 0) ? 0 : (immediate_garbage * 100 / total_garbage);
bool doing_promote_in_place = (humongous_regions_promoted + regular_regions_promoted_in_place > 0);
size_t add_regions_to_old = 0;
if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) {
// Call the subclasses to add young-gen regions into the collection set.
add_regions_to_old = choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
}
if (collection_set->has_old_regions()) {
@ -190,9 +375,359 @@ size_t ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollect
regular_regions_promoted_free,
immediate_regions,
immediate_garbage);
return add_regions_to_old;
}
// Preselect for inclusion into the collection set all regions whose age is at or above tenure age and for which the
// garbage percentage exceeds a dynamically adjusted threshold (known as the old-garbage threshold percentage). We
// identify these regions by setting the appropriate entry of the collection set's preselected regions array to true.
// All entries are initialized to false before calling this function.
//
// During the subsequent selection of the collection set, we give priority to these promotion set candidates.
// Without this prioritization, we found that the aged regions tend to be ignored because they typically have
// much less garbage and much more live data than the recently allocated "eden" regions. When aged regions are
// repeatedly excluded from the collection set, the amount of live memory within the young generation tends to
// accumulate and this has the undesirable side effect of causing young-generation collections to require much more
// CPU and wall-clock time.
//
// A second benefit of treating aged regions differently than other regions during collection set selection is
// that this allows us to more accurately budget memory to hold the results of evacuation. Memory for evacuation
// of aged regions must be reserved in the old generation. Memory for evacuation of all other regions must be
// reserved in the young generation.
// Scans all regions: stages qualifying tenurable regions for promotion in place
// (filling the remnant above top and retiring them from their free-set
// partition), collects the remaining tenurable regions as promote-by-copy
// candidates, sorts those by live data, and preselects as many as fit within
// old_promotion_reserve (scaled by ShenandoahPromoEvacWaste). Regions that do
// not fit are accumulated into next cycle's promotion potential.
// Returns the number of reserve bytes consumed by the preselected candidates.
size_t ShenandoahGenerationalHeuristics::select_aged_regions(const size_t old_promotion_reserve) {
  // There should be no regions configured for subsequent in-place-promotions carried over from the previous cycle.
  assert_no_in_place_promotions();
  auto const heap = ShenandoahGenerationalHeap::heap();
  ShenandoahFreeSet* free_set = heap->free_set();
  bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions();
  ShenandoahMarkingContext* const ctx = heap->marking_context();
  const size_t old_garbage_threshold =
    (ShenandoahHeapRegion::region_size_bytes() * heap->old_generation()->heuristics()->get_old_garbage_threshold()) / 100;
  const size_t pip_used_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahGenerationalMinPIPUsage) / 100;
  size_t promo_potential = 0;
  size_t candidates = 0;
  // Tracks the padding of space above top in regions eligible for promotion in place
  size_t promote_in_place_pad = 0;
  // Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that require
  // less evacuation effort. This prioritizes garbage first, expanding the allocation pool early before we reclaim regions that
  // have more live data.
  const idx_t num_regions = heap->num_regions();
  ResourceMark rm;
  AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions);
  ShenandoahFreeSet* freeset = heap->free_set();
  // Any region that is to be promoted in place needs to be retired from its Collector or Mutator partition.
  // Interval bounds start inverted (low = max_regions, high = -1) and shrink/grow as pip regions are found.
  idx_t pip_low_collector_idx = freeset->max_regions();
  idx_t pip_high_collector_idx = -1;
  idx_t pip_low_mutator_idx = freeset->max_regions();
  idx_t pip_high_mutator_idx = -1;
  size_t collector_regions_to_pip = 0;
  size_t mutator_regions_to_pip = 0;
  size_t pip_mutator_regions = 0;
  size_t pip_collector_regions = 0;
  size_t pip_mutator_bytes = 0;
  size_t pip_collector_bytes = 0;
  for (idx_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* const r = heap->get_region(i);
    if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) {
      // skip over regions that aren't regular young with some live data
      continue;
    }
    if (heap->is_tenurable(r)) {
      if ((r->garbage() < old_garbage_threshold) && (r->used() > pip_used_threshold)) {
        // We prefer to promote this region in place because it has a small amount of garbage and a large usage.
        HeapWord* tams = ctx->top_at_mark_start(r);
        HeapWord* original_top = r->top();
        if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top) {
          // No allocations from this region have been made during concurrent mark. It meets all the criteria
          // for in-place-promotion. Though we only need the value of top when we fill the end of the region,
          // we use this field to indicate that this region should be promoted in place during the evacuation
          // phase.
          r->save_top_before_promote();
          size_t remnant_bytes = r->free();
          size_t remnant_words = remnant_bytes / HeapWordSize;
          assert(ShenandoahHeap::min_fill_size() <= PLAB::min_size(), "Implementation makes invalid assumptions");
          if (remnant_words >= ShenandoahHeap::min_fill_size()) {
            ShenandoahHeap::fill_with_object(original_top, remnant_words);
            // Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise,
            // newly allocated objects will not be parsable when promote in place tries to register them. Furthermore, any
            // new allocations would not necessarily be eligible for promotion. This addresses both issues.
            r->set_top(r->end());
            // The region r is either in the Mutator or Collector partition if remnant_words > heap()->plab_min_size.
            // Otherwise, the region is in the NotFree partition.
            ShenandoahFreeSetPartitionId p = free_set->membership(i);
            if (p == ShenandoahFreeSetPartitionId::Mutator) {
              mutator_regions_to_pip++;
              if (i < pip_low_mutator_idx) {
                pip_low_mutator_idx = i;
              }
              if (i > pip_high_mutator_idx) {
                pip_high_mutator_idx = i;
              }
              pip_mutator_regions++;
              pip_mutator_bytes += remnant_bytes;
            } else if (p == ShenandoahFreeSetPartitionId::Collector) {
              collector_regions_to_pip++;
              if (i < pip_low_collector_idx) {
                pip_low_collector_idx = i;
              }
              if (i > pip_high_collector_idx) {
                pip_high_collector_idx = i;
              }
              pip_collector_regions++;
              pip_collector_bytes += remnant_bytes;
            } else {
              assert((p == ShenandoahFreeSetPartitionId::NotFree) && (remnant_words < heap->plab_min_size()),
                     "Should be NotFree if not in Collector or Mutator partitions");
              // In this case, the memory is already counted as used and the region has already been retired. There is
              // no need for further adjustments to used. Further, the remnant memory for this region will not be
              // unallocated or made available to OldCollector after pip.
              remnant_bytes = 0;
            }
            promote_in_place_pad += remnant_bytes;
            free_set->prepare_to_promote_in_place(i, remnant_bytes);
          } else {
            // Since the remnant is so small that this region has already been retired, we don't have to worry about any
            // accidental allocations occurring within this region before the region is promoted in place.
            // This region was already not in the Collector or Mutator set, so no need to remove it.
            assert(free_set->membership(i) == ShenandoahFreeSetPartitionId::NotFree, "sanity");
          }
        }
        // Else, we do not promote this region (either in place or by copy) because it has received new allocations.
        // During evacuation, we exclude from promotion regions for which age > tenure threshold, garbage < garbage-threshold,
        // used > pip_used_threshold, and get_top_before_promote() != tams
      } else {
        // Record this promotion-eligible candidate region. After sorting and selecting the best candidates below,
        // we may still decide to exclude this promotion-eligible region from the current collection set. If this
        // happens, we will consider this region as part of the anticipated promotion potential for the next GC
        // pass; see further below.
        sorted_regions[candidates]._region = r;
        sorted_regions[candidates++]._live_data = r->get_live_data_bytes();
      }
    } else {
      // We only evacuate & promote objects from regular regions whose garbage() is above old-garbage-threshold.
      // Objects in tenure-worthy regions with less garbage are promoted in place. These take a different path to
      // old-gen. Regions excluded from promotion because their garbage content is too low (causing us to anticipate that
      // the region would be promoted in place) may be eligible for evacuation promotion by the time promotion takes
      // place during a subsequent GC pass because more garbage is found within the region between now and then. This
      // should not happen if we are properly adapting the tenure age. The theory behind adaptive tenuring threshold
      // is to choose the youngest age that demonstrates no "significant" further loss of population since the previous
      // age. If not this, we expect the tenure age to demonstrate linear population decay for at least two population
      // samples, whereas we expect to observe exponential population decay for ages younger than the tenure age.
      //
      // In the case that certain regions which were anticipated to be promoted in place need to be promoted by
      // evacuation, it may be the case that there is not sufficient reserve within old-gen to hold evacuation of
      // these regions. The likely outcome is that these regions will not be selected for evacuation or promotion
      // in the current cycle and we will anticipate that they will be promoted in the next cycle. This will cause
      // us to reserve more old-gen memory so that these objects can be promoted in the subsequent cycle.
      if (heap->is_aging_cycle() && heap->age_census()->is_tenurable(r->age() + 1)) {
        if (r->garbage() >= old_garbage_threshold) {
          promo_potential += r->get_live_data_bytes();
        }
      }
    }
    // Note that we keep going even if one region is excluded from selection.
    // Subsequent regions may be selected if they have smaller live data.
  }
  if (pip_mutator_regions + pip_collector_regions > 0) {
    freeset->account_for_pip_regions(pip_mutator_regions, pip_mutator_bytes, pip_collector_regions, pip_collector_bytes);
  }
  // Retire any regions that have been selected for promote in place
  if (collector_regions_to_pip > 0) {
    freeset->shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Collector,
                                                               pip_low_collector_idx, pip_high_collector_idx,
                                                               collector_regions_to_pip);
  }
  if (mutator_regions_to_pip > 0) {
    freeset->shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Mutator,
                                                               pip_low_mutator_idx, pip_high_mutator_idx,
                                                               mutator_regions_to_pip);
  }
  // Sort in increasing order according to live data bytes. Note that candidates represents the number of regions
  // that qualify to be promoted by evacuation.
  size_t old_consumed = 0;
  if (candidates > 0) {
    size_t selected_regions = 0;
    size_t selected_live = 0;
    QuickSort::sort<AgedRegionData>(sorted_regions, candidates, compare_by_aged_live);
    for (size_t i = 0; i < candidates; i++) {
      ShenandoahHeapRegion* const region = sorted_regions[i]._region;
      const size_t region_live_data = sorted_regions[i]._live_data;
      // Budget each candidate at live-data scaled by the promotion waste factor.
      const size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste);
      if (old_consumed + promotion_need <= old_promotion_reserve) {
        old_consumed += promotion_need;
        candidate_regions_for_promotion_by_copy[region->index()] = true;
        selected_regions++;
        selected_live += region_live_data;
      } else {
        // We rejected this promotable region from the collection set because we had no room to hold its copy.
        // Add this region to promo potential for next GC.
        promo_potential += region_live_data;
        assert(!candidate_regions_for_promotion_by_copy[region->index()], "Shouldn't be selected");
      }
      // We keep going even if one region is excluded from selection because we need to accumulate all eligible
      // regions that are not preselected into promo_potential
    }
    log_debug(gc, ergo)("Preselected %zu regions containing " PROPERFMT " live data,"
                        " consuming: " PROPERFMT " of budgeted: " PROPERFMT,
                        selected_regions, PROPERFMTARGS(selected_live), PROPERFMTARGS(old_consumed), PROPERFMTARGS(old_promotion_reserve));
  }
  log_info(gc, ergo)("Promotion potential of aged regions with sufficient garbage: " PROPERFMT, PROPERFMTARGS(promo_potential));
  heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad);
  heap->old_generation()->set_promotion_potential(promo_potential);
  return old_consumed;
}
// Having chosen the collection set, adjust the budgets for generational mode based on its composition. Note
// that young_generation->available() now knows about recently discovered immediate garbage.
//
// Inputs: the already-chosen collection set and the heap's generation accounting. _add_regions_to_old
// (set earlier by top_off_collection_set()) is the number of regions promised to old and is treated as
// already-transferred when sizing young availability and old availability below.
// Effects: shrinks/clamps the old and young evacuation reserves to what the cset actually needs,
// then sets old's promoted reserve (advance promotion plus any excess old memory) and resets
// promoted-expended accounting.
void ShenandoahGenerationalHeuristics::adjust_evacuation_budgets(ShenandoahHeap* const heap,
                                                                 ShenandoahCollectionSet* const collection_set) {
  shenandoah_assert_generational();
  // We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may
  // be able to increase regions_available_to_loan

  // The role of adjust_evacuation_budgets() is to compute the correct value of regions_available_to_loan and to make
  // effective use of this memory, including the remnant memory within these regions that may result from rounding loan to
  // integral number of regions.  Excess memory that is available to be loaned is applied to an allocation supplement,
  // which allows mutators to allocate memory beyond the current capacity of young-gen on the promise that the loan
  // will be repaid as soon as we finish updating references for the recently evacuated collection set.

  // We cannot recalculate regions_available_to_loan by simply dividing old_generation->available() by region_size_bytes
  // because the available memory may be distributed between many partially occupied regions that are already holding old-gen
  // objects.  Memory in partially occupied regions is not "available" to be loaned.  Note that an increase in old-gen
  // available that results from a decrease in memory consumed by old evacuation is not necessarily available to be loaned
  // to young-gen.

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  ShenandoahOldGeneration* const old_generation = heap->old_generation();
  ShenandoahYoungGeneration* const young_generation = heap->young_generation();

  // Old-evacuation demand is the cset's old live bytes inflated by the configured waste factor.
  const size_t old_evacuated = collection_set->get_live_bytes_in_old_regions();
  size_t old_evacuated_committed = (size_t) (ShenandoahOldEvacWaste * double(old_evacuated));
  size_t old_evacuation_reserve = old_generation->get_evacuation_reserve();

  if (old_evacuated_committed > old_evacuation_reserve) {
    // This should only happen due to round-off errors when enforcing ShenandoahOldEvacWaste
    assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32,
           "Round-off errors should be less than 3.125%%, committed: %zu, reserved: %zu",
           old_evacuated_committed, old_evacuation_reserve);
    old_evacuated_committed = old_evacuation_reserve;
    // Leave old_evac_reserve as previously configured
  } else if (old_evacuated_committed < old_evacuation_reserve) {
    // This happens if the old-gen collection consumes less than full budget.
    log_debug(gc, cset)("Shrinking old evac reserve to match old_evacuated_committed: " PROPERFMT,
                        PROPERFMTARGS(old_evacuated_committed));
    old_evacuation_reserve = old_evacuated_committed;
    old_generation->set_evacuation_reserve(old_evacuation_reserve);
  }

  // Split young cset demand into advance promotion (tenurable regions, charged against old via the
  // promotion waste factor) and ordinary young evacuation (untenurable regions, charged against young).
  size_t young_advance_promoted = collection_set->get_live_bytes_in_tenurable_regions();
  size_t young_advance_promoted_reserve_used = (size_t) (ShenandoahPromoEvacWaste * double(young_advance_promoted));

  size_t young_evacuated = collection_set->get_live_bytes_in_untenurable_regions();
  size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * double(young_evacuated));

  // Regions promised to old (_add_regions_to_old) are no longer usable by young.
  size_t total_young_available = young_generation->available_with_reserve() - _add_regions_to_old * region_size_bytes;
  assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate (%zu) more than is available in young (%zu)",
         young_evacuated_reserve_used, total_young_available);
  young_generation->set_evacuation_reserve(young_evacuated_reserve_used);

  // We have not yet rebuilt the free set. Some of the memory that is thought to be available within old may no
  // longer be available if that memory had been free within regions that were selected for the collection set.
  // Make the necessary adjustments to old_available.
  size_t old_available =
    old_generation->available() + _add_regions_to_old * region_size_bytes - collection_set->get_old_available_bytes_collected();

  // Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
  // and promotion reserves. Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
  // evac and update phases.
  size_t old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;

  if (old_available < old_consumed) {
    // This can happen due to round-off errors when adding the results of truncated integer arithmetic.
    // We've already truncated old_evacuated_committed.  Truncate young_advance_promoted_reserve_used here.
    assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32,
           "Round-off errors should be less than 3.125%%, committed: %zu, reserved: %zu",
           young_advance_promoted_reserve_used, old_available - old_evacuated_committed);
    if (old_available > old_evacuated_committed) {
      young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
    } else {
      young_advance_promoted_reserve_used = 0;
      old_evacuated_committed = old_available;
    }
    // TODO: reserve for full promotion reserve, not just for advance (preselected) promotion
    old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
  }

  assert(old_available >= old_consumed, "Cannot consume (%zu) more than is available (%zu)",
         old_consumed, old_available);
  size_t excess_old = old_available - old_consumed;
  size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions() + _add_regions_to_old;
  size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
  assert(unaffiliated_old >= old_evacuated_committed, "Do not evacuate (%zu) more than unaffiliated old (%zu)",
         old_evacuated_committed, unaffiliated_old);

  // Make sure old_evac_committed is unaffiliated: only whole empty regions beyond what old evacuation
  // needs may be counted as excess that could be handed back.
  if (old_evacuated_committed > 0) {
    if (unaffiliated_old > old_evacuated_committed) {
      size_t giveaway = unaffiliated_old - old_evacuated_committed;
      size_t giveaway_regions = giveaway / region_size_bytes;  // round down
      if (giveaway_regions > 0) {
        excess_old = MIN2(excess_old, giveaway_regions * region_size_bytes);
      } else {
        excess_old = 0;
      }
    } else {
      excess_old = 0;
    }
  }

  // If we find that OLD has excess regions, give them back to YOUNG now to reduce likelihood we run out of allocation
  // runway during evacuation and update-refs.  We may make further adjustments to balance.
  // NOTE(review): within this function the computed region count only reduces excess_old (and is logged);
  // the actual region transfer is presumably performed elsewhere -- confirm against callers.
  ssize_t add_regions_to_young = 0;
  if (excess_old > unaffiliated_old) {
    // we can give back unaffiliated_old (all of unaffiliated is excess)
    if (unaffiliated_old_regions > 0) {
      add_regions_to_young = unaffiliated_old_regions;
    }
  } else if (unaffiliated_old_regions > 0) {
    // excess_old < unaffiliated old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
    size_t excess_regions = excess_old / region_size_bytes;
    add_regions_to_young = MIN2(excess_regions, unaffiliated_old_regions);
  }

  if (add_regions_to_young > 0) {
    assert(excess_old >= add_regions_to_young * region_size_bytes, "Cannot xfer more than excess old");
    excess_old -= add_regions_to_young * region_size_bytes;
    log_debug(gc, ergo)("Before start of evacuation, total_promotion reserve is young_advance_promoted_reserve: %zu "
                        "plus excess: old: %zu", young_advance_promoted_reserve_used, excess_old);
  }

  // Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated
  // promotions than fit in reserved memory, they will be deferred until a future GC pass.
  size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
  old_generation->set_promoted_reserve(total_promotion_reserve);
  old_generation->reset_promoted_expended();
}
size_t ShenandoahGenerationalHeuristics::add_preselected_regions_to_collection_set(ShenandoahCollectionSet* cset,
const RegionData* data,

View File

@ -29,6 +29,9 @@
#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
class ShenandoahGeneration;
class ShenandoahHeap;
class ShenandoahCollectionSet;
class RegionData;
/*
* This class serves as the base class for heuristics used to trigger and
@ -44,10 +47,42 @@ class ShenandoahGenerationalHeuristics : public ShenandoahAdaptiveHeuristics {
public:
explicit ShenandoahGenerationalHeuristics(ShenandoahGeneration* generation);
size_t choose_collection_set(ShenandoahCollectionSet* collection_set) override;
void choose_collection_set(ShenandoahCollectionSet* collection_set) override;
private:
// Compute evacuation budgets prior to choosing collection set.
void compute_evacuation_budgets(ShenandoahHeap* const heap);
// Preselect for possible inclusion into the collection set exactly the most
// garbage-dense regions, including those that satisfy criteria 1 & 2 below,
// and whose live bytes will fit within old_available budget:
// Criterion 1. region age >= tenuring threshold
// Criterion 2. region garbage percentage > old garbage threshold
//
// Identifies regions eligible for promotion in place,
// being those of at least tenuring_threshold age that have lower garbage
// density.
//
// Updates promotion_potential and pad_for_promote_in_place fields
// of the heap. Returns bytes of live object memory in the preselected
// regions, which are marked in the preselected_regions() indicator
// array of the heap's collection set, which should be initialized
// to false.
size_t select_aged_regions(const size_t old_promotion_reserve);
// Filter and sort remaining regions before adding to collection set.
void filter_regions(ShenandoahCollectionSet* collection_set);
// Adjust evacuation budgets after choosing collection set. The argument regions_to_xfer
// represents regions to be transferred to old based on decisions made in top_off_collection_set()
void adjust_evacuation_budgets(ShenandoahHeap* const heap,
ShenandoahCollectionSet* const collection_set);
protected:
ShenandoahGeneration* _generation;
size_t _add_regions_to_old;
size_t add_preselected_regions_to_collection_set(ShenandoahCollectionSet* cset,
const RegionData* data,
size_t size) const;

View File

@ -36,14 +36,13 @@ ShenandoahGlobalHeuristics::ShenandoahGlobalHeuristics(ShenandoahGlobalGeneratio
}
size_t ShenandoahGlobalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
void ShenandoahGlobalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
// Better select garbage-first regions
QuickSort::sort<RegionData>(data, (int) size, compare_by_garbage);
QuickSort::sort<RegionData>(data, size, compare_by_garbage);
choose_global_collection_set(cset, data, size, actual_free, 0 /* cur_young_garbage */);
return 0;
}

View File

@ -39,9 +39,9 @@ class ShenandoahGlobalHeuristics : public ShenandoahGenerationalHeuristics {
public:
ShenandoahGlobalHeuristics(ShenandoahGlobalGeneration* generation);
size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
private:
void choose_global_collection_set(ShenandoahCollectionSet* cset,

View File

@ -72,7 +72,7 @@ ShenandoahHeuristics::~ShenandoahHeuristics() {
FREE_C_HEAP_ARRAY(RegionGarbage, _region_data);
}
size_t ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
assert(collection_set->is_empty(), "Must be empty");
@ -154,7 +154,6 @@ size_t ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* coll
choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
}
collection_set->summarize(total_garbage, immediate_garbage, immediate_regions);
return 0;
}
void ShenandoahHeuristics::record_cycle_start() {

View File

@ -183,12 +183,10 @@ protected:
static int compare_by_garbage(RegionData a, RegionData b);
// This is a helper function to choose_collection_set(), returning the number of regions that need to be transferred to
// the old reserve from the young reserve in order to effectively evacuate the chosen collection set. In non-generational
// mode, the return value is 0.
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
RegionData* data, size_t data_size,
size_t free) = 0;
// This is a helper function to choose_collection_set()
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
RegionData* data, size_t data_size,
size_t free) = 0;
void adjust_penalty(intx step);
@ -238,7 +236,7 @@ public:
// Choose the collection set, returning the number of regions that need to be transferred to the old reserve from the young
// reserve in order to effectively evacuate the chosen collection set. In non-generational mode, the return value is 0.
virtual size_t choose_collection_set(ShenandoahCollectionSet* collection_set);
virtual void choose_collection_set(ShenandoahCollectionSet* collection_set);
virtual bool can_unload_classes();

View File

@ -884,9 +884,8 @@ bool ShenandoahOldHeuristics::is_experimental() {
return true;
}
size_t ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
ShenandoahHeuristics::RegionData* data,
size_t data_size, size_t free) {
void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
ShenandoahHeuristics::RegionData* data,
size_t data_size, size_t free) {
ShouldNotReachHere();
return 0;
}

View File

@ -155,8 +155,8 @@ private:
void set_trigger_if_old_is_overgrown();
protected:
size_t
choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override;
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
RegionData* data, size_t data_size, size_t free) override;
// This internal helper routine adds as many mixed evacuation candidate regions as fit within the old-gen evacuation budget
// to the collection set. This may be called twice to prepare for any given mixed evacuation cycle, the first time with

View File

@ -50,9 +50,9 @@ bool ShenandoahPassiveHeuristics::should_degenerate_cycle() {
return ShenandoahDegeneratedGC;
}
size_t ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
assert(ShenandoahDegeneratedGC, "This path is only taken for Degenerated GC");
// Do not select too large CSet that would overflow the available free space.
@ -76,5 +76,4 @@ size_t ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(Shenan
cset->add_region(r);
}
}
return 0;
}

View File

@ -40,19 +40,19 @@ class ShenandoahPassiveHeuristics : public ShenandoahHeuristics {
public:
ShenandoahPassiveHeuristics(ShenandoahSpaceInfo* space_info);
virtual bool should_start_gc();
bool should_start_gc() override;
virtual bool should_unload_classes();
bool should_unload_classes() override;
virtual bool should_degenerate_cycle();
bool should_degenerate_cycle() override;
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
RegionData* data, size_t data_size,
size_t free);
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
RegionData* data, size_t data_size,
size_t free) override;
virtual const char* name() { return "Passive"; }
virtual bool is_diagnostic() { return true; }
virtual bool is_experimental() { return false; }
const char* name() override { return "Passive"; }
bool is_diagnostic() override { return true; }
bool is_experimental() override { return false; }
};
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHPASSIVEHEURISTICS_HPP

View File

@ -37,8 +37,6 @@ ShenandoahStaticHeuristics::ShenandoahStaticHeuristics(ShenandoahSpaceInfo* spac
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);
}
ShenandoahStaticHeuristics::~ShenandoahStaticHeuristics() {}
bool ShenandoahStaticHeuristics::should_start_gc() {
size_t capacity = ShenandoahHeap::heap()->soft_max_capacity();
size_t available = _space_info->soft_mutator_available();
@ -59,9 +57,9 @@ bool ShenandoahStaticHeuristics::should_start_gc() {
return ShenandoahHeuristics::should_start_gc();
}
size_t ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free) {
void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free) {
size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
for (size_t idx = 0; idx < size; idx++) {
@ -70,5 +68,4 @@ size_t ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(Shenand
cset->add_region(r);
}
}
return 0;
}

View File

@ -34,19 +34,17 @@
*/
class ShenandoahStaticHeuristics : public ShenandoahHeuristics {
public:
ShenandoahStaticHeuristics(ShenandoahSpaceInfo* space_info);
explicit ShenandoahStaticHeuristics(ShenandoahSpaceInfo* space_info);
virtual ~ShenandoahStaticHeuristics();
bool should_start_gc() override;
virtual bool should_start_gc();
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free) override;
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free);
virtual const char* name() { return "Static"; }
virtual bool is_diagnostic() { return false; }
virtual bool is_experimental() { return false; }
const char* name() override { return "Static"; }
bool is_diagnostic() override { return false; }
bool is_experimental() override { return false; }
};
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSTATICHEURISTICS_HPP

View File

@ -37,7 +37,7 @@ ShenandoahYoungHeuristics::ShenandoahYoungHeuristics(ShenandoahYoungGeneration*
}
size_t ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
// See comments in ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata():
@ -52,7 +52,7 @@ size_t ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenando
bool need_to_finalize_mixed = heap->old_generation()->heuristics()->prime_collection_set(cset);
// Better select garbage-first regions
QuickSort::sort<RegionData>(data, (int) size, compare_by_garbage);
QuickSort::sort<RegionData>(data, size, compare_by_garbage);
size_t cur_young_garbage = add_preselected_regions_to_collection_set(cset, data, size);
@ -62,12 +62,10 @@ size_t ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenando
// enough consolidated garbage to make effective use of young-gen evacuation reserve. If there is still
// young-gen reserve available following selection of the young-gen collection set, see if we can use
// this memory to expand the old-gen evacuation collection set.
size_t add_regions_to_old;
need_to_finalize_mixed |= heap->old_generation()->heuristics()->top_off_collection_set(add_regions_to_old);
need_to_finalize_mixed |= heap->old_generation()->heuristics()->top_off_collection_set(_add_regions_to_old);
if (need_to_finalize_mixed) {
heap->old_generation()->heuristics()->finalize_mixed_evacs();
}
return add_regions_to_old;
}
void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollectionSet* cset,

View File

@ -38,9 +38,9 @@ public:
explicit ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation);
size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
bool should_start_gc() override;

View File

@ -24,7 +24,6 @@
*/
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSetPreselector.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
@ -245,506 +244,6 @@ void ShenandoahGeneration::parallel_heap_region_iterate_free(ShenandoahHeapRegio
ShenandoahHeap::heap()->parallel_heap_region_iterate(cl);
}
void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap) {
shenandoah_assert_generational();
ShenandoahOldGeneration* const old_generation = heap->old_generation();
ShenandoahYoungGeneration* const young_generation = heap->young_generation();
const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
// During initialization and phase changes, it is more likely that fewer objects die young and old-gen
// memory is not yet full (or is in the process of being replaced). During these times especially, it
// is beneficial to loan memory from old-gen to young-gen during the evacuation and update-refs phases
// of execution.
// Calculate EvacuationReserve before PromotionReserve. Evacuation is more critical than promotion.
// If we cannot evacuate old-gen, we will not be able to reclaim old-gen memory. Promotions are less
// critical. If we cannot promote, there may be degradation of young-gen memory because old objects
// accumulate there until they can be promoted. This increases the young-gen marking and evacuation work.
// First priority is to reclaim the easy garbage out of young-gen.
// maximum_young_evacuation_reserve is upper bound on memory to be evacuated into young Collector Reserve. This is
// bounded at the end of previous GC cycle, based on available memory and balancing of evacuation to old and young.
size_t maximum_young_evacuation_reserve = young_generation->get_evacuation_reserve();
// maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted),
// clamped by the old generation space available.
//
// Here's the algebra.
// Let SOEP = ShenandoahOldEvacPercent,
// OE = old evac,
// YE = young evac, and
// TE = total evac = OE + YE
// By definition:
// SOEP/100 = OE/TE
// = OE/(OE+YE)
// => SOEP/(100-SOEP) = OE/((OE+YE)-OE) // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
// = OE/YE
// => OE = YE*SOEP/(100-SOEP)
// We have to be careful in the event that SOEP is set to 100 by the user.
assert(ShenandoahOldEvacPercent <= 100, "Error");
const size_t old_available = old_generation->available();
const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacPercent == 100) ?
old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacPercent) / (100 - ShenandoahOldEvacPercent),
old_available);
// In some cases, maximum_old_reserve < old_available (when limited by ShenandoahOldEvacPercent)
// This limit affects mixed evacuations, but does not affect promotions.
// Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates. Third priority
// is to promote as much as we have room to promote. However, if old-gen memory is in short supply, this means young
// GC is operating under "duress" and was unable to transfer the memory that we would normally expect. In this case,
// old-gen will refrain from compacting itself in order to allow a quicker young-gen cycle (by avoiding the update-refs
// through ALL of old-gen). If there is some memory available in old-gen, we will use this for promotions as promotions
// do not add to the update-refs burden of GC.
size_t old_evacuation_reserve, old_promo_reserve;
if (is_global()) {
// Global GC is typically triggered by user invocation of System.gc(), and typically indicates that there is lots
// of garbage to be reclaimed because we are starting a new phase of execution. Marking for global GC may take
// significantly longer than typical young marking because we must mark through all old objects. To expedite
// evacuation and update-refs, we give emphasis to reclaiming garbage first, wherever that garbage is found.
// Global GC will adjust generation sizes to accommodate the collection set it chooses.
// Use remnant of old_available to hold promotions.
old_promo_reserve = old_available - maximum_old_evacuation_reserve;
// Dedicate all available old memory to old_evacuation reserve. This may be small, because old-gen is only
// expanded based on an existing mixed evacuation workload at the end of the previous GC cycle. We'll expand
// the budget for evacuation of old during GLOBAL cset selection.
old_evacuation_reserve = maximum_old_evacuation_reserve;
} else if (old_generation->has_unprocessed_collection_candidates()) {
// We reserved all old-gen memory at end of previous GC to hold anticipated evacuations to old-gen. If this is
// mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote. Prioritize compaction
// over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
old_evacuation_reserve = maximum_old_evacuation_reserve;
old_promo_reserve = old_available - maximum_old_evacuation_reserve;
} else {
// Make all old-evacuation memory for promotion, but if we can't use it all for promotion, we'll allow some evacuation.
old_evacuation_reserve = old_available - maximum_old_evacuation_reserve;
old_promo_reserve = maximum_old_evacuation_reserve;
}
assert(old_evacuation_reserve <= old_available, "Error");
// We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
// So we limit the old-evacuation reserve to unfragmented memory. Even so, old-evacuation is free to fill in nooks and
// crannies within existing partially used regions and it generally tries to do so.
const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * region_size_bytes;
if (old_evacuation_reserve > old_free_unfragmented) {
const size_t delta = old_evacuation_reserve - old_free_unfragmented;
old_evacuation_reserve -= delta;
// Let promo consume fragments of old-gen memory
old_promo_reserve += delta;
}
// If is_global(), we let garbage-first heuristic determine cset membership. Otherwise, we give priority
// to tenurable regions by preselecting regions for promotion by evacuation (obtaining the live data to seed promoted_reserve).
// This also identifies regions that will be promoted in place. These use the tenuring threshold.
const size_t consumed_by_advance_promotion = select_aged_regions(is_global()? 0: old_promo_reserve);
assert(consumed_by_advance_promotion <= old_promo_reserve, "Do not promote more than budgeted");
// The young evacuation reserve can be no larger than young_unaffiliated. Planning to evacuate into partially consumed
// young regions is doomed to failure if any of those partially consumed regions is selected for the collection set.
size_t young_unaffiliated = young_generation->free_unaffiliated_regions() * region_size_bytes;
// If any regions have been selected for promotion in place, this has the effect of decreasing available within mutator
// and collector partitions, due to padding of remnant memory within each promoted in place region. This will affect
// young_evacuation_reserve but not old_evacuation_reserve or consumed_by_advance_promotion. So recompute.
size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_unaffiliated);
// Note that unused old_promo_reserve might not be entirely consumed_by_advance_promotion. Do not transfer this
// to old_evacuation_reserve because this memory is likely very fragmented, and we do not want to increase the likelihood
// of old evacuation failure. Leave this memory in the promoted reserve as it may be targeted by opportunistic
// promotions (found during evacuation of young regions).
young_generation->set_evacuation_reserve(young_evacuation_reserve);
old_generation->set_evacuation_reserve(old_evacuation_reserve);
old_generation->set_promoted_reserve(old_promo_reserve);
// There is no need to expand OLD because all memory used here was set aside at end of previous GC, except in the
// case of a GLOBAL gc. During choose_collection_set() of GLOBAL, old will be expanded on demand.
}
// Having chosen the collection set, adjust the budgets for generational mode based on its composition. Note
// that young_generation->available() now knows about recently discovered immediate garbage.
void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
ShenandoahCollectionSet* const collection_set, size_t add_regions_to_old) {
shenandoah_assert_generational();
// We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may
// be able to increase regions_available_to_loan
// The role of adjust_evacuation_budgets() is to compute the correct value of regions_available_to_loan and to make
// effective use of this memory, including the remnant memory within these regions that may result from rounding loan to
// integral number of regions. Excess memory that is available to be loaned is applied to an allocation supplement,
// which allows mutators to allocate memory beyond the current capacity of young-gen on the promise that the loan
// will be repaid as soon as we finish updating references for the recently evacuated collection set.
// We cannot recalculate regions_available_to_loan by simply dividing old_generation->available() by region_size_bytes
// because the available memory may be distributed between many partially occupied regions that are already holding old-gen
// objects. Memory in partially occupied regions is not "available" to be loaned. Note that an increase in old-gen
// available that results from a decrease in memory consumed by old evacuation is not necessarily available to be loaned
// to young-gen.
const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
ShenandoahOldGeneration* const old_generation = heap->old_generation();
ShenandoahYoungGeneration* const young_generation = heap->young_generation();
const size_t old_evacuated = collection_set->get_live_bytes_in_old_regions();
size_t old_evacuated_committed = (size_t) (ShenandoahOldEvacWaste * double(old_evacuated));
size_t old_evacuation_reserve = old_generation->get_evacuation_reserve();
if (old_evacuated_committed > old_evacuation_reserve) {
// This should only happen due to round-off errors when enforcing ShenandoahOldEvacWaste
assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32,
"Round-off errors should be less than 3.125%%, committed: %zu, reserved: %zu",
old_evacuated_committed, old_evacuation_reserve);
old_evacuated_committed = old_evacuation_reserve;
// Leave old_evac_reserve as previously configured
} else if (old_evacuated_committed < old_evacuation_reserve) {
// This happens if the old-gen collection consumes less than full budget.
log_debug(gc, cset)("Shrinking old evac reserve to match old_evac_commited: " PROPERFMT,
PROPERFMTARGS(old_evacuated_committed));
old_evacuation_reserve = old_evacuated_committed;
old_generation->set_evacuation_reserve(old_evacuation_reserve);
}
size_t young_advance_promoted = collection_set->get_live_bytes_in_tenurable_regions();
size_t young_advance_promoted_reserve_used = (size_t) (ShenandoahPromoEvacWaste * double(young_advance_promoted));
size_t young_evacuated = collection_set->get_live_bytes_in_untenurable_regions();
size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * double(young_evacuated));
size_t total_young_available = young_generation->available_with_reserve() - add_regions_to_old * region_size_bytes;;
assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate (%zu) more than is available in young (%zu)",
young_evacuated_reserve_used, total_young_available);
young_generation->set_evacuation_reserve(young_evacuated_reserve_used);
// We have not yet rebuilt the free set. Some of the memory that is thought to be available within old may no
// longer be available if that memory had been free within regions that were selected for the collection set.
// Make the necessary adjustments to old_available.
size_t old_available =
old_generation->available() + add_regions_to_old * region_size_bytes - collection_set->get_old_available_bytes_collected();
// Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
// and promotion reserves. Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
// evac and update phases.
size_t old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
if (old_available < old_consumed) {
// This can happen due to round-off errors when adding the results of truncated integer arithmetic.
// We've already truncated old_evacuated_committed. Truncate young_advance_promoted_reserve_used here.
assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32,
"Round-off errors should be less than 3.125%%, committed: %zu, reserved: %zu",
young_advance_promoted_reserve_used, old_available - old_evacuated_committed);
if (old_available > old_evacuated_committed) {
young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
} else {
young_advance_promoted_reserve_used = 0;
old_evacuated_committed = old_available;
}
// TODO: reserve for full promotion reserve, not just for advance (preselected) promotion
old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
}
assert(old_available >= old_consumed, "Cannot consume (%zu) more than is available (%zu)",
old_consumed, old_available);
size_t excess_old = old_available - old_consumed;
size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions() + add_regions_to_old;
size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
assert(unaffiliated_old >= old_evacuated_committed, "Do not evacuate (%zu) more than unaffiliated old (%zu)",
old_evacuated_committed, unaffiliated_old);
// Make sure old_evac_committed is unaffiliated
if (old_evacuated_committed > 0) {
if (unaffiliated_old > old_evacuated_committed) {
size_t giveaway = unaffiliated_old - old_evacuated_committed;
size_t giveaway_regions = giveaway / region_size_bytes; // round down
if (giveaway_regions > 0) {
excess_old = MIN2(excess_old, giveaway_regions * region_size_bytes);
} else {
excess_old = 0;
}
} else {
excess_old = 0;
}
}
// If we find that OLD has excess regions, give them back to YOUNG now to reduce likelihood we run out of allocation
// runway during evacuation and update-refs. We may make further adjustments to balance.
ssize_t add_regions_to_young = 0;
if (excess_old > unaffiliated_old) {
// we can give back unaffiliated_old (all of unaffiliated is excess)
if (unaffiliated_old_regions > 0) {
add_regions_to_young = unaffiliated_old_regions;
}
} else if (unaffiliated_old_regions > 0) {
// excess_old < unaffiliated old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
size_t excess_regions = excess_old / region_size_bytes;
add_regions_to_young = MIN2(excess_regions, unaffiliated_old_regions);
}
if (add_regions_to_young > 0) {
assert(excess_old >= add_regions_to_young * region_size_bytes, "Cannot xfer more than excess old");
excess_old -= add_regions_to_young * region_size_bytes;
log_debug(gc, ergo)("Before start of evacuation, total_promotion reserve is young_advance_promoted_reserve: %zu "
"plus excess: old: %zu", young_advance_promoted_reserve_used, excess_old);
}
// Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated
// promotions than fit in reserved memory, they will be deferred until a future GC pass.
size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
old_generation->set_promoted_reserve(total_promotion_reserve);
old_generation->reset_promoted_expended();
}
// Associates a promotion-candidate region with its live-data byte count so that
// candidates can be ordered by the evacuation effort they would require.
struct AgedRegionData {
  ShenandoahHeapRegion* _region;      // candidate young region eligible for promotion by evacuation
  size_t _live_data;                  // live bytes within _region; used as the sort key
};
// Three-way comparator for sorting AgedRegionData entries by ascending live-data
// bytes, so that the cheapest-to-evacuate regions appear first.
static int compare_by_aged_live(AgedRegionData a, AgedRegionData b) {
  if (a._live_data == b._live_data) {
    return 0;
  }
  return (a._live_data < b._live_data) ? -1 : 1;
}
// Debug-only sanity check: verify that no region still carries a saved
// top-before-promote value left over from a previous cycle's in-place-promotion
// decision. Compiles to nothing in product builds.
inline void assert_no_in_place_promotions() {
#ifdef ASSERT
  class VerifyNoPromoteInPlace : public ShenandoahHeapRegionClosure {
  public:
    void heap_region_do(ShenandoahHeapRegion *r) override {
      assert(r->get_top_before_promote() == nullptr,
             "Region %zu should not be ready for in-place promotion", r->index());
    }
  } closure;
  ShenandoahHeap::heap()->heap_region_iterate(&closure);
#endif
}
// Preselect for inclusion into the collection set all regions whose age is at or above tenure age and for which the
// garbage percentage exceeds a dynamically adjusted threshold (known as the old-garbage threshold percentage). We
// identify these regions by setting the appropriate entry of the collection set's preselected regions array to true.
// All entries are initialized to false before calling this function.
//
// During the subsequent selection of the collection set, we give priority to these promotion set candidates.
// Without this prioritization, we found that the aged regions tend to be ignored because they typically have
// much less garbage and much more live data than the recently allocated "eden" regions. When aged regions are
// repeatedly excluded from the collection set, the amount of live memory within the young generation tends to
// accumulate and this has the undesirable side effect of causing young-generation collections to require much more
// CPU and wall-clock time.
//
// A second benefit of treating aged regions differently than other regions during collection set selection is
// that this allows us to more accurately budget memory to hold the results of evacuation. Memory for evacuation
// of aged regions must be reserved in the old generation. Memory for evacuation of all other regions must be
// reserved in the young generation.
size_t ShenandoahGeneration::select_aged_regions(const size_t old_promotion_reserve) {

  // There should be no regions configured for subsequent in-place-promotions carried over from the previous cycle.
  assert_no_in_place_promotions();

  auto const heap = ShenandoahGenerationalHeap::heap();
  // Single handle on the free set; used both for partition queries and for retiring promote-in-place regions.
  ShenandoahFreeSet* const free_set = heap->free_set();
  bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions();
  ShenandoahMarkingContext* const ctx = heap->marking_context();

  // Tenurable regions at or above old_garbage_threshold are promoted by evacuation; tenurable regions below it
  // whose usage exceeds pip_used_threshold are promoted in place instead.
  const size_t old_garbage_threshold =
    (ShenandoahHeapRegion::region_size_bytes() * heap->old_generation()->heuristics()->get_old_garbage_threshold()) / 100;
  const size_t pip_used_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahGenerationalMinPIPUsage) / 100;

  size_t promo_potential = 0;
  size_t candidates = 0;

  // Tracks the padding of space above top in regions eligible for promotion in place
  size_t promote_in_place_pad = 0;

  // Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that require
  // less evacuation effort. This prioritizes garbage first, expanding the allocation pool early before we reclaim regions that
  // have more live data.
  const idx_t num_regions = heap->num_regions();

  ResourceMark rm;
  AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions);

  // Any region that is to be promoted in place needs to be retired from its Collector or Mutator partition.
  // Track the index interval, region count, and remnant bytes per partition so the free set can be adjusted below.
  idx_t pip_low_collector_idx = free_set->max_regions();
  idx_t pip_high_collector_idx = -1;
  idx_t pip_low_mutator_idx = free_set->max_regions();
  idx_t pip_high_mutator_idx = -1;
  size_t collector_regions_to_pip = 0;
  size_t mutator_regions_to_pip = 0;
  size_t pip_mutator_bytes = 0;
  size_t pip_collector_bytes = 0;

  for (idx_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* const r = heap->get_region(i);
    if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) {
      // skip over regions that aren't regular young with some live data
      continue;
    }
    if (heap->is_tenurable(r)) {
      if ((r->garbage() < old_garbage_threshold) && (r->used() > pip_used_threshold)) {
        // We prefer to promote this region in place because it has a small amount of garbage and a large usage.
        HeapWord* tams = ctx->top_at_mark_start(r);
        HeapWord* original_top = r->top();
        if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top) {
          // No allocations from this region have been made during concurrent mark. It meets all the criteria
          // for in-place-promotion. Though we only need the value of top when we fill the end of the region,
          // we use this field to indicate that this region should be promoted in place during the evacuation
          // phase.
          r->save_top_before_promote();

          size_t remnant_bytes = r->free();
          size_t remnant_words = remnant_bytes / HeapWordSize;
          assert(ShenandoahHeap::min_fill_size() <= PLAB::min_size(), "Implementation makes invalid assumptions");
          if (remnant_words >= ShenandoahHeap::min_fill_size()) {
            ShenandoahHeap::fill_with_object(original_top, remnant_words);
            // Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise,
            // newly allocated objects will not be parsable when promote in place tries to register them. Furthermore, any
            // new allocations would not necessarily be eligible for promotion. This addresses both issues.
            r->set_top(r->end());

            // The region r is either in the Mutator or Collector partition if remnant_words > heap()->plab_min_size.
            // Otherwise, the region is in the NotFree partition.
            ShenandoahFreeSetPartitionId p = free_set->membership(i);
            if (p == ShenandoahFreeSetPartitionId::Mutator) {
              mutator_regions_to_pip++;
              if (i < pip_low_mutator_idx) {
                pip_low_mutator_idx = i;
              }
              if (i > pip_high_mutator_idx) {
                pip_high_mutator_idx = i;
              }
              pip_mutator_bytes += remnant_bytes;
            } else if (p == ShenandoahFreeSetPartitionId::Collector) {
              collector_regions_to_pip++;
              if (i < pip_low_collector_idx) {
                pip_low_collector_idx = i;
              }
              if (i > pip_high_collector_idx) {
                pip_high_collector_idx = i;
              }
              pip_collector_bytes += remnant_bytes;
            } else {
              assert((p == ShenandoahFreeSetPartitionId::NotFree) && (remnant_words < heap->plab_min_size()),
                     "Should be NotFree if not in Collector or Mutator partitions");
              // In this case, the memory is already counted as used and the region has already been retired. There is
              // no need for further adjustments to used. Further, the remnant memory for this region will not be
              // unallocated or made available to OldCollector after pip.
              remnant_bytes = 0;
            }
            promote_in_place_pad += remnant_bytes;
            free_set->prepare_to_promote_in_place(i, remnant_bytes);
          } else {
            // Since the remnant is so small that this region has already been retired, we don't have to worry about any
            // accidental allocations occurring within this region before the region is promoted in place.

            // This region was already not in the Collector or Mutator set, so no need to remove it.
            assert(free_set->membership(i) == ShenandoahFreeSetPartitionId::NotFree, "sanity");
          }
        }
        // Else, we do not promote this region (either in place or by copy) because it has received new allocations.

        // During evacuation, we exclude from promotion regions for which age > tenure threshold, garbage < garbage-threshold,
        // used > pip_used_threshold, and get_top_before_promote() != tams
      } else {
        // Record this promotion-eligible candidate region. After sorting and selecting the best candidates below,
        // we may still decide to exclude this promotion-eligible region from the current collection set. If this
        // happens, we will consider this region as part of the anticipated promotion potential for the next GC
        // pass; see further below.
        sorted_regions[candidates]._region = r;
        sorted_regions[candidates++]._live_data = r->get_live_data_bytes();
      }
    } else {
      // We only evacuate & promote objects from regular regions whose garbage() is above old-garbage-threshold.
      // Objects in tenure-worthy regions with less garbage are promoted in place. These take a different path to
      // old-gen. Regions excluded from promotion because their garbage content is too low (causing us to anticipate that
      // the region would be promoted in place) may be eligible for evacuation promotion by the time promotion takes
      // place during a subsequent GC pass because more garbage is found within the region between now and then. This
      // should not happen if we are properly adapting the tenure age. The theory behind adaptive tenuring threshold
      // is to choose the youngest age that demonstrates no "significant" further loss of population since the previous
      // age. If not this, we expect the tenure age to demonstrate linear population decay for at least two population
      // samples, whereas we expect to observe exponential population decay for ages younger than the tenure age.
      //
      // In the case that certain regions which were anticipated to be promoted in place need to be promoted by
      // evacuation, it may be the case that there is not sufficient reserve within old-gen to hold evacuation of
      // these regions. The likely outcome is that these regions will not be selected for evacuation or promotion
      // in the current cycle and we will anticipate that they will be promoted in the next cycle. This will cause
      // us to reserve more old-gen memory so that these objects can be promoted in the subsequent cycle.
      if (heap->is_aging_cycle() && heap->age_census()->is_tenurable(r->age() + 1)) {
        if (r->garbage() >= old_garbage_threshold) {
          promo_potential += r->get_live_data_bytes();
        }
      }
    }
    // Note that we keep going even if one region is excluded from selection.
    // Subsequent regions may be selected if they have smaller live data.
  }

  if (mutator_regions_to_pip + collector_regions_to_pip > 0) {
    free_set->account_for_pip_regions(mutator_regions_to_pip, pip_mutator_bytes, collector_regions_to_pip, pip_collector_bytes);
  }

  // Retire any regions that have been selected for promote in place
  if (collector_regions_to_pip > 0) {
    free_set->shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Collector,
                                                                pip_low_collector_idx, pip_high_collector_idx,
                                                                collector_regions_to_pip);
  }
  if (mutator_regions_to_pip > 0) {
    free_set->shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Mutator,
                                                                pip_low_mutator_idx, pip_high_mutator_idx,
                                                                mutator_regions_to_pip);
  }

  // Sort in increasing order according to live data bytes. Note that candidates represents the number of regions
  // that qualify to be promoted by evacuation.
  size_t old_consumed = 0;
  if (candidates > 0) {
    size_t selected_regions = 0;
    size_t selected_live = 0;
    QuickSort::sort<AgedRegionData>(sorted_regions, candidates, compare_by_aged_live);
    for (size_t i = 0; i < candidates; i++) {
      ShenandoahHeapRegion* const region = sorted_regions[i]._region;
      const size_t region_live_data = sorted_regions[i]._live_data;
      // Budget the promotion with waste factored in; only preselect while the old reserve can hold the copy.
      const size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste);
      if (old_consumed + promotion_need <= old_promotion_reserve) {
        old_consumed += promotion_need;
        candidate_regions_for_promotion_by_copy[region->index()] = true;
        selected_regions++;
        selected_live += region_live_data;
      } else {
        // We rejected this promotable region from the collection set because we had no room to hold its copy.
        // Add this region to promo potential for next GC.
        promo_potential += region_live_data;
        assert(!candidate_regions_for_promotion_by_copy[region->index()], "Shouldn't be selected");
      }
      // We keep going even if one region is excluded from selection because we need to accumulate all eligible
      // regions that are not preselected into promo_potential
    }
    log_debug(gc, ergo)("Preselected %zu regions containing " PROPERFMT " live data,"
                        " consuming: " PROPERFMT " of budgeted: " PROPERFMT,
                        selected_regions, PROPERFMTARGS(selected_live), PROPERFMTARGS(old_consumed), PROPERFMTARGS(old_promotion_reserve));
  }

  log_info(gc, ergo)("Promotion potential of aged regions with sufficient garbage: " PROPERFMT, PROPERFMTARGS(promo_potential));
  heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad);
  heap->old_generation()->set_promotion_potential(promo_potential);
  return old_consumed;
}
void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahCollectionSet* collection_set = heap->collection_set();
@ -798,34 +297,7 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
collection_set->clear();
ShenandoahHeapLocker locker(heap->lock());
if (is_generational) {
// Seed the collection set with resource area-allocated
// preselected regions, which are removed when we exit this scope.
ShenandoahCollectionSetPreselector preselector(collection_set, heap->num_regions());
// Find the amount that will be promoted, regions that will be promoted in
// place, and preselected older regions that will be promoted by evacuation.
compute_evacuation_budgets(heap);
// Choose the collection set, including the regions preselected above for promotion into the old generation.
size_t add_regions_to_old = _heuristics->choose_collection_set(collection_set);
// Even if collection_set->is_empty(), we want to adjust budgets, making reserves available to mutator.
adjust_evacuation_budgets(heap, collection_set, add_regions_to_old);
if (is_global()) {
// We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so
// the remembered set scan can use that to avoid walking into garbage. When the next old mark begins, we will
// use the mark bitmap to make the old regions parsable by coalescing and filling any unmarked objects. Thus,
// we prepare for old collections by remembering which regions are old at this time. Note that any objects
// promoted into old regions will be above TAMS, and so will be considered marked. However, free regions that
// become old after this point will not be covered correctly by the mark bitmap, so we must be careful not to
// coalesce those regions. Only the old regions which are not part of the collection set at this point are
// eligible for coalescing. As implemented now, this has the side effect of possibly initiating mixed-evacuations
// after a global cycle for old regions that were not included in this collection set.
heap->old_generation()->prepare_for_mixed_collections_after_global_gc();
}
} else {
_heuristics->choose_collection_set(collection_set);
}
_heuristics->choose_collection_set(collection_set);
}

View File

@ -60,31 +60,6 @@ protected:
ShenandoahHeuristics* _heuristics;
private:
// Compute evacuation budgets prior to choosing collection set.
void compute_evacuation_budgets(ShenandoahHeap* heap);
// Adjust evacuation budgets after choosing collection set. The argument regions_to_xfer represents regions to be
// transfered to old based on decisions made in top_off_collection_set()
void adjust_evacuation_budgets(ShenandoahHeap* heap,
ShenandoahCollectionSet* collection_set, size_t regions_to_xfer);
// Preselect for possible inclusion into the collection set exactly the most
// garbage-dense regions, including those that satisfy criteria 1 & 2 below,
// and whose live bytes will fit within old_available budget:
// Criterion 1. region age >= tenuring threshold
// Criterion 2. region garbage percentage > old garbage threshold
//
// Identifies regions eligible for promotion in place,
// being those of at least tenuring_threshold age that have lower garbage
// density.
//
// Updates promotion_potential and pad_for_promote_in_place fields
// of the heap. Returns bytes of live object memory in the preselected
// regions, which are marked in the preselected_regions() indicator
// array of the heap's collection set, which should be initialized
// to false.
size_t select_aged_regions(size_t old_promotion_reserve);
// Return available assuming that we can allocate no more than capacity bytes within this generation.
size_t available(size_t capacity) const;