8365880: Shenandoah: Unify memory usage accounting in ShenandoahFreeSet
Reviewed-by: wkemper
parent 1781b186b5, commit ec059c0e85
@@ -88,9 +88,10 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti
   size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0;

   log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Max Young Evacuation: %zu"
-                     "%s, Max Old Evacuation: %zu%s, Actual Free: %zu%s.",
+                     "%s, Max Old Evacuation: %zu%s, Max Either Evacuation: %zu%s, Actual Free: %zu%s.",
                      byte_size_in_proper_unit(max_young_cset), proper_unit_for_byte_size(max_young_cset),
                      byte_size_in_proper_unit(max_old_cset), proper_unit_for_byte_size(max_old_cset),
+                     byte_size_in_proper_unit(unaffiliated_young_memory), proper_unit_for_byte_size(unaffiliated_young_memory),
                      byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free));

   for (size_t idx = 0; idx < size; idx++) {
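[Editor's note: byte_size_in_proper_unit() and proper_unit_for_byte_size() are the HotSpot helpers that scale a byte count to a readable unit and supply the matching unit string, which is why every %zu in the format above is paired with a %s. A minimal sketch of the pattern, with a hypothetical value:]

  size_t reserve = 3 * M;                              // 3 MiB, using HotSpot's M constant
  log_info(gc, ergo)("Reserve: %zu%s",
                     byte_size_in_proper_unit(reserve),    // scaled magnitude, here 3
                     proper_unit_for_byte_size(reserve));  // unit suffix, here "M"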
@@ -133,9 +134,8 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti
         cset->add_region(r);
       }
     }

   if (regions_transferred_to_old > 0) {
     heap->generation_sizer()->force_transfer_to_old(regions_transferred_to_old);
     assert(young_evac_reserve > regions_transferred_to_old * region_size_bytes, "young reserve cannot be negative");
     heap->young_generation()->set_evacuation_reserve(young_evac_reserve - regions_transferred_to_old * region_size_bytes);
     heap->old_generation()->set_evacuation_reserve(old_evac_reserve + regions_transferred_to_old * region_size_bytes);
   }
@@ -606,12 +606,12 @@ void ShenandoahOldHeuristics::set_trigger_if_old_is_fragmented(size_t first_old_
 }

 void ShenandoahOldHeuristics::set_trigger_if_old_is_overgrown() {
-  size_t old_used = _old_generation->used() + _old_generation->get_humongous_waste();
+  // used() includes humongous waste
+  size_t old_used = _old_generation->used();
   size_t trigger_threshold = _old_generation->usage_trigger_threshold();
   // Detects unsigned arithmetic underflow
   assert(old_used <= _heap->capacity(),
-         "Old used (%zu, %zu) must not be more than heap capacity (%zu)",
-         _old_generation->used(), _old_generation->get_humongous_waste(), _heap->capacity());
+         "Old used (%zu) must not be more than heap capacity (%zu)", _old_generation->used(), _heap->capacity());
   if (old_used > trigger_threshold) {
     _growth_trigger = true;
   }
@@ -683,7 +683,8 @@ bool ShenandoahOldHeuristics::should_start_gc() {
   if (_growth_trigger) {
     // Growth may be falsely triggered during mixed evacuations, before the mixed-evacuation candidates have been
     // evacuated. Before acting on a false trigger, we check to confirm the trigger condition is still satisfied.
-    const size_t current_usage = _old_generation->used() + _old_generation->get_humongous_waste();
+    // _old_generation->used() includes humongous waste.
+    const size_t current_usage = _old_generation->used();
     const size_t trigger_threshold = _old_generation->usage_trigger_threshold();
     const size_t heap_size = heap->capacity();
     const size_t ignore_threshold = (ShenandoahIgnoreOldGrowthBelowPercentage * heap_size) / 100;

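[Editor's note: both hunks above make the same correction. Under the unified accounting introduced by this change, ShenandoahOldGeneration::used() already folds in humongous waste, so keeping the old used() + get_humongous_waste() expression would double-count the waste. A worked sketch with hypothetical numbers:]

  // Suppose old-gen has 900 MB counted as used (40 MB of which is humongous
  // waste, now already included) and the usage trigger threshold is 920 MB.
  size_t old_used = _old_generation->used();   // 900 MB
  // Double-counting would give 900 + 40 = 940 MB > 920 MB: a spurious trigger.
  if (old_used > trigger_threshold) {          // 900 <= 920: no trigger
    _growth_trigger = true;
  }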
File diff suppressed because it is too large
@@ -35,8 +35,12 @@ enum class ShenandoahFreeSetPartitionId : uint8_t {
   Mutator,       // Region is in the Mutator free set: available memory is available to mutators.
   Collector,     // Region is in the Collector free set: available memory is reserved for evacuations.
   OldCollector,  // Region is in the Old Collector free set:
-                 //   available memory is reserved for old evacuations and for promotions..
-  NotFree        // Region is in no free set: it has no available memory
+                 //   available memory is reserved for old evacuations and for promotions.
+  NotFree        // Region is in no free set: it has no available memory. Consult region affiliation
+                 //   to determine whether this retired region is young or old. If young, the region
+                 //   is considered to be part of the Mutator partition. (When we retire from the
+                 //   Collector partition, we decrease total_region_count for Collector and increase
+                 //   it for Mutator, making similar adjustments to used; the net impact on available is neutral.)
 };

 // ShenandoahRegionPartitions provides an abstraction to help organize the implementation of ShenandoahFreeSet. This
@@ -45,64 +49,92 @@ enum class ShenandoahFreeSetPartitionId : uint8_t {
 // for which the ShenandoahFreeSetPartitionId is not equal to NotFree.
 class ShenandoahRegionPartitions {

 private:
+  using idx_t = ShenandoahSimpleBitMap::idx_t;
+
+public:
   // We do not maintain counts, capacity, or used for regions that are not free. Informally, if a region is NotFree, it is
   // in no partition. NumPartitions represents the size of an array that may be indexed by Mutator or Collector.
   static constexpr ShenandoahFreeSetPartitionId NumPartitions = ShenandoahFreeSetPartitionId::NotFree;
   static constexpr int IntNumPartitions = int(ShenandoahFreeSetPartitionId::NotFree);
   static constexpr uint UIntNumPartitions = uint(ShenandoahFreeSetPartitionId::NotFree);

-  const ssize_t _max;       // The maximum number of heap regions
+private:
+  const idx_t _max;         // The maximum number of heap regions
   const size_t _region_size_bytes;
   const ShenandoahFreeSet* _free_set;
   // For each partition, we maintain a bitmap of which regions are affiliated with this partition.
   ShenandoahSimpleBitMap _membership[UIntNumPartitions];

   // For each partition, we track an interval outside of which a region affiliated with that partition is guaranteed
   // not to be found. This makes searches for free space more efficient. For each partition p, _leftmosts[p]
   // represents its least index, and its _rightmosts[p] its greatest index. Empty intervals are indicated by the
   // canonical [_max, -1].
-  ssize_t _leftmosts[UIntNumPartitions];
-  ssize_t _rightmosts[UIntNumPartitions];
+  idx_t _leftmosts[UIntNumPartitions];
+  idx_t _rightmosts[UIntNumPartitions];

   // Allocation for humongous objects needs to find regions that are entirely empty. For each partition p, _leftmosts_empty[p]
   // represents the first region belonging to this partition that is completely empty and _rightmosts_empty[p] represents the
   // last region that is completely empty. If there is no completely empty region in this partition, this is represented
   // by the canonical [_max, -1].
-  ssize_t _leftmosts_empty[UIntNumPartitions];
-  ssize_t _rightmosts_empty[UIntNumPartitions];
+  idx_t _leftmosts_empty[UIntNumPartitions];
+  idx_t _rightmosts_empty[UIntNumPartitions];

-  // For each partition p, _capacity[p] represents the total amount of memory within the partition at the time
-  // of the most recent rebuild, _used[p] represents the total amount of memory that has been allocated within this
-  // partition (either already allocated as of the rebuild, or allocated since the rebuild). _capacity[p] and _used[p]
-  // are denoted in bytes. Note that some regions that had been assigned to a particular partition at rebuild time
-  // may have been retired following the rebuild. The tallies for these regions are still reflected in _capacity[p]
-  // and _used[p], even though the region may have been removed from the free set.
+  // For each partition p:
+  //   _capacity[p] represents the total amount of memory within the partition, including retired regions, as adjusted
+  //     by transfers of memory between partitions
+  //   _used[p] represents the total amount of memory that has been allocated within this partition (either already
+  //     allocated as of the rebuild, or allocated since the rebuild).
+  //   _available[p] represents the total amount of memory that can be allocated within partition p, calculated from
+  //     _capacity[p] minus _used[p], where the difference is computed and assigned under heap lock
+  //
+  //   _region_counts[p] represents the number of regions associated with the partition which currently have available memory.
+  //     When a region is retired from partition p, _region_counts[p] is decremented.
+  //   total_region_counts[p] is _capacity[p] / RegionSizeBytes.
+  //   _empty_region_counts[p] is the number of regions associated with p which are entirely empty
+  //
+  // capacity and used values are expressed in bytes.
+  //
+  // When a region is retired, used[p] is increased to account for alignment waste. capacity is unaffected.
+  //
+  // When a region is "flipped", we adjust capacities and region counts for original and destination partitions. We also
+  // adjust used values when flipping from mutator to collector. Flip to old collector does not need to adjust used because
+  // only empty regions can be flipped to old collector.
+  //
+  // All memory quantities (capacity, available, used) are represented in bytes.
   size_t _capacity[UIntNumPartitions];

   size_t _used[UIntNumPartitions];
+  size_t _available[UIntNumPartitions];

+  // Measured in bytes.
+  size_t _allocated_since_gc_start[UIntNumPartitions];

+  // Some notes:
+  //   total_region_counts[p] is _capacity[p] / region_size_bytes
+  //   retired_regions[p] is total_region_counts[p] - _region_counts[p]
+  //   _empty_region_counts[p] <= _region_counts[p] <= total_region_counts[p]
+  //   affiliated regions is total_region_counts[p] - empty_region_counts[p]
+  //   used_regions is affiliated_regions * region_size_bytes
+  //   _available[p] is _capacity[p] - _used[p]
   size_t _region_counts[UIntNumPartitions];
+  size_t _empty_region_counts[UIntNumPartitions];

+  // Humongous waste, in bytes, can exist in Mutator partition for recently allocated humongous objects
+  // and in OldCollector partition for humongous objects that have been promoted in place.
+  size_t _humongous_waste[UIntNumPartitions];

   // For each partition p, _left_to_right_bias is true iff allocations are normally made from lower indexed regions
   // before higher indexed regions.
   bool _left_to_right_bias[UIntNumPartitions];

-  // Shrink the intervals associated with partition when region idx is removed from this free set
-  inline void shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, ssize_t idx);
-
-  // Shrink the intervals associated with partition when regions low_idx through high_idx inclusive are removed from this free set
-  inline void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition,
-                                                                ssize_t low_idx, ssize_t high_idx);
-  inline void expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, ssize_t idx, size_t capacity);

   inline bool is_mutator_partition(ShenandoahFreeSetPartitionId p);
   inline bool is_young_collector_partition(ShenandoahFreeSetPartitionId p);
   inline bool is_old_collector_partition(ShenandoahFreeSetPartitionId p);
   inline bool available_implies_empty(size_t available);

 #ifndef PRODUCT
-  void dump_bitmap_row(ssize_t region_idx) const;
-  void dump_bitmap_range(ssize_t start_region_idx, ssize_t end_region_idx) const;
+  void dump_bitmap_row(idx_t region_idx) const;
+  void dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const;
   void dump_bitmap() const;
 #endif
 public:
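[Editor's note: the "Some notes" identities above pin down how the new per-partition tallies relate. A standalone sketch of those invariants, illustrative only and not JDK code; the names mirror the fields above:]

  #include <cassert>
  #include <cstddef>

  struct PartitionTallies {              // mirrors ShenandoahRegionPartitions' per-partition arrays
    size_t capacity, used, available;    // bytes
    size_t region_counts;                // regions that still have available memory
    size_t empty_region_counts;          // regions that are entirely empty
  };

  void check_partition_identities(const PartitionTallies& t, size_t region_size_bytes) {
    size_t total_region_counts = t.capacity / region_size_bytes;
    size_t retired_regions     = total_region_counts - t.region_counts;
    size_t affiliated_regions  = total_region_counts - t.empty_region_counts;
    assert(t.empty_region_counts <= t.region_counts);
    assert(t.region_counts <= total_region_counts);
    assert(t.available == t.capacity - t.used);   // maintained under the heap lock
    (void)retired_regions; (void)affiliated_regions;
  }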
@@ -111,6 +143,11 @@ public:

   static const size_t FreeSetUnderConstruction = SIZE_MAX;

+  inline idx_t max() const { return _max; }
+
+  // At initialization, reset OldCollector tallies
+  void initialize_old_collector();
+
   // Remove all regions from all partitions and reset all bounds
   void make_all_regions_unavailable();

@@ -119,70 +156,116 @@ public:
     _membership[int(p)].set_bit(idx);
   }

+  // Clear the partition id for a particular region without adjusting interval bounds or usage/capacity tallies
+  inline void raw_clear_membership(size_t idx, ShenandoahFreeSetPartitionId p) {
+    _membership[int(p)].clear_bit(idx);
+  }
+
+  inline void one_region_is_no_longer_empty(ShenandoahFreeSetPartitionId partition);
+
   // Set the Mutator intervals, usage, and capacity according to arguments. Reset the Collector intervals, used, capacity
   // to represent empty Collector free set. We use this at the end of rebuild_free_set() to avoid the overhead of making
   // many redundant incremental adjustments to the mutator intervals as the free set is being rebuilt.
-  void establish_mutator_intervals(ssize_t mutator_leftmost, ssize_t mutator_rightmost,
-                                   ssize_t mutator_leftmost_empty, ssize_t mutator_rightmost_empty,
-                                   size_t mutator_region_count, size_t mutator_used);
+  void establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost,
+                                   idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty,
+                                   size_t total_mutator_regions, size_t empty_mutator_regions,
+                                   size_t mutator_region_count, size_t mutator_used, size_t mutator_humongous_words_waste);

   // Set the OldCollector intervals, usage, and capacity according to arguments. We use this at the end of rebuild_free_set()
   // to avoid the overhead of making many redundant incremental adjustments to the mutator intervals as the free set is being
   // rebuilt.
-  void establish_old_collector_intervals(ssize_t old_collector_leftmost, ssize_t old_collector_rightmost,
-                                         ssize_t old_collector_leftmost_empty, ssize_t old_collector_rightmost_empty,
-                                         size_t old_collector_region_count, size_t old_collector_used);
+  void establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost,
+                                         idx_t old_collector_leftmost_empty, idx_t old_collector_rightmost_empty,
+                                         size_t total_old_collector_region_count, size_t old_collector_empty,
+                                         size_t old_collector_regions, size_t old_collector_used,
+                                         size_t old_collector_humongous_words_waste);

+  void establish_interval(ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx,
+                          idx_t low_empty_idx, idx_t high_empty_idx);
+
+  // Shrink the intervals associated with partition when region idx is removed from this free set
+  inline void shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx);
+
+  // Shrink the intervals associated with partition when regions low_idx through high_idx inclusive are removed from this free set
+  void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition,
+                                                         idx_t low_idx, idx_t high_idx, size_t num_regions);
+
+  void expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t capacity);
+  void expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition,
+                                                         idx_t low_idx, idx_t high_idx,
+                                                         idx_t low_empty_idx, idx_t high_empty_idx);
+
   // Retire region idx from within partition, leaving its capacity and used as part of the original free partition's totals.
   // Requires that region idx is in the Mutator or Collector partitions. Hereafter, identifies this region as NotFree.
   // Any remnant of available memory at the time of retirement is added to the original partition's total of used bytes.
-  void retire_from_partition(ShenandoahFreeSetPartitionId p, ssize_t idx, size_t used_bytes);
+  // Return the number of waste bytes (if any).
+  size_t retire_from_partition(ShenandoahFreeSetPartitionId p, idx_t idx, size_t used_bytes);

   // Retire all regions between low_idx and high_idx inclusive from within partition. Requires that each region idx is
   // in the same Mutator or Collector partition. Hereafter, identifies each region as NotFree. Assumes that each region
   // is now considered fully used, since the region is presumably used to represent a humongous object.
-  void retire_range_from_partition(ShenandoahFreeSetPartitionId partition, ssize_t low_idx, ssize_t high_idx);
+  void retire_range_from_partition(ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx);

+  void unretire_to_partition(ShenandoahHeapRegion* region, ShenandoahFreeSetPartitionId which_partition);
+
   // Place region idx into free set which_partition. Requires that idx is currently NotFree.
-  void make_free(ssize_t idx, ShenandoahFreeSetPartitionId which_partition, size_t region_capacity);
+  void make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t region_capacity);

-  // Place region idx into free partition new_partition, adjusting used and capacity totals for the original and new partition
-  // given that available bytes can still be allocated within this region. Requires that idx is currently not NotFree.
-  void move_from_partition_to_partition(ssize_t idx, ShenandoahFreeSetPartitionId orig_partition,
-                                        ShenandoahFreeSetPartitionId new_partition, size_t available);
+  // Place region idx into free partition new_partition, not adjusting used and capacity totals for the original and new partition.
+  // available represents bytes that can still be allocated within this region. Requires that idx is currently not NotFree.
+  size_t move_from_partition_to_partition_with_deferred_accounting(idx_t idx, ShenandoahFreeSetPartitionId orig_partition,
+                                                                   ShenandoahFreeSetPartitionId new_partition, size_t available);
+
+  // Place region idx into free partition new_partition, adjusting used and capacity totals for the original and new partition.
+  // available represents bytes that can still be allocated within this region. Requires that idx is currently not NotFree.
+  void move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition,
+                                        ShenandoahFreeSetPartitionId new_partition, size_t available);

-  const char* partition_membership_name(ssize_t idx) const;
+  void transfer_used_capacity_from_to(ShenandoahFreeSetPartitionId from_partition, ShenandoahFreeSetPartitionId to_partition,
+                                      size_t regions);
+
+  const char* partition_membership_name(idx_t idx) const;

   // Return the index of the next available region >= start_index, or maximum_regions if not found.
-  inline ssize_t find_index_of_next_available_region(ShenandoahFreeSetPartitionId which_partition, ssize_t start_index) const;
+  inline idx_t find_index_of_next_available_region(ShenandoahFreeSetPartitionId which_partition,
+                                                   idx_t start_index) const;

   // Return the index of the previous available region <= last_index, or -1 if not found.
-  inline ssize_t find_index_of_previous_available_region(ShenandoahFreeSetPartitionId which_partition, ssize_t last_index) const;
+  inline idx_t find_index_of_previous_available_region(ShenandoahFreeSetPartitionId which_partition,
+                                                       idx_t last_index) const;

   // Return the index of the next available cluster of cluster_size regions >= start_index, or maximum_regions if not found.
-  inline ssize_t find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition,
-                                                                 ssize_t start_index, size_t cluster_size) const;
+  inline idx_t find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition,
+                                                               idx_t start_index, size_t cluster_size) const;

   // Return the index of the previous available cluster of cluster_size regions <= last_index, or -1 if not found.
-  inline ssize_t find_index_of_previous_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition,
-                                                                     ssize_t last_index, size_t cluster_size) const;
+  inline idx_t find_index_of_previous_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition,
+                                                                   idx_t last_index, size_t cluster_size) const;

-  inline bool in_free_set(ShenandoahFreeSetPartitionId which_partition, ssize_t idx) const {
+  inline bool in_free_set(ShenandoahFreeSetPartitionId which_partition, idx_t idx) const {
     return _membership[int(which_partition)].is_set(idx);
   }

   // Returns the ShenandoahFreeSetPartitionId affiliation of region idx, NotFree if this region is not currently in any partition.
   // This does not enforce that free_set membership implies allocation capacity.
-  inline ShenandoahFreeSetPartitionId membership(ssize_t idx) const;
+  inline ShenandoahFreeSetPartitionId membership(idx_t idx) const {
+    assert (idx < _max, "index is sane: %zu < %zu", idx, _max);
+    ShenandoahFreeSetPartitionId result = ShenandoahFreeSetPartitionId::NotFree;
+    for (uint partition_id = 0; partition_id < UIntNumPartitions; partition_id++) {
+      if (_membership[partition_id].is_set(idx)) {
+        assert(result == ShenandoahFreeSetPartitionId::NotFree, "Region should reside in only one partition");
+        result = (ShenandoahFreeSetPartitionId) partition_id;
+      }
+    }
+    return result;
+  }

 #ifdef ASSERT
   // Returns true iff region idx's membership is which_partition. If which_partition represents a free set, asserts
   // that the region has allocation capacity.
-  inline bool partition_id_matches(ssize_t idx, ShenandoahFreeSetPartitionId which_partition) const;
+  inline bool partition_id_matches(idx_t idx, ShenandoahFreeSetPartitionId which_partition) const;
 #endif

   inline size_t max_regions() const { return _max; }

   inline size_t region_size_bytes() const { return _region_size_bytes; };

   // The following four methods return the left-most and right-most bounds on ranges of regions representing
@@ -192,14 +275,54 @@ public:
   // leftmost() and leftmost_empty() return _max, rightmost() and rightmost_empty() return 0
   // otherwise, expect the following:
   //   0 <= leftmost <= leftmost_empty <= rightmost_empty <= rightmost < _max
-  inline ssize_t leftmost(ShenandoahFreeSetPartitionId which_partition) const;
-  inline ssize_t rightmost(ShenandoahFreeSetPartitionId which_partition) const;
-  ssize_t leftmost_empty(ShenandoahFreeSetPartitionId which_partition);
-  ssize_t rightmost_empty(ShenandoahFreeSetPartitionId which_partition);
+  inline idx_t leftmost(ShenandoahFreeSetPartitionId which_partition) const;
+  inline idx_t rightmost(ShenandoahFreeSetPartitionId which_partition) const;
+  idx_t leftmost_empty(ShenandoahFreeSetPartitionId which_partition);
+  idx_t rightmost_empty(ShenandoahFreeSetPartitionId which_partition);

   inline bool is_empty(ShenandoahFreeSetPartitionId which_partition) const;

+  inline void increase_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions);
+  inline void decrease_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions);
+  inline size_t get_region_counts(ShenandoahFreeSetPartitionId which_partition) {
+    assert (which_partition < NumPartitions, "selected free set must be valid");
+    return _region_counts[int(which_partition)];
+  }
+
+  inline void increase_empty_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions);
+  inline void decrease_empty_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions);
+  inline size_t get_empty_region_counts(ShenandoahFreeSetPartitionId which_partition) {
+    assert (which_partition < NumPartitions, "selected free set must be valid");
+    return _empty_region_counts[int(which_partition)];
+  }
+
+  inline void increase_capacity(ShenandoahFreeSetPartitionId which_partition, size_t bytes);
+  inline void decrease_capacity(ShenandoahFreeSetPartitionId which_partition, size_t bytes);
+  inline size_t get_capacity(ShenandoahFreeSetPartitionId which_partition) {
+    assert (which_partition < NumPartitions, "Partition must be valid");
+    return _capacity[int(which_partition)];
+  }
+
+  inline void increase_available(ShenandoahFreeSetPartitionId which_partition, size_t bytes);
+  inline void decrease_available(ShenandoahFreeSetPartitionId which_partition, size_t bytes);
+  inline size_t get_available(ShenandoahFreeSetPartitionId which_partition);
+
+  inline void increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes);
+  inline void decrease_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes);
+  inline size_t get_used(ShenandoahFreeSetPartitionId which_partition) {
+    assert (which_partition < NumPartitions, "Partition must be valid");
+    return _used[int(which_partition)];
+  }
+
+  inline void increase_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t bytes);
+  inline void decrease_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
+    shenandoah_assert_heaplocked();
+    assert (which_partition < NumPartitions, "Partition must be valid");
+    assert(_humongous_waste[int(which_partition)] >= bytes, "Cannot decrease waste beyond what is there");
+    _humongous_waste[int(which_partition)] -= bytes;
+  }
+
+  inline size_t get_humongous_waste(ShenandoahFreeSetPartitionId which_partition);
+
   inline void set_bias_from_left_to_right(ShenandoahFreeSetPartitionId which_partition, bool value) {
     assert (which_partition < NumPartitions, "selected free set must be valid");
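[Editor's note: the bounds documented above ("0 <= leftmost <= leftmost_empty <= rightmost_empty <= rightmost < _max", with [_max, -1] as the canonical empty interval) can be read as one checkable predicate. A sketch, separate from the class's own assert_bounds() and illustrative only:]

  // Illustrative check of the documented interval invariant for one partition.
  bool interval_is_sane(ssize_t leftmost, ssize_t leftmost_empty,
                        ssize_t rightmost_empty, ssize_t rightmost, ssize_t max) {
    if (leftmost == max) {
      return rightmost <= 0;            // empty partition: canonical bounds
    }
    if (leftmost_empty == max) {        // no completely empty regions in this partition
      return 0 <= leftmost && leftmost <= rightmost && rightmost < max;
    }
    return 0 <= leftmost && leftmost <= leftmost_empty &&
           leftmost_empty <= rightmost_empty &&
           rightmost_empty <= rightmost && rightmost < max;
  }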
@@ -227,10 +350,17 @@ public:
     assert(_available[int(which_partition)] == _capacity[int(which_partition)] - _used[int(which_partition)],
            "Expect available (%zu) equals capacity (%zu) - used (%zu) for partition %s",
            _available[int(which_partition)], _capacity[int(which_partition)], _used[int(which_partition)],
-           partition_membership_name(ssize_t(which_partition)));
+           partition_membership_name(idx_t(which_partition)));
     return _available[int(which_partition)];
   }

+  // Returns bytes of humongous waste
+  inline size_t humongous_waste(ShenandoahFreeSetPartitionId which_partition) const {
+    assert (which_partition < NumPartitions, "selected free set must be valid");
+    // This may be called with or without the global heap lock. Changes to _humongous_waste[] are always made with heap lock.
+    return _humongous_waste[int(which_partition)];
+  }
+
   // Return available_in assuming caller does not hold the heap lock. In production builds, available is
   // returned without acquiring the lock. In debug builds, the global heap lock is acquired in order to
   // enforce a consistency assert.
@@ -243,17 +373,12 @@ public:
            (_available[int(which_partition)] == _capacity[int(which_partition)] - _used[int(which_partition)]),
            "Expect available (%zu) equals capacity (%zu) - used (%zu) for partition %s",
            _available[int(which_partition)], _capacity[int(which_partition)], _used[int(which_partition)],
-           partition_membership_name(ssize_t(which_partition)));
+           partition_membership_name(idx_t(which_partition)));
 #endif
     return _available[int(which_partition)];
   }

-  inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value) {
-    shenandoah_assert_heaplocked();
-    assert (which_partition < NumPartitions, "selected free set must be valid");
-    _capacity[int(which_partition)] = value;
-    _available[int(which_partition)] = value - _used[int(which_partition)];
-  }
+  inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value);

   inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
     shenandoah_assert_heaplocked();
@@ -284,7 +409,7 @@ public:
   //      idx >= leftmost &&
   //      idx <= rightmost
   //    }
-  void assert_bounds() NOT_DEBUG_RETURN;
+  void assert_bounds(bool validate_totals) NOT_DEBUG_RETURN;
 };

 // Publicly, ShenandoahFreeSet represents memory that is available to mutator threads. The public capacity(), used(),
@@ -312,10 +437,13 @@ public:
 // during the next GC pass.

 class ShenandoahFreeSet : public CHeapObj<mtGC> {
+  using idx_t = ShenandoahSimpleBitMap::idx_t;
 private:
   ShenandoahHeap* const _heap;
   ShenandoahRegionPartitions _partitions;

+  size_t _total_humongous_waste;
+
   HeapWord* allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r);

   // Return the address of memory allocated, setting in_new_region to true iff the allocation is taken
@@ -330,6 +458,105 @@ private:

   const ssize_t INITIAL_ALLOC_BIAS_WEIGHT = 256;

+  // bytes used by young
+  size_t _total_young_used;
+  template<bool UsedByMutatorChanged, bool UsedByCollectorChanged>
+  inline void recompute_total_young_used() {
+    if (UsedByMutatorChanged || UsedByCollectorChanged) {
+      shenandoah_assert_heaplocked();
+      _total_young_used = (_partitions.used_by(ShenandoahFreeSetPartitionId::Mutator) +
+                           _partitions.used_by(ShenandoahFreeSetPartitionId::Collector));
+    }
+  }
+
+  // bytes used by old
+  size_t _total_old_used;
+  template<bool UsedByOldCollectorChanged>
+  inline void recompute_total_old_used() {
+    if (UsedByOldCollectorChanged) {
+      shenandoah_assert_heaplocked();
+      _total_old_used = _partitions.used_by(ShenandoahFreeSetPartitionId::OldCollector);
+    }
+  }
+
+public:
+  // We make this public so that native code can see its value
+  // bytes used by global
+  size_t _total_global_used;
+private:
+  // Prerequisite: _total_young_used and _total_old_used are valid
+  template<bool UsedByMutatorChanged, bool UsedByCollectorChanged, bool UsedByOldCollectorChanged>
+  inline void recompute_total_global_used() {
+    if (UsedByMutatorChanged || UsedByCollectorChanged || UsedByOldCollectorChanged) {
+      shenandoah_assert_heaplocked();
+      _total_global_used = _total_young_used + _total_old_used;
+    }
+  }
+
+  template<bool UsedByMutatorChanged, bool UsedByCollectorChanged, bool UsedByOldCollectorChanged>
+  inline void recompute_total_used() {
+    recompute_total_young_used<UsedByMutatorChanged, UsedByCollectorChanged>();
+    recompute_total_old_used<UsedByOldCollectorChanged>();
+    recompute_total_global_used<UsedByMutatorChanged, UsedByCollectorChanged, UsedByOldCollectorChanged>();
+  }
+
+  size_t _young_affiliated_regions;
+  size_t _old_affiliated_regions;
+  size_t _global_affiliated_regions;
+
+  size_t _young_unaffiliated_regions;
+  size_t _global_unaffiliated_regions;
+
+  size_t _total_young_regions;
+  size_t _total_global_regions;
+
+  size_t _mutator_bytes_allocated_since_gc_start;
+
+  // If the only affiliation changes are promote-in-place and generation sizes have not changed,
+  // we have AffiliatedChangesAreGlobalNeutral.
+  // If the only affiliation changes are non-empty regions moved from Mutator to Collector and young size has not changed,
+  // we have AffiliatedChangesAreYoungNeutral.
+  // If the only unaffiliated changes are empty regions moved between Mutator and Collector, we have
+  // UnaffiliatedChangesAreYoungNeutral.
+  template<bool MutatorEmptiesChanged, bool CollectorEmptiesChanged, bool OldCollectorEmptiesChanged,
+           bool MutatorSizeChanged, bool CollectorSizeChanged, bool OldCollectorSizeChanged,
+           bool AffiliatedChangesAreYoungNeutral, bool AffiliatedChangesAreGlobalNeutral,
+           bool UnaffiliatedChangesAreYoungNeutral>
+  inline void recompute_total_affiliated() {
+    shenandoah_assert_heaplocked();
+    size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+    if (!UnaffiliatedChangesAreYoungNeutral && (MutatorEmptiesChanged || CollectorEmptiesChanged)) {
+      _young_unaffiliated_regions = (_partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator) +
+                                     _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector));
+    }
+    if (!AffiliatedChangesAreYoungNeutral &&
+        (MutatorSizeChanged || CollectorSizeChanged || MutatorEmptiesChanged || CollectorEmptiesChanged)) {
+      _young_affiliated_regions = ((_partitions.get_capacity(ShenandoahFreeSetPartitionId::Mutator) +
+                                    _partitions.get_capacity(ShenandoahFreeSetPartitionId::Collector)) / region_size_bytes -
+                                   _young_unaffiliated_regions);
+    }
+    if (OldCollectorSizeChanged || OldCollectorEmptiesChanged) {
+      _old_affiliated_regions = (_partitions.get_capacity(ShenandoahFreeSetPartitionId::OldCollector) / region_size_bytes -
+                                 _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector));
+    }
+    if (!AffiliatedChangesAreGlobalNeutral &&
+        (MutatorEmptiesChanged || CollectorEmptiesChanged || OldCollectorEmptiesChanged)) {
+      _global_unaffiliated_regions =
+        _young_unaffiliated_regions + _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector);
+    }
+    if (!AffiliatedChangesAreGlobalNeutral &&
+        (MutatorSizeChanged || CollectorSizeChanged || MutatorEmptiesChanged || CollectorEmptiesChanged ||
+         OldCollectorSizeChanged || OldCollectorEmptiesChanged)) {
+      _global_affiliated_regions = _young_affiliated_regions + _old_affiliated_regions;
+    }
+#ifdef ASSERT
+    if (ShenandoahHeap::heap()->mode()->is_generational()) {
+      assert(_young_affiliated_regions * ShenandoahHeapRegion::region_size_bytes() >= _total_young_used, "sanity");
+      assert(_old_affiliated_regions * ShenandoahHeapRegion::region_size_bytes() >= _total_old_used, "sanity");
+    }
+    assert(_global_affiliated_regions * ShenandoahHeapRegion::region_size_bytes() >= _total_global_used, "sanity");
+#endif
+  }
+
   // Increases used memory for the partition if the allocation is successful. `in_new_region` will be set
   // if this is the first allocation in the region.
   HeapWord* try_allocate_in(ShenandoahHeapRegion* region, ShenandoahAllocRequest& req, bool& in_new_region);
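[Editor's note: the boolean template parameters above let each call site state at compile time which partition tallies its operation can change; recomputation of unaffected totals then folds away as dead code. A hypothetical call site, illustrative only:]

  // After a successful mutator allocation only the Mutator tally moved, so only
  // the young and global totals are refreshed; the old total is left untouched
  // and its recompute compiles to nothing.
  recompute_total_used</*UsedByMutatorChanged*/ true,
                       /*UsedByCollectorChanged*/ false,
                       /*UsedByOldCollectorChanged*/ false>();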
@@ -347,6 +574,8 @@ private:
   // Precondition: ShenandoahHeapRegion::requires_humongous(req.size())
   HeapWord* allocate_contiguous(ShenandoahAllocRequest& req, bool is_humongous);

+  bool transfer_one_region_from_mutator_to_old_collector(size_t idx, size_t alloc_capacity);
+
   // Change region r from the Mutator partition to the GC's Collector or OldCollector partition. This requires that the
   // region is entirely empty.
   //
@@ -374,7 +603,8 @@ private:

   // Search for allocation in region with same affiliation as request, using given iterator.
   template<typename Iter>
-  HeapWord* allocate_with_affiliation(Iter& iterator, ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region);
+  HeapWord* allocate_with_affiliation(Iter& iterator, ShenandoahAffiliation affiliation,
+                                      ShenandoahAllocRequest& req, bool& in_new_region);

   // Return true if the respective generation for this request has free regions.
   bool can_allocate_in_new_region(const ShenandoahAllocRequest& req);
@@ -392,6 +622,10 @@ private:

   inline bool has_alloc_capacity(ShenandoahHeapRegion *r) const;

+  void transfer_empty_regions_from_to(ShenandoahFreeSetPartitionId source_partition,
+                                      ShenandoahFreeSetPartitionId dest_partition,
+                                      size_t num_regions);
+
   size_t transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector,
                                                                   size_t max_xfer_regions,
                                                                   size_t& bytes_transferred);
@@ -399,12 +633,8 @@ private:
                                                                       size_t max_xfer_regions,
                                                                       size_t& bytes_transferred);

-
-  // Determine whether we prefer to allocate from left to right or from right to left within the OldCollector free-set.
-  void establish_old_collector_alloc_bias();
-
   // Set max_capacity for young and old generations
   void establish_generation_sizes(size_t young_region_count, size_t old_region_count);
   size_t get_usable_free_words(size_t free_bytes) const;

   // log status, assuming lock has already been acquired by the caller.
@@ -415,10 +645,82 @@ public:

   ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions);

+  inline size_t max_regions() const { return _partitions.max(); }
+  ShenandoahFreeSetPartitionId membership(size_t index) const { return _partitions.membership(index); }
+  inline void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition,
+                                                                idx_t low_idx, idx_t high_idx, size_t num_regions) {
+    return _partitions.shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx, num_regions);
+  }
+
+  void reset_bytes_allocated_since_gc_start(size_t initial_bytes_allocated);
+
+  void increase_bytes_allocated(size_t bytes);
+
+  inline size_t get_bytes_allocated_since_gc_start() const {
+    return _mutator_bytes_allocated_since_gc_start;
+  }
+
   // Public because ShenandoahRegionPartitions assertions require access.
   inline size_t alloc_capacity(ShenandoahHeapRegion *r) const;
   inline size_t alloc_capacity(size_t idx) const;

+  // Return bytes used by old
+  inline size_t old_used() {
+    return _total_old_used;
+  }
+
+  ShenandoahFreeSetPartitionId prepare_to_promote_in_place(size_t idx, size_t bytes);
+  void account_for_pip_regions(size_t mutator_regions, size_t mutator_bytes, size_t collector_regions, size_t collector_bytes);
+
+  // This is used for unit testing. Not for production. Invokes exit() if old cannot be resized.
+  void resize_old_collector_capacity(size_t desired_regions);
+
+  // Return bytes used by young
+  inline size_t young_used() {
+    return _total_young_used;
+  }
+
+  // Return bytes used by global
+  inline size_t global_used() {
+    return _total_global_used;
+  }
+
+  size_t global_unaffiliated_regions() {
+    return _global_unaffiliated_regions;
+  }
+
+  size_t young_unaffiliated_regions() {
+    return _young_unaffiliated_regions;
+  }
+
+  size_t old_unaffiliated_regions() {
+    return _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector);
+  }
+
+  size_t young_affiliated_regions() {
+    return _young_affiliated_regions;
+  }
+
+  size_t old_affiliated_regions() {
+    return _old_affiliated_regions;
+  }
+
+  size_t global_affiliated_regions() {
+    return _global_affiliated_regions;
+  }
+
+  size_t total_young_regions() {
+    return _total_young_regions;
+  }
+
+  size_t total_old_regions() {
+    return _partitions.get_capacity(ShenandoahFreeSetPartitionId::OldCollector) / ShenandoahHeapRegion::region_size_bytes();
+  }
+
+  size_t total_global_regions() {
+    return _total_global_regions;
+  }
+
   void clear();

   // Examine the existing free set representation, capturing the current state into var arguments:
@@ -464,6 +766,8 @@ public:
   // for evacuation, invoke this to make regions available for mutator allocations.
   void move_regions_from_collector_to_mutator(size_t cset_regions);

+  void transfer_humongous_regions_from_mutator_to_old_collector(size_t xfer_regions, size_t humongous_waste_words);
+
   void recycle_trash();

   // Acquire heap lock and log status, assuming heap lock is not acquired by the caller.
@@ -482,6 +786,12 @@ public:
   inline size_t used() const { return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator); }
   inline size_t available() const { return _partitions.available_in_not_locked(ShenandoahFreeSetPartitionId::Mutator); }

+  inline size_t total_humongous_waste() const { return _total_humongous_waste; }
+  inline size_t humongous_waste_in_mutator() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator); }
+  inline size_t humongous_waste_in_old() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::OldCollector); }
+
+  void decrease_humongous_waste_for_regular_bypass(ShenandoahHeapRegion* r, size_t waste);
+
   HeapWord* allocate(ShenandoahAllocRequest& req, bool& in_new_region);

   /*
@@ -539,7 +849,8 @@ public:
   // Ensure that Collector has at least to_reserve bytes of available memory, and OldCollector has at least old_reserve
   // bytes of available memory. On input, old_region_count holds the number of regions already present in the
   // OldCollector partition. Upon return, old_region_count holds the updated number of regions in the OldCollector partition.
-  void reserve_regions(size_t to_reserve, size_t old_reserve, size_t &old_region_count);
+  void reserve_regions(size_t to_reserve, size_t old_reserve, size_t &old_region_count,
+                       size_t &young_used_regions, size_t &old_used_regions, size_t &young_used_bytes, size_t &old_used_bytes);

   // Reserve space for evacuations, with regions reserved for old evacuations placed to the right
   // of regions reserved for young evacuations.

@@ -237,7 +237,6 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
     worker_slices[i] = new ShenandoahHeapRegionSet();
   }

-  ShenandoahGenerationalHeap::TransferResult result;
   {
     // The rest of code performs region moves, where region status is undefined
     // until all phases run together.
@@ -251,14 +250,7 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {

     phase4_compact_objects(worker_slices);

-    result = phase5_epilog();
+    phase5_epilog();
   }
-  if (heap->mode()->is_generational()) {
-    LogTarget(Info, gc, ergo) lt;
-    if (lt.is_enabled()) {
-      LogStream ls(lt);
-      result.print_on("Full GC", &ls);
-    }
-  }

   // Resize metaspace
@@ -984,23 +976,6 @@ public:
     r->set_live_data(live);
     r->reset_alloc_metadata();
   }
-
-  void update_generation_usage() {
-    if (_is_generational) {
-      _heap->old_generation()->establish_usage(_old_regions, _old_usage, _old_humongous_waste);
-      _heap->young_generation()->establish_usage(_young_regions, _young_usage, _young_humongous_waste);
-    } else {
-      assert(_old_regions == 0, "Old regions only expected in generational mode");
-      assert(_old_usage == 0, "Old usage only expected in generational mode");
-      assert(_old_humongous_waste == 0, "Old humongous waste only expected in generational mode");
-    }
-
-    // In generational mode, global usage should be the sum of young and old. This is also true
-    // for non-generational modes except that there are no old regions.
-    _heap->global_generation()->establish_usage(_old_regions + _young_regions,
-                                                _old_usage + _young_usage,
-                                                _old_humongous_waste + _young_humongous_waste);
-  }
 };

 void ShenandoahFullGC::compact_humongous_objects() {
@@ -1120,10 +1095,9 @@ void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_s
     }
   }

-ShenandoahGenerationalHeap::TransferResult ShenandoahFullGC::phase5_epilog() {
+void ShenandoahFullGC::phase5_epilog() {
   GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer);
   ShenandoahHeap* heap = ShenandoahHeap::heap();
-  ShenandoahGenerationalHeap::TransferResult result;

   // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
   // and must ensure the bitmap is in sync.
@@ -1138,12 +1112,6 @@ void ShenandoahFullGC::phase5_epilog() {
     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
     ShenandoahPostCompactClosure post_compact;
     heap->heap_region_iterate(&post_compact);
-    post_compact.update_generation_usage();
-
-    if (heap->mode()->is_generational()) {
-      ShenandoahGenerationalFullGC::balance_generations_after_gc(heap);
-    }

     heap->collection_set()->clear();
     size_t young_cset_regions, old_cset_regions;
     size_t first_old, last_old, num_old;
@@ -1166,11 +1134,7 @@ void ShenandoahFullGC::phase5_epilog() {
   _preserved_marks->restore(heap->workers());
   _preserved_marks->reclaim();

-  // We defer generation resizing actions until after cset regions have been recycled. We do this even following an
-  // abbreviated cycle.
   if (heap->mode()->is_generational()) {
-    result = ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set();
     ShenandoahGenerationalFullGC::rebuild_remembered_set(heap);
   }
-  return result;
 }

@@ -82,8 +82,7 @@ private:
   void phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices);
   void phase3_update_references();
   void phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices);
-  ShenandoahGenerationalHeap::TransferResult phase5_epilog();
-
+  void phase5_epilog();
   void distribute_slices(ShenandoahHeapRegionSet** worker_slices);
   void calculate_target_humongous_objects();
   void compact_humongous_objects();

@@ -39,6 +39,8 @@
 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
 #include "utilities/quickSort.hpp"

+using idx_t = ShenandoahSimpleBitMap::idx_t;
+
 template <bool PREPARE_FOR_CURRENT_CYCLE, bool FULL_GC = false>
 class ShenandoahResetBitmapClosure final : public ShenandoahHeapRegionClosure {
 private:
@@ -147,19 +149,8 @@ ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode
   return _heuristics;
 }

-size_t ShenandoahGeneration::bytes_allocated_since_gc_start() const {
-  return AtomicAccess::load(&_bytes_allocated_since_gc_start);
-}
-
-void ShenandoahGeneration::reset_bytes_allocated_since_gc_start(size_t initial_bytes_allocated) {
-  AtomicAccess::store(&_bytes_allocated_since_gc_start, initial_bytes_allocated);
-}
-
-void ShenandoahGeneration::increase_allocated(size_t bytes) {
-  AtomicAccess::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
-}
-
 void ShenandoahGeneration::set_evacuation_reserve(size_t new_val) {
   shenandoah_assert_heaplocked();
   _evacuation_reserve = new_val;
 }

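[Editor's note: the removed ShenandoahGeneration counters used AtomicAccess because mutators bump them concurrently without holding the heap lock; judging by the new _mutator_bytes_allocated_since_gc_start field in ShenandoahFreeSet (see the header hunk above), that tally now lives in the free set. The relaxed-atomic pattern being removed, reduced to its essentials; field placement here is hypothetical:]

  volatile size_t _bytes_allocated_since_gc_start;

  size_t bytes_allocated_since_gc_start() const {
    return AtomicAccess::load(&_bytes_allocated_since_gc_start);    // racy read: fine for a statistic
  }
  void increase_allocated(size_t bytes) {
    AtomicAccess::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed); // no ordering required
  }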
@@ -271,7 +262,7 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap

   // maximum_young_evacuation_reserve is upper bound on memory to be evacuated out of young
   const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
-  const size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve());
+  size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve());

   // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted),
   // clamped by the old generation space available.
@@ -351,6 +342,11 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
   const size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve);
   assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");

+  // If any regions have been selected for promotion in place, this has the effect of decreasing available within mutator
+  // and collector partitions, due to padding of remnant memory within each promoted in place region. This will affect
+  // young_evacuation_reserve but not old_evacuation_reserve or consumed_by_advance_promotion. So recompute.
+  young_evacuation_reserve = MIN2(young_evacuation_reserve, young_generation->available_with_reserve());
+
   // Note that unused old_promo_reserve might not be entirely consumed_by_advance_promotion. Do not transfer this
   // to old_evacuation_reserve because this memory is likely very fragmented, and we do not want to increase the likelihood
   // of old evacuation failure.
@@ -435,7 +431,9 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
   size_t excess_old = old_available - old_consumed;
   size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
   size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
-  assert(old_available >= unaffiliated_old, "Unaffiliated old is a subset of old available");
+  assert(old_available >= unaffiliated_old,
+         "Unaffiliated old (%zu is %zu * %zu) is a subset of old available (%zu)",
+         unaffiliated_old, unaffiliated_old_regions, region_size_bytes, old_available);

   // Make sure old_evac_committed is unaffiliated
   if (old_evacuated_committed > 0) {
@@ -465,15 +463,10 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
     size_t excess_regions = excess_old / region_size_bytes;
     regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
   }

   if (regions_to_xfer > 0) {
     bool result = ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->transfer_to_young(regions_to_xfer);
+    assert(excess_old >= regions_to_xfer * region_size_bytes,
+           "Cannot transfer (%zu, %zu) more than excess old (%zu)",
+           regions_to_xfer, region_size_bytes, excess_old);
     excess_old -= regions_to_xfer * region_size_bytes;
     log_debug(gc, ergo)("%s transferred %zu excess regions to young before start of evacuation",
                         result? "Successfully": "Unsuccessfully", regions_to_xfer);
-    log_debug(gc, ergo)("Before start of evacuation, total_promotion reserve is young_advance_promoted_reserve: %zu "
-                        "plus excess: old: %zu", young_advance_promoted_reserve_used, excess_old);
   }

   // Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated
@@ -531,6 +524,8 @@ size_t ShenandoahGeneration::select_aged_regions(const size_t old_promotion_rese
   assert_no_in_place_promotions();

   auto const heap = ShenandoahGenerationalHeap::heap();
+  ShenandoahYoungGeneration* young_gen = heap->young_generation();
+  ShenandoahFreeSet* free_set = heap->free_set();
   bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions();
   ShenandoahMarkingContext* const ctx = heap->marking_context();

@@ -547,12 +542,28 @@ size_t ShenandoahGeneration::select_aged_regions(const size_t old_promotion_rese
   // Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that require
   // less evacuation effort. This prioritizes garbage first, expanding the allocation pool early before we reclaim regions that
   // have more live data.
-  const size_t num_regions = heap->num_regions();
+  const idx_t num_regions = heap->num_regions();

   ResourceMark rm;
   AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions);

-  for (size_t i = 0; i < num_regions; i++) {
+  ShenandoahFreeSet* freeset = heap->free_set();
+
+  // Any region that is to be promoted in place needs to be retired from its Collector or Mutator partition.
+  idx_t pip_low_collector_idx = freeset->max_regions();
+  idx_t pip_high_collector_idx = -1;
+  idx_t pip_low_mutator_idx = freeset->max_regions();
+  idx_t pip_high_mutator_idx = -1;
+  size_t collector_regions_to_pip = 0;
+  size_t mutator_regions_to_pip = 0;
+
+  size_t pip_mutator_regions = 0;
+  size_t pip_collector_regions = 0;
+  size_t pip_mutator_bytes = 0;
+  size_t pip_collector_bytes = 0;
+
+  size_t min_remnant_size = PLAB::min_size() * HeapWordSize;
+  for (idx_t i = 0; i < num_regions; i++) {
     ShenandoahHeapRegion* const r = heap->get_region(i);
     if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) {
       // skip over regions that aren't regular young with some live data
@ -569,18 +580,54 @@ size_t ShenandoahGeneration::select_aged_regions(const size_t old_promotion_rese
|
||||
// we use this field to indicate that this region should be promoted in place during the evacuation
|
||||
// phase.
|
||||
r->save_top_before_promote();
|
||||
|
||||
size_t remnant_size = r->free() / HeapWordSize;
|
||||
if (remnant_size > ShenandoahHeap::min_fill_size()) {
|
||||
ShenandoahHeap::fill_with_object(original_top, remnant_size);
|
||||
size_t remnant_bytes = r->free();
|
||||
size_t remnant_words = remnant_bytes / HeapWordSize;
|
||||
assert(ShenandoahHeap::min_fill_size() <= PLAB::min_size(), "Implementation makes invalid assumptions");
|
||||
if (remnant_words >= ShenandoahHeap::min_fill_size()) {
|
||||
ShenandoahHeap::fill_with_object(original_top, remnant_words);
|
||||
// Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise,
|
||||
// newly allocated objects will not be parsable when promote in place tries to register them. Furthermore, any
|
||||
// new allocations would not necessarily be eligible for promotion. This addresses both issues.
|
||||
r->set_top(r->end());
|
||||
promote_in_place_pad += remnant_size * HeapWordSize;
|
||||
// The region r is either in the Mutator or Collector partition if remnant_words > heap()->plab_min_size.
|
||||
// Otherwise, the region is in the NotFree partition.
|
||||
ShenandoahFreeSetPartitionId p = free_set->membership(i);
|
||||
if (p == ShenandoahFreeSetPartitionId::Mutator) {
|
||||
mutator_regions_to_pip++;
|
||||
if (i < pip_low_mutator_idx) {
|
||||
pip_low_mutator_idx = i;
|
||||
}
|
||||
if (i > pip_high_mutator_idx) {
|
||||
pip_high_mutator_idx = i;
|
||||
}
|
||||
pip_mutator_regions++;
|
||||
pip_mutator_bytes += remnant_bytes;
|
||||
} else if (p == ShenandoahFreeSetPartitionId::Collector) {
|
||||
collector_regions_to_pip++;
|
||||
if (i < pip_low_collector_idx) {
|
||||
pip_low_collector_idx = i;
|
||||
}
|
||||
if (i > pip_high_collector_idx) {
|
||||
pip_high_collector_idx = i;
|
||||
}
|
||||
pip_collector_regions++;
|
||||
pip_collector_bytes += remnant_bytes;
|
||||
} else {
|
||||
assert((p == ShenandoahFreeSetPartitionId::NotFree) && (remnant_words < heap->plab_min_size()),
|
||||
"Should be NotFree if not in Collector or Mutator partitions");
|
||||
// In this case, the memory is already counted as used and the region has already been retired. There is
|
||||
// no need for further adjustments to used. Further, the remnant memory for this region will not be
|
||||
// unallocated or made available to OldCollector after pip.
|
||||
remnant_bytes = 0;
|
||||
}
|
||||
promote_in_place_pad += remnant_bytes;
|
||||
free_set->prepare_to_promote_in_place(i, remnant_bytes);
|
||||
} else {
|
||||
// Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental
|
||||
// allocations occurring within this region before the region is promoted in place.
|
||||
// Since the remnant is so small that this region has already been retired, we don't have to worry about any
|
||||
// accidental allocations occurring within this region before the region is promoted in place.
|
||||
|
||||
// This region was already not in the Collector or Mutator set, so no need to remove it.
|
||||
assert(free_set->membership(i) == ShenandoahFreeSetPartitionId::NotFree, "sanity");
|
||||
}
|
||||
}
|
||||
// Else, we do not promote this region (either in place or by copy) because it has received new allocations.
@ -621,7 +668,21 @@ size_t ShenandoahGeneration::select_aged_regions(const size_t old_promotion_rese
// Subsequent regions may be selected if they have smaller live data.
}

log_info(gc, ergo)("Promotion potential of aged regions with sufficient garbage: " PROPERFMT, PROPERFMTARGS(promo_potential));
if (pip_mutator_regions + pip_collector_regions > 0) {
freeset->account_for_pip_regions(pip_mutator_regions, pip_mutator_bytes, pip_collector_regions, pip_collector_bytes);
}

// Retire any regions that have been selected for promote in place
if (collector_regions_to_pip > 0) {
freeset->shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Collector,
pip_low_collector_idx, pip_high_collector_idx,
collector_regions_to_pip);
}
if (mutator_regions_to_pip > 0) {
freeset->shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Mutator,
pip_low_mutator_idx, pip_high_mutator_idx,
mutator_regions_to_pip);
}

// Sort in increasing order according to live data bytes. Note that candidates represents the number of regions
// that qualify to be promoted by evacuation.
@ -653,6 +714,8 @@ size_t ShenandoahGeneration::select_aged_regions(const size_t old_promotion_rese
selected_regions, PROPERFMTARGS(selected_live), PROPERFMTARGS(old_consumed), PROPERFMTARGS(old_promotion_reserve));
}

log_info(gc, ergo)("Promotion potential of aged regions with sufficient garbage: " PROPERFMT, PROPERFMTARGS(promo_potential));

heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad);
heap->old_generation()->set_promotion_potential(promo_potential);
return old_consumed;
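The remnant-filling step above is the heart of promote-in-place preparation, so a distilled view may help. The sketch below is illustrative only: Region, fill_with_object, and min_fill_words are simplified stand-ins, not the real ShenandoahHeapRegion API.

    // Sketch: pad a region's unallocated tail so it stays parsable until promotion.
    void pad_region_for_pip(Region* r, size_t min_fill_words) {
      HeapWord* original_top = r->top();
      size_t remnant_words = r->free() / HeapWordSize;
      if (remnant_words >= min_fill_words) {
        fill_with_object(original_top, remnant_words); // dummy object keeps the tail walkable
        r->set_top(r->end());                          // no mutator allocation can land here now
      }
      // else: the tail is too small for any allocation, so the region is
      // effectively retired and needs no fill.
    }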
@ -754,9 +817,15 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {

// We are preparing for evacuation. At this time, we ignore cset region tallies.
size_t first_old, last_old, num_old;
heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
_free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);

if (heap->mode()->is_generational()) {
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
gen_heap->compute_old_generation_balance(young_cset_regions, old_cset_regions);
}

// Free set construction uses reserve quantities, because they are known to be valid here
heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true);
_free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true);
}
}

@ -800,14 +869,12 @@ void ShenandoahGeneration::cancel_marking() {
}

ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type,
uint max_workers,
size_t max_capacity) :
uint max_workers) :
_type(type),
_task_queues(new ShenandoahObjToScanQueueSet(max_workers)),
_ref_processor(new ShenandoahReferenceProcessor(this, MAX2(max_workers, 1U))),
_affiliated_region_count(0), _humongous_waste(0), _evacuation_reserve(0),
_used(0), _bytes_allocated_since_gc_start(0),
_max_capacity(max_capacity),
_evacuation_reserve(0),
_free_set(nullptr),
_heuristics(nullptr)
{
_is_marking_complete.set();
@ -826,6 +893,11 @@ ShenandoahGeneration::~ShenandoahGeneration() {
delete _task_queues;
}

void ShenandoahGeneration::post_initialize(ShenandoahHeap* heap) {
_free_set = heap->free_set();
assert(_free_set != nullptr, "bad initialization order");
}

void ShenandoahGeneration::reserve_task_queues(uint workers) {
_task_queues->reserve(workers);
}
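The new post_initialize hook reflects a construction-order constraint: generations are built before the free set exists, so the _free_set pointer cannot be captured in the constructor. A minimal sketch of this two-phase wiring, with stand-in types:

    struct FreeSet; // stand-in for ShenandoahFreeSet
    struct Generation {
      FreeSet* _free_set = nullptr;      // not available at construction time
      void post_initialize(FreeSet* fs) {
        _free_set = fs;                  // wired once the free set exists
      }
    };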
@ -853,159 +925,26 @@ void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) {
}
}

size_t ShenandoahGeneration::increment_affiliated_region_count() {
shenandoah_assert_heaplocked_or_safepoint();
// During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced
// on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with
// a coherent value.
return AtomicAccess::add(&_affiliated_region_count, (size_t) 1);
}

size_t ShenandoahGeneration::decrement_affiliated_region_count() {
shenandoah_assert_heaplocked_or_safepoint();
// During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced
// on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with
// a coherent value.
auto affiliated_region_count = AtomicAccess::sub(&_affiliated_region_count, (size_t) 1);
assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
(used() + _humongous_waste <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
"used + humongous cannot exceed regions");
return affiliated_region_count;
}

size_t ShenandoahGeneration::decrement_affiliated_region_count_without_lock() {
return AtomicAccess::sub(&_affiliated_region_count, (size_t) 1);
}

size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) {
shenandoah_assert_heaplocked_or_safepoint();
return AtomicAccess::add(&_affiliated_region_count, delta);
}

size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) {
shenandoah_assert_heaplocked_or_safepoint();
assert(AtomicAccess::load(&_affiliated_region_count) >= delta, "Affiliated region count cannot be negative");

auto const affiliated_region_count = AtomicAccess::sub(&_affiliated_region_count, delta);
assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
(_used + _humongous_waste <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
"used + humongous cannot exceed regions");
return affiliated_region_count;
}
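These deleted helpers all follow one pattern: adjust an atomic counter, then assert that bytes used cannot exceed the span of affiliated regions (except during full GC, when the count is transiently incoherent). A self-contained sketch of that pattern using standard C++ atomics in place of HotSpot's AtomicAccess; the region size is an assumed example value:

    #include <atomic>
    #include <cassert>
    #include <cstddef>

    constexpr std::size_t region_size_bytes = 2 * 1024 * 1024; // assumed 2 MB regions

    struct GenerationCounters {
      std::atomic<std::size_t> affiliated_regions{0};
      std::atomic<std::size_t> used_bytes{0};

      std::size_t decrease_affiliated(std::size_t delta) {
        assert(affiliated_regions.load() >= delta && "count cannot go negative");
        // fetch_sub returns the prior value; subtract delta for the new one.
        std::size_t after = affiliated_regions.fetch_sub(delta) - delta;
        assert(used_bytes.load() <= after * region_size_bytes &&
               "used cannot exceed span of affiliated regions");
        return after;
      }
    };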

void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste) {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
AtomicAccess::store(&_affiliated_region_count, num_regions);
AtomicAccess::store(&_used, num_bytes);
_humongous_waste = humongous_waste;
}

void ShenandoahGeneration::increase_used(size_t bytes) {
AtomicAccess::add(&_used, bytes);
}

void ShenandoahGeneration::increase_humongous_waste(size_t bytes) {
if (bytes > 0) {
AtomicAccess::add(&_humongous_waste, bytes);
}
}

void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) {
if (bytes > 0) {
assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_humongous_waste >= bytes),
"Waste (%zu) cannot be negative (after subtracting %zu)", _humongous_waste, bytes);
AtomicAccess::sub(&_humongous_waste, bytes);
}
}

void ShenandoahGeneration::decrease_used(size_t bytes) {
assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
(_used >= bytes), "cannot reduce bytes used by generation below zero");
AtomicAccess::sub(&_used, bytes);
}

size_t ShenandoahGeneration::used_regions() const {
return AtomicAccess::load(&_affiliated_region_count);
}

size_t ShenandoahGeneration::free_unaffiliated_regions() const {
size_t result = max_capacity() / ShenandoahHeapRegion::region_size_bytes();
auto const used_regions = this->used_regions();
if (used_regions > result) {
result = 0;
} else {
result -= used_regions;
}
return result;
}

size_t ShenandoahGeneration::used_regions_size() const {
return used_regions() * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahGeneration::available() const {
return available(max_capacity());
size_t result = available(max_capacity());
return result;
}

// For ShenandoahYoungGeneration, include the young available that may have been reserved for the Collector.
size_t ShenandoahGeneration::available_with_reserve() const {
return available(max_capacity());
size_t result = available(max_capacity());
return result;
}

size_t ShenandoahGeneration::soft_available() const {
return available(ShenandoahHeap::heap()->soft_max_capacity());
size_t result = available(ShenandoahHeap::heap()->soft_max_capacity());
return result;
}

size_t ShenandoahGeneration::available(size_t capacity) const {
size_t in_use = used() + get_humongous_waste();
return in_use > capacity ? 0 : capacity - in_use;
}
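The guarded subtraction in available(capacity) is easiest to see with numbers (illustrative values, not taken from the patch):

    // capacity = 1024 MB, used = 700 MB, humongous waste = 24 MB
    //   in_use    = 700 + 24 = 724 MB
    //   available = 1024 - 724 = 300 MB
    // If in_use transiently exceeded capacity (e.g. during full GC), the
    // ternary returns 0 rather than letting the unsigned subtraction wrap.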

size_t ShenandoahGeneration::increase_capacity(size_t increment) {
shenandoah_assert_heaplocked_or_safepoint();

// We do not enforce that new capacity >= heap->max_size_for(this). The maximum generation size is treated as a rule of thumb
// which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
// in place.
assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
(_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size");
assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
_max_capacity += increment;

// This detects arithmetic wraparound on _used
assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
(used_regions_size() >= used()),
"Affiliated regions must hold more than what is currently used");
return _max_capacity;
}

size_t ShenandoahGeneration::set_capacity(size_t byte_size) {
shenandoah_assert_heaplocked_or_safepoint();
_max_capacity = byte_size;
return _max_capacity;
}

size_t ShenandoahGeneration::decrease_capacity(size_t decrement) {
shenandoah_assert_heaplocked_or_safepoint();

// We do not enforce that new capacity >= heap->min_size_for(this). The minimum generation size is treated as a rule of thumb
// which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
// in place.
assert(decrement % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
assert(_max_capacity >= decrement, "Generation capacity cannot be negative");

_max_capacity -= decrement;

// This detects arithmetic wraparound on _used
assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
(used_regions_size() >= used()),
"Affiliated regions must hold more than what is currently used");
assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
(_used <= _max_capacity), "Cannot use more than capacity");
assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
(used_regions_size() <= _max_capacity),
"Cannot use more than capacity");
return _max_capacity;
size_t in_use = used();
size_t result = in_use > capacity ? 0 : capacity - in_use;
return result;
}

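Capacity changes are region-granular, which the asserts above enforce. Illustrative arithmetic for a three-region transfer, with assumed values:

    // region_size = 2 MB; transfer 3 regions from young to old:
    //   young: 512 MB -> 512 - 3*2 = 506 MB  (decrease_capacity(6 MB))
    //   old:   128 MB -> 128 + 3*2 = 134 MB  (increase_capacity(6 MB))
    // Both deltas are multiples of the region size, and young may not shrink
    // below its current usage outside of full GC.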
void ShenandoahGeneration::record_success_concurrent(bool abbreviated) {

@ -27,6 +27,7 @@

#include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp"
#include "gc/shenandoah/shenandoahAffiliation.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationType.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.hpp"
@ -40,7 +41,6 @@ class ShenandoahHeuristics;
class ShenandoahMode;
class ShenandoahReferenceProcessor;


class ShenandoahGeneration : public CHeapObj<mtGC>, public ShenandoahSpaceInfo {
friend class VMStructs;
private:
@ -52,26 +52,11 @@ private:

ShenandoahReferenceProcessor* const _ref_processor;

volatile size_t _affiliated_region_count;

// How much free memory is left in the last region of humongous objects.
// This is _not_ included in used, but it _is_ deducted from available,
// which gives the heuristics a more accurate view of how much memory remains
// for allocation. This figure is also included in the heap status logging.
// The units are bytes. The value is only changed on a safepoint or under the
// heap lock.
size_t _humongous_waste;

// Bytes reserved within this generation to hold evacuated objects from the collection set
size_t _evacuation_reserve;

protected:
// Usage

volatile size_t _used;
volatile size_t _bytes_allocated_since_gc_start;
size_t _max_capacity;

ShenandoahFreeSet* _free_set;
ShenandoahHeuristics* _heuristics;

private:
@ -99,41 +84,43 @@ private:
// to false.
size_t select_aged_regions(size_t old_promotion_reserve);

// Return available assuming that we can allocate no more than capacity bytes within this generation.
size_t available(size_t capacity) const;

public:
ShenandoahGeneration(ShenandoahGenerationType type,
uint max_workers,
size_t max_capacity);
uint max_workers);
~ShenandoahGeneration();

bool is_young() const { return _type == YOUNG; }
bool is_old() const { return _type == OLD; }
bool is_global() const { return _type == GLOBAL || _type == NON_GEN; }
inline bool is_young() const { return _type == YOUNG; }
inline bool is_old() const { return _type == OLD; }
inline bool is_global() const { return _type == GLOBAL || _type == NON_GEN; }
inline ShenandoahGenerationType type() const { return _type; }

// see description in field declaration
void set_evacuation_reserve(size_t new_val);
size_t get_evacuation_reserve() const;
void augment_evacuation_reserve(size_t increment);

inline ShenandoahGenerationType type() const { return _type; }

virtual ShenandoahHeuristics* heuristics() const { return _heuristics; }

ShenandoahReferenceProcessor* ref_processor() { return _ref_processor; }

virtual ShenandoahHeuristics* initialize_heuristics(ShenandoahMode* gc_mode);

size_t max_capacity() const override { return _max_capacity; }
virtual size_t used_regions() const;
virtual size_t used_regions_size() const;
virtual size_t free_unaffiliated_regions() const;
size_t used() const override { return AtomicAccess::load(&_used); }
virtual void post_initialize(ShenandoahHeap* heap);

virtual size_t bytes_allocated_since_gc_start() const override = 0;
virtual size_t used() const override = 0;
virtual size_t used_regions() const = 0;
virtual size_t used_regions_size() const = 0;
virtual size_t get_humongous_waste() const = 0;
virtual size_t free_unaffiliated_regions() const = 0;
virtual size_t get_affiliated_region_count() const = 0;
virtual size_t max_capacity() const override = 0;

size_t available() const override;
size_t available_with_reserve() const;
size_t used_including_humongous_waste() const {
return used() + get_humongous_waste();
}

// Returns the memory available based on the _soft_ max heap capacity (soft_max_heap - used).
// The soft max heap size may be adjusted lower than the max heap size to cause the trigger
@ -141,23 +128,6 @@ private:
// max heap size will cause the adaptive heuristic to run more frequent cycles.
size_t soft_available() const override;

size_t bytes_allocated_since_gc_start() const override;

// Reset the bytes allocated within this generation since the start of GC. The argument initial_bytes_allocated
// is normally zero. In the case that some memory was allocated following the last allocation rate sample that
// precedes the start of GC, the number of bytes allocated is supplied as the initial value of bytes_allocated_since_gc_start.
// We will behave as if these bytes were allocated after the start of GC.
void reset_bytes_allocated_since_gc_start(size_t initial_bytes_allocated);
void increase_allocated(size_t bytes);

// These methods change the capacity of the generation by adding or subtracting the given number of bytes from the current
// capacity, returning the capacity of the generation following the change.
size_t increase_capacity(size_t increment);
size_t decrease_capacity(size_t decrement);

// Set the capacity of the generation, returning the value set
size_t set_capacity(size_t byte_size);

void log_status(const char* msg) const;

// Used directly by FullGC
@ -217,29 +187,6 @@ private:
// Scan remembered set at start of concurrent young-gen marking.
void scan_remembered_set(bool is_concurrent);

// Return the updated value of affiliated_region_count
size_t increment_affiliated_region_count();

// Return the updated value of affiliated_region_count
size_t decrement_affiliated_region_count();
// Same as decrement_affiliated_region_count, but w/o the need to hold heap lock before being called.
size_t decrement_affiliated_region_count_without_lock();

// Return the updated value of affiliated_region_count
size_t increase_affiliated_region_count(size_t delta);

// Return the updated value of affiliated_region_count
size_t decrease_affiliated_region_count(size_t delta);

void establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste);

void increase_used(size_t bytes);
void decrease_used(size_t bytes);

void increase_humongous_waste(size_t bytes);
void decrease_humongous_waste(size_t bytes);
size_t get_humongous_waste() const { return _humongous_waste; }

virtual bool is_concurrent_mark_in_progress() = 0;
void confirm_heuristics_mode();

@ -1,208 +0,0 @@
/*
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "gc/shared/gc_globals.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationSizer.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
#include "runtime/globals_extension.hpp"


ShenandoahGenerationSizer::ShenandoahGenerationSizer()
: _sizer_kind(SizerDefaults),
_min_desired_young_regions(0),
_max_desired_young_regions(0) {

if (FLAG_IS_CMDLINE(NewRatio)) {
if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
} else {
_sizer_kind = SizerNewRatio;
return;
}
}

if (NewSize > MaxNewSize) {
if (FLAG_IS_CMDLINE(MaxNewSize)) {
log_warning(gc, ergo)("NewSize (%zuk) is greater than the MaxNewSize (%zuk). "
"A new max generation size of %zuk will be used.",
NewSize/K, MaxNewSize/K, NewSize/K);
}
FLAG_SET_ERGO(MaxNewSize, NewSize);
}

if (FLAG_IS_CMDLINE(NewSize)) {
_min_desired_young_regions = MAX2(uint(NewSize / ShenandoahHeapRegion::region_size_bytes()), 1U);
if (FLAG_IS_CMDLINE(MaxNewSize)) {
_max_desired_young_regions = MAX2(uint(MaxNewSize / ShenandoahHeapRegion::region_size_bytes()), 1U);
_sizer_kind = SizerMaxAndNewSize;
} else {
_sizer_kind = SizerNewSizeOnly;
}
} else if (FLAG_IS_CMDLINE(MaxNewSize)) {
_max_desired_young_regions = MAX2(uint(MaxNewSize / ShenandoahHeapRegion::region_size_bytes()), 1U);
_sizer_kind = SizerMaxNewSizeOnly;
}
}

size_t ShenandoahGenerationSizer::calculate_min_young_regions(size_t heap_region_count) {
size_t min_young_regions = (heap_region_count * ShenandoahMinYoungPercentage) / 100;
return MAX2(min_young_regions, (size_t) 1U);
}

size_t ShenandoahGenerationSizer::calculate_max_young_regions(size_t heap_region_count) {
size_t max_young_regions = (heap_region_count * ShenandoahMaxYoungPercentage) / 100;
return MAX2(max_young_regions, (size_t) 1U);
}

void ShenandoahGenerationSizer::recalculate_min_max_young_length(size_t heap_region_count) {
assert(heap_region_count > 0, "Heap must be initialized");

switch (_sizer_kind) {
case SizerDefaults:
_min_desired_young_regions = calculate_min_young_regions(heap_region_count);
_max_desired_young_regions = calculate_max_young_regions(heap_region_count);
break;
case SizerNewSizeOnly:
_max_desired_young_regions = calculate_max_young_regions(heap_region_count);
_max_desired_young_regions = MAX2(_min_desired_young_regions, _max_desired_young_regions);
break;
case SizerMaxNewSizeOnly:
_min_desired_young_regions = calculate_min_young_regions(heap_region_count);
_min_desired_young_regions = MIN2(_min_desired_young_regions, _max_desired_young_regions);
break;
case SizerMaxAndNewSize:
// Do nothing. Values set on the command line, don't update them at runtime.
break;
case SizerNewRatio:
_min_desired_young_regions = MAX2(uint(heap_region_count / (NewRatio + 1)), 1U);
_max_desired_young_regions = _min_desired_young_regions;
break;
default:
ShouldNotReachHere();
}

assert(_min_desired_young_regions <= _max_desired_young_regions, "Invalid min/max young gen size values");
}
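The deleted sizer derives young-generation bounds from the region count. Worked examples with assumed flag values:

    // heap_region_count = 1000
    // SizerNewRatio with NewRatio = 3:
    //   min = max = 1000 / (3 + 1) = 250 regions
    // SizerDefaults with assumed ShenandoahMinYoungPercentage = 20 and
    // ShenandoahMaxYoungPercentage = 100:
    //   min = 1000 * 20 / 100  = 200 regions
    //   max = 1000 * 100 / 100 = 1000 regions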

void ShenandoahGenerationSizer::heap_size_changed(size_t heap_size) {
recalculate_min_max_young_length(heap_size / ShenandoahHeapRegion::region_size_bytes());
}

bool ShenandoahGenerationSizer::transfer_regions(ShenandoahGeneration* src, ShenandoahGeneration* dst, size_t regions) const {
const size_t bytes_to_transfer = regions * ShenandoahHeapRegion::region_size_bytes();

if (src->free_unaffiliated_regions() < regions) {
// Source does not have enough free regions for this transfer. The caller should have
// already capped the transfer based on available unaffiliated regions.
return false;
}

if (dst->max_capacity() + bytes_to_transfer > max_size_for(dst)) {
// This transfer would cause the destination generation to grow above its configured maximum size.
return false;
}

if (src->max_capacity() - bytes_to_transfer < min_size_for(src)) {
// This transfer would cause the source generation to shrink below its configured minimum size.
return false;
}

src->decrease_capacity(bytes_to_transfer);
dst->increase_capacity(bytes_to_transfer);
const size_t new_size = dst->max_capacity();
log_info(gc, ergo)("Transfer %zu region(s) from %s to %s, yielding increased size: " PROPERFMT,
regions, src->name(), dst->name(), PROPERFMTARGS(new_size));
return true;
}
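A hypothetical caller, to show the intended contract: transfer_regions may be invoked optimistically, and a false return means capacities were left untouched.

    void rebalance_sketch(const ShenandoahGenerationSizer* sizer, size_t surplus) {
      if (surplus > 0 && !sizer->transfer_to_young(surplus)) {
        // Nothing moved: young would exceed its max, or old would fall below
        // its min. Callers simply try again on a later cycle.
      }
    }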


size_t ShenandoahGenerationSizer::max_size_for(ShenandoahGeneration* generation) const {
switch (generation->type()) {
case YOUNG:
return max_young_size();
case OLD:
// On the command line, max size of OLD is specified indirectly, by setting a minimum size of young.
// OLD is what remains within the heap after YOUNG has been sized.
return ShenandoahHeap::heap()->max_capacity() - min_young_size();
default:
ShouldNotReachHere();
return 0;
}
}

size_t ShenandoahGenerationSizer::min_size_for(ShenandoahGeneration* generation) const {
switch (generation->type()) {
case YOUNG:
return min_young_size();
case OLD:
// On the command line, min size of OLD is specified indirectly, by setting a maximum size of young.
// OLD is what remains within the heap after YOUNG has been sized.
return ShenandoahHeap::heap()->max_capacity() - max_young_size();
default:
ShouldNotReachHere();
return 0;
}
}
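The two switches encode complementary bounds. With assumed numbers:

    // heap max = 8192 MB, min_young_size = 2048 MB, max_young_size = 6144 MB
    //   max_size_for(OLD) = 8192 - 2048 = 6144 MB
    //   min_size_for(OLD) = 8192 - 6144 = 2048 MB
    // OLD's limits are whatever YOUNG's opposite limit leaves behind.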


// Returns true iff transfer is successful
bool ShenandoahGenerationSizer::transfer_to_old(size_t regions) const {
ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
return transfer_regions(heap->young_generation(), heap->old_generation(), regions);
}

// This is used when promoting humongous or highly utilized regular regions in place. It is not required in this situation
// that the transferred regions be unaffiliated.
void ShenandoahGenerationSizer::force_transfer_to_old(size_t regions) const {
ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
ShenandoahGeneration* old_gen = heap->old_generation();
ShenandoahGeneration* young_gen = heap->young_generation();
const size_t bytes_to_transfer = regions * ShenandoahHeapRegion::region_size_bytes();

young_gen->decrease_capacity(bytes_to_transfer);
old_gen->increase_capacity(bytes_to_transfer);
const size_t new_size = old_gen->max_capacity();
log_info(gc, ergo)("Forcing transfer of %zu region(s) from %s to %s, yielding increased size: " PROPERFMT,
regions, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_size));
}


bool ShenandoahGenerationSizer::transfer_to_young(size_t regions) const {
ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
return transfer_regions(heap->old_generation(), heap->young_generation(), regions);
}

size_t ShenandoahGenerationSizer::min_young_size() const {
return min_young_regions() * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahGenerationSizer::max_young_size() const {
return max_young_regions() * ShenandoahHeapRegion::region_size_bytes();
}
@ -1,93 +0,0 @@
/*
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONSIZER_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONSIZER_HPP

#include "utilities/globalDefinitions.hpp"

class ShenandoahGeneration;
class ShenandoahGenerationalHeap;

class ShenandoahGenerationSizer {
private:
enum SizerKind {
SizerDefaults,
SizerNewSizeOnly,
SizerMaxNewSizeOnly,
SizerMaxAndNewSize,
SizerNewRatio
};
SizerKind _sizer_kind;

size_t _min_desired_young_regions;
size_t _max_desired_young_regions;

static size_t calculate_min_young_regions(size_t heap_region_count);
static size_t calculate_max_young_regions(size_t heap_region_count);

// Update the given values for minimum and maximum young gen length in regions
// given the number of heap regions depending on the kind of sizing algorithm.
void recalculate_min_max_young_length(size_t heap_region_count);

// This will attempt to transfer regions from the `src` generation to `dst` generation.
// If the transfer would violate the configured minimum size for the source or the configured
// maximum size of the destination, it will not perform the transfer and will return false.
// Returns true if the transfer is performed.
bool transfer_regions(ShenandoahGeneration* src, ShenandoahGeneration* dst, size_t regions) const;

// Return the configured maximum size in bytes for the given generation.
size_t max_size_for(ShenandoahGeneration* generation) const;

// Return the configured minimum size in bytes for the given generation.
size_t min_size_for(ShenandoahGeneration* generation) const;

public:
ShenandoahGenerationSizer();

// Calculate the maximum length of the young gen given the number of regions
// depending on the sizing algorithm.
void heap_size_changed(size_t heap_size);

// Minimum size of young generation in bytes as multiple of region size.
size_t min_young_size() const;
size_t min_young_regions() const {
return _min_desired_young_regions;
}

// Maximum size of young generation in bytes as multiple of region size.
size_t max_young_size() const;
size_t max_young_regions() const {
return _max_desired_young_regions;
}

// True if transfer succeeds, else false. See transfer_regions.
bool transfer_to_young(size_t regions) const;
bool transfer_to_old(size_t regions) const;

// force transfer is used when we promote humongous objects. May violate min/max limits on generation sizes
void force_transfer_to_old(size_t regions) const;
};

#endif //SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONSIZER_HPP
@ -244,7 +244,9 @@ void ShenandoahGenerationalControlThread::run_gc_cycle(const ShenandoahGCRequest

GCIdMark gc_id_mark;

_heap->reset_bytes_allocated_since_gc_start();
if (gc_mode() != servicing_old) {
_heap->reset_bytes_allocated_since_gc_start();
}

MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics();

@ -288,11 +290,11 @@ void ShenandoahGenerationalControlThread::run_gc_cycle(const ShenandoahGCRequest
if (!_heap->cancelled_gc()) {
notify_gc_waiters();
notify_alloc_failure_waiters();
// Report current free set state at the end of cycle if normal completion.
// Do not report if cancelled, since we may not have rebuilt free set and content is unreliable.
_heap->free_set()->log_status_under_lock();
}

// Report current free set state at the end of cycle, whether
// it is a normal completion, or the abort.
_heap->free_set()->log_status_under_lock();

// Notify Universe about new heap usage. This has implications for
// global soft refs policy, and we better report it every time heap

@ -174,11 +174,13 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion
assert(!_generation->is_old(), "Sanity check");
ShenandoahMarkingContext* const marking_context = _heap->young_generation()->complete_marking_context();
HeapWord* const tams = marking_context->top_at_mark_start(region);
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

{
const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100;
const size_t old_garbage_threshold = (region_size_bytes * ShenandoahOldGarbageThreshold) / 100;
assert(!_heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking");
assert(region->garbage_before_padded_for_promote() < old_garbage_threshold, "Region %zu has too much garbage for promotion", region->index());
assert(region->garbage_before_padded_for_promote() < old_garbage_threshold,
"Region %zu has too much garbage for promotion", region->index());
assert(region->is_young(), "Only young regions can be promoted");
assert(region->is_regular(), "Use different service to promote humongous regions");
assert(_heap->is_tenurable(region), "Only promote regions that are sufficiently aged");
@ -225,35 +227,29 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion
ShenandoahHeapLocker locker(_heap->lock());

HeapWord* update_watermark = region->get_update_watermark();
// pip_unpadded is memory too small to be filled above original top
size_t pip_unpadded = (region->end() - region->top()) * HeapWordSize;
assert((region->top() == region->end())
|| (pip_unpadded == (size_t) ((region->end() - region->top()) * HeapWordSize)), "Invariant");
assert(pip_unpadded < ShenandoahHeap::min_fill_size() * HeapWordSize, "Sanity");
size_t pip_pad_bytes = (region->top() - region->get_top_before_promote()) * HeapWordSize;
assert((pip_unpadded == 0) || (pip_pad_bytes == 0), "Only one of pip_unpadded and pip_pad_bytes is non-zero");

// Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the
// is_collector_free range.
// is_collector_free range. We'll add it to that range below.
region->restore_top_before_promote();

size_t region_used = region->used();
#ifdef ASSERT
size_t region_to_be_used_in_old = region->used();
assert(region_to_be_used_in_old + pip_pad_bytes + pip_unpadded == region_size_bytes, "invariant");
#endif

// The update_watermark was likely established while we had the artificially high value of top. Make it sane now.
assert(update_watermark >= region->top(), "original top cannot exceed preserved update_watermark");
region->set_update_watermark(region->top());

// Unconditionally transfer one region from young to old. This represents the newly promoted region.
// This expands old and shrinks young by the size of one region. Strictly, we do not "need" to expand old
// if there are already enough unaffiliated regions in old to account for this newly promoted region.
// However, if we do not transfer the capacities, we end up reducing the amount of memory that would have
// otherwise been available to hold old evacuations, because old available is max_capacity - used and now
// we would be trading a fully empty region for a partially used region.
young_gen->decrease_used(region_used);
young_gen->decrement_affiliated_region_count();

// transfer_to_old() increases capacity of old and decreases capacity of young
_heap->generation_sizer()->force_transfer_to_old(1);
region->set_affiliation(OLD_GENERATION);

old_gen->increment_affiliated_region_count();
old_gen->increase_used(region_used);

// add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size()
// Transfer this region from young to old, increasing promoted_reserve if available space exceeds plab_min_size()
_heap->free_set()->add_promoted_in_place_region_to_old_collector(region);
region->set_affiliation(OLD_GENERATION);
}
}
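The net effect of the rewritten block: several separate used/affiliated/capacity adjustments collapse into one free-set call. A conceptual sketch with stand-in types (the real accounting happens inside ShenandoahFreeSet under the heap lock):

    // Before: decrease young used, decrement young's region count, force a
    // capacity transfer, increment old's counts, then register the region.
    // After:
    void promote_region_sketch(FreeSet* fs, Region* region) {
      fs->add_promoted_in_place_region_to_old_collector(region); // one accounting step
      region->set_affiliation(OLD_GENERATION);
    }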

@ -268,7 +264,8 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio

const size_t used_bytes = obj->size() * HeapWordSize;
const size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
const size_t humongous_waste = spanned_regions * ShenandoahHeapRegion::region_size_bytes() - obj->size() * HeapWordSize;
const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
const size_t humongous_waste = spanned_regions * region_size_bytes - obj->size() * HeapWordSize;
const size_t index_limit = region->index() + spanned_regions;

ShenandoahOldGeneration* const old_gen = _heap->old_generation();
@ -282,13 +279,6 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio
// usage totals, including humongous waste, after evacuation is done.
log_debug(gc)("promoting humongous region %zu, spanning %zu", region->index(), spanned_regions);

young_gen->decrease_used(used_bytes);
young_gen->decrease_humongous_waste(humongous_waste);
young_gen->decrease_affiliated_region_count(spanned_regions);

// transfer_to_old() increases capacity of old and decreases capacity of young
_heap->generation_sizer()->force_transfer_to_old(spanned_regions);

// For this region and each humongous continuation region spanned by this humongous object, change
// affiliation to OLD_GENERATION and adjust the generation-use tallies. The remnant of memory
// in the last humongous region that is not spanned by obj is currently not used.
@ -300,9 +290,8 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio
r->set_affiliation(OLD_GENERATION);
}

old_gen->increase_affiliated_region_count(spanned_regions);
old_gen->increase_used(used_bytes);
old_gen->increase_humongous_waste(humongous_waste);
ShenandoahFreeSet* freeset = _heap->free_set();
freeset->transfer_humongous_regions_from_mutator_to_old_collector(spanned_regions, humongous_waste);
}

// Since this region may have served previously as OLD, it may hold obsolete object range info.

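Humongous waste is the unused tail of the last spanned region. With assumed numbers:

    // region_size = 2 MB, object size = 5 MB
    //   spanned_regions = required_regions(5 MB) = 3
    //   humongous_waste = 3 * 2 - 5 = 1 MB
    // That 1 MB is unusable while the object lives, so it is tracked separately.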
@ -41,7 +41,7 @@ void assert_regions_used_not_more_than_capacity(ShenandoahGeneration* generation
}

void assert_usage_not_more_than_regions_used(ShenandoahGeneration* generation) {
assert(generation->used_including_humongous_waste() <= generation->used_regions_size(),
assert(generation->used() <= generation->used_regions_size(),
"%s consumed can be no larger than span of affiliated regions", generation->name());
}
#else
@ -83,7 +83,7 @@ void ShenandoahGenerationalFullGC::handle_completion(ShenandoahHeap* heap) {
assert_usage_not_more_than_regions_used(young);

// Establish baseline for next old-has-grown trigger.
old->set_live_bytes_after_last_mark(old->used_including_humongous_waste());
old->set_live_bytes_after_last_mark(old->used());
}

void ShenandoahGenerationalFullGC::rebuild_remembered_set(ShenandoahHeap* heap) {
@ -104,33 +104,6 @@ void ShenandoahGenerationalFullGC::rebuild_remembered_set(ShenandoahHeap* heap)
heap->old_generation()->set_parsable(true);
}

void ShenandoahGenerationalFullGC::balance_generations_after_gc(ShenandoahHeap* heap) {
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap);
ShenandoahOldGeneration* const old_gen = gen_heap->old_generation();

size_t old_usage = old_gen->used_regions_size();
size_t old_capacity = old_gen->max_capacity();

assert(old_usage % ShenandoahHeapRegion::region_size_bytes() == 0, "Old usage must align with region size");
assert(old_capacity % ShenandoahHeapRegion::region_size_bytes() == 0, "Old capacity must align with region size");

if (old_capacity > old_usage) {
size_t excess_old_regions = (old_capacity - old_usage) / ShenandoahHeapRegion::region_size_bytes();
gen_heap->generation_sizer()->transfer_to_young(excess_old_regions);
} else if (old_capacity < old_usage) {
size_t old_regions_deficit = (old_usage - old_capacity) / ShenandoahHeapRegion::region_size_bytes();
gen_heap->generation_sizer()->force_transfer_to_old(old_regions_deficit);
}

log_info(gc, ergo)("FullGC done: young usage: " PROPERFMT ", old usage: " PROPERFMT,
PROPERFMTARGS(gen_heap->young_generation()->used()),
PROPERFMTARGS(old_gen->used()));
}

ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set() {
return ShenandoahGenerationalHeap::heap()->balance_generations();
}

void ShenandoahGenerationalFullGC::log_live_in_old(ShenandoahHeap* heap) {
LogTarget(Debug, gc) lt;
if (lt.is_enabled()) {

@ -45,25 +45,11 @@ public:
// Records end of cycle for young and old and establishes size of live bytes in old
static void handle_completion(ShenandoahHeap* heap);

// Full GC may have promoted regions and may have temporarily violated constraints on the usage and
// capacity of the old generation. This method will balance the accounting of regions between the
// young and old generations. This is somewhat vestigial, but the outcome of this method is used
// when rebuilding the free sets.
static void balance_generations_after_gc(ShenandoahHeap* heap);

// This will compute the target size for the old generation. It will be expressed in terms of
// a region surplus and deficit, which will be redistributed accordingly after rebuilding the
// free set.
static void compute_balances();

// Rebuilding the free set may have resulted in regions being pulled in to the old generation
// evacuation reserve. For this reason, we must update the usage and capacity of the generations
// again. In the distant past, the free set did not know anything about generations, so we had
// a layer built above it to represent how much young/old memory was available. This layer is
// redundant and adds complexity. We would like to one day remove it. Until then, we must keep it
// synchronized with the free set's view of things.
static ShenandoahGenerationalHeap::TransferResult balance_generations_after_rebuilding_free_set();

// Logs the number of live bytes marked in the old generation. This is _not_ the same
// value used as the baseline for the old generation _after_ the full gc is complete.
// The value reported in the logs does not include objects and regions that may be

@ -27,6 +27,7 @@
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
@ -107,17 +108,25 @@ void ShenandoahGenerationalHeap::initialize_heuristics() {
// for old would be total heap - minimum capacity of young. This means the sum of the maximum
// allowed for old and young could exceed the total heap size. It remains the case that the
// _actual_ capacity of young + old = total.
_generation_sizer.heap_size_changed(max_capacity());
size_t initial_capacity_young = _generation_sizer.max_young_size();
size_t max_capacity_young = _generation_sizer.max_young_size();
size_t region_count = num_regions();
size_t max_young_regions = MAX2((region_count * ShenandoahMaxYoungPercentage) / 100, (size_t) 1U);
size_t initial_capacity_young = max_young_regions * ShenandoahHeapRegion::region_size_bytes();
size_t max_capacity_young = initial_capacity_young;
size_t initial_capacity_old = max_capacity() - max_capacity_young;
size_t max_capacity_old = max_capacity() - initial_capacity_young;

_young_generation = new ShenandoahYoungGeneration(max_workers(), max_capacity_young);
_old_generation = new ShenandoahOldGeneration(max_workers(), max_capacity_old);
_young_generation = new ShenandoahYoungGeneration(max_workers());
_old_generation = new ShenandoahOldGeneration(max_workers());
_young_generation->initialize_heuristics(mode());
_old_generation->initialize_heuristics(mode());
}

void ShenandoahGenerationalHeap::post_initialize_heuristics() {
ShenandoahHeap::post_initialize_heuristics();
_young_generation->post_initialize(this);
_old_generation->post_initialize(this);
}

void ShenandoahGenerationalHeap::initialize_serviceability() {
assert(mode()->is_generational(), "Only for the generational mode");
_young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
@ -577,39 +586,13 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {
retire_plab(plab, thread);
}

ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_generations() {
shenandoah_assert_heaplocked_or_safepoint();

ShenandoahOldGeneration* old_gen = old_generation();
const ssize_t old_region_balance = old_gen->get_region_balance();
old_gen->set_region_balance(0);

if (old_region_balance > 0) {
const auto old_region_surplus = checked_cast<size_t>(old_region_balance);
const bool success = generation_sizer()->transfer_to_young(old_region_surplus);
return TransferResult {
success, old_region_surplus, "young"
};
}

if (old_region_balance < 0) {
const auto old_region_deficit = checked_cast<size_t>(-old_region_balance);
const bool success = generation_sizer()->transfer_to_old(old_region_deficit);
if (!success) {
old_gen->handle_failed_transfer();
}
return TransferResult {
success, old_region_deficit, "old"
};
}

return TransferResult {true, 0, "none"};
}
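The sign convention of the deleted balance_generations() is worth spelling out:

    // region_balance > 0 : old has a surplus; move that many regions to young
    // region_balance < 0 : old has a deficit; move |balance| regions to old,
    //                      calling handle_failed_transfer() on failure
    // region_balance == 0: nothing to do; TransferResult{true, 0, "none"}
    // Example: balance = -3 means young must cede 3 regions to old.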

// Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
// and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
// xfer_limit, and any surplus is transferred to the young generation.
// xfer_limit is the maximum we're able to transfer from young to old.
//
// xfer_limit is the maximum we're able to transfer from young to old based on either:
// 1. an assumption that we will be able to replenish memory "borrowed" from young at the end of collection, or
// 2. there is sufficient excess in the allocation runway during GC idle cycles
void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) {

// We can limit the old reserve to the size of anticipated promotions:
@ -637,9 +620,9 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_
// In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit.

const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve;
const double max_old_reserve = (ShenandoahOldEvacRatioPercent == 100)?
bound_on_old_reserve: MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) / double(100 - ShenandoahOldEvacRatioPercent),
bound_on_old_reserve);
const double max_old_reserve = ((ShenandoahOldEvacRatioPercent == 100)? bound_on_old_reserve:
MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent)
/ double(100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve));

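The reformatted expression computes the same bound; a worked instance with assumed values:

    // young_reserve = 300 MB, ShenandoahOldEvacRatioPercent = 25
    //   ratio term      = 300 * 25 / (100 - 25) = 100 MB
    //   max_old_reserve = MIN2(100 MB, old_available + old_xfer_limit + young_reserve)
    // i.e. old evacuation gets at most 25:75 (one third) of young's reserve,
    // unless the percentage is 100, where only bound_on_old_reserve applies.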
const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

@ -648,10 +631,12 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_
if (old_generation()->has_unprocessed_collection_candidates()) {
// We want this much memory to be unfragmented in order to reliably evacuate old. This is conservative because we
// may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
const double max_evac_need = (double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste);
const double max_evac_need =
(double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste);
assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
"Unaffiliated available must be less than total available");
const double old_fragmented_available = double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes);
const double old_fragmented_available =
double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes);
reserve_for_mixed = max_evac_need + old_fragmented_available;
if (reserve_for_mixed > max_old_reserve) {
reserve_for_mixed = max_old_reserve;
@ -698,6 +683,7 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_
}

void ShenandoahGenerationalHeap::reset_generation_reserves() {
ShenandoahHeapLocker locker(lock());
young_generation()->set_evacuation_reserve(0);
old_generation()->set_evacuation_reserve(0);
old_generation()->set_promoted_reserve(0);
@ -1060,15 +1046,6 @@ void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
// a more detailed explanation.
old_generation()->transfer_pointers_from_satb();
}

// We defer generation resizing actions until after cset regions have been recycled.
TransferResult result = balance_generations();
LogTarget(Info, gc, ergo) lt;
if (lt.is_enabled()) {
LogStream ls(lt);
result.print_on("Degenerated GC", &ls);
}

// In case degeneration interrupted concurrent evacuation or update references, we need to clean up
// transient state. Otherwise, these actions have no effect.
reset_generation_reserves();
@ -1090,24 +1067,7 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
// throw off the heuristics.
entry_global_coalesce_and_fill();
}

log_info(gc, cset)("Concurrent cycle complete, promotions reserved: %zu, promotions expended: %zu, failed count: %zu, failed bytes: %zu",
old_generation()->get_promoted_reserve(), old_generation()->get_promoted_expended(),
old_generation()->get_promotion_failed_count(), old_generation()->get_promotion_failed_words() * HeapWordSize);

TransferResult result;
{
ShenandoahHeapLocker locker(lock());

result = balance_generations();
reset_generation_reserves();
}

LogTarget(Info, gc, ergo) lt;
if (lt.is_enabled()) {
LogStream ls(lt);
result.print_on("Concurrent GC", &ls);
}
reset_generation_reserves();
}

void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {

@ -41,6 +41,7 @@ public:
explicit ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy);
void post_initialize() override;
void initialize_heuristics() override;
void post_initialize_heuristics() override;

static ShenandoahGenerationalHeap* heap() {
assert(ShenandoahCardBarrier, "Should have card barrier to use generational heap");
@ -138,8 +139,6 @@ public:
void print_on(const char* when, outputStream* ss) const;
};

const ShenandoahGenerationSizer* generation_sizer() const { return &_generation_sizer; }

// Zeros out the evacuation and promotion reserves
void reset_generation_reserves();

@ -163,8 +162,6 @@ private:

MemoryPool* _young_gen_memory_pool;
MemoryPool* _old_gen_memory_pool;

ShenandoahGenerationSizer _generation_sizer;
};

#endif //SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONALHEAP

@ -37,17 +37,38 @@ const char* ShenandoahGlobalGeneration::name() const {
}

size_t ShenandoahGlobalGeneration::max_capacity() const {
return ShenandoahHeap::heap()->max_capacity();
size_t total_regions = _free_set->total_global_regions();
return total_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahGlobalGeneration::free_unaffiliated_regions() const {
return _free_set->global_unaffiliated_regions();
}

size_t ShenandoahGlobalGeneration::used() const {
return _free_set->global_used();
}

size_t ShenandoahGlobalGeneration::bytes_allocated_since_gc_start() const {
return _free_set->get_bytes_allocated_since_gc_start();
}

size_t ShenandoahGlobalGeneration::get_affiliated_region_count() const {
return _free_set->global_affiliated_regions();
}

size_t ShenandoahGlobalGeneration::get_humongous_waste() const {
return _free_set->total_humongous_waste();
}


size_t ShenandoahGlobalGeneration::used_regions() const {
ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
assert(heap->mode()->is_generational(), "Region usage accounting is only for generational mode");
return heap->old_generation()->used_regions() + heap->young_generation()->used_regions();
return _free_set->global_affiliated_regions();
}

size_t ShenandoahGlobalGeneration::used_regions_size() const {
return ShenandoahHeap::heap()->capacity();
size_t used_regions = _free_set->global_affiliated_regions();
return used_regions * ShenandoahHeapRegion::region_size_bytes();
}
|
||||
size_t ShenandoahGlobalGeneration::available() const {
|
||||
|
||||
@ -32,15 +32,29 @@
|
||||
// A "generation" that represents the whole heap.
|
||||
class ShenandoahGlobalGeneration : public ShenandoahGeneration {
|
||||
public:
|
||||
ShenandoahGlobalGeneration(bool generational, uint max_queues, size_t max_capacity)
|
||||
: ShenandoahGeneration(generational ? GLOBAL : NON_GEN, max_queues, max_capacity) { }
|
||||
ShenandoahGlobalGeneration(bool generational, uint max_queues)
|
||||
: ShenandoahGeneration(generational ? GLOBAL : NON_GEN, max_queues) {
|
||||
#ifdef ASSERT
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
bool is_generational = heap->mode()->is_generational();
|
||||
assert(is_generational == generational, "sanity");
|
||||
assert((is_generational && (type() == ShenandoahGenerationType::GLOBAL)) ||
|
||||
(!is_generational && (type() == ShenandoahGenerationType::NON_GEN)), "OO sanity");
|
||||
#endif
|
||||
}
|
||||
|
||||
public:
|
||||
const char* name() const override;
|
||||
|
||||
size_t max_capacity() const override;
|
||||
size_t bytes_allocated_since_gc_start() const override;
|
||||
size_t used() const override;
|
||||
size_t used_regions() const override;
|
||||
size_t used_regions_size() const override;
|
||||
size_t get_humongous_waste() const override;
|
||||
size_t free_unaffiliated_regions() const override;
|
||||
size_t get_affiliated_region_count() const override;
|
||||
size_t max_capacity() const override;
|
||||
|
||||
size_t available() const override;
|
||||
size_t soft_available() const override;
|
||||
|
||||
|
||||
@ -201,7 +201,7 @@ jint ShenandoahHeap::initialize() {
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  _soft_max_size = SoftMaxHeapSize;
  _soft_max_size = clamp(SoftMaxHeapSize, min_capacity(), max_capacity());

  _committed = _initial_size;

@ -252,7 +252,7 @@ jint ShenandoahHeap::initialize() {
  // it means we're under passive mode and we have to initialize old gen
  // for the purpose of having card table.
  if (ShenandoahCardBarrier && !(mode()->is_generational())) {
    _old_generation = new ShenandoahOldGeneration(max_workers(), max_capacity());
    _old_generation = new ShenandoahOldGeneration(max_workers());
  }

  assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
@ -411,7 +411,6 @@ jint ShenandoahHeap::initialize() {

  {
    ShenandoahHeapLocker locker(lock());
    _free_set = new ShenandoahFreeSet(this, _num_regions);
    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
@ -426,12 +425,21 @@

      _affiliations[i] = ShenandoahAffiliation::FREE;
    }
    _free_set = new ShenandoahFreeSet(this, _num_regions);

    size_t young_cset_regions, old_cset_regions;

    post_initialize_heuristics();
    // We are initializing free set. We ignore cset region tallies.
    size_t first_old, last_old, num_old;
    size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
    _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
    if (mode()->is_generational()) {
      ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
      // We cannot call
      // gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions)
      // until after the heap is fully initialized. So we make up a safe value here.
      size_t allocation_runway = InitialHeapSize / 2;
      gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
    }
    _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
  }

@ -525,10 +533,14 @@ void ShenandoahHeap::initialize_mode() {
}

void ShenandoahHeap::initialize_heuristics() {
  _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity());
  _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers());
  _global_generation->initialize_heuristics(mode());
}

void ShenandoahHeap::post_initialize_heuristics() {
  _global_generation->post_initialize(this);
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
@ -673,6 +685,8 @@ public:
void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();

  check_soft_max_changed();

  // Schedule periodic task to report on gc thread CPU utilization
  _mmu_tracker.initialize();

@ -717,75 +731,6 @@ void ShenandoahHeap::decrease_committed(size_t bytes) {
  _committed -= bytes;
}

// For tracking usage based on allocations, it should be the case that:
// * The sum of regions::used == heap::used
// * The sum of a generation's regions::used == generation::used
// * The sum of a generation's humongous regions::free == generation::humongous_waste
// These invariants are checked by the verifier on GC safepoints.
//
// Additional notes:
// * When a mutator's allocation request causes a region to be retired, the
//   free memory left in that region is considered waste. It does not contribute
//   to the usage, but it _does_ contribute to allocation rate.
// * The bottom of a PLAB must be aligned on card size. In some cases this will
//   require padding in front of the PLAB (a filler object). Because this padding
//   is included in the region's used memory we include the padding in the usage
//   accounting as waste.
// * Mutator allocations are used to compute an allocation rate.
// * There are three sources of waste:
//   1. The padding used to align a PLAB on card size
//   2. Region's free is less than minimum TLAB size and is retired
//   3. The unused portion of memory in the last region of a humongous object
void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
  size_t actual_bytes = req.actual_size() * HeapWordSize;
  size_t wasted_bytes = req.waste() * HeapWordSize;
  ShenandoahGeneration* generation = generation_for(req.affiliation());

  if (req.is_gc_alloc()) {
    assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
    increase_used(generation, actual_bytes + wasted_bytes);
  } else {
    assert(req.is_mutator_alloc(), "Expected mutator alloc here");
    // padding and actual size both count towards allocation counter
    generation->increase_allocated(actual_bytes + wasted_bytes);

    // only actual size counts toward usage for mutator allocations
    increase_used(generation, actual_bytes);

    if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) {
      increase_humongous_waste(generation, wasted_bytes);
    }
  }
}
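
The first invariant above can be made concrete with a small sketch. This is illustrative only, not part of the patch; it assumes nothing beyond the heap_region_iterate() and used() APIs that appear elsewhere in this change:

// Illustrative sketch of the invariant: summing regions::used must
// reproduce heap::used. The verifier performs an equivalent (richer)
// check at GC safepoints.
class SumUsedClosure : public ShenandoahHeapRegionClosure {
  size_t _sum;
public:
  SumUsedClosure() : _sum(0) {}
  void heap_region_do(ShenandoahHeapRegion* r) override {
    _sum += r->used();          // per-region usage
  }
  size_t sum() const { return _sum; }
};

// At a safepoint:
//   SumUsedClosure cl;
//   heap->heap_region_iterate(&cl);
//   guarantee(cl.sum() == heap->used(), "regions::used must sum to heap::used");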

void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
  generation->increase_humongous_waste(bytes);
  if (!generation->is_global()) {
    global_generation()->increase_humongous_waste(bytes);
  }
}

void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
  generation->decrease_humongous_waste(bytes);
  if (!generation->is_global()) {
    global_generation()->decrease_humongous_waste(bytes);
  }
}

void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
  generation->increase_used(bytes);
  if (!generation->is_global()) {
    global_generation()->increase_used(bytes);
  }
}

void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
  generation->decrease_used(bytes);
  if (!generation->is_global()) {
    global_generation()->decrease_used(bytes);
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}
@ -1034,10 +979,6 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
    req.set_actual_size(0);
  }

  // This is called regardless of the outcome of the allocation to account
  // for any waste created by retiring regions with this request.
  increase_used(req);

  if (result != nullptr) {
    size_t requested = req.size();
    size_t actual = req.actual_size();
@ -2347,18 +2288,16 @@ void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  // the "forced sample" will not happen, and any recently allocated bytes are "unaccounted for". We pretend these
  // bytes are allocated after the start of subsequent gc.
  size_t unaccounted_bytes;
  ShenandoahFreeSet* _free_set = free_set();
  size_t bytes_allocated = _free_set->get_bytes_allocated_since_gc_start();
  if (mode()->is_generational()) {
    size_t bytes_allocated = young_generation()->bytes_allocated_since_gc_start();
    unaccounted_bytes = young_generation()->heuristics()->force_alloc_rate_sample(bytes_allocated);
    young_generation()->reset_bytes_allocated_since_gc_start(unaccounted_bytes);
    unaccounted_bytes = 0;
    old_generation()->reset_bytes_allocated_since_gc_start(unaccounted_bytes);
  } else {
    size_t bytes_allocated = global_generation()->bytes_allocated_since_gc_start();
    // Single-gen Shenandoah uses global heuristics.
    unaccounted_bytes = heuristics()->force_alloc_rate_sample(bytes_allocated);
  }
  global_generation()->reset_bytes_allocated_since_gc_start(unaccounted_bytes);
  ShenandoahHeapLocker locker(lock());
  _free_set->reset_bytes_allocated_since_gc_start(unaccounted_bytes);
}
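
The hand-off of unaccounted bytes can be illustrated with hypothetical numbers; this is a walk-through of the flow above, not additional code:

// Illustrative walk-through (hypothetical values):
//   bytes_allocated = 5 MB   // allocated since the last periodic sample
//   force_alloc_rate_sample(5 MB) attributes, say, 4 MB to the ending cycle
//   and returns 1 MB as "unaccounted".
//   reset_bytes_allocated_since_gc_start(1 MB) then seeds the new cycle's
//   counter, so the leftover 1 MB is treated as allocated after GC start.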

void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
@ -2743,6 +2682,9 @@ GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
}

MemoryUsage ShenandoahHeap::memory_usage() {
  assert(_initial_size <= ShenandoahHeap::heap()->max_capacity(), "sanity");
  assert(used() <= ShenandoahHeap::heap()->max_capacity(), "sanity");
  assert(committed() <= ShenandoahHeap::heap()->max_capacity(), "sanity");
  return MemoryUsage(_initial_size, used(), committed(), max_capacity());
}


@ -35,7 +35,6 @@
#include "gc/shenandoah/shenandoahController.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahEvacTracker.hpp"
#include "gc/shenandoah/shenandoahGenerationSizer.hpp"
#include "gc/shenandoah/shenandoahGenerationType.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMmuTracker.hpp"
@ -183,6 +182,7 @@ public:
  void post_initialize() override;
  void initialize_mode();
  virtual void initialize_heuristics();
  virtual void post_initialize_heuristics();
  virtual void print_init_logger() const;
  void initialize_serviceability() override;

@ -212,14 +212,7 @@ private:
  volatile size_t _committed;
  shenandoah_padding(1);

  void increase_used(const ShenandoahAllocRequest& req);

public:
  void increase_used(ShenandoahGeneration* generation, size_t bytes);
  void decrease_used(ShenandoahGeneration* generation, size_t bytes);
  void increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes);
  void decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes);

  void increase_committed(size_t bytes);
  void decrease_committed(size_t bytes);

@ -698,8 +691,6 @@ public:
                       size_t size,
                       Metaspace::MetadataType mdtype) override;

  void notify_mutator_alloc_words(size_t words, size_t waste);

  HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
  size_t tlab_capacity(Thread *thr) const override;
  size_t unsafe_max_tlab_alloc(Thread *thread) const override;

@ -578,16 +578,16 @@ void ShenandoahHeapRegion::recycle_internal() {
  set_affiliation(FREE);
}

// We try to recycle this region; we may fail if some other thread recycles it
// before we do. Upon return, the region has been recycled either way.
void ShenandoahHeapRegion::try_recycle_under_lock() {
  shenandoah_assert_heaplocked();
  if (is_trash() && _recycling.try_set()) {
  if (is_trash()) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahGeneration* generation = heap->generation_for(affiliation());

    heap->decrease_used(generation, used());
    generation->decrement_affiliated_region_count();

    // At freeset rebuild time, which precedes recycling of collection set, we treat all cset regions as
    // part of capacity, as empty, as fully available, and as unaffiliated. This provides short-lived optimism
    // for triggering heuristics. It greatly simplifies and reduces the locking overhead required
    // by more time-precise accounting of these details.
    recycle_internal();
  }
  _recycling.unset();
@ -608,11 +608,10 @@ void ShenandoahHeapRegion::try_recycle() {
  if (is_trash() && _recycling.try_set()) {
    // Double-check region state after winning the race to set the recycling flag
    if (is_trash()) {
      ShenandoahHeap* heap = ShenandoahHeap::heap();
      ShenandoahGeneration* generation = heap->generation_for(affiliation());
      heap->decrease_used(generation, used());
      generation->decrement_affiliated_region_count_without_lock();

      // At freeset rebuild time, which precedes recycling of collection set, we treat all cset regions as
      // part of capacity, as empty, as fully available, and as unaffiliated. This provides short-lived optimism
      // for triggering and pacing heuristics. It greatly simplifies and reduces the locking overhead required
      // by more time-precise accounting of these details.
      recycle_internal();
    }
    _recycling.unset();
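
Both recycling paths share the same double-checked pattern around the region's `_recycling` flag; the following is a condensed, illustrative sketch of that protocol (the named helpers stand in for the members used above and are not part of the patch):

// Condensed sketch of the recycle protocol used by both paths above.
void try_recycle_sketch(ShenandoahHeapRegion* r) {
  if (r->is_trash() && r->claim_recycling()) {  // claim_recycling() stands in for _recycling.try_set()
    if (r->is_trash()) {                        // re-check: another thread may have recycled the
                                                // region between our first check and winning the flag
      r->recycle();                             // stands in for recycle_internal()
    }
    r->release_recycling();                     // stands in for _recycling.unset()
  }
}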

@ -900,12 +899,11 @@ void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation
  heap->set_affiliation(this, new_affiliation);
}

void ShenandoahHeapRegion::decrement_humongous_waste() const {
void ShenandoahHeapRegion::decrement_humongous_waste() {
  assert(is_humongous(), "Should only use this for humongous regions");
  size_t waste_bytes = free();
  if (waste_bytes > 0) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahGeneration* generation = heap->generation_for(affiliation());
    heap->decrease_humongous_waste(generation, waste_bytes);
    heap->free_set()->decrease_humongous_waste_for_regular_bypass(this, waste_bytes);
  }
}

@ -366,6 +366,9 @@ public:
  // Allocation (return nullptr if full)
  inline HeapWord* allocate(size_t word_size, const ShenandoahAllocRequest& req);

  // Allocate fill after top
  inline HeapWord* allocate_fill(size_t word_size);

  inline void clear_live_data();
  void set_live_data(size_t s);

@ -492,7 +495,7 @@ public:
  }

private:
  void decrement_humongous_waste() const;
  void decrement_humongous_waste();
  void do_commit();
  void do_uncommit();


@ -87,6 +87,23 @@ HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocReq
  }
}

HeapWord* ShenandoahHeapRegion::allocate_fill(size_t size) {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(is_object_aligned(size), "alloc size breaks alignment: %zu", size);
  assert(size >= ShenandoahHeap::min_fill_size(), "Cannot fill less than min fill size");

  HeapWord* obj = top();
  HeapWord* new_top = obj + size;
  ShenandoahHeap::fill_with_object(obj, size);
  set_top(new_top);

  assert(is_object_aligned(new_top), "new top breaks alignment: " PTR_FORMAT, p2i(new_top));
  assert(is_object_aligned(obj), "obj is not aligned: " PTR_FORMAT, p2i(obj));

  return obj;
}
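
One use suggested by the PLAB-alignment notes earlier in this change is padding a region so the next allocation starts on a card boundary. The following is a hedged sketch; CardTable::card_size_in_words() is assumed for illustration and is not taken from this patch:

// Illustrative only: pad `region` so its top becomes card-aligned before
// carving out a PLAB. The filler is allocated with allocate_fill() and is
// accounted as waste by the caller.
size_t card_words = CardTable::card_size_in_words();            // assumed helper
size_t offset = ((uintptr_t)region->top() / HeapWordSize) % card_words;
if (offset != 0) {
  size_t pad = card_words - offset;
  if (pad < ShenandoahHeap::min_fill_size()) {
    pad += card_words;   // respect the minimum filler size asserted above
  }
  region->allocate_fill(pad);
}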


HeapWord* ShenandoahHeapRegion::allocate(size_t size, const ShenandoahAllocRequest& req) {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(is_object_aligned(size), "alloc size breaks alignment: %zu", size);

@ -33,7 +33,7 @@

class ShenandoahMarkBitMap {
public:
  typedef size_t idx_t;         // Type used for bit and word indices.
  typedef size_t idx_t;         // Type used for bit and word indices.
  typedef uintptr_t bm_word_t;  // Element type of array that represents the
                                // bitmap, with BitsPerWord bits per element.


@ -62,8 +62,10 @@ MemoryUsage ShenandoahMemoryPool::get_memory_usage() {
  // the assert below, which would also fail in downstream code. To avoid that, adjust values
  // to make sense under the race. See JDK-8207200.
  committed = MAX2(used, committed);
  assert(used <= committed, "used: %zu, committed: %zu", used, committed);

  assert(used <= committed, "used: %zu, committed: %zu", used, committed);
  assert(initial <= _heap->max_capacity(), "sanity");
  assert(committed <= _heap->max_capacity(), "sanity");
  assert(max <= _heap->max_capacity(), "sanity");
  return MemoryUsage(initial, used, committed, max);
}

@ -86,6 +88,10 @@ MemoryUsage ShenandoahGenerationalMemoryPool::get_memory_usage() {
  size_t used = used_in_bytes();
  size_t committed = _generation->used_regions_size();

  assert(initial <= _heap->max_capacity(), "sanity");
  assert(used <= _heap->max_capacity(), "sanity");
  assert(committed <= _heap->max_capacity(), "sanity");
  assert(max <= _heap->max_capacity(), "sanity");
  return MemoryUsage(initial, used, committed, max);
}
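
The MAX2 adjustment above exists because used and committed are read without a lock; a brief illustration of the race it repairs (illustrative values, not part of the patch):

// Illustrative timeline for the JDK-8207200 race:
//   reader: used = used_in_bytes();       // reads 90 MB
//   writer: a mutator allocates and a region is committed concurrently
//   reader: committed = <racy read>;      // reads a stale 88 MB
// Without adjustment, MemoryUsage(initial, 90 MB, 88 MB, max) would violate
// its used <= committed invariant; committed = MAX2(used, committed)
// repairs the transient inversion before the MemoryUsage is constructed.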


@ -138,21 +138,7 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) {
  // collection.
  heap->concurrent_final_roots();

  // We do not rebuild_free following increments of old marking because memory has not been reclaimed. However, we may
  // need to transfer memory to OLD in order to efficiently support the mixed evacuations that might immediately follow.
  size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0);
  heap->compute_old_generation_balance(allocation_runway, 0);

  ShenandoahGenerationalHeap::TransferResult result;
  {
    ShenandoahHeapLocker locker(heap->lock());
    result = heap->balance_generations();
  }

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    result.print_on("Old Mark", &ls);
  }
  return true;
}

@ -1,3 +1,4 @@

/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
@ -195,8 +196,8 @@ public:
  }
};

ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_capacity)
  : ShenandoahGeneration(OLD, max_queues, max_capacity),
ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues)
  : ShenandoahGeneration(OLD, max_queues),
    _coalesce_and_fill_region_array(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, ShenandoahHeap::heap()->num_regions(), mtGC)),
    _old_heuristics(nullptr),
    _region_balance(0),
@ -214,6 +215,7 @@ ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_cap
    _growth_before_compaction(INITIAL_GROWTH_BEFORE_COMPACTION),
    _min_growth_before_compaction ((ShenandoahMinOldGenGrowthPercent * FRACTIONAL_DENOMINATOR) / 100)
{
  assert(type() == ShenandoahGenerationType::OLD, "OO sanity");
  _live_bytes_after_last_mark = ShenandoahHeap::heap()->capacity() * INITIAL_LIVE_FRACTION / FRACTIONAL_DENOMINATOR;
  // Always clear references for old generation
  ref_processor()->set_soft_reference_policy(true);
@ -519,12 +521,20 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent
                            ShenandoahPhaseTimings::final_rebuild_freeset :
                            ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    size_t cset_young_regions, cset_old_regions;
    size_t young_trash_regions, old_trash_regions;
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(cset_young_regions, cset_old_regions, first_old, last_old, num_old);
    // This is just old-gen completion. No future budgeting required here. The only reason to rebuild the freeset here
    // is in case there was any immediate old garbage identified.
    heap->free_set()->finish_rebuild(cset_young_regions, cset_old_regions, num_old);
    heap->free_set()->prepare_to_rebuild(young_trash_regions, old_trash_regions, first_old, last_old, num_old);
    // At the end of old-gen, we may find that we have reclaimed immediate garbage, allowing a longer allocation runway.
    // We may also find that we have accumulated candidate regions for mixed evacuation. If so, we will want to expand
    // the OldCollector reserve in order to make room for these mixed evacuations.
    assert(ShenandoahHeap::heap()->mode()->is_generational(), "sanity");
    assert(young_trash_regions == 0, "sanity");
    ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
    size_t allocation_runway =
      gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trash_regions);
    gen_heap->compute_old_generation_balance(allocation_runway, old_trash_regions);

    heap->free_set()->finish_rebuild(young_trash_regions, old_trash_regions, num_old);
  }
}
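
The rebuild now follows a three-step protocol; condensed into an illustrative sketch (variable names shortened for illustration, not part of the patch):

// Condensed sketch of the rebuild protocol above (generational mode):
heap->free_set()->prepare_to_rebuild(young_trash, old_trash, first_old, last_old, num_old);
size_t runway = young_heuristics->bytes_of_allocation_runway_before_gc_trigger(young_trash);
gen_heap->compute_old_generation_balance(runway, old_trash);  // may expand the OldCollector reserve
heap->free_set()->finish_rebuild(young_trash, old_trash, num_old);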

@ -729,11 +739,6 @@ void ShenandoahOldGeneration::handle_evacuation(HeapWord* obj, size_t words, boo
  // do this in batch, in a background GC thread than to try to carefully dirty only cards
  // that hold interesting pointers right now.
  _card_scan->mark_range_as_dirty(obj, words);

  if (promotion) {
    // This evacuation was a promotion, track this as allocation against old gen
    increase_allocated(words * HeapWordSize);
  }
}

bool ShenandoahOldGeneration::has_unprocessed_collection_candidates() {
@ -839,3 +844,38 @@ void ShenandoahOldGeneration::clear_cards_for(ShenandoahHeapRegion* region) {
void ShenandoahOldGeneration::mark_card_as_dirty(void* location) {
  _card_scan->mark_card_as_dirty((HeapWord*)location);
}

size_t ShenandoahOldGeneration::used() const {
  return _free_set->old_used();
}

size_t ShenandoahOldGeneration::bytes_allocated_since_gc_start() const {
  assert(ShenandoahHeap::heap()->mode()->is_generational(), "NON_GEN implies not generational");
  return 0;
}

size_t ShenandoahOldGeneration::get_affiliated_region_count() const {
  return _free_set->old_affiliated_regions();
}

size_t ShenandoahOldGeneration::get_humongous_waste() const {
  return _free_set->humongous_waste_in_old();
}

size_t ShenandoahOldGeneration::used_regions() const {
  return _free_set->old_affiliated_regions();
}

size_t ShenandoahOldGeneration::used_regions_size() const {
  size_t used_regions = _free_set->old_affiliated_regions();
  return used_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahOldGeneration::max_capacity() const {
  size_t total_regions = _free_set->total_old_regions();
  return total_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahOldGeneration::free_unaffiliated_regions() const {
  return _free_set->old_unaffiliated_regions();
}

@ -94,7 +94,7 @@ private:
  bool coalesce_and_fill();

public:
  ShenandoahOldGeneration(uint max_queues, size_t max_capacity);
  ShenandoahOldGeneration(uint max_queues);

  ShenandoahHeuristics* initialize_heuristics(ShenandoahMode* gc_mode) override;

@ -145,7 +145,9 @@ public:
  void configure_plab_for_current_thread(const ShenandoahAllocRequest &req);

  // See description in field declaration
  void set_region_balance(ssize_t balance) { _region_balance = balance; }
  void set_region_balance(ssize_t balance) {
    _region_balance = balance;
  }
  ssize_t get_region_balance() const { return _region_balance; }

  // See description in field declaration
@ -331,6 +333,14 @@ public:

  static const char* state_name(State state);

  size_t bytes_allocated_since_gc_start() const override;
  size_t used() const override;
  size_t used_regions() const override;
  size_t used_regions_size() const override;
  size_t get_humongous_waste() const override;
  size_t free_unaffiliated_regions() const override;
  size_t get_affiliated_region_count() const override;
  size_t max_capacity() const override;
};



@ -25,7 +25,7 @@

#include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp"

ShenandoahSimpleBitMap::ShenandoahSimpleBitMap(size_t num_bits) :
ShenandoahSimpleBitMap::ShenandoahSimpleBitMap(idx_t num_bits) :
    _num_bits(num_bits),
    _num_words(align_up(num_bits, BitsPerWord) / BitsPerWord),
    _bitmap(NEW_C_HEAP_ARRAY(uintx, _num_words, mtGC))

@ -42,22 +42,23 @@
// represent index, even though index is "inherently" unsigned. There are two reasons for this choice:
// 1. We use -1 as a sentinel value to represent empty partitions. This same value may be used to represent
//    failure to find a previous set bit or previous range of set bits.
// 2. Certain loops are written most naturally if the iterator, which may hold the sentinel -1 value, can be
// 2. Certain loops are written most naturally if the induction variable, which may hold the sentinel -1 value, can be
//    declared as signed and the terminating condition can be < 0.

typedef ssize_t idx_t;

// ShenandoahSimpleBitMap resembles CHeapBitMap but adds missing support for find_first_consecutive_set_bits() and
// find_last_consecutive_set_bits. An alternative refactoring of code would subclass CHeapBitMap, but this might
// break abstraction rules, because efficient implementation requires assumptions about superclass internals that
// might be violated through future software maintenance.
class ShenandoahSimpleBitMap {
public:
  typedef ssize_t idx_t;
private:
  const idx_t _num_bits;
  const size_t _num_words;
  uintx* const _bitmap;

public:
  ShenandoahSimpleBitMap(size_t num_bits);
  ShenandoahSimpleBitMap(idx_t num_bits);

  ~ShenandoahSimpleBitMap();

@ -116,7 +117,6 @@ public:

  inline void clear_bit(idx_t idx) {
    assert((idx >= 0) && (idx < _num_bits), "precondition");
    assert(idx >= 0, "precondition");
    size_t array_idx = idx >> LogBitsPerWord;
    uintx bit_number = idx & (BitsPerWord - 1);
    uintx the_bit = nth_bit(bit_number);
@ -125,7 +125,6 @@ public:

  inline bool is_set(idx_t idx) const {
    assert((idx >= 0) && (idx < _num_bits), "precondition");
    assert(idx >= 0, "precondition");
    size_t array_idx = idx >> LogBitsPerWord;
    uintx bit_number = idx & (BitsPerWord - 1);
    uintx the_bit = nth_bit(bit_number);
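
Two details above are worth making concrete; the following is illustrative only:

// (a) The signed idx_t allows backward scans to terminate on the -1 sentinel:
//       for (idx_t i = _num_bits - 1; i >= 0; i--) { ... }
//     With an unsigned index, i >= 0 would always hold and the loop would
//     never terminate.
// (b) Bit addressing splits an index into a word index and a bit number.
//     For idx = 75 on a 64-bit word size:
//       array_idx  = 75 >> LogBitsPerWord   = 75 >> 6 = 1
//       bit_number = 75 & (BitsPerWord - 1) = 75 & 63 = 11
//     so is_set(75) tests bit 11 of word 1.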

@ -27,6 +27,8 @@

#include "gc/shenandoah/shenandoahSimpleBitMap.hpp"

using idx_t = ShenandoahSimpleBitMap::idx_t;

inline uintx ShenandoahSimpleBitMap::tail_mask(uintx bit_number) {
  if (bit_number >= BitsPerWord) {
    return -1;

@ -365,30 +365,57 @@ public:
// a subset (e.g. the young generation or old generation) of the total heap.
class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure {
private:
  size_t _used, _committed, _garbage, _regions, _humongous_waste, _trashed_regions;
  size_t _used, _committed, _garbage, _regions, _humongous_waste, _trashed_regions, _trashed_used;
  size_t _region_size_bytes, _min_free_size;
public:
  ShenandoahCalculateRegionStatsClosure() :
    _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0), _trashed_regions(0) {};
    _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0), _trashed_regions(0), _trashed_used(0)
  {
    _region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
    // Retired regions are not necessarily filled, though their remnant memory is considered used.
    _min_free_size = PLAB::min_size() * HeapWordSize;
  };

  void heap_region_do(ShenandoahHeapRegion* r) override {
    _used += r->used();
    _garbage += r->garbage();
    _committed += r->is_committed() ? ShenandoahHeapRegion::region_size_bytes() : 0;
    if (r->is_humongous()) {
      _humongous_waste += r->free();
    }
    if (r->is_trash()) {
      _trashed_regions++;
    if (r->is_cset() || r->is_trash()) {
      // Count the entire cset or trashed (formerly cset) region as used
      // Note: Immediate garbage trash regions were never in the cset.
      _used += _region_size_bytes;
      _garbage += _region_size_bytes - r->get_live_data_bytes();
      if (r->is_trash()) {
        _trashed_regions++;
        _trashed_used += _region_size_bytes;
      }
    } else {
      if (r->is_humongous()) {
        _used += _region_size_bytes;
        _garbage += _region_size_bytes - r->get_live_data_bytes();
        _humongous_waste += r->free();
      } else {
        size_t alloc_capacity = r->free();
        if (alloc_capacity < _min_free_size) {
          // this region has been retired already, count it as entirely consumed
          alloc_capacity = 0;
        }
        size_t bytes_used_in_region = _region_size_bytes - alloc_capacity;
        size_t bytes_garbage_in_region = bytes_used_in_region - r->get_live_data_bytes();
        size_t waste_bytes = r->free();
        _used += bytes_used_in_region;
        _garbage += bytes_garbage_in_region;
      }
    }
    _committed += r->is_committed() ? _region_size_bytes : 0;
    _regions++;
    log_debug(gc)("ShenandoahCalculateRegionStatsClosure: adding %zu for %s Region %zu, yielding: %zu",
                  r->used(), (r->is_humongous() ? "humongous" : "regular"), r->index(), _used);
  }

  size_t used() const { return _used; }
  size_t used_after_recycle() const { return _used - _trashed_used; }
  size_t committed() const { return _committed; }
  size_t garbage() const { return _garbage; }
  size_t regions() const { return _regions; }
  size_t trashed_regions() const { return _trashed_regions; }
  size_t waste() const { return _humongous_waste; }

  // span is the total memory affiliated with these stats (some of which is in use and other is available)
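
The before/after-recycle distinction can be illustrated with concrete numbers (illustrative only):

// Illustrative arithmetic for used() vs. used_after_recycle(), with 2 MB
// regions: suppose the closure visited three affiliated regions, one of
// which is trash awaiting recycling. Then
//   used()               = 3 * 2 MB        = 6 MB  (trash counted as fully used)
//   used_after_recycle() = 6 MB - 1 * 2 MB = 4 MB
// validate_usage() below selects between the two via its adjust_for_trash flag.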

@ -398,21 +425,21 @@ public:

class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure {
public:
  ShenandoahCalculateRegionStatsClosure old;
  ShenandoahCalculateRegionStatsClosure young;
  ShenandoahCalculateRegionStatsClosure global;
  ShenandoahCalculateRegionStatsClosure _old;
  ShenandoahCalculateRegionStatsClosure _young;
  ShenandoahCalculateRegionStatsClosure _global;

  void heap_region_do(ShenandoahHeapRegion* r) override {
    switch (r->affiliation()) {
      case FREE:
        return;
      case YOUNG_GENERATION:
        young.heap_region_do(r);
        global.heap_region_do(r);
        _young.heap_region_do(r);
        _global.heap_region_do(r);
        break;
      case OLD_GENERATION:
        old.heap_region_do(r);
        global.heap_region_do(r);
        _old.heap_region_do(r);
        _global.heap_region_do(r);
        break;
      default:
        ShouldNotReachHere();
@ -426,23 +453,22 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure {
                  byte_size_in_proper_unit(stats.used()), proper_unit_for_byte_size(stats.used()));
  }

  static void validate_usage(const bool adjust_for_padding,
  static void validate_usage(const bool adjust_for_padding, const bool adjust_for_trash,
                             const char* label, ShenandoahGeneration* generation, ShenandoahCalculateRegionStatsClosure& stats) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t generation_used = generation->used();
    size_t generation_used_regions = generation->used_regions();
    if (adjust_for_padding && (generation->is_young() || generation->is_global())) {
      size_t pad = heap->old_generation()->get_pad_for_promote_in_place();
      generation_used += pad;
    }

    guarantee(stats.used() == generation_used,
    size_t stats_used = adjust_for_trash? stats.used_after_recycle(): stats.used();
    guarantee(stats_used == generation_used,
              "%s: generation (%s) used size must be consistent: generation-used: " PROPERFMT ", regions-used: " PROPERFMT,
              label, generation->name(), PROPERFMTARGS(generation_used), PROPERFMTARGS(stats.used()));
              label, generation->name(), PROPERFMTARGS(generation_used), PROPERFMTARGS(stats_used));

    guarantee(stats.regions() == generation_used_regions,
              "%s: generation (%s) used regions (%zu) must equal regions that are in use (%zu)",
              label, generation->name(), generation->used_regions(), stats.regions());
    size_t stats_regions = adjust_for_trash? stats.regions() - stats.trashed_regions(): stats.regions();
    guarantee(stats_regions == generation_used_regions,
              "%s: generation (%s) used regions (%zu) must equal regions that are in use (%zu)%s",
              label, generation->name(), generation->used_regions(), stats_regions,
              adjust_for_trash? " (after adjusting for trash)": "");

    size_t generation_capacity = generation->max_capacity();
    guarantee(stats.non_trashed_span() <= generation_capacity,
@ -463,11 +489,11 @@
  ShenandoahHeap* _heap;
  const char* _phase;
  ShenandoahVerifier::VerifyRegions _regions;
public:
public:
  ShenandoahVerifyHeapRegionClosure(const char* phase, ShenandoahVerifier::VerifyRegions regions) :
    _heap(ShenandoahHeap::heap()),
    _phase(phase),
    _regions(regions) {};
    _heap(ShenandoahHeap::heap()),
    _phase(phase),
    _regions(regions) {};

  void print_failure(ShenandoahHeapRegion* r, const char* label) {
    ResourceMark rm;
@ -620,7 +646,7 @@ public:
};

class ShenandoahVerifyNoIncompleteSatbBuffers : public ThreadClosure {
public:
public:
  void do_thread(Thread* thread) override {
    SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
    if (!queue.is_empty()) {
@ -630,7 +656,7 @@ public:
};

class ShenandoahVerifierMarkedRegionTask : public WorkerTask {
private:
private:
  const char* _label;
  ShenandoahVerifier::VerifyOptions _options;
  ShenandoahHeap *_heap;
@ -760,7 +786,7 @@ public:
class VerifyThreadGCState : public ThreadClosure {
private:
  const char* const _label;
  char const _expected;
  char const _expected;

public:
  VerifyThreadGCState(const char* label, char expected) : _label(label), _expected(expected) {}
@ -864,16 +890,18 @@ void ShenandoahVerifier::verify_at_safepoint(ShenandoahGeneration* generation,
  size_t heap_used;
  if (_heap->mode()->is_generational() && (sizeness == _verify_size_adjusted_for_padding)) {
    // Prior to evacuation, regular regions that are to be evacuated in place are padded to prevent further allocations
    heap_used = _heap->used() + _heap->old_generation()->get_pad_for_promote_in_place();
    // but this padding is already represented in _heap->used()
    heap_used = _heap->used();
  } else if (sizeness != _verify_size_disable) {
    heap_used = _heap->used();
  }
  if (sizeness != _verify_size_disable) {
    guarantee(cl.used() == heap_used,
    size_t cl_size = (sizeness == _verify_size_exact_including_trash)? cl.used(): cl.used_after_recycle();
    guarantee(cl_size == heap_used,
              "%s: heap used size must be consistent: heap-used = %zu%s, regions-used = %zu%s",
              label,
              byte_size_in_proper_unit(heap_used), proper_unit_for_byte_size(heap_used),
              byte_size_in_proper_unit(cl.used()), proper_unit_for_byte_size(cl.used()));
              byte_size_in_proper_unit(cl_size), proper_unit_for_byte_size(cl_size));
  }
  size_t heap_committed = _heap->committed();
  guarantee(cl.committed() == heap_committed,
@ -911,18 +939,19 @@ void ShenandoahVerifier::verify_at_safepoint(ShenandoahGeneration* generation,
  _heap->heap_region_iterate(&cl);

  if (LogTarget(Debug, gc)::is_enabled()) {
    ShenandoahGenerationStatsClosure::log_usage(_heap->old_generation(),   cl.old);
    ShenandoahGenerationStatsClosure::log_usage(_heap->young_generation(), cl.young);
    ShenandoahGenerationStatsClosure::log_usage(_heap->global_generation(), cl.global);
    ShenandoahGenerationStatsClosure::log_usage(_heap->old_generation(),   cl._old);
    ShenandoahGenerationStatsClosure::log_usage(_heap->young_generation(), cl._young);
    ShenandoahGenerationStatsClosure::log_usage(_heap->global_generation(), cl._global);
  }
  if (sizeness == _verify_size_adjusted_for_padding) {
    ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->old_generation(), cl.old);
    ShenandoahGenerationStatsClosure::validate_usage(true, label, _heap->young_generation(), cl.young);
    ShenandoahGenerationStatsClosure::validate_usage(true, label, _heap->global_generation(), cl.global);
  } else if (sizeness == _verify_size_exact) {
    ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->old_generation(), cl.old);
    ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->young_generation(), cl.young);
    ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->global_generation(), cl.global);
    ShenandoahGenerationStatsClosure::validate_usage(false, true, label, _heap->old_generation(), cl._old);
    ShenandoahGenerationStatsClosure::validate_usage(true, true, label, _heap->young_generation(), cl._young);
    ShenandoahGenerationStatsClosure::validate_usage(true, true, label, _heap->global_generation(), cl._global);
  } else if (sizeness == _verify_size_exact || sizeness == _verify_size_exact_including_trash) {
    bool adjust_trash = (sizeness == _verify_size_exact);
    ShenandoahGenerationStatsClosure::validate_usage(false, adjust_trash, label, _heap->old_generation(), cl._old);
    ShenandoahGenerationStatsClosure::validate_usage(false, adjust_trash, label, _heap->young_generation(), cl._young);
    ShenandoahGenerationStatsClosure::validate_usage(false, adjust_trash, label, _heap->global_generation(), cl._global);
  }
  // else: sizeness must equal _verify_size_disable
}
@ -1139,7 +1168,8 @@ void ShenandoahVerifier::verify_after_update_refs(ShenandoahGeneration* generati
          _verify_cset_none,        // no cset references, all updated
          _verify_liveness_disable, // no reliable liveness data anymore
          _verify_regions_nocset,   // no cset regions, trash regions have appeared
          _verify_size_exact,       // expect generation and heap sizes to match exactly
          // expect generation and heap sizes to match exactly, including trash
          _verify_size_exact_including_trash,
          _verify_gcstate_stable    // update refs had cleaned up forwarded objects
  );
}
@ -1405,7 +1435,7 @@ void ShenandoahVerifier::verify_before_rebuilding_free_set() {
  ShenandoahGenerationStatsClosure cl;
  _heap->heap_region_iterate(&cl);

  ShenandoahGenerationStatsClosure::validate_usage(false, "Before free set rebuild", _heap->old_generation(), cl.old);
  ShenandoahGenerationStatsClosure::validate_usage(false, "Before free set rebuild", _heap->young_generation(), cl.young);
  ShenandoahGenerationStatsClosure::validate_usage(false, "Before free set rebuild", _heap->global_generation(), cl.global);
  ShenandoahGenerationStatsClosure::validate_usage(false, true, "Before free set rebuild", _heap->old_generation(), cl._old);
  ShenandoahGenerationStatsClosure::validate_usage(false, true, "Before free set rebuild", _heap->young_generation(), cl._young);
  ShenandoahGenerationStatsClosure::validate_usage(false, true, "Before free set rebuild", _heap->global_generation(), cl._global);
}

@ -155,7 +155,10 @@ public:
  _verify_size_exact,

  // Expect promote-in-place adjustments: padding inserted to temporarily prevent further allocation in regular regions
  _verify_size_adjusted_for_padding
  _verify_size_adjusted_for_padding,

  // Expect generation and heap sizes to match exactly, including trash regions that have not yet been recycled
  _verify_size_exact_including_trash
} VerifySize;

typedef enum {

@ -30,9 +30,10 @@
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"

ShenandoahYoungGeneration::ShenandoahYoungGeneration(uint max_queues, size_t max_capacity) :
  ShenandoahGeneration(YOUNG, max_queues, max_capacity),
ShenandoahYoungGeneration::ShenandoahYoungGeneration(uint max_queues) :
  ShenandoahGeneration(YOUNG, max_queues),
  _old_gen_task_queues(nullptr) {
  assert(type() == ShenandoahGenerationType::YOUNG, "OO sanity");
}

void ShenandoahYoungGeneration::set_concurrent_mark_in_progress(bool in_progress) {
@ -95,6 +96,41 @@ ShenandoahHeuristics* ShenandoahYoungGeneration::initialize_heuristics(Shenandoa
  return _heuristics;
}

size_t ShenandoahYoungGeneration::used() const {
  return _free_set->young_used();
}

size_t ShenandoahYoungGeneration::bytes_allocated_since_gc_start() const {
  assert(ShenandoahHeap::heap()->mode()->is_generational(), "Young implies generational");
  return _free_set->get_bytes_allocated_since_gc_start();
}

size_t ShenandoahYoungGeneration::get_affiliated_region_count() const {
  return _free_set->young_affiliated_regions();
}

size_t ShenandoahYoungGeneration::get_humongous_waste() const {
  return _free_set->humongous_waste_in_mutator();
}

size_t ShenandoahYoungGeneration::used_regions() const {
  return _free_set->young_affiliated_regions();
}

size_t ShenandoahYoungGeneration::used_regions_size() const {
  size_t used_regions = _free_set->young_affiliated_regions();
  return used_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahYoungGeneration::max_capacity() const {
  size_t total_regions = _free_set->total_young_regions();
  return total_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahYoungGeneration::free_unaffiliated_regions() const {
  return _free_set->young_unaffiliated_regions();
}

size_t ShenandoahYoungGeneration::available() const {
  // The collector reserve may eat into what the mutator is allowed to use. Make sure we are looking
  // at what is available to the mutator when reporting how much memory is available.

@ -34,7 +34,7 @@ private:
  ShenandoahYoungHeuristics* _young_heuristics;

public:
  ShenandoahYoungGeneration(uint max_queues, size_t max_capacity);
  ShenandoahYoungGeneration(uint max_queues);

  ShenandoahHeuristics* initialize_heuristics(ShenandoahMode* gc_mode) override;

@ -73,10 +73,16 @@ public:
    return _old_gen_task_queues != nullptr;
  }

  size_t bytes_allocated_since_gc_start() const override;
  size_t used() const override;
  size_t used_regions() const override;
  size_t used_regions_size() const override;
  size_t get_humongous_waste() const override;
  size_t free_unaffiliated_regions() const override;
  size_t get_affiliated_region_count() const override;
  size_t max_capacity() const override;

  size_t available() const override;

  // Do not override available_with_reserve() because that needs to see memory reserved for Collector

  size_t soft_available() const override;

  void prepare_gc() override;

@ -34,9 +34,8 @@
  nonstatic_field(ShenandoahHeap, _num_regions, size_t)                                       \
  nonstatic_field(ShenandoahHeap, _regions, ShenandoahHeapRegion**)                           \
  nonstatic_field(ShenandoahHeap, _log_min_obj_alignment_in_bytes, int)                       \
  nonstatic_field(ShenandoahHeap, _global_generation, ShenandoahGeneration*)                  \
  nonstatic_field(ShenandoahHeap, _free_set, ShenandoahFreeSet*)                              \
  volatile_nonstatic_field(ShenandoahHeap, _committed, size_t)                                \
  volatile_nonstatic_field(ShenandoahGeneration, _used, size_t)                               \
  static_field(ShenandoahHeapRegion, RegionSizeBytes, size_t)                                 \
  static_field(ShenandoahHeapRegion, RegionSizeBytesShift, size_t)                            \
  volatile_nonstatic_field(ShenandoahHeapRegion, _state, ShenandoahHeapRegion::RegionState)   \
@ -44,6 +43,7 @@
  nonstatic_field(ShenandoahHeapRegion, _bottom, HeapWord* const)                             \
  nonstatic_field(ShenandoahHeapRegion, _top, HeapWord*)                                      \
  nonstatic_field(ShenandoahHeapRegion, _end, HeapWord* const)                                \
  nonstatic_field(ShenandoahFreeSet, _total_global_used, size_t)                              \

#define VM_INT_CONSTANTS_SHENANDOAH(declare_constant, declare_constant_with_value)            \
  declare_constant(ShenandoahHeapRegion::_empty_uncommitted)                                  \
@ -66,7 +66,7 @@
  declare_toplevel_type(ShenandoahHeap*)                                                      \
  declare_toplevel_type(ShenandoahHeapRegion*)                                                \
  declare_toplevel_type(ShenandoahHeapRegion::RegionState)                                    \
  declare_toplevel_type(ShenandoahGeneration)                                                 \
  declare_toplevel_type(ShenandoahGeneration*)                                                \
  declare_toplevel_type(ShenandoahFreeSet)                                                    \
  declare_toplevel_type(ShenandoahFreeSet*)                                                   \

#endif // SHARE_GC_SHENANDOAH_VMSTRUCTS_SHENANDOAH_HPP

@ -34,7 +34,7 @@ import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;

public class ShenandoahGeneration extends VMObject {
public class ShenandoahFreeSet extends VMObject {
    private static CIntegerField used;
    static {
        VM.registerVMInitializedObserver(new Observer() {
@ -45,11 +45,11 @@ public class ShenandoahGeneration extends VMObject {
    }

    private static synchronized void initialize(TypeDataBase db) {
        Type type = db.lookupType("ShenandoahGeneration");
        used = type.getCIntegerField("_used");
        Type type = db.lookupType("ShenandoahFreeSet");
        used = type.getCIntegerField("_total_global_used");
    }

    public ShenandoahGeneration(Address addr) {
    public ShenandoahFreeSet(Address addr) {
        super(addr);
    }

@ -43,7 +43,7 @@ import sun.jvm.hotspot.utilities.Observer;

public class ShenandoahHeap extends CollectedHeap {
    private static CIntegerField numRegions;
    private static AddressField globalGeneration;
    private static AddressField globalFreeSet;
    private static CIntegerField committed;
    private static AddressField regions;
    private static CIntegerField logMinObjAlignmentInBytes;
@ -60,7 +60,7 @@ public class ShenandoahHeap extends CollectedHeap {
    private static synchronized void initialize(TypeDataBase db) {
        Type type = db.lookupType("ShenandoahHeap");
        numRegions = type.getCIntegerField("_num_regions");
        globalGeneration = type.getAddressField("_global_generation");
        globalFreeSet = type.getAddressField("_free_set");
        committed = type.getCIntegerField("_committed");
        regions = type.getAddressField("_regions");
        logMinObjAlignmentInBytes = type.getCIntegerField("_log_min_obj_alignment_in_bytes");
@ -89,9 +89,9 @@ public class ShenandoahHeap extends CollectedHeap {

    @Override
    public long used() {
        Address globalGenerationAddress = globalGeneration.getValue(addr);
        ShenandoahGeneration global = VMObjectFactory.newObject(ShenandoahGeneration.class, globalGenerationAddress);
        return global.used();
        Address globalFreeSetAddress = globalFreeSet.getValue(addr);
        ShenandoahFreeSet freeset = VMObjectFactory.newObject(ShenandoahFreeSet.class, globalFreeSetAddress);
        return freeset.used();
    }

    public long committed() {

@ -52,7 +52,7 @@ protected:

  ShenandoahHeap::heap()->lock()->lock(false);

  old = new ShenandoahOldGeneration(8, 1024 * 1024);
  old = new ShenandoahOldGeneration(8);
  old->set_promoted_reserve(512 * HeapWordSize);
  old->expend_promoted(256 * HeapWordSize);
  old->set_evacuation_reserve(512 * HeapWordSize);

@ -84,7 +84,8 @@ class ShenandoahOldHeuristicTest : public ::testing::Test {
  _heap->lock()->lock(false);
  ShenandoahResetRegions reset;
  _heap->heap_region_iterate(&reset);
  _heap->old_generation()->set_capacity(ShenandoahHeapRegion::region_size_bytes() * 10);
  // _heap->old_generation()->set_capacity(ShenandoahHeapRegion::region_size_bytes() * 10)
  _heap->free_set()->resize_old_collector_capacity(10);
  _heap->old_generation()->set_evacuation_reserve(ShenandoahHeapRegion::region_size_bytes() * 4);
  _heuristics->abandon_collection_candidates();
  _collection_set->clear();

@ -144,12 +144,25 @@
 * @requires vm.gc.Shenandoah
 * @library /test/lib
 *
 * @run main/othervm/timeout=240 -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
 * @run main/othervm/timeout=480 -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
 *      -XX:+UseShenandoahGC
 *      -XX:-UseTLAB -XX:+ShenandoahVerify
 *      TestSieveObjects
 */

/*
 * @test id=no-tlab-genshen
 * @summary Acceptance tests: collector can deal with retained objects
 * @key randomness
 * @requires vm.gc.Shenandoah
 * @library /test/lib
 *
 * @run main/othervm/timeout=480 -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
 *      -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational
 *      -XX:-UseTLAB -XX:+ShenandoahVerify
 *      TestSieveObjects
 */

import java.util.Random;
import jdk.test.lib.Utils;


@ -112,8 +112,10 @@ import com.sun.management.GarbageCollectionNotificationInfo;

public class TestChurnNotifications {

    static final long HEAP_MB = 128;                              // adjust for test configuration above
    static final long TARGET_MB = Long.getLong("target", 2_000);  // 2 Gb allocation
    static final long HEAP_MB = 128;                              // adjust for test configuration above
    static final long TARGET_MB = Long.getLong("target", 2_000);  // 2 Gb allocation
    static final long ANTICIPATED_HUMONGOUS_WASTE_PER_ARRAY = 124_272;


    // Should we track the churn precisely?
    // Precise tracking is only reliable when GC is fully stop-the-world. Otherwise,
@ -159,7 +161,7 @@ public class TestChurnNotifications {
        final int size = 100_000;
        long count = TARGET_MB * 1024 * 1024 / (16 + 4 * size);

        long mem = count * (16 + 4 * size);
        long mem = count * (16 + 4 * size + ANTICIPATED_HUMONGOUS_WASTE_PER_ARRAY);

        for (int c = 0; c < count; c++) {
            sink = new int[size];