mirror of https://github.com/openjdk/jdk.git
synced 2026-01-28 03:58:21 +00:00
8369048: GenShen: Defer ShenFreeSet::available() during rebuild
Reviewed-by: wkemper, ysr
This commit is contained in:
parent 9ed0ecbcc1
commit 0d19d91b44
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp

@@ -338,7 +338,7 @@ void ShenandoahRegionPartitions::make_all_regions_unavailable() {
     _empty_region_counts[partition_id] = 0;
     _used[partition_id] = 0;
     _humongous_waste[partition_id] = 0;
-    _available[partition_id] = FreeSetUnderConstruction;
+    _available[partition_id] = 0;
   }
 }
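The key change here: the old code published the FreeSetUnderConstruction sentinel (SIZE_MAX) in _available while the free set was being torn down, and readers had to recognize that magic value; the new code simply zeroes the field because a rebuild lock now keeps readers out entirely. A minimal standalone sketch of the two schemes, using std::mutex as a stand-in for ShenandoahRebuildLock (the class and members below are hypothetical, not HotSpot code):

```cpp
#include <cstddef>
#include <cstdint>
#include <mutex>

class FreeSetSketch {                  // hypothetical, not ShenandoahFreeSet
  static const size_t FreeSetUnderConstruction = SIZE_MAX;  // old sentinel
  size_t _available = 100;
  mutable std::mutex _rebuild_lock;    // stand-in for ShenandoahRebuildLock

public:
  // Old scheme: _available is set to the sentinel during rebuild, and every
  // unlocked reader must recognize and tolerate the magic value.
  size_t available_old_style() const {
    size_t v = _available;
    return (v == FreeSetUnderConstruction) ? 0 : v;
  }

  // New scheme: writers hold _rebuild_lock across the whole rebuild, so a
  // plain zero is fine; the reader below can never observe a partial state.
  void rebuild() {
    std::lock_guard<std::mutex> g(_rebuild_lock);
    _available = 0;                    // mid-rebuild value, never visible
    _available = 100;                  // final, consistent value
  }

  size_t available() const {
    std::lock_guard<std::mutex> g(_rebuild_lock);
    return _available;
  }
};

int main() {
  FreeSetSketch fs;
  fs.rebuild();
  return fs.available() == 100 ? 0 : 1;
}
```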
@@ -2495,6 +2495,10 @@ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_r
 
 void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_trashed_regions, size_t &old_trashed_regions,
                                            size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
+  shenandoah_assert_heaplocked();
+  assert(rebuild_lock() != nullptr, "sanity");
+  rebuild_lock()->lock(false);
+  // This resets all state information, removing all regions from all sets.
   clear();
   log_debug(gc, free)("Rebuilding FreeSet");
 
   // This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the
@@ -2524,6 +2528,9 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_
   _total_young_regions = _heap->num_regions() - old_region_count;
   _total_global_regions = _heap->num_regions();
   establish_old_collector_alloc_bias();
+
+  // Release the rebuild lock now. What remains in this function is read-only
+  rebuild_lock()->unlock();
   _partitions.assert_bounds(true);
   log_status();
 }
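Together, the two hunks above bracket the rebuild: prepare_to_rebuild() takes the rebuild lock (its caller already holds the heap lock) and finish_rebuild() releases it once only read-only work (assertions, logging) remains. A simplified sketch of that bracketed protocol, assuming standard-library mutexes in place of the HotSpot locks (the exact semantics of ShenandoahLock and its lock(false) parameter are not modeled here):

```cpp
#include <mutex>

std::mutex heap_lock;     // stand-in for the global heap lock
std::mutex rebuild_lock;  // stand-in for ShenandoahRebuildLock

void prepare_to_rebuild() {
  // Caller already holds heap_lock; take rebuild_lock second (rank order).
  rebuild_lock.lock();
  // ... clear() and repartition all regions ...
}

void finish_rebuild() {
  // ... recompute totals, establish allocation bias ...
  rebuild_lock.unlock();  // what remains (asserts, logging) is read-only
}

int main() {
  std::lock_guard<std::mutex> heap(heap_lock);  // outer lock held by caller
  prepare_to_rebuild();
  finish_rebuild();
  return 0;
}
```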
@@ -3058,7 +3065,7 @@ void ShenandoahFreeSet::log_status() {
   size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
   // capacity() is capacity of mutator
   // used() is used of mutator
-  size_t free = capacity() - used();
+  size_t free = capacity_holding_lock() - used_holding_lock();
   // Since certain regions that belonged to the Mutator free partition at the time of most recent rebuild may have been
   // retired, the sum of used and capacities within regions that are still in the Mutator free partition may not match
   // my internally tracked values of used() and free().
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp

@@ -28,9 +28,13 @@
 
 #include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahLock.hpp"
 #include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
 #include "logging/logStream.hpp"
 
+typedef ShenandoahLock ShenandoahRebuildLock;
+typedef ShenandoahLocker ShenandoahRebuildLocker;
+
 // Each ShenandoahHeapRegion is associated with a ShenandoahFreeSetPartitionId.
 enum class ShenandoahFreeSetPartitionId : uint8_t {
   Mutator, // Region is in the Mutator free set: available memory is available to mutators.
@@ -139,8 +143,6 @@ public:
   ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set);
   ~ShenandoahRegionPartitions() {}
 
-  static const size_t FreeSetUnderConstruction = SIZE_MAX;
-
   inline idx_t max() const { return _max; }
 
   // At initialization, reset OldCollector tallies
@@ -352,6 +354,16 @@ public:
     return _available[int(which_partition)];
   }
 
+  // Return available_in assuming caller does not hold the heap lock but does hold the rebuild_lock.
+  // The returned value may be "slightly stale" because we do not assure that every fetch of this value
+  // sees the most recent update of this value. Requiring the caller to hold the rebuild_lock assures
+  // that we don't see "bogus" values that are "worse than stale". During rebuild of the freeset, the
+  // value of _available is not reliable.
+  inline size_t available_in_locked_for_rebuild(ShenandoahFreeSetPartitionId which_partition) const {
+    assert (which_partition < NumPartitions, "selected free set must be valid");
+    return _available[int(which_partition)];
+  }
+
   // Returns bytes of humongous waste
   inline size_t humongous_waste(ShenandoahFreeSetPartitionId which_partition) const {
     assert (which_partition < NumPartitions, "selected free set must be valid");
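The comment above distinguishes "slightly stale" from "worse than stale": a reader holding the rebuild lock may see a value that is out of date, but never one of the intermediate values written while the free set is mid-rebuild. A toy single-threaded model of that guarantee (hypothetical names, std::mutex in place of the rebuild lock):

```cpp
#include <cassert>
#include <cstddef>
#include <mutex>

std::mutex rebuild_lock;   // stand-in for ShenandoahRebuildLock
size_t available = 100;    // stand-in for _available[Mutator]

void rebuild() {
  std::lock_guard<std::mutex> g(rebuild_lock);
  available = 0;           // intermediate: "worse than stale" if observed
  available += 60;         // intermediate: still unreliable
  available += 40;         // final: consistent again
}

size_t available_in_locked_for_rebuild() {
  // Holding the lock means we read either the pre- or post-rebuild value,
  // never 0 or 60 above. The result may still be slightly stale by the time
  // the caller uses it, and that is acceptable.
  std::lock_guard<std::mutex> g(rebuild_lock);
  return available;
}

int main() {
  assert(available_in_locked_for_rebuild() == 100);  // before rebuild
  rebuild();
  assert(available_in_locked_for_rebuild() == 100);  // after rebuild
  return 0;
}
```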
@@ -359,23 +371,6 @@ public:
     return _humongous_waste[int(which_partition)];
   }
 
-  // Return available_in assuming caller does not hold the heap lock. In production builds, available is
-  // returned without acquiring the lock. In debug builds, the global heap lock is acquired in order to
-  // enforce a consistency assert.
-  inline size_t available_in_not_locked(ShenandoahFreeSetPartitionId which_partition) const {
-    assert (which_partition < NumPartitions, "selected free set must be valid");
-    shenandoah_assert_not_heaplocked();
-#ifdef ASSERT
-    ShenandoahHeapLocker locker(ShenandoahHeap::heap()->lock());
-    assert((_available[int(which_partition)] == FreeSetUnderConstruction) ||
-           (_available[int(which_partition)] == _capacity[int(which_partition)] - _used[int(which_partition)]),
-           "Expect available (%zu) equals capacity (%zu) - used (%zu) for partition %s",
-           _available[int(which_partition)], _capacity[int(which_partition)], _used[int(which_partition)],
-           partition_membership_name(idx_t(which_partition)));
-#endif
-    return _available[int(which_partition)];
-  }
-
   inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value);
 
   inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
@@ -440,6 +435,15 @@ private:
   ShenandoahHeap* const _heap;
   ShenandoahRegionPartitions _partitions;
 
+  // This locks the rebuild process (in combination with the global heap lock). Whenever we rebuild the free set,
+  // we first acquire the global heap lock and then we acquire this _rebuild_lock in a nested context. Threads that
+  // need to check available acquire only the _rebuild_lock, to make sure that they are not obtaining the value of
+  // available for a partially reconstructed free-set.
+  //
+  // Note that there is rank ordering of nested locks to prevent deadlock. All threads that need to acquire both
+  // locks will acquire them in the same order: first the global heap lock and then the rebuild lock.
+  ShenandoahRebuildLock _rebuild_lock;
+
   size_t _total_humongous_waste;
 
   HeapWord* allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r);
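The rank-ordering rule in the comment above is the classic deadlock-avoidance discipline: every thread that needs both locks takes them in the same order, heap lock first, rebuild lock second, while read-only consumers of available take only the rebuild lock. A minimal sketch (std::mutex stand-ins, hypothetical function names):

```cpp
#include <mutex>
#include <thread>

std::mutex heap_lock;     // rank 1: whoever needs both takes this first
std::mutex rebuild_lock;  // rank 2: always taken second

void rebuild_free_set() {
  std::lock_guard<std::mutex> h(heap_lock);     // rank 1 first
  std::lock_guard<std::mutex> r(rebuild_lock);  // rank 2 second
  // ... tear down and repopulate partitions ...
}

void read_available() {
  // Readers that only need a consistent 'available' take just rank 2.
  std::lock_guard<std::mutex> r(rebuild_lock);
  // ... read _available ...
}

int main() {
  // Because no thread ever takes rank 2 before rank 1, these two threads
  // cannot deadlock regardless of scheduling.
  std::thread t1(rebuild_free_set);
  std::thread t2(read_available);
  t1.join();
  t2.join();
  return 0;
}
```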
@@ -635,10 +639,12 @@ private:
   void log_status();
 
 public:
-  static const size_t FreeSetUnderConstruction = ShenandoahRegionPartitions::FreeSetUnderConstruction;
-
   ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions);
 
+  ShenandoahRebuildLock* rebuild_lock() {
+    return &_rebuild_lock;
+  }
+
   inline size_t max_regions() const { return _partitions.max(); }
   ShenandoahFreeSetPartitionId membership(size_t index) const { return _partitions.membership(index); }
   inline void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition,
@@ -776,9 +782,29 @@ public:
   // adjusts available with respect to lock holders. However, sequential calls to these three functions may produce
   // inconsistent data: available may not equal capacity - used because the intermediate states of any "atomic"
   // locked action can be seen by these unlocked functions.
-  inline size_t capacity() const { return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator); }
-  inline size_t used() const { return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator); }
-  inline size_t available() const { return _partitions.available_in_not_locked(ShenandoahFreeSetPartitionId::Mutator); }
+  inline size_t capacity_holding_lock() const {
+    shenandoah_assert_heaplocked();
+    return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator);
+  }
+  inline size_t capacity_not_holding_lock() {
+    shenandoah_assert_not_heaplocked();
+    ShenandoahRebuildLocker locker(rebuild_lock());
+    return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator);
+  }
+  inline size_t used_holding_lock() const {
+    shenandoah_assert_heaplocked();
+    return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator);
+  }
+  inline size_t used_not_holding_lock() {
+    shenandoah_assert_not_heaplocked();
+    ShenandoahRebuildLocker locker(rebuild_lock());
+    return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator);
+  }
+  inline size_t available() {
+    shenandoah_assert_not_heaplocked();
+    ShenandoahRebuildLocker locker(rebuild_lock());
+    return _partitions.available_in_locked_for_rebuild(ShenandoahFreeSetPartitionId::Mutator);
+  }
 
   inline size_t total_humongous_waste() const { return _total_humongous_waste; }
   inline size_t humongous_waste_in_mutator() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator); }
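This hunk replaces the unlocked capacity()/used()/available() one-liners with explicit *_holding_lock and *_not_holding_lock variants. A standalone sketch of that paired-accessor pattern, with a toy ownership flag standing in for the shenandoah_assert_heaplocked()/shenandoah_assert_not_heaplocked() checks (class and member names are hypothetical):

```cpp
#include <cassert>
#include <cstddef>
#include <mutex>

class FreeSetApiSketch {               // hypothetical, not ShenandoahFreeSet
  std::mutex _rebuild_lock;
  bool _heap_locked = false;           // toy stand-in for heap-lock ownership
  size_t _used = 0;

public:
  void lock_heap()   { _heap_locked = true; }
  void unlock_heap() { _heap_locked = false; }

  size_t used_holding_lock() const {
    assert(_heap_locked);              // models shenandoah_assert_heaplocked()
    return _used;                      // no extra locking needed
  }

  size_t used_not_holding_lock() {
    assert(!_heap_locked);             // models shenandoah_assert_not_heaplocked()
    std::lock_guard<std::mutex> g(_rebuild_lock);  // consistent snapshot
    return _used;
  }
};

int main() {
  FreeSetApiSketch fs;
  fs.lock_heap();
  size_t a = fs.used_holding_lock();      // caller owns the heap lock
  fs.unlock_heap();
  size_t b = fs.used_not_holding_lock();  // accessor locks for itself
  return (a == b) ? 0 : 1;
}
```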
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp

@@ -1113,18 +1113,17 @@ void ShenandoahFullGC::phase5_epilog() {
   ShenandoahPostCompactClosure post_compact;
   heap->heap_region_iterate(&post_compact);
   heap->collection_set()->clear();
-  size_t young_cset_regions, old_cset_regions;
-  size_t first_old, last_old, num_old;
-  heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
-
-  // We also do not expand old generation size following Full GC because we have scrambled age populations and
-  // no longer have objects separated by age into distinct regions.
-  if (heap->mode()->is_generational()) {
-    ShenandoahGenerationalFullGC::compute_balances();
-  }
-
-  heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
+  size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
+  ShenandoahFreeSet* free_set = heap->free_set();
+  {
+    free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
+    // We also do not expand old generation size following Full GC because we have scrambled age populations and
+    // no longer have objects separated by age into distinct regions.
+    if (heap->mode()->is_generational()) {
+      ShenandoahGenerationalFullGC::compute_balances();
+    }
+    free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
+  }
 
   // Set mark incomplete because the marking bitmaps have been reset except pinned regions.
   _generation->set_mark_incomplete();
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp

@@ -815,10 +815,9 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                                          ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
     ShenandoahHeapLocker locker(heap->lock());
-    size_t young_cset_regions, old_cset_regions;
 
     // We are preparing for evacuation. At this time, we ignore cset region tallies.
-    size_t first_old, last_old, num_old;
+    size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
     _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
 
     if (heap->mode()->is_generational()) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

@@ -426,8 +426,6 @@ jint ShenandoahHeap::initialize() {
     _affiliations[i] = ShenandoahAffiliation::FREE;
   }
   _free_set = new ShenandoahFreeSet(this, _num_regions);
-
-
   post_initialize_heuristics();
   // We are initializing free set. We ignore cset region tallies.
   size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
@@ -1658,7 +1656,7 @@ void ShenandoahHeap::verify(VerifyOption vo) {
   }
 }
 size_t ShenandoahHeap::tlab_capacity() const {
-  return _free_set->capacity();
+  return _free_set->capacity_not_holding_lock();
 }
 
 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
@@ -2138,7 +2136,7 @@ GCTracer* ShenandoahHeap::tracer() {
 }
 
 size_t ShenandoahHeap::tlab_used() const {
-  return _free_set->used();
+  return _free_set->used_not_holding_lock();
 }
 
 bool ShenandoahHeap::try_cancel_gc(GCCause::Cause cause) {
@@ -2528,8 +2526,7 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
                          ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
                          ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
   ShenandoahHeapLocker locker(lock());
-  size_t young_cset_regions, old_cset_regions;
-  size_t first_old_region, last_old_region, old_region_count;
+  size_t young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count;
   _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
   // If there are no old regions, first_old_region will be greater than last_old_region
   assert((first_old_region > last_old_region) ||
@@ -2548,13 +2545,14 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this
     // available for transfer to old. Note that transfer of humongous regions does not impact available.
     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
-    size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
+    size_t allocation_runway =
+      gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
     gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
 
     // Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available
     // memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular
-    // regions in place when many of these regular regions have an abundant amount of available memory within them. Fragmentation
-    // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
+    // regions in place when many of these regular regions have an abundant amount of available memory within them.
+    // Fragmentation will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
     //
     // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
     // within partially consumed regions of memory.
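As a worked example of the 12.5% rule in the comment above (the numbers here are hypothetical; actual region size and old-gen capacity depend on configuration): with 128 old regions, 12.5% of old-gen is exactly 16 regions' worth of memory, matching the "16 regions worth of data" trigger.

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  const size_t region_bytes = 4u * 1024 * 1024;   // hypothetical region size
  const size_t old_regions  = 128;                // hypothetical old-gen size
  const size_t old_capacity = old_regions * region_bytes;
  const size_t threshold    = old_capacity / 8;   // 12.5% of old-gen

  // 64 MiB here, i.e. 16 regions' worth of memory.
  std::printf("threshold = %zu bytes (%zu regions)\n",
              threshold, threshold / region_bytes);

  // Free memory scattered inside partially consumed old regions.
  const size_t fragmented_free = 70u * 1024 * 1024;
  std::printf("excessive fragmentation: %s\n",
              fragmented_free > threshold ? "yes" : "no");
  return 0;
}
```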
--- a/src/hotspot/share/gc/shenandoah/shenandoahMetrics.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMetrics.cpp

@@ -30,7 +30,7 @@
 
 ShenandoahMetricsSnapshot::ShenandoahMetricsSnapshot(ShenandoahFreeSet* free_set)
   : _free_set(free_set)
-  , _used_before(free_set->used())
+  , _used_before(free_set->used_not_holding_lock())
   , _if_before(free_set->internal_fragmentation())
   , _ef_before(free_set->external_fragmentation()) {
 }
@@ -38,7 +38,6 @@ ShenandoahMetricsSnapshot::ShenandoahMetricsSnapshot(ShenandoahFreeSet* free_set
 bool ShenandoahMetricsSnapshot::is_good_progress() const {
   // Under the critical threshold?
   const size_t free_actual = _free_set->available();
-  assert(free_actual != ShenandoahFreeSet::FreeSetUnderConstruction, "Avoid this race");
 
   // ShenandoahCriticalFreeThreshold is expressed as a percentage. We multiply this percentage by 1/100th
   // of the soft max capacity to determine whether the available memory within the mutator partition of the
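The threshold computation the comment describes, as a worked example with hypothetical numbers (the real ShenandoahCriticalFreeThreshold default and the soft max capacity depend on the JVM configuration):

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  const size_t soft_max_capacity = 1024u * 1024 * 1024;  // 1 GiB, hypothetical
  const size_t critical_pct      = 1;  // hypothetical ShenandoahCriticalFreeThreshold

  // "multiply this percentage by 1/100th of the soft max capacity"
  const size_t min_free = critical_pct * (soft_max_capacity / 100);

  const size_t free_actual = 8u * 1024 * 1024;  // mutator 'available', hypothetical
  std::printf("critical threshold = %zu bytes\n", min_free);
  std::printf("under critical threshold: %s\n",
              free_actual < min_free ? "yes" : "no");
  return 0;
}
```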
@@ -52,7 +51,7 @@ bool ShenandoahMetricsSnapshot::is_good_progress() const {
   }
 
   // Freed up enough?
-  const size_t used_after = _free_set->used();
+  const size_t used_after = _free_set->used_not_holding_lock();
   const size_t progress_actual = (_used_before > used_after) ? _used_before - used_after : 0;
   const size_t progress_expected = ShenandoahHeapRegion::region_size_bytes();
   const bool prog_used = progress_actual >= progress_expected;
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp

@@ -412,9 +412,12 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent
   ShenandoahGCPhase phase(concurrent ?
                           ShenandoahPhaseTimings::final_rebuild_freeset :
                           ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
+  ShenandoahFreeSet* free_set = heap->free_set();
   ShenandoahHeapLocker locker(heap->lock());
-  size_t young_trash_regions, old_trash_regions;
-  size_t first_old, last_old, num_old;
+
+  // This is completion of old-gen marking. We rebuild in order to reclaim immediate garbage and to
+  // prepare for subsequent mixed evacuations.
+  size_t young_trash_regions, old_trash_regions, first_old, last_old, num_old;
   heap->free_set()->prepare_to_rebuild(young_trash_regions, old_trash_regions, first_old, last_old, num_old);
   // At the end of old-gen, we may find that we have reclaimed immediate garbage, allowing a longer allocation runway.
   // We may also find that we have accumulated candidate regions for mixed evacuation. If so, we will want to expand