8350050: Shenandoah: Disable and purge allocation pacing support

Reviewed-by: wkemper, shade, kdnilsen
This commit is contained in:
Y. Srinivas Ramakrishna 2025-07-23 00:23:20 +00:00
parent 4994bd5942
commit 79f9d8d832
20 changed files with 7 additions and 786 deletions

View File

@@ -36,9 +36,6 @@ void ShenandoahPassiveMode::initialize_flags() const {
FLAG_SET_DEFAULT(ExplicitGCInvokesConcurrent, false);
FLAG_SET_DEFAULT(ShenandoahImplicitGCInvokesConcurrent, false);
// Passive runs with max speed for allocation, because GC is always STW
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahPacing);
// No need for evacuation reserve with Full GC, only for Degenerated GC.
if (!ShenandoahDegeneratedGC) {
SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahEvacReserve, 0);

View File

@@ -192,7 +192,7 @@ bool ShenandoahCollectorPolicy::should_handle_requested_gc(GCCause::Cause cause)
void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const {
out->print_cr("Under allocation pressure, concurrent cycles may cancel, and either continue cycle");
out->print_cr("under stop-the-world pause or result in stop-the-world Full GC. Increase heap size,");
out->print_cr("tune GC heuristics, set more aggressive pacing delay, or lower allocation rate");
out->print_cr("tune GC heuristics, or lower allocation rate");
out->print_cr("to avoid Degenerated and Full GC cycles. Abbreviated cycles are those which found");
out->print_cr("enough regions with no live objects to skip evacuation.");
out->cr();

View File

@@ -205,7 +205,7 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
entry_concurrent_update_refs_prepare(heap);
// Perform update-refs phase.
if (ShenandoahVerify || ShenandoahPacing) {
if (ShenandoahVerify) {
vmop_entry_init_update_refs();
}
@@ -629,9 +627,7 @@ void ShenandoahConcurrentGC::entry_reset_after_collect() {
void ShenandoahConcurrentGC::op_reset() {
ShenandoahHeap* const heap = ShenandoahHeap::heap();
if (ShenandoahPacing) {
heap->pacer()->setup_for_reset();
}
// If it is old GC bootstrap cycle, always clear bitmap for global gen
// to ensure bitmap for old gen is clear for old GC cycle after this.
if (_do_old_gc_bootstrap) {
@@ -743,9 +741,6 @@ void ShenandoahConcurrentGC::op_init_mark() {
ShenandoahCodeRoots::arm_nmethods_for_mark();
ShenandoahStackWatermark::change_epoch_id();
if (ShenandoahPacing) {
heap->pacer()->setup_for_mark();
}
{
ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_propagate_gc_state);
@@ -806,9 +801,6 @@ void ShenandoahConcurrentGC::op_final_mark() {
ShenandoahCodeRoots::arm_nmethods_for_evac();
ShenandoahStackWatermark::change_epoch_id();
if (ShenandoahPacing) {
heap->pacer()->setup_for_evac();
}
} else {
if (ShenandoahVerify) {
ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
@@ -1136,9 +1128,6 @@ void ShenandoahConcurrentGC::op_init_update_refs() {
ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_update_refs_verify);
heap->verifier()->verify_before_update_refs();
}
if (ShenandoahPacing) {
heap->pacer()->setup_for_update_refs();
}
}
void ShenandoahConcurrentGC::op_update_refs() {

View File

@@ -34,7 +34,6 @@
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceStats.hpp"
@@ -69,9 +68,6 @@ void ShenandoahControlThread::run_service() {
const bool is_gc_requested = _gc_requested.is_set();
const GCCause::Cause requested_gc_cause = _requested_gc_cause;
// This control loop iteration has seen this much allocation.
const size_t allocs_seen = reset_allocs_seen();
// Choose which GC mode to run in. The block below should select a single mode.
GCMode mode = none;
GCCause::Cause cause = GCCause::_last_gc_cause;
@@ -204,9 +200,6 @@ void ShenandoahControlThread::run_service() {
// Commit worker statistics to cycle data
heap->phase_timings()->flush_par_workers_to_cycle();
if (ShenandoahPacing) {
heap->pacer()->flush_stats_to_cycle();
}
// Print GC stats for current cycle
{
@@ -215,9 +208,6 @@ void ShenandoahControlThread::run_service() {
ResourceMark rm;
LogStream ls(lt);
heap->phase_timings()->print_cycle_on(&ls);
if (ShenandoahPacing) {
heap->pacer()->print_cycle_on(&ls);
}
}
}
@@ -226,16 +216,6 @@ void ShenandoahControlThread::run_service() {
// Print Metaspace change following GC (if logging is enabled).
MetaspaceUtils::print_metaspace_change(meta_sizes);
// GC is over, we are at idle now
if (ShenandoahPacing) {
heap->pacer()->setup_for_idle();
}
} else {
// Report to pacer that we have seen this many words allocated
if (ShenandoahPacing && (allocs_seen > 0)) {
heap->pacer()->report_alloc(allocs_seen);
}
}
// Check if we have seen a new target for soft max heap size or if a gc was requested.

View File

@@ -29,14 +29,6 @@
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
void ShenandoahController::pacing_notify_alloc(size_t words) {
assert(ShenandoahPacing, "should only call when pacing is enabled");
Atomic::add(&_allocs_seen, words, memory_order_relaxed);
}
size_t ShenandoahController::reset_allocs_seen() {
return Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);
}
void ShenandoahController::update_gc_id() {
Atomic::inc(&_gc_id);

View File

@@ -37,11 +37,9 @@
class ShenandoahController: public ConcurrentGCThread {
private:
shenandoah_padding(0);
volatile size_t _allocs_seen;
shenandoah_padding(1);
// A monotonically increasing GC count.
volatile size_t _gc_id;
shenandoah_padding(2);
shenandoah_padding(1);
protected:
// While we could have a single lock for these, it may risk unblocking
@@ -55,7 +53,6 @@ protected:
public:
ShenandoahController():
_allocs_seen(0),
_gc_id(0),
_alloc_failure_waiters_lock(Mutex::safepoint-2, "ShenandoahAllocFailureGC_lock", true),
_gc_waiters_lock(Mutex::safepoint-2, "ShenandoahRequestedGC_lock", true)
@@ -76,14 +73,6 @@ public:
// Notify threads waiting for GC to complete.
void notify_alloc_failure_waiters();
// This is called for every allocation. The control thread accumulates
// this value when idle. During the gc cycle, the control resets it
// and reports it to the pacer.
void pacing_notify_alloc(size_t words);
// Zeros out the number of allocations seen since the last GC cycle.
size_t reset_allocs_seen();
// Return the value of a monotonic increasing GC count, maintained by the control thread.
size_t get_gc_id();
};

View File

@@ -1263,10 +1263,6 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
r->set_top(r->bottom() + used_words);
}
generation->increase_affiliated_region_count(num);
if (remainder != 0) {
// Record this remainder as allocation waste
_heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
}
// retire_range_from_partition() will adjust bounds on Mutator free set if appropriate
_partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end);

View File

@@ -37,7 +37,6 @@
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGC.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
@@ -61,13 +60,9 @@ ShenandoahGenerationalControlThread::ShenandoahGenerationalControlThread() :
void ShenandoahGenerationalControlThread::run_service() {
const int64_t wait_ms = ShenandoahPacing ? ShenandoahControlIntervalMin : 0;
ShenandoahGCRequest request;
while (!should_terminate()) {
// This control loop iteration has seen this much allocation.
const size_t allocs_seen = reset_allocs_seen();
// Figure out if we have pending requests.
check_for_request(request);
@@ -77,11 +72,6 @@ void ShenandoahGenerationalControlThread::run_service() {
if (request.cause != GCCause::_no_gc) {
run_gc_cycle(request);
} else {
// Report to pacer that we have seen this many words allocated
if (ShenandoahPacing && (allocs_seen > 0)) {
_heap->pacer()->report_alloc(allocs_seen);
}
}
// If the cycle was cancelled, continue the next iteration to deal with it. Otherwise,
@@ -90,7 +80,7 @@ void ShenandoahGenerationalControlThread::run_service() {
MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
if (_requested_gc_cause == GCCause::_no_gc) {
set_gc_mode(ml, none);
ml.wait(wait_ms);
ml.wait();
}
}
}
@@ -309,11 +299,6 @@ void ShenandoahGenerationalControlThread::run_gc_cycle(const ShenandoahGCRequest
// Print Metaspace change following GC (if logging is enabled).
MetaspaceUtils::print_metaspace_change(meta_sizes);
// GC is over, we are at idle now
if (ShenandoahPacing) {
_heap->pacer()->setup_for_idle();
}
// Check if we have seen a new target for soft max heap size or if a gc was requested.
// Either of these conditions will attempt to uncommit regions.
if (ShenandoahUncommit) {
@@ -331,9 +316,6 @@ void ShenandoahGenerationalControlThread::run_gc_cycle(const ShenandoahGCRequest
void ShenandoahGenerationalControlThread::process_phase_timings() const {
// Commit worker statistics to cycle data
_heap->phase_timings()->flush_par_workers_to_cycle();
if (ShenandoahPacing) {
_heap->pacer()->flush_stats_to_cycle();
}
ShenandoahEvacuationTracker* evac_tracker = _heap->evac_tracker();
ShenandoahCycleStats evac_stats = evac_tracker->flush_cycle_to_global();
@@ -347,9 +329,6 @@ void ShenandoahGenerationalControlThread::process_phase_timings() const {
_heap->phase_timings()->print_cycle_on(&ls);
evac_tracker->print_evacuations_on(&ls, &evac_stats.workers,
&evac_stats.mutators);
if (ShenandoahPacing) {
_heap->pacer()->print_cycle_on(&ls);
}
}
}

View File

@@ -29,7 +29,6 @@
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
@@ -127,9 +126,6 @@ void ShenandoahGenerationalEvacuationTask::evacuate_and_promote_regions() {
if (r->is_cset()) {
assert(r->has_live(), "Region %zu should have been reclaimed early", r->index());
_heap->marked_object_iterate(r, &cl);
if (ShenandoahPacing) {
_heap->pacer()->report_evac(r->used() >> LogHeapWordSize);
}
} else {
maybe_promote_region(r);
}

View File

@@ -827,19 +827,15 @@ private:
assert(update_watermark >= r->bottom(), "sanity");
log_debug(gc)("Update refs worker " UINT32_FORMAT ", looking at region %zu", worker_id, r->index());
bool region_progress = false;
if (r->is_active() && !r->is_cset()) {
if (r->is_young()) {
_heap->marked_object_oop_iterate(r, &cl, update_watermark);
region_progress = true;
} else if (r->is_old()) {
if (gc_generation->is_global()) {
_heap->marked_object_oop_iterate(r, &cl, update_watermark);
region_progress = true;
}
// Otherwise, this is an old region in a young or mixed cycle. Process it during a second phase, below.
// Don't bother to report pacing progress in this case.
} else {
// Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
// to a non-free active region while this loop is executing. Whenever this happens, the changing of a region's
@@ -857,10 +853,6 @@ private:
}
}
if (region_progress && ShenandoahPacing) {
_heap->pacer()->report_update_refs(pointer_delta(update_watermark, r->bottom()));
}
if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
return;
}
@@ -916,10 +908,6 @@ private:
assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
}
if (ShenandoahPacing) {
_heap->pacer()->report_update_refs(pointer_delta(end_of_range, start_of_range));
}
}
}
}

View File

@@ -63,7 +63,6 @@
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
@@ -470,11 +469,6 @@ jint ShenandoahHeap::initialize() {
_phase_timings = new ShenandoahPhaseTimings(max_workers());
ShenandoahCodeRoots::initialize();
if (ShenandoahPacing) {
_pacer = new ShenandoahPacer(this);
_pacer->setup_for_idle();
}
initialize_controller();
if (ShenandoahUncommit) {
@@ -558,7 +552,6 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
_shenandoah_policy(policy),
_gc_mode(nullptr),
_free_set(nullptr),
_pacer(nullptr),
_verifier(nullptr),
_phase_timings(nullptr),
_monitoring_support(nullptr),
@@ -716,8 +709,7 @@ void ShenandoahHeap::decrease_committed(size_t bytes) {
// require padding in front of the PLAB (a filler object). Because this padding
// is included in the region's used memory we include the padding in the usage
// accounting as waste.
// * Mutator allocations are used to compute an allocation rate. They are also
// sent to the Pacer for those purposes.
// * Mutator allocations are used to compute an allocation rate.
// * There are three sources of waste:
// 1. The padding used to align a PLAB on card size
// 2. Region's free is less than minimum TLAB size and is retired
@@ -738,9 +730,6 @@ void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
// only actual size counts toward usage for mutator allocations
increase_used(generation, actual_bytes);
// notify pacer of both actual size and waste
notify_mutator_alloc_words(req.actual_size(), req.waste());
if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) {
increase_humongous_waste(generation,wasted_bytes);
}
@@ -775,15 +764,6 @@ void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t byte
}
}
void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
if (ShenandoahPacing) {
control_thread()->pacing_notify_alloc(words);
if (waste > 0) {
pacer()->claim_for_alloc<true>(waste);
}
}
}
size_t ShenandoahHeap::capacity() const {
return committed();
}
@@ -965,15 +945,10 @@ HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
}
HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
intptr_t pacer_epoch = 0;
bool in_new_region = false;
HeapWord* result = nullptr;
if (req.is_mutator_alloc()) {
if (ShenandoahPacing) {
pacer()->pace_for_alloc(req.size());
pacer_epoch = pacer()->epoch();
}
if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
result = allocate_memory_under_lock(req, in_new_region);
@@ -1048,15 +1023,6 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
assert (req.is_lab_alloc() || (requested == actual),
"Only LAB allocations are elastic: %s, requested = %zu, actual = %zu",
ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
if (req.is_mutator_alloc()) {
// If we requested more than we were granted, give the rest back to pacer.
// This only matters if we are in the same pacing epoch: do not try to unpace
// over the budget for the other phase.
if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
}
}
}
return result;
@@ -1206,10 +1172,6 @@ private:
assert(r->has_live(), "Region %zu should have been reclaimed early", r->index());
_sh->marked_object_iterate(r, &cl);
if (ShenandoahPacing) {
_sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
}
if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
break;
}
@@ -2484,9 +2446,6 @@ private:
assert (update_watermark >= r->bottom(), "sanity");
if (r->is_active() && !r->is_cset()) {
_heap->marked_object_oop_iterate(r, &cl, update_watermark);
if (ShenandoahPacing) {
_heap->pacer()->report_update_refs(pointer_delta(update_watermark, r->bottom()));
}
}
if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
return;

View File

@@ -32,7 +32,6 @@
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "runtime/atomic.hpp"
HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocRequest &req, size_t alignment_in_bytes) {
@@ -135,9 +134,6 @@ inline void ShenandoahHeapRegion::increase_live_data_alloc_words(size_t s) {
inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) {
internal_increase_live_data(s);
if (ShenandoahPacing) {
ShenandoahHeap::heap()->pacer()->report_mark(s);
}
}
inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) {

View File

@@ -1,341 +0,0 @@
/*
* Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/threadSMR.hpp"
/*
* In normal concurrent cycle, we have to pace the application to let GC finish.
*
* Here, we do not know how large would be the collection set, and what are the
* relative performances of the each stage in the concurrent cycle, and so we have to
* make some assumptions.
*
* For concurrent mark, there is no clear notion of progress. The moderately accurate
* and easy to get metric is the amount of live objects the mark had encountered. But,
* that does directly correlate with the used heap, because the heap might be fully
* dead or fully alive. We cannot assume either of the extremes: we would either allow
* application to run out of memory if we assume heap is fully dead but it is not, and,
* conversely, we would pacify application excessively if we assume heap is fully alive
* but it is not. So we need to guesstimate the particular expected value for heap liveness.
* The best way to do this is apparently recording the past history.
*
* For concurrent evac and update-refs, we are walking the heap per-region, and so the
* notion of progress is clear: we get reported the "used" size from the processed regions
* and use the global heap-used as the baseline.
*
* The allocatable space when GC is running is "free" at the start of phase, but the
* accounted budget is based on "used". So, we need to adjust the tax knowing that.
*/
void ShenandoahPacer::setup_for_mark() {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
size_t live = update_and_get_progress_history();
size_t free = _heap->free_set()->available();
assert(free != ShenandoahFreeSet::FreeSetUnderConstruction, "Avoid this race");
size_t non_taxable = free * ShenandoahPacingCycleSlack / 100;
size_t taxable = free - non_taxable;
taxable = MAX2<size_t>(1, taxable);
double tax = 1.0 * live / taxable; // base tax for available free space
tax *= 1; // mark can succeed with immediate garbage, claim all available space
tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap
restart_with(non_taxable, tax);
log_info(gc, ergo)("Pacer for Mark. Expected Live: %zu%s, Free: %zu%s, "
"Non-Taxable: %zu%s, Alloc Tax Rate: %.1fx",
byte_size_in_proper_unit(live), proper_unit_for_byte_size(live),
byte_size_in_proper_unit(free), proper_unit_for_byte_size(free),
byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable),
tax);
}
void ShenandoahPacer::setup_for_evac() {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
size_t used = _heap->collection_set()->used();
size_t free = _heap->free_set()->available();
assert(free != ShenandoahFreeSet::FreeSetUnderConstruction, "Avoid this race");
size_t non_taxable = free * ShenandoahPacingCycleSlack / 100;
size_t taxable = free - non_taxable;
taxable = MAX2<size_t>(1, taxable);
double tax = 1.0 * used / taxable; // base tax for available free space
tax *= 2; // evac is followed by update-refs, claim 1/2 of remaining free
tax = MAX2<double>(1, tax); // never allocate more than GC processes during the phase
tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap
restart_with(non_taxable, tax);
log_info(gc, ergo)("Pacer for Evacuation. Used CSet: %zu%s, Free: %zu%s, "
"Non-Taxable: %zu%s, Alloc Tax Rate: %.1fx",
byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
byte_size_in_proper_unit(free), proper_unit_for_byte_size(free),
byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable),
tax);
}
void ShenandoahPacer::setup_for_update_refs() {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
size_t used = _heap->used();
size_t free = _heap->free_set()->available();
assert(free != ShenandoahFreeSet::FreeSetUnderConstruction, "Avoid this race");
size_t non_taxable = free * ShenandoahPacingCycleSlack / 100;
size_t taxable = free - non_taxable;
taxable = MAX2<size_t>(1, taxable);
double tax = 1.0 * used / taxable; // base tax for available free space
tax *= 1; // update-refs is the last phase, claim the remaining free
tax = MAX2<double>(1, tax); // never allocate more than GC processes during the phase
tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap
restart_with(non_taxable, tax);
log_info(gc, ergo)("Pacer for Update Refs. Used: %zu%s, Free: %zu%s, "
"Non-Taxable: %zu%s, Alloc Tax Rate: %.1fx",
byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
byte_size_in_proper_unit(free), proper_unit_for_byte_size(free),
byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable),
tax);
}
/*
* In idle phase, we have to pace the application to let control thread react with GC start.
*
* Here, we have rendezvous with concurrent thread that adds up the budget as it acknowledges
* it had seen recent allocations. It will naturally pace the allocations if control thread is
* not catching up. To bootstrap this feedback cycle, we need to start with some initial budget
* for applications to allocate at.
*/
void ShenandoahPacer::setup_for_idle() {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
size_t initial = _heap->max_capacity() / 100 * ShenandoahPacingIdleSlack;
double tax = 1;
restart_with(initial, tax);
log_info(gc, ergo)("Pacer for Idle. Initial: %zu%s, Alloc Tax Rate: %.1fx",
byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial),
tax);
}
/*
* There is no useful notion of progress for these operations. To avoid stalling
* the allocators unnecessarily, allow them to run unimpeded.
*/
void ShenandoahPacer::setup_for_reset() {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
size_t initial = _heap->max_capacity();
restart_with(initial, 1.0);
log_info(gc, ergo)("Pacer for Reset. Non-Taxable: %zu%s",
byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial));
}
size_t ShenandoahPacer::update_and_get_progress_history() {
if (_progress == -1) {
// First initialization, report some prior
Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO);
return (size_t) (_heap->max_capacity() * 0.1);
} else {
// Record history, and reply historical data
_progress_history->add(_progress);
Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO);
return (size_t) (_progress_history->avg() * HeapWordSize);
}
}
void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) {
size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize;
STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
Atomic::xchg(&_budget, (intptr_t)initial, memory_order_relaxed);
Atomic::store(&_tax_rate, tax_rate);
Atomic::inc(&_epoch);
// Shake up stalled waiters after budget update.
_need_notify_waiters.try_set();
}
template<bool FORCE>
bool ShenandoahPacer::claim_for_alloc(size_t words) {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate));
intptr_t cur = 0;
intptr_t new_val = 0;
do {
cur = Atomic::load(&_budget);
if (cur < tax && !FORCE) {
// Progress depleted, alas.
return false;
}
new_val = cur - tax;
} while (Atomic::cmpxchg(&_budget, cur, new_val, memory_order_relaxed) != cur);
return true;
}
template bool ShenandoahPacer::claim_for_alloc<true>(size_t words);
template bool ShenandoahPacer::claim_for_alloc<false>(size_t words);
void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
if (Atomic::load(&_epoch) != epoch) {
// Stale ticket, no need to unpace.
return;
}
size_t tax = MAX2<size_t>(1, words * Atomic::load(&_tax_rate));
add_budget(tax);
}
intptr_t ShenandoahPacer::epoch() {
return Atomic::load(&_epoch);
}
void ShenandoahPacer::pace_for_alloc(size_t words) {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
// Fast path: try to allocate right away
bool claimed = claim_for_alloc<false>(words);
if (claimed) {
return;
}
// Threads that are attaching should not block at all: they are not
// fully initialized yet. Blocking them would be awkward.
// This is probably the path that allocates the thread oop itself.
//
// Thread which is not an active Java thread should also not block.
// This can happen during VM init when main thread is still not an
// active Java thread.
JavaThread* current = JavaThread::current();
if (current->is_attaching_via_jni() ||
!current->is_active_Java_thread()) {
claim_for_alloc<true>(words);
return;
}
jlong const start_time = os::javaTimeNanos();
jlong const deadline = start_time + (ShenandoahPacingMaxDelay * NANOSECS_PER_MILLISEC);
while (!claimed && os::javaTimeNanos() < deadline) {
// We could instead assist GC, but this would suffice for now.
wait(1);
claimed = claim_for_alloc<false>(words);
}
if (!claimed) {
// Spent local time budget to wait for enough GC progress.
// Force allocating anyway, which may mean we outpace GC,
// and start Degenerated GC cycle.
claimed = claim_for_alloc<true>(words);
assert(claimed, "Should always succeed");
}
ShenandoahThreadLocalData::add_paced_time(current, (double)(os::javaTimeNanos() - start_time) / NANOSECS_PER_SEC);
}
void ShenandoahPacer::wait(size_t time_ms) {
// Perform timed wait. It works like like sleep(), except without modifying
// the thread interruptible status. MonitorLocker also checks for safepoints.
assert(time_ms > 0, "Should not call this with zero argument, as it would stall until notify");
assert(time_ms <= LONG_MAX, "Sanity");
MonitorLocker locker(_wait_monitor);
_wait_monitor->wait(time_ms);
}
void ShenandoahPacer::notify_waiters() {
if (_need_notify_waiters.try_unset()) {
MonitorLocker locker(_wait_monitor);
_wait_monitor->notify_all();
}
}
void ShenandoahPacer::flush_stats_to_cycle() {
double sum = 0;
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
sum += ShenandoahThreadLocalData::paced_time(t);
}
ShenandoahHeap::heap()->phase_timings()->record_phase_time(ShenandoahPhaseTimings::pacing, sum);
}
void ShenandoahPacer::print_cycle_on(outputStream* out) {
MutexLocker lock(Threads_lock);
double now = os::elapsedTime();
double total = now - _last_time;
_last_time = now;
out->cr();
out->print_cr("Allocation pacing accrued:");
size_t threads_total = 0;
size_t threads_nz = 0;
double sum = 0;
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
double d = ShenandoahThreadLocalData::paced_time(t);
if (d > 0) {
threads_nz++;
sum += d;
out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): %s",
d * 1000, total * 1000, d/total*100, t->name());
}
threads_total++;
ShenandoahThreadLocalData::reset_paced_time(t);
}
out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): <total>",
sum * 1000, total * 1000, sum/total*100);
if (threads_total > 0) {
out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): <average total>",
sum / threads_total * 1000, total * 1000, sum / threads_total / total * 100);
}
if (threads_nz > 0) {
out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): <average non-zero>",
sum / threads_nz * 1000, total * 1000, sum / threads_nz / total * 100);
}
out->cr();
}
void ShenandoahPeriodicPacerNotifyTask::task() {
assert(ShenandoahPacing, "Should not be here otherwise");
_pacer->notify_waiters();
}

View File

@@ -1,135 +0,0 @@
/*
* Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHPACER_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHPACER_HPP
#include "gc/shenandoah/shenandoahNumberSeq.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahSharedVariables.hpp"
#include "memory/allocation.hpp"
#include "runtime/task.hpp"
class ShenandoahHeap;
class ShenandoahPacer;
// Periodic task to notify blocked paced waiters.
class ShenandoahPeriodicPacerNotifyTask : public PeriodicTask {
private:
ShenandoahPacer* const _pacer;
public:
explicit ShenandoahPeriodicPacerNotifyTask(ShenandoahPacer* pacer) :
PeriodicTask(PeriodicTask::min_interval),
_pacer(pacer) { }
void task() override;
};
#define PACING_PROGRESS_UNINIT (-1)
#define PACING_PROGRESS_ZERO ( 0)
/**
 * ShenandoahPacer provides an allocation pacing mechanism.
 *
 * Currently it implements a simple tax-and-spend pacing policy: GC threads provide
 * credit, allocating threads spend the credit, or stall when credit is not available.
 */
class ShenandoahPacer : public CHeapObj<mtGC> {
private:
  ShenandoahHeap* _heap;
  // Timestamp (os::elapsedTime) captured at construction and refreshed when
  // stats are flushed; see flush_stats_to_cycle().
  double _last_time;
  // Rolling history of recent progress samples (5 entries), used by
  // update_and_get_progress_history().
  TruncatedSeq* _progress_history;

  // Paced allocating threads block on this monitor; notify_waiters() and the
  // periodic notify task wake them up.
  Monitor* _wait_monitor;
  // Lock-free request flag: set on the hot path, consumed by notify_waiters().
  ShenandoahSharedFlag _need_notify_waiters;
  ShenandoahPeriodicPacerNotifyTask _notify_waiters_task;

  // Set once per phase
  volatile intptr_t _epoch;    // phase token; handed back through unpace_for_alloc()
  volatile double _tax_rate;   // per-phase pacing tax rate

  // Heavily updated, protect from accidental false sharing
  shenandoah_padding(0);
  volatile intptr_t _budget;   // remaining allocation credit; may go negative under pressure
  shenandoah_padding(1);

  // Heavily updated, protect from accidental false sharing
  shenandoah_padding(2);
  volatile intptr_t _progress; // GC progress in current phase; PACING_PROGRESS_* sentinels apply
  shenandoah_padding(3);

public:
  explicit ShenandoahPacer(ShenandoahHeap* heap) :
          _heap(heap),
          _last_time(os::elapsedTime()),
          _progress_history(new TruncatedSeq(5)),
          _wait_monitor(new Monitor(Mutex::safepoint-1, "ShenandoahWaitMonitor_lock", true)),
          _notify_waiters_task(this),
          _epoch(0),
          _tax_rate(1),
          _budget(0),
          _progress(PACING_PROGRESS_UNINIT) {
    _notify_waiters_task.enroll();
  }

  // Phase transitions: reconfigure budget/tax rate for the upcoming GC phase.
  void setup_for_idle();
  void setup_for_mark();
  void setup_for_evac();
  void setup_for_update_refs();
  void setup_for_reset();

  // GC-side reports of completed work (in words); these replenish the budget.
  inline void report_mark(size_t words);
  inline void report_evac(size_t words);
  inline void report_update_refs(size_t words);
  inline void report_alloc(size_t words);

  // Mutator side: try to claim budget for an allocation of "words".
  // FORCE selects the unconditional variant — semantics in the .cpp. TODO confirm.
  template<bool FORCE>
  bool claim_for_alloc(size_t words);
  // Stall the allocating thread until budget is available (or limits hit).
  void pace_for_alloc(size_t words);
  // Return unused claim; "epoch" guards against crediting a later phase.
  void unpace_for_alloc(intptr_t epoch, size_t words);

  // Wake threads blocked in pace_for_alloc(), if notification was requested.
  void notify_waiters();

  intptr_t epoch();

  void flush_stats_to_cycle();
  void print_cycle_on(outputStream* out);

private:
  inline void report_internal(size_t words);
  inline void report_progress_internal(size_t words);

  inline void add_budget(size_t words);
  void restart_with(size_t non_taxable_bytes, double tax_rate);

  size_t update_and_get_progress_history();

  void wait(size_t time_ms);
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHPACER_HPP

View File

@ -1,73 +0,0 @@
/*
* Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "runtime/atomic.hpp"
// Marking work both replenishes the allocation budget and counts as
// measurable GC progress (unlike evac/update-refs reports).
inline void ShenandoahPacer::report_mark(size_t words) {
  report_internal(words);
  report_progress_internal(words);
}
// Evacuation work replenishes the budget only; it is not counted as progress.
inline void ShenandoahPacer::report_evac(size_t words) {
  report_internal(words);
}
// Update-refs work replenishes the budget only; it is not counted as progress.
inline void ShenandoahPacer::report_update_refs(size_t words) {
  report_internal(words);
}
// Allocation-related credit (e.g. non-taxable allocs) replenishes the budget.
inline void ShenandoahPacer::report_alloc(size_t words) {
  report_internal(words);
}
// Common path for all report_* calls: add "words" of credit to the budget.
// Only reachable when pacing is on, hence the assert.
inline void ShenandoahPacer::report_internal(size_t words) {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");
  add_budget(words);
}
// Account "words" of GC progress for the current phase. A relaxed atomic add
// is sufficient: _progress is a monotonically-growing counter polled elsewhere.
inline void ShenandoahPacer::report_progress_internal(size_t words) {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");
  STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
  const intptr_t delta = static_cast<intptr_t>(words);
  Atomic::add(&_progress, delta, memory_order_relaxed);
}
// Replenish the allocation budget by "words". If this update takes the budget
// from negative to non-negative, all outstanding pacing claims are satisfied,
// so request waiter notification. Deliberately lock-free: this runs on hot
// paths and possibly while other locks are held.
inline void ShenandoahPacer::add_budget(size_t words) {
  STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
  const intptr_t delta  = static_cast<intptr_t>(words);
  const intptr_t after  = Atomic::add(&_budget, delta, memory_order_relaxed);
  const intptr_t before = after - delta;
  // Zero-crossing from below: flag the waiters without taking any locks here;
  // the periodic task / notify_waiters() will do the actual wakeup.
  if (before < 0 && after >= 0) {
    _need_notify_waiters.try_set();
  }
}
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP

View File

@ -281,17 +281,6 @@ void ShenandoahPhaseTimings::print_global_on(outputStream* out) const {
out->print_cr(" all workers. Dividing the <total> over the root stage time estimates parallelism.");
out->cr();
out->print_cr(" Pacing delays are measured from entering the pacing code till exiting it. Therefore,");
out->print_cr(" observed pacing delays may be higher than the threshold when paced thread spent more");
out->print_cr(" time in the pacing code. It usually happens when thread is de-scheduled while paced,");
out->print_cr(" OS takes longer to unblock the thread, or JVM experiences an STW pause.");
out->cr();
out->print_cr(" Higher delay would prevent application outpacing the GC, but it will hide the GC latencies");
out->print_cr(" from the STW pause times. Pacing affects the individual threads, and so it would also be");
out->print_cr(" invisible to the usual profiling tools, but would add up to end-to-end application latency.");
out->print_cr(" Raise max pacing delay with care.");
out->cr();
for (uint i = 0; i < _num_phases; i++) {
if (_global_data[i].maximum() != 0) {
out->print_cr(SHENANDOAH_PHASE_NAME_FORMAT " = " SHENANDOAH_S_TIME_FORMAT " s "

View File

@ -198,8 +198,6 @@ class outputStream;
f(full_gc_heapdump_post, " Post Heap Dump") \
f(full_gc_propagate_gc_state, " Propagate GC State") \
\
f(pacing, "Pacing") \
\
f(heap_iteration_roots, "Heap Iteration") \
SHENANDOAH_PAR_PHASE_DO(heap_iteration_roots_, " HI: ", f) \
// end

View File

@ -408,40 +408,6 @@
"to be more than this.") \
range(0, 100) \
\
product(bool, ShenandoahPacing, true, EXPERIMENTAL, \
"Pace application allocations to give GC chance to start " \
"and complete before allocation failure is reached.") \
\
product(uintx, ShenandoahPacingMaxDelay, 10, EXPERIMENTAL, \
"Max delay for pacing application allocations. Larger values " \
"provide more resilience against out of memory, at expense at " \
"hiding the GC latencies in the allocation path. Time is in " \
"milliseconds. Setting it to arbitrarily large value makes " \
"GC effectively stall the threads indefinitely instead of going " \
"to degenerated or Full GC.") \
\
product(uintx, ShenandoahPacingIdleSlack, 2, EXPERIMENTAL, \
"How much of heap counted as non-taxable allocations during idle "\
"phases. Larger value makes the pacing milder when collector is " \
"idle, requiring less rendezvous with control thread. Lower " \
"value makes the pacing control less responsive to out-of-cycle " \
"allocs. In percent of total heap size.") \
range(0, 100) \
\
product(uintx, ShenandoahPacingCycleSlack, 10, EXPERIMENTAL, \
          "How much of free space to take as non-taxable allocations "      \
          "during the GC cycle. Larger value makes the pacing milder at the " \
"beginning of the GC cycle. Lower value makes the pacing less " \
"uniform during the cycle. In percent of free space.") \
range(0, 100) \
\
product(double, ShenandoahPacingSurcharge, 1.1, EXPERIMENTAL, \
"Additional pacing tax surcharge to help unclutter the heap. " \
"Larger values makes the pacing more aggressive. Lower values " \
"risk GC cycles finish with less memory than were available at " \
"the beginning of it.") \
range(1.0, 100.0) \
\
product(uintx, ShenandoahCriticalFreeThreshold, 1, EXPERIMENTAL, \
"How much of the heap needs to be free after recovery cycles, " \
"either Degenerated or Full GC to be claimed successful. If this "\

View File

@ -48,7 +48,7 @@ import java.util.HashMap;
* -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational
* -XX:NewRatio=1 -XX:+UnlockExperimentalVMOptions
* -XX:ShenandoahGuaranteedGCInterval=3000
* -XX:-UseDynamicNumberOfGCThreads -XX:-ShenandoahPacing
* -XX:-UseDynamicNumberOfGCThreads
* gc.shenandoah.generational.TestConcurrentEvac
*/

View File

@ -1,44 +0,0 @@
/*
* Copyright (c) 2018, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* @test
* @requires vm.gc.Shenandoah
*
* @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:-ShenandoahPacing -Xmx128m TestPacing
* @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ShenandoahPacing -Xmx128m TestPacing
*/
public class TestPacing {
    // Total allocation target in megabytes, overridable via -Dtarget=<MB>.
    static final long TARGET_MB = Long.getLong("target", 1000); // 1 Gb allocation

    // Volatile sink defeats escape analysis so allocations really happen.
    static volatile Object sink;

    public static void main(String[] args) throws Exception {
        // Assume ~16 bytes per plain Object; derive how many to allocate.
        final long iterations = TARGET_MB * 1024 * 1024 / 16;
        long done = 0;
        while (done < iterations) {
            sink = new Object();
            done++;
        }
    }
}