Changes requested by reviewers

1. Change the coordination between ShenandoahAdaptiveHeuristics and
   ShenandoahController/ShenandoahRegulator.  Before, ShenandoahAdaptiveHeuristics
   would poll ShenandoahController or ShenandoahRegulator to obtain the most
   recent wake time and the planned sleep interval.  Now, ShenandoahController
   and ShenandoahRegulator notify ShenandoahAdaptiveHeuristics each time these
   values change (a short sketch of the new flow follows this list).

2. Use available() instead of capacity() - used() when recalculating the
   trigger threshold from within
   ShenandoahAdaptiveHeuristics::resume_idle_span().
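
The sketch below illustrates the push-style coordination from item 1 with simplified stand-in classes. The method update_should_start_query_times() and the two cached fields mirror this patch; everything else (HeuristicsSketch, now_seconds(), controller_iteration(), main()) is illustrative scaffolding and not part of the HotSpot sources.

   #include <chrono>

   // Simplified stand-in for ShenandoahHeuristics: the control/regulator thread
   // now pushes the wake time and planned sleep interval into the heuristics,
   // instead of the heuristics polling the thread for them.
   class HeuristicsSketch {
   public:
     // Called by the controller/regulator thread each time it wakes from its periodic sleep.
     void update_should_start_query_times(double now, double planned_sleep_interval) {
       _most_recent_trigger_evaluation_time = now;
       _most_recent_planned_sleep_interval  = planned_sleep_interval;
     }
     double get_most_recent_wake_time() const   { return _most_recent_trigger_evaluation_time; }
     double get_planned_sleep_interval() const  { return _most_recent_planned_sleep_interval; }
   private:
     double _most_recent_trigger_evaluation_time = 0.0;
     double _most_recent_planned_sleep_interval  = 0.0;
   };

   // Stand-in for os::elapsedTime(): seconds on a monotonic clock.
   static double now_seconds() {
     using namespace std::chrono;
     return duration<double>(steady_clock::now().time_since_epoch()).count();
   }

   // One iteration of a controller-style loop: wake from a sleep of sleep_ms
   // milliseconds, push the timing data, then evaluate the trigger.
   static void controller_iteration(HeuristicsSketch& heuristics, int sleep_ms) {
     double wake_time = now_seconds();
     // Conservative estimate of the longest sleep before the next sample, in seconds.
     double planned_sleep_interval = (sleep_ms * 2) / 1000.0;
     heuristics.update_should_start_query_times(wake_time, planned_sleep_interval);
     // ... heuristics.should_start_gc() would be consulted here ...
   }

   int main() {
     HeuristicsSketch heuristics;
     controller_iteration(heuristics, /*sleep_ms=*/10);
     return heuristics.get_planned_sleep_interval() > 0.0 ? 0 : 1;
   }

The key design change is that the controller/regulator pushes the values immediately after waking, so the heuristics no longer reaches back into the thread object, nor does it need to know whether it is running under the generational regulator or the single control thread.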
Kelvin Nilsen 2026-01-15 17:14:29 +00:00
parent ea38ec153a
commit 70b7ebf82a
9 changed files with 32 additions and 56 deletions

View File

@@ -136,14 +136,6 @@ void ShenandoahAdaptiveHeuristics::post_initialize() {
   recalculate_trigger_threshold(global_available);
 }
-double ShenandoahAdaptiveHeuristics::get_most_recent_wake_time() const {
-  return (_is_generational)? _regulator_thread->get_most_recent_wake_time(): _control_thread->get_most_recent_wake_time();
-}
-double ShenandoahAdaptiveHeuristics::get_planned_sleep_interval() const {
-  return (_is_generational)? _regulator_thread->get_planned_sleep_interval(): _control_thread->get_planned_sleep_interval();
-}
 void ShenandoahAdaptiveHeuristics::recalculate_trigger_threshold(size_t mutator_available) {
   // The trigger threshold represents mutator available - "head room".
   // We plan for GC to finish before the amount of allocated memory exceeds trigger threshold. This is the same as saying we
@@ -183,7 +175,7 @@ void ShenandoahAdaptiveHeuristics::start_idle_span() {
 }
 void ShenandoahAdaptiveHeuristics::resume_idle_span() {
-  size_t mutator_available = _free_set->capacity() - _free_set->used();
+  size_t mutator_available = _free_set->available_holding_lock();
   recalculate_trigger_threshold(mutator_available);
 }
@@ -704,13 +696,7 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() {
     accept_trigger_with_type(SPIKE);
     return true;
   }
-  if (ShenandoahHeuristics::should_start_gc()) {
-    // ShenandoahHeuristics::should_start_gc() has accepted trigger, or declined it.
-    return true;
-  } else {
-    return false;
-  }
+  return ShenandoahHeuristics::should_start_gc();
 }
 void ShenandoahAdaptiveHeuristics::adjust_last_trigger_parameters(double amount) {

View File

@@ -197,9 +197,6 @@ public:
     return (allocated_words < _trigger_threshold)? _trigger_threshold - allocated_words: 0;
   }
-  double get_most_recent_wake_time() const;
-  double get_planned_sleep_interval() const;
 protected:
   ShenandoahAllocationRate _allocation_rate;

View File

@@ -46,6 +46,8 @@ int ShenandoahHeuristics::compare_by_garbage(RegionData a, RegionData b) {
 }
 ShenandoahHeuristics::ShenandoahHeuristics(ShenandoahSpaceInfo* space_info) :
+  _most_recent_trigger_evaluation_time(os::elapsedTime()),
+  _most_recent_planned_sleep_interval(0.0),
   _start_gc_is_pending(false),
   _declined_trigger_count(0),
   _most_recent_declined_trigger_count(0),

View File

@@ -78,6 +78,10 @@ class ShenandoahHeuristics : public CHeapObj<mtGC> {
   };
 #endif
+private:
+  double _most_recent_trigger_evaluation_time;
+  double _most_recent_planned_sleep_interval;
 protected:
   static const uint Moving_Average_Samples = 10; // Number of samples to store in moving averages
@@ -102,6 +106,7 @@ protected:
 #ifdef ASSERT
   UnionTag _union_tag;
 #endif
 public:
   inline void clear() {
@@ -192,6 +197,14 @@ protected:
     _declined_trigger_count++;
   }
+  inline double get_most_recent_wake_time() const {
+    return _most_recent_trigger_evaluation_time;
+  }
+  inline double get_planned_sleep_interval() const {
+    return _most_recent_planned_sleep_interval;
+  }
 public:
   ShenandoahHeuristics(ShenandoahSpaceInfo* space_info);
   virtual ~ShenandoahHeuristics();
@@ -214,6 +227,11 @@ public:
   virtual void record_cycle_end();
+  void update_should_start_query_times(double now, double planned_sleep_interval) {
+    _most_recent_trigger_evaluation_time = now;
+    _most_recent_planned_sleep_interval = planned_sleep_interval;
+  }
   virtual bool should_start_gc();
   inline void cancel_trigger_request() {

View File

@@ -45,7 +45,6 @@ ShenandoahControlThread::ShenandoahControlThread() :
   _requested_gc_cause(GCCause::_no_gc),
   _degen_point(ShenandoahGC::_degenerated_outside_cycle),
   _control_lock(Mutex::nosafepoint - 2, "ShenandoahGCRequest_lock", true) {
-  _planned_sleep_interval = ShenandoahControlIntervalMin / 1000.0;
   set_name("Shenandoah Control Thread");
   create_and_start();
 }
@@ -60,6 +59,7 @@ void ShenandoahControlThread::run_service() {
   ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();
   ShenandoahHeuristics* const heuristics = heap->heuristics();
+  double most_recent_wake_time = os::elapsedTime();
   while (!should_terminate()) {
     const GCCause::Cause cancelled_cause = heap->cancelled_cause();
     if (cancelled_cause == GCCause::_shenandoah_stop_vm) {
@@ -223,7 +223,7 @@ void ShenandoahControlThread::run_service() {
     // Wait before performing the next action. If allocation happened during this wait,
     // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
     // back off exponentially.
-    const double before_sleep = _most_recent_wake_time;
+    const double before_sleep = most_recent_wake_time;
     if (heap->has_changed()) {
       sleep = ShenandoahControlIntervalMin;
     } else if ((before_sleep - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){
@@ -233,10 +233,11 @@ void ShenandoahControlThread::run_service() {
     MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
     ml.wait(sleep);
     // Record a conservative estimate of the longest anticipated sleep duration until we sample again.
-    _planned_sleep_interval = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2)) / 1000.0;
-    _most_recent_wake_time = os::elapsedTime();
+    double planned_sleep_interval = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2)) / 1000.0;
+    most_recent_wake_time = os::elapsedTime();
+    heuristics->update_should_start_query_times(most_recent_wake_time, planned_sleep_interval);
     if (LogTarget(Debug, gc, thread)::is_enabled()) {
-      double elapsed = _most_recent_wake_time - before_sleep;
+      double elapsed = most_recent_wake_time - before_sleep;
       double hiccup = elapsed - double(sleep);
       if (hiccup > 0.001) {
         log_debug(gc, thread)("Control Thread hiccup time: %.3fs", hiccup);

View File

@@ -48,12 +48,6 @@ protected:
   Monitor _alloc_failure_waiters_lock;
   Monitor _gc_waiters_lock;
-  // Remember when Controller most recently woke from its periodic sleep
-  double _most_recent_wake_time;
-  // How long do we plan to sleep before we again sample control status, in seconds.
-  double _planned_sleep_interval;
   // Increments the internal GC count.
   void update_gc_id();
@@ -61,9 +55,7 @@ public:
   ShenandoahController():
     _gc_id(0),
     _alloc_failure_waiters_lock(Mutex::safepoint-2, "ShenandoahAllocFailureGC_lock", true),
-    _gc_waiters_lock(Mutex::safepoint-2, "ShenandoahRequestedGC_lock", true),
-    _most_recent_wake_time(os::elapsedTime()),
-    _planned_sleep_interval(0)
+    _gc_waiters_lock(Mutex::safepoint-2, "ShenandoahRequestedGC_lock", true)
   { }
   // Request a collection cycle. This handles "explicit" gc requests
@@ -83,13 +75,5 @@ public:
   // Return the value of a monotonic increasing GC count, maintained by the control thread.
   size_t get_gc_id();
-  double get_most_recent_wake_time() const {
-    return _most_recent_wake_time;
-  }
-  double get_planned_sleep_interval() const {
-    return _planned_sleep_interval;
-  }
 };
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHCONTROLLER_HPP

View File

@@ -826,6 +826,8 @@ public:
   inline size_t used() const { return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator); }
   inline size_t reserved() const { return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector); }
   inline size_t available() const { return _partitions.available_in_not_locked(ShenandoahFreeSetPartitionId::Mutator); }
+  inline size_t available_holding_lock() const
+  { return _partitions.available_in(ShenandoahFreeSetPartitionId::Mutator); }
   inline size_t total_humongous_waste() const { return _total_humongous_waste; }
   inline size_t humongous_waste_in_mutator() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator); }

View File

@@ -129,6 +129,7 @@ void ShenandoahRegulatorThread::regulator_sleep() {
   double wake_time = os::elapsedTime();
   _most_recent_period = wake_time - _most_recent_wake_time;
   _most_recent_wake_time = wake_time;
+  _young_heuristics->update_should_start_query_times(_most_recent_wake_time, double(_sleep) / 1000.0);
   if (LogTarget(Debug, gc, thread)::is_enabled()) {
     double elapsed = _most_recent_wake_time - before_sleep_time;
     double hiccup = elapsed - double(_sleep);

View File

@@ -84,21 +84,6 @@ class ShenandoahRegulatorThread: public ConcurrentGCThread {
   double _most_recent_wake_time;
   double _most_recent_period;
   double _last_sleep_adjust_time;
-public:
-  inline double get_most_recent_wake_time() const {
-    return _most_recent_wake_time;
-  }
-  // Return actual duration of last regulator period, which is supposed to equal _sleep, but may be higher in case of scheduling jitter.
-  inline double get_most_recent_period() const {
-    return _most_recent_period;
-  }
-  // return planned sleep duration, in s
-  inline double get_planned_sleep_interval() const {
-    return ((double) _sleep) / 1000.0;
-  }
 };