diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp index 7a2be24f84c..65e255011fb 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp @@ -837,19 +837,29 @@ double ShenandoahAllocationRate::force_sample(size_t allocated, size_t &unaccoun const double MinSampleTime = 0.002; // Do not sample if time since last update is less than 2 ms double now = os::elapsedTime(); double time_since_last_update = now - _last_sample_time; + double rate = 0.0; if (time_since_last_update < MinSampleTime) { + // If we choose not to sample right now, the unaccounted_bytes_allocated will be added + // into the next sample taken. These unaccounted_bytes_allocated will be added to + // any additional bytes that are allocated during this GC cycle at the time the rate is + // next sampled. We do not overwrite _last_sample_time on this path, because the + // unaccounted_bytes_allocated were allocated following _last_sample_time. unaccounted_bytes_allocated = allocated - _last_sample_value; - _last_sample_value = 0; - return 0.0; } else { - double rate = instantaneous_rate(now, allocated); + rate = instantaneous_rate(now, allocated); _rate.add(rate); _rate_avg.add(_rate.avg()); _last_sample_time = now; - _last_sample_value = allocated; unaccounted_bytes_allocated = 0; - return rate; } + // force_sample() is called when resetting bytes allocated since gc start. All subsequent + // requests to sample allocated bytes during this GC cycle are measured as a delta from + // _last_sample_value. 
In the case that we choose not to sample now, we will count the + // unaccounted_bytes_allocated as if they were allocated following the start of this GC + // cycle (but the time span over which these bytes were allocated begins at + // _last_sample_time, which we do not overwrite). + _last_sample_value = 0; + return rate; } double ShenandoahAllocationRate::sample(size_t allocated) { @@ -898,9 +908,7 @@ bool ShenandoahAllocationRate::is_spiking(double rate, double threshold) const { } double ShenandoahAllocationRate::instantaneous_rate(double time, size_t allocated) const { - size_t last_value = _last_sample_value; - double last_time = _last_sample_time; - size_t allocation_delta = (allocated > last_value) ? (allocated - last_value) : 0; - double time_delta_sec = time - last_time; - return (time_delta_sec > 0) ? (allocation_delta / time_delta_sec) : 0; + assert(allocated >= _last_sample_value, "Must be"); + assert(time > _last_sample_time, "Must be"); + return (allocated - _last_sample_value) / (time - _last_sample_time); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index eeff0fde87c..f7ba1f05f47 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -680,32 +680,18 @@ public: } inline size_t get_bytes_allocated_since_previous_sample() { - size_t total_bytes = get_total_bytes_allocated(); - size_t result; - if (total_bytes < _mutator_bytes_at_last_sample) { - // This rare condition may occur if bytes allocated overflows (wraps around) size_t tally of allocations. - // This may also occur in the very rare situation that get_total_bytes_allocated() is queried in the middle of - // reset_bytes_allocated_since_gc_start(). 
Note that there is no lock to assure that the two global variables - // it modifies are modified atomically (_total_bytes_previously_allocated and _mutator_byts_allocated_since_gc_start) - // This has been observed to occur when an out-of-cycle degenerated cycle is starting (and thus calls - // reset_bytes_allocated_since_gc_start()) at the same time that the control (non-generational mode) or - // regulator (generational-mode) thread calls should_start_gc() (which invokes get_bytes_allocated_since_previous_sample()). - // - // Handle this rare situation by responding with the "innocent" value 0 and resetting internal state so that the - // the next query can recalibrate. - result = 0; - } else { - // Note: there's always the possibility that the tally of total allocations exceeds the 64-bit capacity of our size_t - // counter. We assume that the difference between relevant samples does not exceed this count. Example: - // Suppose _mutator_words_at_last_sample is 0xffff_ffff_ffff_fff0 (18,446,744,073,709,551,600 Decimal) - // and _total_words is 0x0000_0000_0000_0800 ( 32,768 Decimal) - // Then, total_words - _mutator_words_at_last_sample can be done adding 1's complement of subtrahend: - // 1's complement of _mutator_words_at_last_sample is: 0x0000_0000_0000_0010 ( 16 Decimal)) - // plus total_words: 0x0000_0000_0000_0800 (32,768 Decimal) - // sum: 0x0000_0000_0000_0810 (32,784 Decimal) - result = total_bytes - _mutator_bytes_at_last_sample; - } - _mutator_bytes_at_last_sample = total_bytes; + const size_t total_bytes_allocated = get_total_bytes_allocated(); + // total_bytes_allocated could overflow (wrap around) size_t in a rare condition; we rely on + // the wrap-around arithmetic of the size_t type to produce a meaningful result when total_bytes_allocated overflows + // its 64-bit counter.
The expression below is equivalent to code: + // if (total_bytes_allocated < _mutator_bytes_at_last_sample) { + // // overflow + // return total_bytes_allocated + (SIZE_MAX - _mutator_bytes_at_last_sample) + 1; + // } else { + // return total_bytes_allocated - _mutator_bytes_at_last_sample; + // } + const size_t result = total_bytes_allocated - _mutator_bytes_at_last_sample; + _mutator_bytes_at_last_sample = total_bytes_allocated; return result; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 5b2e050aa0a..1efe6ae963f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -2351,24 +2351,23 @@ address ShenandoahHeap::in_cset_fast_test_addr() { void ShenandoahHeap::reset_bytes_allocated_since_gc_start() { // It is important to force_alloc_rate_sample() before the associated generation's bytes_allocated has been reset. - // Note that there is no lock to prevent additional alloations between sampling bytes_allocated_since_gc_start() and - // reset_bytes_allocated_since_gc_start(). If additional allocations happen, they will be ignored in the average - // allocation rate computations. This effect is considered to be be negligible. - - // unaccounted_bytes is the bytes not accounted for by our forced sample. If the sample interval is too short, - // the "forced sample" will not happen, and any recently allocated bytes are "unaccounted for". We pretend these - // bytes are allocated after the start of subsequent gc. - size_t unaccounted_bytes; - ShenandoahFreeSet* _free_set = free_set(); - size_t bytes_allocated = _free_set->get_bytes_allocated_since_gc_start(); - if (mode()->is_generational()) { - unaccounted_bytes = young_generation()->heuristics()->force_alloc_rate_sample(bytes_allocated); - } else { - // Single-gen Shenandoah uses global heuristics. 
- unaccounted_bytes = heuristics()->force_alloc_rate_sample(bytes_allocated); + // Note that we obtain the heap lock to prevent additional allocations between sampling bytes_allocated_since_gc_start() + // and reset_bytes_allocated_since_gc_start(). + { + ShenandoahHeapLocker locker(lock()); + // unaccounted_bytes is the bytes not accounted for by our forced sample. If the sample interval is too short, + // the "forced sample" will not happen, and any recently allocated bytes are "unaccounted for". We pretend these + // bytes are allocated after the start of the subsequent GC. + size_t unaccounted_bytes; + size_t bytes_allocated = _free_set->get_bytes_allocated_since_gc_start(); + if (mode()->is_generational()) { + unaccounted_bytes = young_generation()->heuristics()->force_alloc_rate_sample(bytes_allocated); + } else { + // Single-gen Shenandoah uses global heuristics. + unaccounted_bytes = heuristics()->force_alloc_rate_sample(bytes_allocated); + } + _free_set->reset_bytes_allocated_since_gc_start(unaccounted_bytes); + } - ShenandoahHeapLocker locker(lock()); - _free_set->reset_bytes_allocated_since_gc_start(unaccounted_bytes); } void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {