From 3a8a432c05999fe478b94de75b416404b5a515d2 Mon Sep 17 00:00:00 2001 From: William Kemper Date: Tue, 4 Mar 2025 00:41:39 +0000 Subject: [PATCH] 8349094: GenShen: Race between control and regulator threads may violate assertions Reviewed-by: ysr, kdnilsen --- src/hotspot/share/gc/shared/gcCause.hpp | 1 + .../heuristics/shenandoahOldHeuristics.cpp | 42 +- .../heuristics/shenandoahOldHeuristics.hpp | 4 +- .../gc/shenandoah/shenandoahBarrierSet.cpp | 2 +- .../shenandoah/shenandoahCollectorPolicy.cpp | 29 +- .../shenandoah/shenandoahCollectorPolicy.hpp | 11 +- .../gc/shenandoah/shenandoahControlThread.cpp | 35 +- .../gc/shenandoah/shenandoahController.cpp | 59 +- .../gc/shenandoah/shenandoahController.hpp | 33 +- .../shenandoahGenerationalControlThread.cpp | 910 +++++++++--------- .../shenandoahGenerationalControlThread.hpp | 107 +- .../share/gc/shenandoah/shenandoahHeap.cpp | 38 +- .../share/gc/shenandoah/shenandoahHeap.hpp | 34 +- .../gc/shenandoah/shenandoahHeap.inline.hpp | 8 +- .../gc/shenandoah/shenandoahOldGeneration.cpp | 2 +- .../shenandoah/shenandoahRegulatorThread.cpp | 31 +- .../shenandoah/shenandoahRegulatorThread.hpp | 15 +- .../shenandoah/shenandoahScanRemembered.cpp | 4 +- 18 files changed, 699 insertions(+), 666 deletions(-) diff --git a/src/hotspot/share/gc/shared/gcCause.hpp b/src/hotspot/share/gc/shared/gcCause.hpp index bd819e8f5c9..ef96bf21567 100644 --- a/src/hotspot/share/gc/shared/gcCause.hpp +++ b/src/hotspot/share/gc/shared/gcCause.hpp @@ -74,6 +74,7 @@ class GCCause : public AllStatic { _shenandoah_stop_vm, _shenandoah_allocation_failure_evac, + _shenandoah_humongous_allocation_failure, _shenandoah_concurrent_gc, _shenandoah_upgrade_to_full_gc, diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp index 0393a2bb366..cf1a76ff4ff 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -60,7 +60,6 @@ int ShenandoahOldHeuristics::compare_by_index(RegionData a, RegionData b) { ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap) : ShenandoahHeuristics(generation), _heap(gen_heap), - _old_gen(generation), _first_pinned_candidate(NOT_FOUND), _last_old_collection_candidate(0), _next_old_collection_candidate(0), @@ -567,9 +566,9 @@ void ShenandoahOldHeuristics::set_trigger_if_old_is_fragmented(size_t first_old_ // allocation request will require a STW full GC. 
size_t allowed_old_gen_span = num_regions - (ShenandoahGenerationalHumongousReserve * num_regions) / 100; - size_t old_available = _old_gen->available() / HeapWordSize; + size_t old_available = _old_generation->available() / HeapWordSize; size_t region_size_words = ShenandoahHeapRegion::region_size_words(); - size_t old_unaffiliated_available = _old_gen->free_unaffiliated_regions() * region_size_words; + size_t old_unaffiliated_available = _old_generation->free_unaffiliated_regions() * region_size_words; assert(old_available >= old_unaffiliated_available, "sanity"); size_t old_fragmented_available = old_available - old_unaffiliated_available; @@ -603,12 +602,12 @@ void ShenandoahOldHeuristics::set_trigger_if_old_is_fragmented(size_t first_old_ } void ShenandoahOldHeuristics::set_trigger_if_old_is_overgrown() { - size_t old_used = _old_gen->used() + _old_gen->get_humongous_waste(); - size_t trigger_threshold = _old_gen->usage_trigger_threshold(); + size_t old_used = _old_generation->used() + _old_generation->get_humongous_waste(); + size_t trigger_threshold = _old_generation->usage_trigger_threshold(); // Detects unsigned arithmetic underflow assert(old_used <= _heap->capacity(), "Old used (%zu, %zu) must not be more than heap capacity (%zu)", - _old_gen->used(), _old_gen->get_humongous_waste(), _heap->capacity()); + _old_generation->used(), _old_generation->get_humongous_waste(), _heap->capacity()); if (old_used > trigger_threshold) { _growth_trigger = true; } @@ -620,13 +619,32 @@ void ShenandoahOldHeuristics::evaluate_triggers(size_t first_old_region, size_t set_trigger_if_old_is_overgrown(); } +bool ShenandoahOldHeuristics::should_resume_old_cycle() { + // If we are preparing to mark old, or if we are already marking old, then try to continue that work. + if (_old_generation->is_concurrent_mark_in_progress()) { + assert(_old_generation->state() == ShenandoahOldGeneration::MARKING, "Unexpected old gen state: %s", _old_generation->state_name()); + log_trigger("Resume marking old"); + return true; + } + + if (_old_generation->is_preparing_for_mark()) { + assert(_old_generation->state() == ShenandoahOldGeneration::FILLING, "Unexpected old gen state: %s", _old_generation->state_name()); + log_trigger("Resume preparing to mark old"); + return true; + } + + return false; +} + bool ShenandoahOldHeuristics::should_start_gc() { - // Cannot start a new old-gen GC until previous one has finished. - // - // Future refinement: under certain circumstances, we might be more sophisticated about this choice. - // For example, we could choose to abandon the previous old collection before it has completed evacuations. - ShenandoahHeap* heap = ShenandoahHeap::heap(); - if (!_old_generation->can_start_gc() || heap->collection_set()->has_old_regions()) { + + const ShenandoahHeap* heap = ShenandoahHeap::heap(); + if (_old_generation->is_doing_mixed_evacuations()) { + // Do not try to start an old cycle if we are waiting for old regions to be evacuated (we need + // a young cycle for this). Note that the young heuristic has a feature to expedite old evacuations. + // Future refinement: under certain circumstances, we might be more sophisticated about this choice. + // For example, we could choose to abandon the previous old collection before it has completed evacuations. 
+ log_debug(gc)("Not starting an old cycle because we are waiting for mixed evacuations"); return false; } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp index d77380926b6..8d3fec746ba 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp @@ -53,7 +53,6 @@ private: static uint NOT_FOUND; ShenandoahGenerationalHeap* _heap; - ShenandoahOldGeneration* _old_gen; // After final marking of the old generation, this heuristic will select // a set of candidate regions to be included in subsequent mixed collections. @@ -186,6 +185,9 @@ public: bool should_start_gc() override; + // Returns true if the old generation needs to prepare for marking, or continue marking. + bool should_resume_old_cycle(); + void record_success_concurrent() override; void record_success_degenerated() override; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp index 37470067ed9..17a89f631c6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp @@ -90,7 +90,7 @@ bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, Basi void ShenandoahBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) { #if COMPILER2_OR_JVMCI assert(!ReduceInitialCardMarks || !ShenandoahCardBarrier || ShenandoahGenerationalHeap::heap()->is_in_young(new_obj), - "Error: losing card mark on initialzing store to old gen"); + "Allocating new object outside of young generation: " INTPTR_FORMAT, p2i(new_obj)); #endif // COMPILER2_OR_JVMCI assert(thread->deferred_card_mark().is_empty(), "We don't use this"); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp index 782db285c2a..0169795d6f6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp @@ -123,25 +123,28 @@ void ShenandoahCollectorPolicy::record_shutdown() { _in_shutdown.set(); } -bool ShenandoahCollectorPolicy::is_at_shutdown() { +bool ShenandoahCollectorPolicy::is_at_shutdown() const { return _in_shutdown.is_set(); } -bool is_explicit_gc(GCCause::Cause cause) { +bool ShenandoahCollectorPolicy::is_explicit_gc(GCCause::Cause cause) { return GCCause::is_user_requested_gc(cause) - || GCCause::is_serviceability_requested_gc(cause); + || GCCause::is_serviceability_requested_gc(cause) + || cause == GCCause::_wb_full_gc + || cause == GCCause::_wb_young_gc; } bool is_implicit_gc(GCCause::Cause cause) { return cause != GCCause::_no_gc && cause != GCCause::_shenandoah_concurrent_gc && cause != GCCause::_allocation_failure - && !is_explicit_gc(cause); + && !ShenandoahCollectorPolicy::is_explicit_gc(cause); } #ifdef ASSERT bool is_valid_request(GCCause::Cause cause) { - return is_explicit_gc(cause) + return ShenandoahCollectorPolicy::is_explicit_gc(cause) + || ShenandoahCollectorPolicy::is_shenandoah_gc(cause) || cause == GCCause::_metadata_GC_clear_soft_refs || cause == GCCause::_codecache_GC_aggressive || cause == GCCause::_codecache_GC_threshold @@ -153,6 +156,22 @@ bool is_valid_request(GCCause::Cause cause) { } #endif +bool ShenandoahCollectorPolicy::is_shenandoah_gc(GCCause::Cause cause) { + return cause == 
GCCause::_allocation_failure + || cause == GCCause::_shenandoah_stop_vm + || cause == GCCause::_shenandoah_allocation_failure_evac + || cause == GCCause::_shenandoah_humongous_allocation_failure + || cause == GCCause::_shenandoah_concurrent_gc + || cause == GCCause::_shenandoah_upgrade_to_full_gc; +} + + +bool ShenandoahCollectorPolicy::is_allocation_failure(GCCause::Cause cause) { + return cause == GCCause::_allocation_failure + || cause == GCCause::_shenandoah_allocation_failure_evac + || cause == GCCause::_shenandoah_humongous_allocation_failure; +} + bool ShenandoahCollectorPolicy::is_requested_gc(GCCause::Cause cause) { return is_explicit_gc(cause) || is_implicit_gc(cause); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp index 2c92d91ac99..68579508de5 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp @@ -77,9 +77,9 @@ public: void record_collection_cause(GCCause::Cause cause); void record_shutdown(); - bool is_at_shutdown(); + bool is_at_shutdown() const; - ShenandoahTracer* tracer() {return _tracer;} + ShenandoahTracer* tracer() const {return _tracer;} void print_gc_stats(outputStream* out) const; @@ -90,15 +90,18 @@ public: // If the heuristics find that the number of consecutive degenerated cycles is above // ShenandoahFullGCThreshold, then they will initiate a Full GC upon an allocation // failure. - inline size_t consecutive_degenerated_gc_count() const { + size_t consecutive_degenerated_gc_count() const { return _consecutive_degenerated_gcs; } + static bool is_allocation_failure(GCCause::Cause cause); + static bool is_shenandoah_gc(GCCause::Cause cause); static bool is_requested_gc(GCCause::Cause cause); + static bool is_explicit_gc(GCCause::Cause cause); static bool should_run_full_gc(GCCause::Cause cause); static bool should_handle_requested_gc(GCCause::Cause cause); - inline size_t consecutive_young_gc_count() const { + size_t consecutive_young_gc_count() const { return _consecutive_young_gcs; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp index 1ddfb6b7054..4848a69a6f3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp @@ -50,7 +50,6 @@ ShenandoahControlThread::ShenandoahControlThread() : void ShenandoahControlThread::run_service() { ShenandoahHeap* const heap = ShenandoahHeap::heap(); - const GCMode default_mode = concurrent_normal; const GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc; int sleep = ShenandoahControlIntervalMin; @@ -59,9 +58,14 @@ void ShenandoahControlThread::run_service() { ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy(); ShenandoahHeuristics* const heuristics = heap->heuristics(); - while (!in_graceful_shutdown() && !should_terminate()) { + while (!should_terminate()) { + const GCCause::Cause cancelled_cause = heap->cancelled_cause(); + if (cancelled_cause == GCCause::_shenandoah_stop_vm) { + break; + } + // Figure out if we have pending requests. 
- const bool alloc_failure_pending = _alloc_failure_gc.is_set(); + const bool alloc_failure_pending = ShenandoahCollectorPolicy::is_allocation_failure(cancelled_cause); const bool is_gc_requested = _gc_requested.is_set(); const GCCause::Cause requested_gc_cause = _requested_gc_cause; @@ -254,11 +258,6 @@ void ShenandoahControlThread::run_service() { } os::naked_short_sleep(sleep); } - - // Wait for the actual stop(), can't leave run_service() earlier. - while (!should_terminate()) { - os::naked_short_sleep(ShenandoahControlIntervalMin); - } } void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) { @@ -322,19 +321,24 @@ void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cau bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) { ShenandoahHeap* heap = ShenandoahHeap::heap(); if (heap->cancelled_gc()) { - assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting"); - if (!in_graceful_shutdown()) { + if (heap->cancelled_cause() == GCCause::_shenandoah_stop_vm) { + return true; + } + + if (ShenandoahCollectorPolicy::is_allocation_failure(heap->cancelled_cause())) { assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle, "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point)); _degen_point = point; + return true; } - return true; + + fatal("Unexpected reason for cancellation: %s", GCCause::to_string(heap->cancelled_cause())); } return false; } void ShenandoahControlThread::stop_service() { - // Nothing to do here. + ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_stop_vm); } void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) { @@ -363,6 +367,11 @@ void ShenandoahControlThread::request_gc(GCCause::Cause cause) { } void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) { + if (should_terminate()) { + log_info(gc)("Control thread is terminating, no more GCs"); + return; + } + // For normal requested GCs (System.gc) we want to block the caller. However, // for whitebox requested GC, we want to initiate the GC and return immediately. // The whitebox caller thread will arrange for itself to wait until the GC notifies @@ -385,7 +394,7 @@ void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) { MonitorLocker ml(&_gc_waiters_lock); size_t current_gc_id = get_gc_id(); size_t required_gc_id = current_gc_id + 1; - while (current_gc_id < required_gc_id) { + while (current_gc_id < required_gc_id && !should_terminate()) { // Although setting gc request is under _gc_waiters_lock, but read side (run_service()) // does not take the lock. We need to enforce following order, so that read side sees // latest requested gc cause when the flag is set. 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahController.cpp b/src/hotspot/share/gc/shenandoah/shenandoahController.cpp index a4d5a572349..c430981bfe6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahController.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahController.cpp @@ -25,6 +25,8 @@ #include "gc/shared/gc_globals.hpp" #include "gc/shenandoah/shenandoahController.hpp" + +#include "shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" @@ -37,14 +39,6 @@ size_t ShenandoahController::reset_allocs_seen() { return Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed); } -void ShenandoahController::prepare_for_graceful_shutdown() { - _graceful_shutdown.set(); -} - -bool ShenandoahController::in_graceful_shutdown() { - return _graceful_shutdown.is_set(); -} - void ShenandoahController::update_gc_id() { Atomic::inc(&_gc_id); } @@ -53,59 +47,38 @@ size_t ShenandoahController::get_gc_id() { return Atomic::load(&_gc_id); } -void ShenandoahController::handle_alloc_failure(ShenandoahAllocRequest& req, bool block) { - ShenandoahHeap* heap = ShenandoahHeap::heap(); - +void ShenandoahController::handle_alloc_failure(const ShenandoahAllocRequest& req, bool block) { assert(current()->is_Java_thread(), "expect Java thread here"); - bool is_humongous = ShenandoahHeapRegion::requires_humongous(req.size()); - if (try_set_alloc_failure_gc(is_humongous)) { - // Only report the first allocation failure - log_info(gc)("Failed to allocate %s, %zu%s", - req.type_string(), - byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize)); + const bool is_humongous = ShenandoahHeapRegion::requires_humongous(req.size()); + const GCCause::Cause cause = is_humongous ? GCCause::_shenandoah_humongous_allocation_failure : GCCause::_allocation_failure; - // Now that alloc failure GC is scheduled, we can abort everything else - heap->cancel_gc(GCCause::_allocation_failure); + ShenandoahHeap* const heap = ShenandoahHeap::heap(); + if (heap->cancel_gc(cause)) { + log_info(gc)("Failed to allocate %s, " PROPERFMT, req.type_string(), PROPERFMTARGS(req.size() * HeapWordSize)); + request_gc(cause); } - if (block) { MonitorLocker ml(&_alloc_failure_waiters_lock); - while (is_alloc_failure_gc()) { + while (!should_terminate() && ShenandoahCollectorPolicy::is_allocation_failure(heap->cancelled_cause())) { ml.wait(); } } } void ShenandoahController::handle_alloc_failure_evac(size_t words) { - ShenandoahHeap* heap = ShenandoahHeap::heap(); - bool is_humongous = ShenandoahHeapRegion::requires_humongous(words); - if (try_set_alloc_failure_gc(is_humongous)) { - // Only report the first allocation failure - log_info(gc)("Failed to allocate %zu%s for evacuation", - byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize)); + ShenandoahHeap* const heap = ShenandoahHeap::heap(); + const bool is_humongous = ShenandoahHeapRegion::requires_humongous(words); + const GCCause::Cause cause = is_humongous ? 
GCCause::_shenandoah_humongous_allocation_failure : GCCause::_shenandoah_allocation_failure_evac; + + if (heap->cancel_gc(cause)) { + log_info(gc)("Failed to allocate " PROPERFMT " for evacuation", PROPERFMTARGS(words * HeapWordSize)); } - - // Forcefully report allocation failure - heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac); } void ShenandoahController::notify_alloc_failure_waiters() { - _alloc_failure_gc.unset(); - _humongous_alloc_failure_gc.unset(); MonitorLocker ml(&_alloc_failure_waiters_lock); ml.notify_all(); } - -bool ShenandoahController::try_set_alloc_failure_gc(bool is_humongous) { - if (is_humongous) { - _humongous_alloc_failure_gc.try_set(); - } - return _alloc_failure_gc.try_set(); -} - -bool ShenandoahController::is_alloc_failure_gc() { - return _alloc_failure_gc.is_set(); -} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahController.hpp b/src/hotspot/share/gc/shenandoah/shenandoahController.hpp index 6c28ff4e969..83cde94f509 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahController.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahController.hpp @@ -36,27 +36,25 @@ */ class ShenandoahController: public ConcurrentGCThread { private: - ShenandoahSharedFlag _graceful_shutdown; - shenandoah_padding(0); volatile size_t _allocs_seen; shenandoah_padding(1); + // A monotonically increasing GC count. volatile size_t _gc_id; shenandoah_padding(2); protected: - ShenandoahSharedFlag _alloc_failure_gc; - ShenandoahSharedFlag _humongous_alloc_failure_gc; - // While we could have a single lock for these, it may risk unblocking // GC waiters when alloc failure GC cycle finishes. We want instead // to make complete explicit cycle for demanding customers. Monitor _alloc_failure_waiters_lock; Monitor _gc_waiters_lock; + // Increments the internal GC count. + void update_gc_id(); + public: ShenandoahController(): - ConcurrentGCThread(), _allocs_seen(0), _gc_id(0), _alloc_failure_waiters_lock(Mutex::safepoint-2, "ShenandoahAllocFailureGC_lock", true), @@ -68,38 +66,25 @@ public: virtual void request_gc(GCCause::Cause cause) = 0; // This cancels the collection cycle and has an option to block - // until another cycle runs and clears the alloc failure gc flag. - void handle_alloc_failure(ShenandoahAllocRequest& req, bool block); + // until another cycle completes successfully. + void handle_alloc_failure(const ShenandoahAllocRequest& req, bool block); // Invoked for allocation failures during evacuation. This cancels // the collection cycle without blocking. void handle_alloc_failure_evac(size_t words); - // Return true if setting the flag which indicates allocation failure succeeds. - bool try_set_alloc_failure_gc(bool is_humongous); - // Notify threads waiting for GC to complete. void notify_alloc_failure_waiters(); - // True if allocation failure flag has been set. - bool is_alloc_failure_gc(); - // This is called for every allocation. The control thread accumulates // this value when idle. During the gc cycle, the control resets it // and reports it to the pacer. void pacing_notify_alloc(size_t words); + + // Zeros out the number of allocations seen since the last GC cycle. size_t reset_allocs_seen(); - // These essentially allows to cancel a collection cycle for the - // purpose of shutting down the JVM, without trying to start a degenerated - // cycle. - void prepare_for_graceful_shutdown(); - bool in_graceful_shutdown(); - - - // Returns the internal gc count used by the control thread. Probably - // doesn't need to be exposed. 
+ // Return the value of a monotonic increasing GC count, maintained by the control thread. size_t get_gc_id(); - void update_gc_id(); }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHCONTROLLER_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp index db4b517a1f5..cabe51edc51 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp @@ -24,7 +24,6 @@ * */ -#include "gc/shenandoah/mode/shenandoahMode.hpp" #include "gc/shenandoah/shenandoahAsserts.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahConcurrentGC.hpp" @@ -45,294 +44,301 @@ #include "memory/metaspaceUtils.hpp" #include "memory/metaspaceStats.hpp" #include "runtime/atomic.hpp" +#include "utilities/events.hpp" ShenandoahGenerationalControlThread::ShenandoahGenerationalControlThread() : - ShenandoahController(), - _control_lock(Mutex::nosafepoint - 2, "ShenandoahControlGC_lock", true), - _regulator_lock(Mutex::nosafepoint - 2, "ShenandoahRegulatorGC_lock", true), + _control_lock(Mutex::nosafepoint - 2, "ShenandoahGCRequest_lock", true), _requested_gc_cause(GCCause::_no_gc), - _requested_generation(GLOBAL), - _degen_point(ShenandoahGC::_degenerated_outside_cycle), - _degen_generation(nullptr), - _mode(none) { + _requested_generation(nullptr), + _gc_mode(none), + _degen_point(ShenandoahGC::_degenerated_unset), + _heap(ShenandoahGenerationalHeap::heap()), + _age_period(0) { shenandoah_assert_generational(); set_name("Shenandoah Control Thread"); create_and_start(); } void ShenandoahGenerationalControlThread::run_service() { - ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap(); - const GCMode default_mode = concurrent_normal; - ShenandoahGenerationType generation = GLOBAL; - - uint age_period = 0; - - ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy(); - - // Heuristics are notified of allocation failures here and other outcomes - // of the cycle. They're also used here to control whether the Nth consecutive - // degenerated cycle should be 'promoted' to a full cycle. The decision to - // trigger a cycle or not is evaluated on the regulator thread. - ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics(); - while (!in_graceful_shutdown() && !should_terminate()) { - // Figure out if we have pending requests. - const bool alloc_failure_pending = _alloc_failure_gc.is_set(); - const bool humongous_alloc_failure_pending = _humongous_alloc_failure_gc.is_set(); - - GCCause::Cause cause = Atomic::xchg(&_requested_gc_cause, GCCause::_no_gc); - - const bool is_gc_requested = ShenandoahCollectorPolicy::is_requested_gc(cause); + const int64_t wait_ms = ShenandoahPacing ? ShenandoahControlIntervalMin : 0; + ShenandoahGCRequest request; + while (!should_terminate()) { // This control loop iteration has seen this much allocation. const size_t allocs_seen = reset_allocs_seen(); - // Check if we have seen a new target for soft max heap size. - const bool soft_max_changed = heap->check_soft_max_changed(); + // Figure out if we have pending requests. + check_for_request(request); - // Choose which GC mode to run in. The block below should select a single mode. 
- set_gc_mode(none); - ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset; - - if (alloc_failure_pending) { - // Allocation failure takes precedence: we have to deal with it first thing - cause = GCCause::_allocation_failure; - - // Consume the degen point, and seed it with default value - degen_point = _degen_point; - _degen_point = ShenandoahGC::_degenerated_outside_cycle; - - if (degen_point == ShenandoahGC::_degenerated_outside_cycle) { - _degen_generation = heap->young_generation(); - } else { - assert(_degen_generation != nullptr, "Need to know which generation to resume"); - } - - ShenandoahHeuristics* heuristics = _degen_generation->heuristics(); - generation = _degen_generation->type(); - bool old_gen_evacuation_failed = heap->old_generation()->clear_failed_evacuation(); - - heuristics->log_trigger("Handle Allocation Failure"); - - // Do not bother with degenerated cycle if old generation evacuation failed or if humongous allocation failed - if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() && - !old_gen_evacuation_failed && !humongous_alloc_failure_pending) { - heuristics->record_allocation_failure_gc(); - policy->record_alloc_failure_to_degenerated(degen_point); - set_gc_mode(stw_degenerated); - } else { - heuristics->record_allocation_failure_gc(); - policy->record_alloc_failure_to_full(); - generation = GLOBAL; - set_gc_mode(stw_full); - } - } else if (is_gc_requested) { - generation = GLOBAL; - global_heuristics->log_trigger("GC request (%s)", GCCause::to_string(cause)); - global_heuristics->record_requested_gc(); - - if (ShenandoahCollectorPolicy::should_run_full_gc(cause)) { - set_gc_mode(stw_full); - } else { - set_gc_mode(default_mode); - // Unload and clean up everything - heap->set_unload_classes(global_heuristics->can_unload_classes()); - } - } else { - // We should only be here if the regulator requested a cycle or if - // there is an old generation mark in progress. - if (cause == GCCause::_shenandoah_concurrent_gc) { - if (_requested_generation == OLD && heap->old_generation()->is_doing_mixed_evacuations()) { - // If a request to start an old cycle arrived while an old cycle was running, but _before_ - // it chose any regions for evacuation we don't want to start a new old cycle. Rather, we want - // the heuristic to run a young collection so that we can evacuate some old regions. - assert(!heap->is_concurrent_old_mark_in_progress(), "Should not be running mixed collections and concurrent marking"); - generation = YOUNG; - } else { - generation = _requested_generation; - } - - // preemption was requested or this is a regular cycle - set_gc_mode(default_mode); - - // Don't start a new old marking if there is one already in progress - if (generation == OLD && heap->is_concurrent_old_mark_in_progress()) { - set_gc_mode(servicing_old); - } - - if (generation == GLOBAL) { - heap->set_unload_classes(global_heuristics->should_unload_classes()); - } else { - heap->set_unload_classes(false); - } - } else if (heap->is_concurrent_old_mark_in_progress() || heap->is_prepare_for_old_mark_in_progress()) { - // Nobody asked us to do anything, but we have an old-generation mark or old-generation preparation for - // mixed evacuation in progress, so resume working on that. - log_info(gc)("Resume old GC: marking is%s in progress, preparing is%s in progress", - heap->is_concurrent_old_mark_in_progress() ? "" : " NOT", - heap->is_prepare_for_old_mark_in_progress() ? 
"" : " NOT"); - - cause = GCCause::_shenandoah_concurrent_gc; - generation = OLD; - set_gc_mode(servicing_old); - heap->set_unload_classes(false); - } + if (request.cause == GCCause::_shenandoah_stop_vm) { + break; } - const bool gc_requested = (gc_mode() != none); - assert (!gc_requested || cause != GCCause::_no_gc, "GC cause should be set"); - - if (gc_requested) { - // Cannot uncommit bitmap slices during concurrent reset - ShenandoahNoUncommitMark forbid_region_uncommit(heap); - - // Blow away all soft references on this cycle, if handling allocation failure, - // either implicit or explicit GC request, or we are requested to do so unconditionally. - if (generation == GLOBAL && (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs)) { - heap->soft_ref_policy()->set_should_clear_all_soft_refs(true); - } - - // GC is starting, bump the internal ID - update_gc_id(); - - heap->reset_bytes_allocated_since_gc_start(); - - MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics(); - - // If GC was requested, we are sampling the counters even without actual triggers - // from allocation machinery. This captures GC phases more accurately. - heap->set_forced_counters_update(true); - - // If GC was requested, we better dump freeset data for performance debugging - heap->free_set()->log_status_under_lock(); - - // In case this is a degenerated cycle, remember whether original cycle was aging. - const bool was_aging_cycle = heap->is_aging_cycle(); - heap->set_aging_cycle(false); - - switch (gc_mode()) { - case concurrent_normal: { - // At this point: - // if (generation == YOUNG), this is a normal YOUNG cycle - // if (generation == OLD), this is a bootstrap OLD cycle - // if (generation == GLOBAL), this is a GLOBAL cycle triggered by System.gc() - // In all three cases, we want to age old objects if this is an aging cycle - if (age_period-- == 0) { - heap->set_aging_cycle(true); - age_period = ShenandoahAgingCyclePeriod - 1; - } - service_concurrent_normal_cycle(heap, generation, cause); - break; - } - case stw_degenerated: { - heap->set_aging_cycle(was_aging_cycle); - service_stw_degenerated_cycle(cause, degen_point); - break; - } - case stw_full: { - if (age_period-- == 0) { - heap->set_aging_cycle(true); - age_period = ShenandoahAgingCyclePeriod - 1; - } - service_stw_full_cycle(cause); - break; - } - case servicing_old: { - assert(generation == OLD, "Expected old generation here"); - GCIdMark gc_id_mark; - service_concurrent_old_cycle(heap, cause); - break; - } - default: - ShouldNotReachHere(); - } - - // If this was the requested GC cycle, notify waiters about it - if (is_gc_requested) { - notify_gc_waiters(); - } - - // If this was the allocation failure GC cycle, notify waiters about it - if (alloc_failure_pending) { - notify_alloc_failure_waiters(); - } - - // Report current free set state at the end of cycle, whether - // it is a normal completion, or the abort. - heap->free_set()->log_status_under_lock(); - - // Notify Universe about new heap usage. This has implications for - // global soft refs policy, and we better report it every time heap - // usage goes down. - heap->update_capacity_and_used_at_gc(); - - // Signal that we have completed a visit to all live objects. - heap->record_whole_heap_examined_timestamp(); - - // Disable forced counters update, and update counters one more time - // to capture the state at the end of GC session. 
- heap->handle_force_counters_update(); - heap->set_forced_counters_update(false); - - // Retract forceful part of soft refs policy - heap->soft_ref_policy()->set_should_clear_all_soft_refs(false); - - // Clear metaspace oom flag, if current cycle unloaded classes - if (heap->unload_classes()) { - global_heuristics->clear_metaspace_oom(); - } - - process_phase_timings(heap); - - // Print Metaspace change following GC (if logging is enabled). - MetaspaceUtils::print_metaspace_change(meta_sizes); - - // GC is over, we are at idle now - if (ShenandoahPacing) { - heap->pacer()->setup_for_idle(); - } + if (request.cause != GCCause::_no_gc) { + run_gc_cycle(request); } else { // Report to pacer that we have seen this many words allocated if (ShenandoahPacing && (allocs_seen > 0)) { - heap->pacer()->report_alloc(allocs_seen); + _heap->pacer()->report_alloc(allocs_seen); } } - // Check if we have seen a new target for soft max heap size or if a gc was requested. - // Either of these conditions will attempt to uncommit regions. - if (ShenandoahUncommit) { - if (heap->check_soft_max_changed()) { - heap->notify_soft_max_changed(); - } else if (is_gc_requested) { - heap->notify_explicit_gc_requested(); + // If the cycle was cancelled, continue the next iteration to deal with it. Otherwise, + // if there was no other cycle requested, cleanup and wait for the next request. + if (!_heap->cancelled_gc()) { + MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); + if (_requested_gc_cause == GCCause::_no_gc) { + set_gc_mode(ml, none); + ml.wait(wait_ms); } } - - // Wait for ShenandoahControlIntervalMax unless there was an allocation failure or another request was made mid-cycle. - if (!is_alloc_failure_gc() && _requested_gc_cause == GCCause::_no_gc) { - // The timed wait is necessary because this thread has a responsibility to send - // 'alloc_words' to the pacer when it does not perform a GC. - MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag); - lock.wait(ShenandoahControlIntervalMax); - } } + // In case any threads are waiting for a cycle to happen, notify them so they observe the shutdown. + notify_gc_waiters(); + notify_alloc_failure_waiters(); set_gc_mode(stopped); +} - // Wait for the actual stop(), can't leave run_service() earlier. - while (!should_terminate()) { - os::naked_short_sleep(ShenandoahControlIntervalMin); +void ShenandoahGenerationalControlThread::stop_service() { + log_debug(gc, thread)("Stopping control thread"); + MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); + _heap->cancel_gc(GCCause::_shenandoah_stop_vm); + _requested_gc_cause = GCCause::_shenandoah_stop_vm; + notify_cancellation(ml, GCCause::_shenandoah_stop_vm); + // We can't wait here because it may interfere with the active cycle's ability + // to reach a safepoint (this runs on a java thread). +} + +void ShenandoahGenerationalControlThread::check_for_request(ShenandoahGCRequest& request) { + // Hold the lock while we read request cause and generation + MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); + if (_heap->cancelled_gc()) { + // The previous request was cancelled. Either it was cancelled for an allocation + // failure (degenerated cycle), or old marking was cancelled to run a young collection. + // In either case, the correct generation for the next cycle can be determined by + // the cancellation cause. 
+ request.cause = _heap->cancelled_cause(); + if (request.cause == GCCause::_shenandoah_concurrent_gc) { + request.generation = _heap->young_generation(); + _heap->clear_cancelled_gc(false); + } + } else { + request.cause = _requested_gc_cause; + request.generation = _requested_generation; + + // Only clear these if we made a request from them. In the case of a cancelled gc, + // we do not want to inadvertently lose this pending request. + _requested_gc_cause = GCCause::_no_gc; + _requested_generation = nullptr; + } + + if (request.cause == GCCause::_no_gc || request.cause == GCCause::_shenandoah_stop_vm) { + return; + } + + GCMode mode; + if (ShenandoahCollectorPolicy::is_allocation_failure(request.cause)) { + mode = prepare_for_allocation_failure_gc(request); + } else if (ShenandoahCollectorPolicy::is_explicit_gc(request.cause)) { + mode = prepare_for_explicit_gc(request); + } else { + mode = prepare_for_concurrent_gc(request); + } + set_gc_mode(ml, mode); +} + +ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_allocation_failure_gc(ShenandoahGCRequest &request) { + + if (_degen_point == ShenandoahGC::_degenerated_unset) { + _degen_point = ShenandoahGC::_degenerated_outside_cycle; + request.generation = _heap->young_generation(); + } else if (request.generation->is_old()) { + // This means we degenerated during the young bootstrap for the old generation + // cycle. The following degenerated cycle should therefore also be young. + request.generation = _heap->young_generation(); + } + + ShenandoahHeuristics* heuristics = request.generation->heuristics(); + bool old_gen_evacuation_failed = _heap->old_generation()->clear_failed_evacuation(); + + heuristics->log_trigger("Handle Allocation Failure"); + + // Do not bother with degenerated cycle if old generation evacuation failed or if humongous allocation failed + if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle() && + !old_gen_evacuation_failed && request.cause != GCCause::_shenandoah_humongous_allocation_failure) { + heuristics->record_allocation_failure_gc(); + _heap->shenandoah_policy()->record_alloc_failure_to_degenerated(_degen_point); + return stw_degenerated; + } else { + heuristics->record_allocation_failure_gc(); + _heap->shenandoah_policy()->record_alloc_failure_to_full(); + request.generation = _heap->global_generation(); + return stw_full; } } -void ShenandoahGenerationalControlThread::process_phase_timings(const ShenandoahGenerationalHeap* heap) { - // Commit worker statistics to cycle data - heap->phase_timings()->flush_par_workers_to_cycle(); - if (ShenandoahPacing) { - heap->pacer()->flush_stats_to_cycle(); +ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_explicit_gc(ShenandoahGCRequest &request) const { + ShenandoahHeuristics* global_heuristics = _heap->global_generation()->heuristics(); + request.generation = _heap->global_generation(); + global_heuristics->log_trigger("GC request (%s)", GCCause::to_string(request.cause)); + global_heuristics->record_requested_gc(); + + if (ShenandoahCollectorPolicy::should_run_full_gc(request.cause)) { + return stw_full;; + } else { + // Unload and clean up everything. Note that this is an _explicit_ request and so does not use + // the same `should_unload_classes` call as the regulator's concurrent gc request. 
+ _heap->set_unload_classes(global_heuristics->can_unload_classes()); + return concurrent_normal; + } +} + +ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_concurrent_gc(const ShenandoahGCRequest &request) const { + assert(!(request.generation->is_old() && _heap->old_generation()->is_doing_mixed_evacuations()), + "Old heuristic should not request cycles while it waits for mixed evacuations"); + + if (request.generation->is_global()) { + ShenandoahHeuristics* global_heuristics = _heap->global_generation()->heuristics(); + _heap->set_unload_classes(global_heuristics->should_unload_classes()); + } else { + _heap->set_unload_classes(false); } - ShenandoahEvacuationTracker* evac_tracker = heap->evac_tracker(); + // preemption was requested or this is a regular cycle + return request.generation->is_old() ? servicing_old : concurrent_normal; +} + +void ShenandoahGenerationalControlThread::maybe_set_aging_cycle() { + if (_age_period-- == 0) { + _heap->set_aging_cycle(true); + _age_period = ShenandoahAgingCyclePeriod - 1; + } else { + _heap->set_aging_cycle(false); + } +} + +void ShenandoahGenerationalControlThread::run_gc_cycle(const ShenandoahGCRequest& request) { + + log_debug(gc, thread)("Starting GC (%s): %s, %s", gc_mode_name(gc_mode()), GCCause::to_string(request.cause), request.generation->name()); + assert(gc_mode() != none, "GC mode cannot be none here"); + + // Blow away all soft references on this cycle, if handling allocation failure, + // either implicit or explicit GC request, or we are requested to do so unconditionally. + if (request.generation->is_global() && (ShenandoahCollectorPolicy::is_allocation_failure(request.cause) || ShenandoahCollectorPolicy::is_explicit_gc(request.cause) || ShenandoahAlwaysClearSoftRefs)) { + _heap->soft_ref_policy()->set_should_clear_all_soft_refs(true); + } + + // GC is starting, bump the internal ID + update_gc_id(); + + _heap->reset_bytes_allocated_since_gc_start(); + + MetaspaceCombinedStats meta_sizes = MetaspaceUtils::get_combined_statistics(); + + // If GC was requested, we are sampling the counters even without actual triggers + // from allocation machinery. This captures GC phases more accurately. + _heap->set_forced_counters_update(true); + + // If GC was requested, we better dump freeset data for performance debugging + _heap->free_set()->log_status_under_lock(); + + { + // Cannot uncommit bitmap slices during concurrent reset + ShenandoahNoUncommitMark forbid_region_uncommit(_heap); + + switch (gc_mode()) { + case concurrent_normal: { + service_concurrent_normal_cycle(request); + break; + } + case stw_degenerated: { + service_stw_degenerated_cycle(request); + break; + } + case stw_full: { + service_stw_full_cycle(request.cause); + break; + } + case servicing_old: { + assert(request.generation->is_old(), "Expected old generation here"); + GCIdMark gc_id_mark; + service_concurrent_old_cycle(request); + break; + } + default: + ShouldNotReachHere(); + } + } + + // If this was the requested GC cycle, notify waiters about it + if (ShenandoahCollectorPolicy::is_explicit_gc(request.cause)) { + notify_gc_waiters(); + } + + // If this was an allocation failure GC cycle, notify waiters about it + if (ShenandoahCollectorPolicy::is_allocation_failure(request.cause)) { + notify_alloc_failure_waiters(); + } + + // Report current free set state at the end of cycle, whether + // it is a normal completion, or the abort. + _heap->free_set()->log_status_under_lock(); + + // Notify Universe about new heap usage. 
This has implications for + // global soft refs policy, and we better report it every time heap + // usage goes down. + _heap->update_capacity_and_used_at_gc(); + + // Signal that we have completed a visit to all live objects. + _heap->record_whole_heap_examined_timestamp(); + + // Disable forced counters update, and update counters one more time + // to capture the state at the end of GC session. + _heap->handle_force_counters_update(); + _heap->set_forced_counters_update(false); + + // Retract forceful part of soft refs policy + _heap->soft_ref_policy()->set_should_clear_all_soft_refs(false); + + // Clear metaspace oom flag, if current cycle unloaded classes + if (_heap->unload_classes()) { + _heap->global_generation()->heuristics()->clear_metaspace_oom(); + } + + process_phase_timings(); + + // Print Metaspace change following GC (if logging is enabled). + MetaspaceUtils::print_metaspace_change(meta_sizes); + + // GC is over, we are at idle now + if (ShenandoahPacing) { + _heap->pacer()->setup_for_idle(); + } + + // Check if we have seen a new target for soft max heap size or if a gc was requested. + // Either of these conditions will attempt to uncommit regions. + if (ShenandoahUncommit) { + if (_heap->check_soft_max_changed()) { + _heap->notify_soft_max_changed(); + } else if (ShenandoahCollectorPolicy::is_explicit_gc(request.cause)) { + _heap->notify_explicit_gc_requested(); + } + } + + log_debug(gc, thread)("Completed GC (%s): %s, %s, cancelled: %s", + gc_mode_name(gc_mode()), GCCause::to_string(request.cause), request.generation->name(), GCCause::to_string(_heap->cancelled_cause())); +} + +void ShenandoahGenerationalControlThread::process_phase_timings() const { + // Commit worker statistics to cycle data + _heap->phase_timings()->flush_par_workers_to_cycle(); + if (ShenandoahPacing) { + _heap->pacer()->flush_stats_to_cycle(); + } + + ShenandoahEvacuationTracker* evac_tracker = _heap->evac_tracker(); ShenandoahCycleStats evac_stats = evac_tracker->flush_cycle_to_global(); // Print GC stats for current cycle @@ -341,17 +347,17 @@ void ShenandoahGenerationalControlThread::process_phase_timings(const Shenandoah if (lt.is_enabled()) { ResourceMark rm; LogStream ls(lt); - heap->phase_timings()->print_cycle_on(&ls); + _heap->phase_timings()->print_cycle_on(&ls); evac_tracker->print_evacuations_on(&ls, &evac_stats.workers, &evac_stats.mutators); if (ShenandoahPacing) { - heap->pacer()->print_cycle_on(&ls); + _heap->pacer()->print_cycle_on(&ls); } } } // Commit statistics to globals - heap->phase_timings()->flush_cycle_to_global(); + _heap->phase_timings()->flush_cycle_to_global(); } // Young and old concurrent cycles are initiated by the regulator. Implicit @@ -379,46 +385,27 @@ void ShenandoahGenerationalControlThread::process_phase_timings(const Shenandoah // | v v | // +---> Global Degen +--------------------> Full <----+ // -void ShenandoahGenerationalControlThread::service_concurrent_normal_cycle(ShenandoahGenerationalHeap* heap, - const ShenandoahGenerationType generation, - GCCause::Cause cause) { +void ShenandoahGenerationalControlThread::service_concurrent_normal_cycle(const ShenandoahGCRequest& request) { GCIdMark gc_id_mark; - switch (generation) { - case YOUNG: { - // Run a young cycle. This might or might not, have interrupted an ongoing - // concurrent mark in the old generation. We need to think about promotions - // in this case. 
Promoted objects should be above the TAMS in the old regions - // they end up in, but we have to be sure we don't promote into any regions - // that are in the cset. - log_info(gc, ergo)("Start GC cycle (Young)"); - service_concurrent_cycle(heap->young_generation(), cause, false); - break; - } - case OLD: { - log_info(gc, ergo)("Start GC cycle (Old)"); - service_concurrent_old_cycle(heap, cause); - break; - } - case GLOBAL: { - log_info(gc, ergo)("Start GC cycle (Global)"); - service_concurrent_cycle(heap->global_generation(), cause, false); - break; - } - default: - ShouldNotReachHere(); + log_info(gc, ergo)("Start GC cycle (%s)", request.generation->name()); + if (request.generation->is_old()) { + service_concurrent_old_cycle(request); + } else { + service_concurrent_cycle(request.generation, request.cause, false); } } -void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(ShenandoahGenerationalHeap* heap, GCCause::Cause &cause) { - ShenandoahOldGeneration* old_generation = heap->old_generation(); - ShenandoahYoungGeneration* young_generation = heap->young_generation(); +void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(const ShenandoahGCRequest& request) { + ShenandoahOldGeneration* old_generation = _heap->old_generation(); + ShenandoahYoungGeneration* young_generation = _heap->young_generation(); ShenandoahOldGeneration::State original_state = old_generation->state(); - TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); + TraceCollectorStats tcs(_heap->monitoring_support()->concurrent_collection_counters()); switch (original_state) { case ShenandoahOldGeneration::FILLING: { - ShenandoahGCSession session(cause, old_generation); + ShenandoahGCSession session(request.cause, old_generation); + assert(gc_mode() == servicing_old, "Filling should be servicing old"); _allow_old_preemption.set(); old_generation->entry_coalesce_and_fill(); _allow_old_preemption.unset(); @@ -430,7 +417,7 @@ void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(Shenandoa // acknowledge the cancellation request, the subsequent young cycle will observe // the request and essentially cancel itself. if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) { - log_info(gc)("Preparation for old generation cycle was cancelled"); + log_info(gc, thread)("Preparation for old generation cycle was cancelled"); return; } @@ -448,20 +435,16 @@ void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(Shenandoa // task queues but it will not traverse them. set_gc_mode(bootstrapping_old); young_generation->set_old_gen_task_queues(old_generation->task_queues()); - ShenandoahGCSession session(cause, young_generation); - service_concurrent_cycle(heap, young_generation, cause, true); - process_phase_timings(heap); - if (heap->cancelled_gc()) { + service_concurrent_cycle(young_generation, request.cause, true); + process_phase_timings(); + if (_heap->cancelled_gc()) { // Young generation bootstrap cycle has failed. Concurrent mark for old generation // is going to resume after degenerated bootstrap cycle completes. log_info(gc)("Bootstrap cycle for old generation was cancelled"); return; } - // Reset the degenerated point. Normally this would happen at the top - // of the control loop, but here we have just completed a young cycle - // which has bootstrapped the old concurrent marking. 
- _degen_point = ShenandoahGC::_degenerated_outside_cycle; + assert(_degen_point == ShenandoahGC::_degenerated_unset, "Degen point should not be set if gc wasn't cancelled"); // From here we will 'resume' the old concurrent mark. This will skip reset // and init mark for the concurrent mark. All of that work will have been @@ -470,17 +453,17 @@ void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(Shenandoa old_generation->transition_to(ShenandoahOldGeneration::MARKING); } case ShenandoahOldGeneration::MARKING: { - ShenandoahGCSession session(cause, old_generation); - bool marking_complete = resume_concurrent_old_cycle(old_generation, cause); + ShenandoahGCSession session(request.cause, old_generation); + bool marking_complete = resume_concurrent_old_cycle(old_generation, request.cause); if (marking_complete) { assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking"); if (original_state == ShenandoahOldGeneration::MARKING) { - heap->mmu_tracker()->record_old_marking_increment(true); - heap->log_heap_status("At end of Concurrent Old Marking finishing increment"); + _heap->mmu_tracker()->record_old_marking_increment(true); + _heap->log_heap_status("At end of Concurrent Old Marking finishing increment"); } } else if (original_state == ShenandoahOldGeneration::MARKING) { - heap->mmu_tracker()->record_old_marking_increment(false); - heap->log_heap_status("At end of Concurrent Old Marking increment"); + _heap->mmu_tracker()->record_old_marking_increment(false); + _heap->log_heap_status("At end of Concurrent Old Marking increment"); } break; } @@ -490,21 +473,19 @@ void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(Shenandoa } bool ShenandoahGenerationalControlThread::resume_concurrent_old_cycle(ShenandoahOldGeneration* generation, GCCause::Cause cause) { - assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Old mark should be in progress"); + assert(_heap->is_concurrent_old_mark_in_progress(), "Old mark should be in progress"); log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued", generation->task_queues()->tasks()); - ShenandoahHeap* heap = ShenandoahHeap::heap(); - // We can only tolerate being cancelled during concurrent marking or during preparation for mixed // evacuation. This flag here (passed by reference) is used to control precisely where the regulator // is allowed to cancel a GC. ShenandoahOldGC gc(generation, _allow_old_preemption); if (gc.collect(cause)) { - heap->notify_gc_progress(); + _heap->notify_gc_progress(); generation->record_success_concurrent(false); } - if (heap->cancelled_gc()) { + if (_heap->cancelled_gc()) { // It's possible the gc cycle was cancelled after the last time // the collection checked for cancellation. In which case, the // old gc cycle is still completed, and we have to deal with this @@ -515,89 +496,84 @@ bool ShenandoahGenerationalControlThread::resume_concurrent_old_cycle(Shenandoah // cycle, then we are not actually going to a degenerated cycle, // so the degenerated point doesn't matter here. 
check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle); - if (_requested_gc_cause == GCCause::_shenandoah_concurrent_gc) { - heap->shenandoah_policy()->record_interrupted_old(); + if (cause == GCCause::_shenandoah_concurrent_gc) { + _heap->shenandoah_policy()->record_interrupted_old(); } return false; } return true; } -void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool do_old_gc_bootstrap) { - // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during - // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there. - // If second allocation failure happens during Degenerated GC cycle (for example, when GC - // tries to evac something and no memory is available), cycle degrades to Full GC. - // - // There are also a shortcut through the normal cycle: immediate garbage shortcut, when - // heuristics says there are no regions to compact, and all the collection comes from immediately - // reclaimable regions. - // - // ................................................................................................ - // - // (immediate garbage shortcut) Concurrent GC - // /-------------------------------------------\ - // | | - // | | - // | | - // | v - // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END] - // | | | ^ - // | (af) | (af) | (af) | - // ..................|....................|.................|..............|....................... - // | | | | - // | | | | Degenerated GC - // v v v | - // STW Mark ----------> STW Evac ----> STW Update-Refs ----->o - // | | | ^ - // | (af) | (af) | (af) | - // ..................|....................|.................|..............|....................... - // | | | | - // | v | | Full GC - // \------------------->o<----------------/ | - // | | - // v | - // Full GC --------------------------/ - // - if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return; +// Normal cycle goes via all concurrent phases. If allocation failure (af) happens during +// any of the concurrent phases, it first degrades to Degenerated GC and completes GC there. +// If second allocation failure happens during Degenerated GC cycle (for example, when GC +// tries to evac something and no memory is available), cycle degrades to Full GC. +// +// There are also a shortcut through the normal cycle: immediate garbage shortcut, when +// heuristics says there are no regions to compact, and all the collection comes from immediately +// reclaimable regions. +// +// ................................................................................................ +// +// (immediate garbage shortcut) Concurrent GC +// /-------------------------------------------\ +// | | +// | | +// | | +// | v +// [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END] +// | | | ^ +// | (af) | (af) | (af) | +// ..................|....................|.................|..............|....................... +// | | | | +// | | | | Degenerated GC +// v v v | +// STW Mark ----------> STW Evac ----> STW Update-Refs ----->o +// | | | ^ +// | (af) | (af) | (af) | +// ..................|....................|.................|..............|....................... 
+// | | | | +// | v | | Full GC +// \------------------->o<----------------/ | +// | | +// v | +// Full GC --------------------------/ +// +void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahGeneration* generation, + GCCause::Cause cause, + bool do_old_gc_bootstrap) { + // At this point: + // if (generation == YOUNG), this is a normal young cycle or a bootstrap cycle + // if (generation == GLOBAL), this is a GLOBAL cycle + // In either case, we want to age old objects if this is an aging cycle + maybe_set_aging_cycle(); - ShenandoahHeap* heap = ShenandoahHeap::heap(); ShenandoahGCSession session(cause, generation); - TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); + TraceCollectorStats tcs(_heap->monitoring_support()->concurrent_collection_counters()); - service_concurrent_cycle(heap, generation, cause, do_old_gc_bootstrap); -} - -void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahHeap* heap, - ShenandoahGeneration* generation, - GCCause::Cause& cause, - bool do_old_gc_bootstrap) { assert(!generation->is_old(), "Old GC takes a different control path"); ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap); if (gc.collect(cause)) { // Cycle is complete - heap->notify_gc_progress(); + _heap->notify_gc_progress(); generation->record_success_concurrent(gc.abbreviated()); } else { - assert(heap->cancelled_gc(), "Must have been cancelled"); + assert(_heap->cancelled_gc(), "Must have been cancelled"); check_cancellation_or_degen(gc.degen_point()); - - // Concurrent young-gen collection degenerates to young - // collection. Same for global collections. - _degen_generation = generation; } + const char* msg; - ShenandoahMmuTracker* mmu_tracker = heap->mmu_tracker(); + ShenandoahMmuTracker* mmu_tracker = _heap->mmu_tracker(); if (generation->is_young()) { - if (heap->cancelled_gc()) { + if (_heap->cancelled_gc()) { msg = (do_old_gc_bootstrap) ? "At end of Interrupted Concurrent Bootstrap GC" : "At end of Interrupted Concurrent Young GC"; } else { // We only record GC results if GC was successful msg = (do_old_gc_bootstrap) ? 
"At end of Concurrent Bootstrap GC" : "At end of Concurrent Young GC"; - if (heap->collection_set()->has_old_regions()) { + if (_heap->collection_set()->has_old_regions()) { mmu_tracker->record_mixed(get_gc_id()); } else if (do_old_gc_bootstrap) { mmu_tracker->record_bootstrap(get_gc_id()); @@ -608,7 +584,7 @@ void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahHea } else { assert(generation->is_global(), "If not young, must be GLOBAL"); assert(!do_old_gc_bootstrap, "Do not bootstrap with GLOBAL GC"); - if (heap->cancelled_gc()) { + if (_heap->cancelled_gc()) { msg = "At end of Interrupted Concurrent GLOBAL GC"; } else { // We only record GC results if GC was successful @@ -616,39 +592,27 @@ void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahHea mmu_tracker->record_global(get_gc_id()); } } - heap->log_heap_status(msg); + _heap->log_heap_status(msg); } bool ShenandoahGenerationalControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) { - ShenandoahHeap* heap = ShenandoahHeap::heap(); - if (!heap->cancelled_gc()) { + if (!_heap->cancelled_gc()) { return false; } - if (in_graceful_shutdown()) { + if (_heap->cancelled_cause() == GCCause::_shenandoah_stop_vm + || _heap->cancelled_cause() == GCCause::_shenandoah_concurrent_gc) { + log_debug(gc, thread)("Cancellation detected, reason: %s", GCCause::to_string(_heap->cancelled_cause())); return true; } - assert(_degen_point == ShenandoahGC::_degenerated_outside_cycle, - "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point)); - - if (is_alloc_failure_gc()) { + if (ShenandoahCollectorPolicy::is_allocation_failure(_heap->cancelled_cause())) { + assert(_degen_point == ShenandoahGC::_degenerated_unset, + "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point)); _degen_point = point; - _preemption_requested.unset(); - return true; - } - - if (_preemption_requested.is_set()) { - assert(_requested_generation == YOUNG, "Only young GCs may preempt old."); - _preemption_requested.unset(); - - // Old generation marking is only cancellable during concurrent marking. - // Once final mark is complete, the code does not check again for cancellation. - // If old generation was cancelled for an allocation failure, we wouldn't - // make it to this case. The calling code is responsible for forcing a - // cancellation due to allocation failure into a degenerated cycle. - _degen_point = point; - heap->clear_cancelled_gc(false /* clear oom handler */); + log_debug(gc, thread)("Cancellation detected:, reason: %s, degen point: %s", + GCCause::to_string(_heap->cancelled_cause()), + ShenandoahGC::degen_point_to_string(_degen_point)); return true; } @@ -656,38 +620,32 @@ bool ShenandoahGenerationalControlThread::check_cancellation_or_degen(Shenandoah return false; } -void ShenandoahGenerationalControlThread::stop_service() { - // Nothing to do here. 
-} - void ShenandoahGenerationalControlThread::service_stw_full_cycle(GCCause::Cause cause) { - ShenandoahHeap* const heap = ShenandoahHeap::heap(); - GCIdMark gc_id_mark; - ShenandoahGCSession session(cause, heap->global_generation()); - + ShenandoahGCSession session(cause, _heap->global_generation()); + maybe_set_aging_cycle(); ShenandoahFullGC gc; gc.collect(cause); + _degen_point = ShenandoahGC::_degenerated_unset; } -void ShenandoahGenerationalControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, - ShenandoahGC::ShenandoahDegenPoint point) { - assert(point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set"); - ShenandoahHeap* const heap = ShenandoahHeap::heap(); +void ShenandoahGenerationalControlThread::service_stw_degenerated_cycle(const ShenandoahGCRequest& request) { + assert(_degen_point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set"); GCIdMark gc_id_mark; - ShenandoahGCSession session(cause, _degen_generation); + ShenandoahGCSession session(request.cause, request.generation); - ShenandoahDegenGC gc(point, _degen_generation); - gc.collect(cause); + ShenandoahDegenGC gc(_degen_point, request.generation); + gc.collect(request.cause); + _degen_point = ShenandoahGC::_degenerated_unset; - assert(heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks"); - if (_degen_generation->is_global()) { - assert(heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks"); - assert(heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks"); + assert(_heap->young_generation()->task_queues()->is_empty(), "Unexpected young generation marking tasks"); + if (request.generation->is_global()) { + assert(_heap->old_generation()->task_queues()->is_empty(), "Unexpected old generation marking tasks"); + assert(_heap->global_generation()->task_queues()->is_empty(), "Unexpected global generation marking tasks"); } else { - assert(_degen_generation->is_young(), "Expected degenerated young cycle, if not global."); - ShenandoahOldGeneration* old = heap->old_generation(); + assert(request.generation->is_young(), "Expected degenerated young cycle, if not global."); + ShenandoahOldGeneration* old = _heap->old_generation(); if (old->is_bootstrapping()) { old->transition_to(ShenandoahOldGeneration::MARKING); } @@ -695,72 +653,87 @@ void ShenandoahGenerationalControlThread::service_stw_degenerated_cycle(GCCause: } void ShenandoahGenerationalControlThread::request_gc(GCCause::Cause cause) { - if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) { + if (ShenandoahCollectorPolicy::is_allocation_failure(cause)) { + // GC should already be cancelled. Here we are just notifying the control thread to + // wake up and handle the cancellation request, so we don't need to set _requested_gc_cause. 
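For the allocation-failure case referenced in the comment above, the heap has already been cancelled by the allocating thread, so request_gc only has to wake the control thread. On the mutator side the allocation is then retried until it succeeds, a full GC has completed since the first failure, or the VM is shutting down (mirroring the should_retry_allocation change to shenandoahHeap.cpp later in this patch). The sketch below is only an illustration of that retry shape; the Heap interface and its member names are made up, not the real allocation path.

#include <cstddef>

// Hypothetical interface standing in for ShenandoahHeap; all names are illustrative.
struct Heap {
  virtual void*  try_allocate(size_t size) = 0;
  virtual size_t full_gc_count() const = 0;    // bumped when a full GC completes
  virtual bool   at_shutdown() const = 0;
  virtual void   handle_alloc_failure() = 0;   // cancel the concurrent cycle and wake the control thread
  virtual ~Heap() = default;
};

static void* allocate_with_retry(Heap& heap, size_t size) {
  void* result = heap.try_allocate(size);
  if (result != nullptr) {
    return result;
  }
  const size_t count_at_first_failure = heap.full_gc_count();
  while (result == nullptr &&
         heap.full_gc_count() == count_at_first_failure &&  // stop once a full GC has run
         !heap.at_shutdown()) {                              // and do not spin across shutdown
    heap.handle_alloc_failure();
    result = heap.try_allocate(size);
  }
  return result;
}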
+ notify_cancellation(cause); + } else if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) { handle_requested_gc(cause); } } -bool ShenandoahGenerationalControlThread::request_concurrent_gc(ShenandoahGenerationType generation) { - if (_preemption_requested.is_set() || _requested_gc_cause != GCCause::_no_gc || ShenandoahHeap::heap()->cancelled_gc()) { +bool ShenandoahGenerationalControlThread::request_concurrent_gc(ShenandoahGeneration* generation) { + if (_heap->cancelled_gc()) { // Ignore subsequent requests from the heuristics - log_debug(gc, thread)("Reject request for concurrent gc: preemption_requested: %s, gc_requested: %s, gc_cancelled: %s", - BOOL_TO_STR(_preemption_requested.is_set()), + log_debug(gc, thread)("Reject request for concurrent gc: gc_requested: %s, gc_cancelled: %s", GCCause::to_string(_requested_gc_cause), - BOOL_TO_STR(ShenandoahHeap::heap()->cancelled_gc())); + BOOL_TO_STR(_heap->cancelled_gc())); return false; } - if (gc_mode() == none) { - GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc); - if (existing != GCCause::_no_gc) { - log_debug(gc, thread)("Reject request for concurrent gc because another gc is pending: %s", GCCause::to_string(existing)); + MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); + if (gc_mode() == servicing_old) { + if (!preempt_old_marking(generation)) { + log_debug(gc, thread)("Cannot start young, old collection is not preemptible"); return false; } - _requested_generation = generation; - notify_control_thread(); - - MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag); - while (gc_mode() == none) { - ml.wait(); - } - return true; - } - - if (preempt_old_marking(generation)) { - assert(gc_mode() == servicing_old, "Expected to be servicing old, but was: %s.", gc_mode_name(gc_mode())); - GCCause::Cause existing = Atomic::cmpxchg(&_requested_gc_cause, GCCause::_no_gc, GCCause::_shenandoah_concurrent_gc); - if (existing != GCCause::_no_gc) { - log_debug(gc, thread)("Reject request to interrupt old gc because another gc is pending: %s", GCCause::to_string(existing)); - return false; - } - - log_info(gc)("Preempting old generation mark to allow %s GC", shenandoah_generation_name(generation)); - _requested_generation = generation; - _preemption_requested.set(); - ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc); - notify_control_thread(); - - MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag); + // Cancel the old GC and wait for the control thread to start servicing the new request. 
+ log_info(gc)("Preempting old generation mark to allow %s GC", generation->name()); while (gc_mode() == servicing_old) { + ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc); + notify_cancellation(ml, GCCause::_shenandoah_concurrent_gc); ml.wait(); } return true; } + if (gc_mode() == none) { + while (gc_mode() == none) { + if (_requested_gc_cause != GCCause::_no_gc) { + log_debug(gc, thread)("Reject request for concurrent gc because another gc is pending: %s", GCCause::to_string(_requested_gc_cause)); + return false; + } + + notify_control_thread(ml, GCCause::_shenandoah_concurrent_gc, generation); + ml.wait(); + } + return true; + } + + log_debug(gc, thread)("Reject request for concurrent gc: mode: %s, allow_old_preemption: %s", gc_mode_name(gc_mode()), BOOL_TO_STR(_allow_old_preemption.is_set())); return false; } -void ShenandoahGenerationalControlThread::notify_control_thread() { - MonitorLocker locker(&_control_lock, Mutex::_no_safepoint_check_flag); - _control_lock.notify(); +void ShenandoahGenerationalControlThread::notify_control_thread(GCCause::Cause cause, ShenandoahGeneration* generation) { + MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); + notify_control_thread(ml, cause, generation); } -bool ShenandoahGenerationalControlThread::preempt_old_marking(ShenandoahGenerationType generation) { - return (generation == YOUNG) && _allow_old_preemption.try_unset(); +void ShenandoahGenerationalControlThread::notify_control_thread(MonitorLocker& ml, GCCause::Cause cause, ShenandoahGeneration* generation) { + assert(_control_lock.is_locked(), "Request lock must be held here"); + log_debug(gc, thread)("Notify control (%s): %s, %s", gc_mode_name(gc_mode()), GCCause::to_string(cause), generation->name()); + _requested_gc_cause = cause; + _requested_generation = generation; + ml.notify(); +} + +void ShenandoahGenerationalControlThread::notify_cancellation(GCCause::Cause cause) { + MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); + notify_cancellation(ml, cause); +} + +void ShenandoahGenerationalControlThread::notify_cancellation(MonitorLocker& ml, GCCause::Cause cause) { + assert(_heap->cancelled_gc(), "GC should already be cancelled"); + log_debug(gc,thread)("Notify control (%s): %s", gc_mode_name(gc_mode()), GCCause::to_string(cause)); + ml.notify(); +} + +bool ShenandoahGenerationalControlThread::preempt_old_marking(ShenandoahGeneration* generation) { + return generation->is_young() && _allow_old_preemption.try_unset(); } void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cause) { @@ -769,8 +742,7 @@ void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cau // The whitebox caller thread will arrange for itself to wait until the GC notifies // it that has reached the requested breakpoint (phase in the GC). if (cause == GCCause::_wb_breakpoint) { - Atomic::xchg(&_requested_gc_cause, cause); - notify_control_thread(); + notify_control_thread(cause, ShenandoahHeap::heap()->global_generation()); return; } @@ -785,17 +757,10 @@ void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cau MonitorLocker ml(&_gc_waiters_lock); size_t current_gc_id = get_gc_id(); - size_t required_gc_id = current_gc_id + 1; - while (current_gc_id < required_gc_id) { - // This races with the regulator thread to start a concurrent gc and the - // control thread to clear it at the start of a cycle. Threads here are - // allowed to escalate a heuristic's request for concurrent gc. 
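The preempt-and-wait loop in request_concurrent_gc above is a plain monitor handshake: the requester cancels the old cycle, notifies, and waits on the same lock until the control thread has published a mode other than servicing_old; set_gc_mode (further below) performs the matching notify_all. The sketch below models that handshake with std::mutex and std::condition_variable standing in for HotSpot's Monitor/MonitorLocker; it is an approximation for illustration, not the VM code.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

enum class GCMode { none, concurrent_normal, servicing_old };

struct Control {
  std::mutex lock;                 // plays the role of _control_lock
  std::condition_variable cv;      // Monitor::wait()/notify() stand-in
  GCMode mode = GCMode::servicing_old;
  bool cancel_requested = false;

  // Requester side: ask for old marking to be abandoned and block until the
  // control thread has actually left the servicing_old mode.
  void preempt_old_marking() {
    std::unique_lock<std::mutex> ml(lock);
    while (mode == GCMode::servicing_old) {
      cancel_requested = true;     // ~ ShenandoahHeap::cancel_gc(...)
      cv.notify_all();             // ~ notify_cancellation(ml, cause)
      cv.wait(ml);
    }
  }

  // Control-thread side: observe the cancellation and change the mode under the lock.
  void control_loop() {
    std::unique_lock<std::mutex> ml(lock);
    cv.wait(ml, [&] { return cancel_requested; });
    mode = GCMode::none;           // ~ set_gc_mode(ml, none), which does notify_all()
    cv.notify_all();
  }
};

int main() {
  Control c;
  std::thread control([&] { c.control_loop(); });
  c.preempt_old_marking();
  control.join();
  std::printf("old marking preempted, mode=%d\n", static_cast<int>(c.mode));
  return 0;
}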
- GCCause::Cause existing = Atomic::xchg(&_requested_gc_cause, cause); - if (existing != GCCause::_no_gc) { - log_debug(gc, thread)("GC request supersedes existing request: %s", GCCause::to_string(existing)); - } - - notify_control_thread(); + const size_t required_gc_id = current_gc_id + 1; + while (current_gc_id < required_gc_id && !should_terminate()) { + // Make requests to run a global cycle until at least one is completed + notify_control_thread(cause, ShenandoahHeap::heap()->global_generation()); ml.wait(); current_gc_id = get_gc_id(); } @@ -806,7 +771,7 @@ void ShenandoahGenerationalControlThread::notify_gc_waiters() { ml.notify_all(); } -const char* ShenandoahGenerationalControlThread::gc_mode_name(ShenandoahGenerationalControlThread::GCMode mode) { +const char* ShenandoahGenerationalControlThread::gc_mode_name(GCMode mode) { switch (mode) { case none: return "idle"; case concurrent_normal: return "normal"; @@ -819,11 +784,16 @@ const char* ShenandoahGenerationalControlThread::gc_mode_name(ShenandoahGenerati } } -void ShenandoahGenerationalControlThread::set_gc_mode(ShenandoahGenerationalControlThread::GCMode new_mode) { - if (_mode != new_mode) { - log_debug(gc)("Transition from: %s to: %s", gc_mode_name(_mode), gc_mode_name(new_mode)); - MonitorLocker ml(&_regulator_lock, Mutex::_no_safepoint_check_flag); - _mode = new_mode; +void ShenandoahGenerationalControlThread::set_gc_mode(GCMode new_mode) { + MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag); + set_gc_mode(ml, new_mode); +} + +void ShenandoahGenerationalControlThread::set_gc_mode(MonitorLocker& ml, GCMode new_mode) { + if (_gc_mode != new_mode) { + log_debug(gc, thread)("Transition from: %s to: %s", gc_mode_name(_gc_mode), gc_mode_name(new_mode)); + EventMark event("Control thread transition from: %s, to %s", gc_mode_name(_gc_mode), gc_mode_name(new_mode)); + _gc_mode = new_mode; ml.notify_all(); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.hpp index 46072b98255..1586205742a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.hpp @@ -28,10 +28,9 @@ #include "gc/shared/gcCause.hpp" #include "gc/shenandoah/shenandoahController.hpp" -#include "gc/shenandoah/shenandoahGenerationType.hpp" #include "gc/shenandoah/shenandoahGC.hpp" -#include "gc/shenandoah/shenandoahPadding.hpp" #include "gc/shenandoah/shenandoahSharedVariables.hpp" +#include "runtime/mutexLocker.hpp" class ShenandoahOldGeneration; class ShenandoahGeneration; @@ -52,21 +51,43 @@ public: stopped } GCMode; + class ShenandoahGCRequest { + public: + ShenandoahGCRequest() : generation(nullptr), cause(GCCause::_no_gc) {} + ShenandoahGeneration* generation; + GCCause::Cause cause; + }; + private: + // This lock is used to coordinate setting the _requested_gc_cause, _requested generation + // and _gc_mode. It is important that these be changed together and have a consistent view. Monitor _control_lock; - Monitor _regulator_lock; - - ShenandoahSharedFlag _allow_old_preemption; - ShenandoahSharedFlag _preemption_requested; + // Represents a normal (non cancellation) gc request. This can be set by mutators (System.gc, + // whitebox gc, etc.) or by the regulator thread when the heuristics want to start a cycle. 
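Everything these comments describe hinges on publishing the request state through one lock: the requested cause and generation are only written under _control_lock, while the mode may be read cheaply without it but is re-checked under the lock before any state change (the double-checked pattern noted below). A small compilable sketch of that discipline, with std::mutex and std::atomic standing in for the Monitor and the volatile field, and a made-up Cause enum in place of GCCause::Cause:

#include <atomic>
#include <mutex>

enum class Cause  { no_gc, concurrent_gc, full_gc };          // stand-in for GCCause::Cause
enum class GCMode { none, concurrent_normal, servicing_old };

struct ControlState {
  std::mutex lock;                          // ~ _control_lock
  Cause requested_cause = Cause::no_gc;     // ~ _requested_gc_cause, written only under `lock`
  std::atomic<GCMode> mode{GCMode::none};   // ~ the volatile _gc_mode

  // Fast-path read: cheap, but only advisory.
  GCMode mode_relaxed() const { return mode.load(std::memory_order_relaxed); }

  // Publish a request: re-check the mode and the pending request under the lock
  // so the decision and the state change are made against a consistent view.
  bool try_request(Cause cause) {
    if (mode_relaxed() != GCMode::none) {
      return false;                         // advisory reject, no lock taken
    }
    std::lock_guard<std::mutex> g(lock);
    if (mode.load() != GCMode::none) {
      return false;                         // lost the race with the control thread
    }
    if (requested_cause != Cause::no_gc) {
      return false;                         // another request is already pending
    }
    requested_cause = cause;                // the real code would also notify here
    return true;
  }
};

int main() {
  ControlState cs;
  return cs.try_request(Cause::concurrent_gc) ? 0 : 1;
}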
GCCause::Cause _requested_gc_cause; - volatile ShenandoahGenerationType _requested_generation; - ShenandoahGC::ShenandoahDegenPoint _degen_point; - ShenandoahGeneration* _degen_generation; - shenandoah_padding(0); - volatile GCMode _mode; - shenandoah_padding(1); + // This is the generation the request should operate on. + ShenandoahGeneration* _requested_generation; + + // The mode is read frequently by requesting threads and only ever written by the control thread. + // This may be read without taking the _control_lock, but should be read again under the lock + // before making any state changes (double-checked locking idiom). + volatile GCMode _gc_mode; + + // Only the control thread knows the correct degeneration point. This is used to have the + // control thread resume a STW cycle from the point where the concurrent cycle was cancelled. + ShenandoahGC::ShenandoahDegenPoint _degen_point; + + // A reference to the heap + ShenandoahGenerationalHeap* _heap; + + // This is used to keep track of whether to age objects during the current cycle. + uint _age_period; + + // This is true when the old generation cycle is in an interruptible phase (i.e., marking or + // preparing for mark). + ShenandoahSharedFlag _allow_old_preemption; public: ShenandoahGenerationalControlThread(); @@ -77,54 +98,68 @@ public: void request_gc(GCCause::Cause cause) override; // Return true if the request to start a concurrent GC for the given generation succeeded. - bool request_concurrent_gc(ShenandoahGenerationType generation); + bool request_concurrent_gc(ShenandoahGeneration* generation); - GCMode gc_mode() { - return _mode; + // Returns the current state of the control thread + GCMode gc_mode() const { + return _gc_mode; } private: - // Returns true if the cycle has been cancelled or degenerated. bool check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point); + // Executes one GC cycle + void run_gc_cycle(const ShenandoahGCRequest& request); + // Returns true if the old generation marking completed (i.e., final mark executed for old generation). bool resume_concurrent_old_cycle(ShenandoahOldGeneration* generation, GCCause::Cause cause); + + // Various service methods handle different gc cycle types void service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool reset_old_bitmap_specially); void service_stw_full_cycle(GCCause::Cause cause); - void service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point); + void service_stw_degenerated_cycle(const ShenandoahGCRequest& request); + void service_concurrent_normal_cycle(const ShenandoahGCRequest& request); + void service_concurrent_old_cycle(const ShenandoahGCRequest& request); void notify_gc_waiters(); - // Handle GC request. - // Blocks until GC is over. + // Blocks until at least one global GC cycle is complete. void handle_requested_gc(GCCause::Cause cause); - bool is_explicit_gc(GCCause::Cause cause) const; - bool is_implicit_gc(GCCause::Cause cause) const; - // Returns true if the old generation marking was interrupted to allow a young cycle. 
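Whether a young request may interrupt old marking is gated by the _allow_old_preemption flag declared above: the control thread arms it only while old marking is in an interruptible phase, and preempt_old_marking (declared just below) consumes it at most once per arming via try_unset(). The following is a rough stand-alone model of that one-shot permit, using std::atomic<bool>::exchange in place of ShenandoahSharedFlag::try_unset(); it is a sketch, not the VM code.

#include <atomic>
#include <cstdio>

struct OldPreemptionPermit {
  std::atomic<bool> allowed{false};

  void arm()    { allowed.store(true);  }   // control thread: entering an interruptible old phase
  void disarm() { allowed.store(false); }   // control thread: past the interruptible phase

  // Returns true for at most one caller after each arm().
  bool try_consume() { return allowed.exchange(false); }
};

// ~ preempt_old_marking(generation): only a young collection may preempt old marking.
static bool preempt_old_marking(OldPreemptionPermit& permit, bool generation_is_young) {
  return generation_is_young && permit.try_consume();
}

int main() {
  OldPreemptionPermit permit;
  permit.arm();
  std::printf("young preempts: %d\n", preempt_old_marking(permit, true));  // 1
  std::printf("second attempt: %d\n", preempt_old_marking(permit, true));  // 0, permit already consumed
  return 0;
}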
- bool preempt_old_marking(ShenandoahGenerationType generation); + bool preempt_old_marking(ShenandoahGeneration* generation); - void process_phase_timings(const ShenandoahGenerationalHeap* heap); - - void service_concurrent_normal_cycle(ShenandoahGenerationalHeap* heap, - ShenandoahGenerationType generation, - GCCause::Cause cause); - - void service_concurrent_old_cycle(ShenandoahGenerationalHeap* heap, - GCCause::Cause &cause); + // Flushes cycle timings to global timings and prints the phase timings for the last completed cycle. + void process_phase_timings() const; + // Set the gc mode and post a notification if it has changed. The overloaded variant should be used + // when the _control_lock is already held. void set_gc_mode(GCMode new_mode); + void set_gc_mode(MonitorLocker& ml, GCMode new_mode); + // Return printable name for the given gc mode. static const char* gc_mode_name(GCMode mode); - void notify_control_thread(); + // Takes the request lock and updates the requested cause and generation, then notifies the control thread. + // The overloaded variant should be used when the _control_lock is already held. + void notify_control_thread(GCCause::Cause cause, ShenandoahGeneration* generation); + void notify_control_thread(MonitorLocker& ml, GCCause::Cause cause, ShenandoahGeneration* generation); - void service_concurrent_cycle(ShenandoahHeap* heap, - ShenandoahGeneration* generation, - GCCause::Cause &cause, - bool do_old_gc_bootstrap); + // Notifies the control thread, but does not update the requested cause or generation. + // The overloaded variant should be used when the _control_lock is already held. + void notify_cancellation(GCCause::Cause cause); + void notify_cancellation(MonitorLocker& ml, GCCause::Cause cause); + // Configure the heap to age objects and regions if the aging period has elapsed. + void maybe_set_aging_cycle(); + + // Take the _control_lock and check for a request to run a gc cycle. If a request is found, + // the `prepare` methods are used to configure the heap and update heuristics accordingly. 
+ void check_for_request(ShenandoahGCRequest& request); + + GCMode prepare_for_allocation_failure_gc(ShenandoahGCRequest &request); + GCMode prepare_for_explicit_gc(ShenandoahGCRequest &request) const; + GCMode prepare_for_concurrent_gc(const ShenandoahGCRequest &request) const; }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONALCONTROLTHREAD_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 68b83cbf75a..fc1ff230688 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -583,6 +583,7 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : { // Initialize GC mode early, many subsequent initialization procedures depend on it initialize_mode(); + _cancelled_gc.set(GCCause::_no_gc); } #ifdef _MSC_VER @@ -993,13 +994,13 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) { // a) We experienced a GC that had good progress, or // b) We experienced at least one Full GC (whether or not it had good progress) - size_t original_count = shenandoah_policy()->full_gc_count(); - while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) { + const size_t original_count = shenandoah_policy()->full_gc_count(); + while (result == nullptr && should_retry_allocation(original_count)) { control_thread()->handle_alloc_failure(req, true); result = allocate_memory_under_lock(req, in_new_region); } if (result != nullptr) { - // If our allocation request has been satisifed after it initially failed, we count this as good gc progress + // If our allocation request has been satisfied after it initially failed, we count this as good gc progress notify_gc_progress(); } if (log_develop_is_enabled(Debug, gc, alloc)) { @@ -1050,6 +1051,11 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) { return result; } +inline bool ShenandoahHeap::should_retry_allocation(size_t original_full_gc_count) const { + return shenandoah_policy()->full_gc_count() == original_full_gc_count + && !shenandoah_policy()->is_at_shutdown(); +} + HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) { // If we are dealing with mutator allocation, then we may need to block for safepoint. 
// We cannot block for safepoint for GC allocations, because there is a high chance @@ -2120,9 +2126,9 @@ size_t ShenandoahHeap::tlab_used(Thread* thread) const { return _free_set->used(); } -bool ShenandoahHeap::try_cancel_gc() { - jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE); - return prev == CANCELLABLE; +bool ShenandoahHeap::try_cancel_gc(GCCause::Cause cause) { + jbyte prev = _cancelled_gc.cmpxchg(cause, GCCause::_no_gc); + return prev == GCCause::_no_gc; } void ShenandoahHeap::cancel_concurrent_mark() { @@ -2136,13 +2142,15 @@ void ShenandoahHeap::cancel_concurrent_mark() { ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking(); } -void ShenandoahHeap::cancel_gc(GCCause::Cause cause) { - if (try_cancel_gc()) { +bool ShenandoahHeap::cancel_gc(GCCause::Cause cause) { + if (try_cancel_gc(cause)) { FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause)); - log_info(gc)("%s", msg.buffer()); + log_info(gc,thread)("%s", msg.buffer()); Events::log(Thread::current(), "%s", msg.buffer()); _cancel_requested_time = os::elapsedTime(); + return true; } + return false; } uint ShenandoahHeap::max_workers() { @@ -2155,18 +2163,10 @@ void ShenandoahHeap::stop() { // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown _shenandoah_policy->record_shutdown(); - // Step 0a. Stop reporting on gc thread cpu utilization + // Step 1. Stop reporting on gc thread cpu utilization mmu_tracker()->stop(); - // Step 1. Notify control thread that we are in shutdown. - // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown. - // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below. - control_thread()->prepare_for_graceful_shutdown(); - - // Step 2. Notify GC workers that we are cancelling GC. - cancel_gc(GCCause::_shenandoah_stop_vm); - - // Step 3. Wait until GC worker exits normally. + // Step 2. Wait until GC worker exits normally (this will cancel any ongoing GC). control_thread()->stop(); // Stop 4. Shutdown uncommit thread. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp index 785e1742b0c..33d2db0b2f1 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp @@ -430,35 +430,38 @@ public: private: void manage_satb_barrier(bool active); - enum CancelState { - // Normal state. GC has not been cancelled and is open for cancellation. - // Worker threads can suspend for safepoint. - CANCELLABLE, - - // GC has been cancelled. Worker threads can not suspend for - // safepoint but must finish their work as soon as possible. - CANCELLED - }; - + // Records the time of the first successful cancellation request. This is used to measure + // the responsiveness of the heuristic when starting a cycle. double _cancel_requested_time; - ShenandoahSharedEnumFlag _cancelled_gc; + + // Indicates the reason the current GC has been cancelled (GCCause::_no_gc means the gc is not cancelled). + ShenandoahSharedEnumFlag _cancelled_gc; // Returns true if cancel request was successfully communicated. // Returns false if some other thread already communicated cancel // request. A true return value does not mean GC has been // cancelled, only that the process of cancelling GC has begun. 
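With the CANCELLABLE/CANCELLED enum gone, the cancellation state is simply "which cause, if any, cancelled the GC", and only the first compare-and-swap from _no_gc to a real cause wins. The stand-alone sketch below models that protocol with std::atomic and a made-up Cause enum in place of ShenandoahSharedEnumFlag and GCCause::Cause; it illustrates the first-cancellation-wins semantics, not the VM implementation.

#include <atomic>
#include <cstdio>

enum class Cause : int { no_gc, alloc_failure, stop_vm, preempt_for_young };

struct CancellationState {
  std::atomic<Cause> cancelled{Cause::no_gc};

  // Returns true only for the thread whose request actually cancelled the GC;
  // later callers see the original cause preserved.
  bool try_cancel(Cause cause) {
    Cause expected = Cause::no_gc;
    return cancelled.compare_exchange_strong(expected, cause);
  }

  bool  is_cancelled() const    { return cancelled.load() != Cause::no_gc; }
  Cause cancelled_cause() const { return cancelled.load(); }
  void  clear()                 { cancelled.store(Cause::no_gc); }
};

int main() {
  CancellationState s;
  std::printf("first cancel:  %d\n", s.try_cancel(Cause::alloc_failure));    // 1
  std::printf("second cancel: %d\n", s.try_cancel(Cause::stop_vm));          // 0
  std::printf("kept cause:    %d\n", static_cast<int>(s.cancelled_cause())); // alloc_failure
  return 0;
}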
- bool try_cancel_gc(); + bool try_cancel_gc(GCCause::Cause cause); public: + // True if gc has been cancelled inline bool cancelled_gc() const; + + // Used by workers in the GC cycle to detect cancellation and honor STS requirements inline bool check_cancelled_gc_and_yield(bool sts_active = true); + // This indicates the reason the last GC cycle was cancelled. + inline GCCause::Cause cancelled_cause() const; + + // Clears the cancellation cause and optionally resets the oom handler (cancelling an + // old mark does _not_ touch the oom handler). inline void clear_cancelled_gc(bool clear_oom_handler = true); void cancel_concurrent_mark(); - void cancel_gc(GCCause::Cause cause); -public: + // Returns true if and only if this call caused a gc to be cancelled. + bool cancel_gc(GCCause::Cause cause); + // Returns true if the soft maximum heap has been changed using management APIs. bool check_soft_max_changed(); @@ -690,6 +693,9 @@ private: HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size); HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size); + // We want to retry an unsuccessful attempt at allocation until at least a full gc. + bool should_retry_allocation(size_t original_full_gc_count) const; + public: HeapWord* allocate_memory(ShenandoahAllocRequest& request); HeapWord* mem_allocate(size_t size, bool* what) override; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp index 461447cf9ba..13c203d423c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp @@ -252,7 +252,7 @@ inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, narrowOop compare) } inline bool ShenandoahHeap::cancelled_gc() const { - return _cancelled_gc.get() == CANCELLED; + return _cancelled_gc.get() != GCCause::_no_gc; } inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) { @@ -264,8 +264,12 @@ inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) { return cancelled_gc(); } +inline GCCause::Cause ShenandoahHeap::cancelled_cause() const { + return _cancelled_gc.get(); +} + inline void ShenandoahHeap::clear_cancelled_gc(bool clear_oom_handler) { - _cancelled_gc.set(CANCELLABLE); + _cancelled_gc.set(GCCause::_no_gc); if (_cancel_requested_time > 0) { log_debug(gc)("GC cancellation took %.3fs", (os::elapsedTime() - _cancel_requested_time)); _cancel_requested_time = 0; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp index 38887217255..9b030905b6d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp @@ -495,7 +495,7 @@ const char* ShenandoahOldGeneration::state_name(State state) { void ShenandoahOldGeneration::transition_to(State new_state) { if (_state != new_state) { - log_debug(gc)("Old generation transition from %s to %s", state_name(_state), state_name(new_state)); + log_debug(gc, thread)("Old generation transition from %s to %s", state_name(_state), state_name(new_state)); EventMark event("Old was %s, now is %s", state_name(_state), state_name(new_state)); validate_transition(new_state); _state = new_state; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp index 752ad743520..f9e6f714c7f 100644 --- 
a/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp @@ -33,15 +33,14 @@ #include "logging/log.hpp" ShenandoahRegulatorThread::ShenandoahRegulatorThread(ShenandoahGenerationalControlThread* control_thread) : - ConcurrentGCThread(), + _heap(ShenandoahHeap::heap()), _control_thread(control_thread), _sleep(ShenandoahControlIntervalMin), _last_sleep_adjust_time(os::elapsedTime()) { shenandoah_assert_generational(); - ShenandoahHeap* heap = ShenandoahHeap::heap(); - _old_heuristics = heap->old_generation()->heuristics(); - _young_heuristics = heap->young_generation()->heuristics(); - _global_heuristics = heap->global_generation()->heuristics(); + _old_heuristics = _heap->old_generation()->heuristics(); + _young_heuristics = _heap->young_generation()->heuristics(); + _global_heuristics = _heap->global_generation()->heuristics(); set_name("Shenandoah Regulator Thread"); create_and_start(); @@ -62,7 +61,7 @@ void ShenandoahRegulatorThread::regulate_young_and_old_cycles() { ShenandoahGenerationalControlThread::GCMode mode = _control_thread->gc_mode(); if (mode == ShenandoahGenerationalControlThread::none) { if (should_start_metaspace_gc()) { - if (request_concurrent_gc(GLOBAL)) { + if (request_concurrent_gc(_heap->global_generation())) { // Some of vmTestbase/metaspace tests depend on following line to count GC cycles _global_heuristics->log_trigger("%s", GCCause::to_string(GCCause::_metadata_GC_threshold)); _global_heuristics->cancel_trigger_request(); @@ -75,10 +74,14 @@ void ShenandoahRegulatorThread::regulate_young_and_old_cycles() { log_debug(gc)("Heuristics request for old collection accepted"); _young_heuristics->cancel_trigger_request(); _old_heuristics->cancel_trigger_request(); - } else if (request_concurrent_gc(YOUNG)) { + } else if (request_concurrent_gc(_heap->young_generation())) { log_debug(gc)("Heuristics request for young collection accepted"); _young_heuristics->cancel_trigger_request(); } + } else if (_old_heuristics->should_resume_old_cycle() || _old_heuristics->should_start_gc()) { + if (request_concurrent_gc(_heap->old_generation())) { + log_debug(gc)("Heuristics request to resume old collection accepted"); + } } } } else if (mode == ShenandoahGenerationalControlThread::servicing_old) { @@ -132,19 +135,19 @@ void ShenandoahRegulatorThread::regulator_sleep() { } } -bool ShenandoahRegulatorThread::start_old_cycle() { - return _old_heuristics->should_start_gc() && request_concurrent_gc(OLD); +bool ShenandoahRegulatorThread::start_old_cycle() const { + return _old_heuristics->should_start_gc() && request_concurrent_gc(_heap->old_generation()); } -bool ShenandoahRegulatorThread::start_young_cycle() { - return _young_heuristics->should_start_gc() && request_concurrent_gc(YOUNG); +bool ShenandoahRegulatorThread::start_young_cycle() const { + return _young_heuristics->should_start_gc() && request_concurrent_gc(_heap->young_generation()); } -bool ShenandoahRegulatorThread::start_global_cycle() { - return _global_heuristics->should_start_gc() && request_concurrent_gc(GLOBAL); +bool ShenandoahRegulatorThread::start_global_cycle() const { + return _global_heuristics->should_start_gc() && request_concurrent_gc(_heap->global_generation()); } -bool ShenandoahRegulatorThread::request_concurrent_gc(ShenandoahGenerationType generation) { +bool ShenandoahRegulatorThread::request_concurrent_gc(ShenandoahGeneration* generation) const { double now = os::elapsedTime(); bool accepted = 
_control_thread->request_concurrent_gc(generation); if (LogTarget(Debug, gc, thread)::is_enabled() && accepted) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.hpp index f9f7e25f97c..a72d6004beb 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.hpp @@ -26,8 +26,11 @@ #include "gc/shared/concurrentGCThread.hpp" +class ShenandoahHeap; class ShenandoahHeuristics; +class ShenandoahGeneration; class ShenandoahGenerationalControlThread; +class ShenandoahOldHeuristics; /* * The purpose of this class (and thread) is to allow us to continue @@ -58,9 +61,10 @@ class ShenandoahRegulatorThread: public ConcurrentGCThread { void regulate_young_and_global_cycles(); // These return true if a cycle was started. - bool start_old_cycle(); - bool start_young_cycle(); - bool start_global_cycle(); + bool start_old_cycle() const; + bool start_young_cycle() const; + bool start_global_cycle() const; + bool resume_old_cycle(); // The generational mode can only unload classes in a global cycle. The regulator // thread itself will trigger a global cycle if metaspace is out of memory. @@ -70,11 +74,12 @@ class ShenandoahRegulatorThread: public ConcurrentGCThread { void regulator_sleep(); // Provides instrumentation to track how long it takes to acknowledge a request. - bool request_concurrent_gc(ShenandoahGenerationType generation); + bool request_concurrent_gc(ShenandoahGeneration* generation) const; + ShenandoahHeap* _heap; ShenandoahGenerationalControlThread* _control_thread; ShenandoahHeuristics* _young_heuristics; - ShenandoahHeuristics* _old_heuristics; + ShenandoahOldHeuristics* _old_heuristics; ShenandoahHeuristics* _global_heuristics; uint _sleep; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp index 9bbb76b3e48..109e940e2bd 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp @@ -475,7 +475,7 @@ HeapWord* ShenandoahScanRemembered::addr_for_cluster(size_t cluster_no) { void ShenandoahScanRemembered::roots_do(OopIterateClosure* cl) { ShenandoahHeap* heap = ShenandoahHeap::heap(); bool old_bitmap_stable = heap->old_generation()->is_mark_complete(); - log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable)); + log_debug(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable)); for (size_t i = 0, n = heap->num_regions(); i < n; ++i) { ShenandoahHeapRegion* region = heap->get_region(i); if (region->is_old() && region->is_active() && !region->is_cset()) { @@ -653,7 +653,7 @@ ShenandoahScanRememberedTask::ShenandoahScanRememberedTask(ShenandoahObjToScanQu WorkerTask("Scan Remembered Set"), _queue_set(queue_set), _old_queue_set(old_queue_set), _rp(rp), _work_list(work_list), _is_concurrent(is_concurrent) { bool old_bitmap_stable = ShenandoahHeap::heap()->old_generation()->is_mark_complete(); - log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable)); + log_debug(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable)); } void ShenandoahScanRememberedTask::work(uint worker_id) {
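Taken together, the regulator changes above mean the regulator now talks to the control thread in terms of concrete ShenandoahGeneration objects and, when the control thread is idle, works through a prioritized set of triggers: metaspace pressure, an old-generation trigger (which starts a young bootstrap cycle), a plain young trigger, and finally pending old work via should_resume_old_cycle. The sketch below is a deliberately simplified, hypothetical model of that poll; the actual nesting of checks in regulate_young_and_old_cycles differs in detail.

#include <cstdio>

// All names below are illustrative stand-ins for the heuristics the regulator queries.
struct Triggers {
  bool metaspace_pressure = false;  // ~ should_start_metaspace_gc()
  bool old_trigger        = false;  // ~ old heuristics should_start_gc()
  bool young_trigger      = false;  // ~ young heuristics should_start_gc()
  bool old_work_pending   = false;  // ~ should_resume_old_cycle()
};

enum class Request { none, global, bootstrap_old, young, resume_old };

static Request regulate_once(const Triggers& t) {
  if (t.metaspace_pressure) return Request::global;        // only a global cycle unloads classes
  if (t.old_trigger)        return Request::bootstrap_old; // young bootstrap, then old marking
  if (t.young_trigger)      return Request::young;
  if (t.old_work_pending)   return Request::resume_old;    // pick interrupted old marking back up
  return Request::none;
}

int main() {
  Triggers t;
  t.old_work_pending = true;
  std::printf("request=%d\n", static_cast<int>(regulate_once(t)));
  return 0;
}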