mirror of https://github.com/openjdk/jdk.git
synced 2026-01-31 21:48:23 +00:00

8349094: GenShen: Race between control and regulator threads may violate assertions

Reviewed-by: ysr, kdnilsen

parent 99fb350bf6
commit 3a8a432c05
@@ -74,6 +74,7 @@ class GCCause : public AllStatic {
     _shenandoah_stop_vm,
     _shenandoah_allocation_failure_evac,
+    _shenandoah_humongous_allocation_failure,
     _shenandoah_concurrent_gc,
     _shenandoah_upgrade_to_full_gc,
@@ -60,7 +60,6 @@ int ShenandoahOldHeuristics::compare_by_index(RegionData a, RegionData b) {
 ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap) :
   ShenandoahHeuristics(generation),
   _heap(gen_heap),
-  _old_gen(generation),
   _first_pinned_candidate(NOT_FOUND),
   _last_old_collection_candidate(0),
   _next_old_collection_candidate(0),
@@ -567,9 +566,9 @@ void ShenandoahOldHeuristics::set_trigger_if_old_is_fragmented(size_t first_old_
   // allocation request will require a STW full GC.
   size_t allowed_old_gen_span = num_regions - (ShenandoahGenerationalHumongousReserve * num_regions) / 100;

-  size_t old_available = _old_gen->available() / HeapWordSize;
+  size_t old_available = _old_generation->available() / HeapWordSize;
   size_t region_size_words = ShenandoahHeapRegion::region_size_words();
-  size_t old_unaffiliated_available = _old_gen->free_unaffiliated_regions() * region_size_words;
+  size_t old_unaffiliated_available = _old_generation->free_unaffiliated_regions() * region_size_words;
   assert(old_available >= old_unaffiliated_available, "sanity");
   size_t old_fragmented_available = old_available - old_unaffiliated_available;
@@ -603,12 +602,12 @@ void ShenandoahOldHeuristics::set_trigger_if_old_is_fragmented(size_t first_old_
 }

 void ShenandoahOldHeuristics::set_trigger_if_old_is_overgrown() {
-  size_t old_used = _old_gen->used() + _old_gen->get_humongous_waste();
-  size_t trigger_threshold = _old_gen->usage_trigger_threshold();
+  size_t old_used = _old_generation->used() + _old_generation->get_humongous_waste();
+  size_t trigger_threshold = _old_generation->usage_trigger_threshold();
   // Detects unsigned arithmetic underflow
   assert(old_used <= _heap->capacity(),
          "Old used (%zu, %zu) must not be more than heap capacity (%zu)",
-         _old_gen->used(), _old_gen->get_humongous_waste(), _heap->capacity());
+         _old_generation->used(), _old_generation->get_humongous_waste(), _heap->capacity());
   if (old_used > trigger_threshold) {
     _growth_trigger = true;
   }
@@ -620,13 +619,32 @@ void ShenandoahOldHeuristics::evaluate_triggers(size_t first_old_region, size_t
   set_trigger_if_old_is_overgrown();
 }

+bool ShenandoahOldHeuristics::should_resume_old_cycle() {
+  // If we are preparing to mark old, or if we are already marking old, then try to continue that work.
+  if (_old_generation->is_concurrent_mark_in_progress()) {
+    assert(_old_generation->state() == ShenandoahOldGeneration::MARKING, "Unexpected old gen state: %s", _old_generation->state_name());
+    log_trigger("Resume marking old");
+    return true;
+  }
+
+  if (_old_generation->is_preparing_for_mark()) {
+    assert(_old_generation->state() == ShenandoahOldGeneration::FILLING, "Unexpected old gen state: %s", _old_generation->state_name());
+    log_trigger("Resume preparing to mark old");
+    return true;
+  }
+
+  return false;
+}
+
 bool ShenandoahOldHeuristics::should_start_gc() {
   // Cannot start a new old-gen GC until previous one has finished.
-  //
-  // Future refinement: under certain circumstances, we might be more sophisticated about this choice.
-  // For example, we could choose to abandon the previous old collection before it has completed evacuations.
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  if (!_old_generation->can_start_gc() || heap->collection_set()->has_old_regions()) {
+  const ShenandoahHeap* heap = ShenandoahHeap::heap();
+  if (_old_generation->is_doing_mixed_evacuations()) {
+    // Do not try to start an old cycle if we are waiting for old regions to be evacuated (we need
+    // a young cycle for this). Note that the young heuristic has a feature to expedite old evacuations.
+    // Future refinement: under certain circumstances, we might be more sophisticated about this choice.
+    // For example, we could choose to abandon the previous old collection before it has completed evacuations.
+    log_debug(gc)("Not starting an old cycle because we are waiting for mixed evacuations");
+    return false;
+  }
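should_resume_old_cycle() is cheap and purely state-based, while should_start_gc() runs the full trigger heuristics. The consumer of this split is the regulator thread (see the ShenandoahRegulatorThread hunks further below); a minimal sketch of that dispatch, where request_old_gc() is a hypothetical stand-in for _control_thread->request_concurrent_gc(_heap->old_generation()):

// Sketch only: how a regulator-style caller combines the two predicates.
bool request_old_gc();  // stand-in, declared only for illustration

void regulate_old_cycle(ShenandoahOldHeuristics* old_heuristics) {
  if (old_heuristics->should_resume_old_cycle()   // old gen already FILLING or MARKING
      || old_heuristics->should_start_gc()) {     // trigger heuristics say start fresh
    request_old_gc();
  }
}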
@@ -53,7 +53,6 @@ private:
   static uint NOT_FOUND;

   ShenandoahGenerationalHeap* _heap;
-  ShenandoahOldGeneration* _old_gen;

   // After final marking of the old generation, this heuristic will select
   // a set of candidate regions to be included in subsequent mixed collections.
@@ -186,6 +185,9 @@ public:
   bool should_start_gc() override;

+  // Returns true if the old generation needs to prepare for marking, or continue marking.
+  bool should_resume_old_cycle();
+
   void record_success_concurrent() override;

   void record_success_degenerated() override;
@@ -90,7 +90,7 @@ bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, Basi
 void ShenandoahBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
 #if COMPILER2_OR_JVMCI
   assert(!ReduceInitialCardMarks || !ShenandoahCardBarrier || ShenandoahGenerationalHeap::heap()->is_in_young(new_obj),
-         "Error: losing card mark on initialzing store to old gen");
+         "Allocating new object outside of young generation: " INTPTR_FORMAT, p2i(new_obj));
 #endif // COMPILER2_OR_JVMCI
   assert(thread->deferred_card_mark().is_empty(), "We don't use this");
 }
@@ -123,25 +123,28 @@ void ShenandoahCollectorPolicy::record_shutdown() {
   _in_shutdown.set();
 }

-bool ShenandoahCollectorPolicy::is_at_shutdown() {
+bool ShenandoahCollectorPolicy::is_at_shutdown() const {
   return _in_shutdown.is_set();
 }

-bool is_explicit_gc(GCCause::Cause cause) {
+bool ShenandoahCollectorPolicy::is_explicit_gc(GCCause::Cause cause) {
   return GCCause::is_user_requested_gc(cause)
-      || GCCause::is_serviceability_requested_gc(cause);
+      || GCCause::is_serviceability_requested_gc(cause)
+      || cause == GCCause::_wb_full_gc
+      || cause == GCCause::_wb_young_gc;
 }

 bool is_implicit_gc(GCCause::Cause cause) {
   return cause != GCCause::_no_gc
       && cause != GCCause::_shenandoah_concurrent_gc
       && cause != GCCause::_allocation_failure
-      && !is_explicit_gc(cause);
+      && !ShenandoahCollectorPolicy::is_explicit_gc(cause);
 }

 #ifdef ASSERT
 bool is_valid_request(GCCause::Cause cause) {
-  return is_explicit_gc(cause)
+  return ShenandoahCollectorPolicy::is_explicit_gc(cause)
       || ShenandoahCollectorPolicy::is_shenandoah_gc(cause)
       || cause == GCCause::_metadata_GC_clear_soft_refs
       || cause == GCCause::_codecache_GC_aggressive
       || cause == GCCause::_codecache_GC_threshold
@@ -153,6 +156,22 @@ bool is_valid_request(GCCause::Cause cause) {
 }
 #endif

+bool ShenandoahCollectorPolicy::is_shenandoah_gc(GCCause::Cause cause) {
+  return cause == GCCause::_allocation_failure
+      || cause == GCCause::_shenandoah_stop_vm
+      || cause == GCCause::_shenandoah_allocation_failure_evac
+      || cause == GCCause::_shenandoah_humongous_allocation_failure
+      || cause == GCCause::_shenandoah_concurrent_gc
+      || cause == GCCause::_shenandoah_upgrade_to_full_gc;
+}
+
+bool ShenandoahCollectorPolicy::is_allocation_failure(GCCause::Cause cause) {
+  return cause == GCCause::_allocation_failure
+      || cause == GCCause::_shenandoah_allocation_failure_evac
+      || cause == GCCause::_shenandoah_humongous_allocation_failure;
+}
+
+bool ShenandoahCollectorPolicy::is_requested_gc(GCCause::Cause cause) {
+  return is_explicit_gc(cause) || is_implicit_gc(cause);
+}
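A hedged illustration of how the new static classifiers partition causes; each expectation below follows directly from the definitions in this hunk (written HotSpot-style, i.e. assuming these lines sit inside HotSpot sources where assert(cond, msg) is available):

// Illustrative checks only, derived from the predicates defined above.
assert(ShenandoahCollectorPolicy::is_explicit_gc(GCCause::_wb_full_gc),
       "whitebox GCs are now classified as explicit");
assert(ShenandoahCollectorPolicy::is_shenandoah_gc(GCCause::_shenandoah_stop_vm),
       "stop-vm is an internal Shenandoah cause");
assert(ShenandoahCollectorPolicy::is_allocation_failure(GCCause::_shenandoah_humongous_allocation_failure),
       "humongous allocation failure now has its own cause and counts as an allocation failure");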
@@ -77,9 +77,9 @@ public:
   void record_collection_cause(GCCause::Cause cause);

   void record_shutdown();
-  bool is_at_shutdown();
+  bool is_at_shutdown() const;

-  ShenandoahTracer* tracer() {return _tracer;}
+  ShenandoahTracer* tracer() const {return _tracer;}

   void print_gc_stats(outputStream* out) const;
@@ -90,15 +90,18 @@ public:
   // If the heuristics find that the number of consecutive degenerated cycles is above
   // ShenandoahFullGCThreshold, then they will initiate a Full GC upon an allocation
   // failure.
-  inline size_t consecutive_degenerated_gc_count() const {
+  size_t consecutive_degenerated_gc_count() const {
     return _consecutive_degenerated_gcs;
   }

+  static bool is_allocation_failure(GCCause::Cause cause);
+  static bool is_shenandoah_gc(GCCause::Cause cause);
+  static bool is_requested_gc(GCCause::Cause cause);
+  static bool is_explicit_gc(GCCause::Cause cause);
   static bool should_run_full_gc(GCCause::Cause cause);
   static bool should_handle_requested_gc(GCCause::Cause cause);

-  inline size_t consecutive_young_gc_count() const {
+  size_t consecutive_young_gc_count() const {
     return _consecutive_young_gcs;
   }
@@ -50,7 +50,6 @@ ShenandoahControlThread::ShenandoahControlThread() :
 void ShenandoahControlThread::run_service() {
   ShenandoahHeap* const heap = ShenandoahHeap::heap();

-  const GCMode default_mode = concurrent_normal;
   const GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
   int sleep = ShenandoahControlIntervalMin;
@@ -59,9 +58,14 @@ void ShenandoahControlThread::run_service() {
   ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();
   ShenandoahHeuristics* const heuristics = heap->heuristics();
-  while (!in_graceful_shutdown() && !should_terminate()) {
+  while (!should_terminate()) {
+    const GCCause::Cause cancelled_cause = heap->cancelled_cause();
+    if (cancelled_cause == GCCause::_shenandoah_stop_vm) {
+      break;
+    }
+
     // Figure out if we have pending requests.
-    const bool alloc_failure_pending = _alloc_failure_gc.is_set();
+    const bool alloc_failure_pending = ShenandoahCollectorPolicy::is_allocation_failure(cancelled_cause);
     const bool is_gc_requested = _gc_requested.is_set();
     const GCCause::Cause requested_gc_cause = _requested_gc_cause;
@@ -254,11 +258,6 @@ void ShenandoahControlThread::run_service() {
     }
     os::naked_short_sleep(sleep);
   }
-
-  // Wait for the actual stop(), can't leave run_service() earlier.
-  while (!should_terminate()) {
-    os::naked_short_sleep(ShenandoahControlIntervalMin);
-  }
 }

 void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
@@ -322,19 +321,24 @@ void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cau
 bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   if (heap->cancelled_gc()) {
-    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
-    if (!in_graceful_shutdown()) {
+    if (heap->cancelled_cause() == GCCause::_shenandoah_stop_vm) {
+      return true;
+    }
+
+    if (ShenandoahCollectorPolicy::is_allocation_failure(heap->cancelled_cause())) {
       assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
               "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
       _degen_point = point;
+      return true;
     }
-    return true;
+
+    fatal("Unexpected reason for cancellation: %s", GCCause::to_string(heap->cancelled_cause()));
   }
   return false;
 }

 void ShenandoahControlThread::stop_service() {
-  // Nothing to do here.
+  ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_stop_vm);
 }

 void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
@@ -363,6 +367,11 @@ void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
 }

 void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
+  if (should_terminate()) {
+    log_info(gc)("Control thread is terminating, no more GCs");
+    return;
+  }
+
   // For normal requested GCs (System.gc) we want to block the caller. However,
   // for whitebox requested GC, we want to initiate the GC and return immediately.
   // The whitebox caller thread will arrange for itself to wait until the GC notifies
@@ -385,7 +394,7 @@ void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
   MonitorLocker ml(&_gc_waiters_lock);
   size_t current_gc_id = get_gc_id();
   size_t required_gc_id = current_gc_id + 1;
-  while (current_gc_id < required_gc_id) {
+  while (current_gc_id < required_gc_id && !should_terminate()) {
     // Although setting gc request is under _gc_waiters_lock, but read side (run_service())
     // does not take the lock. We need to enforce following order, so that read side sees
     // latest requested gc cause when the flag is set.
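The ordering constraint described in the comment above is the usual publish-then-flag pattern: the requested cause must be stored before the flag that announces it, so the lock-free reader never sees the flag without the cause. A minimal standalone sketch of that idiom (using std::atomic as a stand-in for HotSpot's OrderAccess machinery, with illustrative names):

#include <atomic>

std::atomic<int>  requested_cause{0};   // stand-in for _requested_gc_cause
std::atomic<bool> gc_requested{false};  // stand-in for _gc_requested

// Writer (requesting thread): publish the cause, then raise the flag.
void request(int cause) {
  requested_cause.store(cause, std::memory_order_relaxed);
  gc_requested.store(true, std::memory_order_release);  // release: cause is visible first
}

// Reader (control thread main loop): check the flag, then read the cause.
int poll() {
  if (gc_requested.load(std::memory_order_acquire)) {   // acquire pairs with the release above
    return requested_cause.load(std::memory_order_relaxed);
  }
  return 0;  // no request pending
}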
@@ -25,6 +25,8 @@
 #include "gc/shared/gc_globals.hpp"
 #include "gc/shenandoah/shenandoahController.hpp"

+#include "shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
@@ -37,14 +39,6 @@ size_t ShenandoahController::reset_allocs_seen() {
   return Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);
 }

-void ShenandoahController::prepare_for_graceful_shutdown() {
-  _graceful_shutdown.set();
-}
-
-bool ShenandoahController::in_graceful_shutdown() {
-  return _graceful_shutdown.is_set();
-}
-
 void ShenandoahController::update_gc_id() {
   Atomic::inc(&_gc_id);
 }
@@ -53,59 +47,38 @@ size_t ShenandoahController::get_gc_id() {
   return Atomic::load(&_gc_id);
 }

-void ShenandoahController::handle_alloc_failure(ShenandoahAllocRequest& req, bool block) {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-
+void ShenandoahController::handle_alloc_failure(const ShenandoahAllocRequest& req, bool block) {
   assert(current()->is_Java_thread(), "expect Java thread here");
-  bool is_humongous = ShenandoahHeapRegion::requires_humongous(req.size());
-
-  if (try_set_alloc_failure_gc(is_humongous)) {
-    // Only report the first allocation failure
-    log_info(gc)("Failed to allocate %s, %zu%s",
-                 req.type_string(),
-                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));
+  const bool is_humongous = ShenandoahHeapRegion::requires_humongous(req.size());
+  const GCCause::Cause cause = is_humongous ? GCCause::_shenandoah_humongous_allocation_failure : GCCause::_allocation_failure;

-    // Now that alloc failure GC is scheduled, we can abort everything else
-    heap->cancel_gc(GCCause::_allocation_failure);
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  if (heap->cancel_gc(cause)) {
+    log_info(gc)("Failed to allocate %s, " PROPERFMT, req.type_string(), PROPERFMTARGS(req.size() * HeapWordSize));
+    request_gc(cause);
   }

   if (block) {
     MonitorLocker ml(&_alloc_failure_waiters_lock);
-    while (is_alloc_failure_gc()) {
+    while (!should_terminate() && ShenandoahCollectorPolicy::is_allocation_failure(heap->cancelled_cause())) {
       ml.wait();
     }
   }
 }

 void ShenandoahController::handle_alloc_failure_evac(size_t words) {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  bool is_humongous = ShenandoahHeapRegion::requires_humongous(words);
-
-  if (try_set_alloc_failure_gc(is_humongous)) {
-    // Only report the first allocation failure
-    log_info(gc)("Failed to allocate %zu%s for evacuation",
-                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  const bool is_humongous = ShenandoahHeapRegion::requires_humongous(words);
+  const GCCause::Cause cause = is_humongous ? GCCause::_shenandoah_humongous_allocation_failure : GCCause::_shenandoah_allocation_failure_evac;
+
+  if (heap->cancel_gc(cause)) {
+    log_info(gc)("Failed to allocate " PROPERFMT " for evacuation", PROPERFMTARGS(words * HeapWordSize));
   }
-
-  // Forcefully report allocation failure
-  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
 }

 void ShenandoahController::notify_alloc_failure_waiters() {
-  _alloc_failure_gc.unset();
-  _humongous_alloc_failure_gc.unset();
   MonitorLocker ml(&_alloc_failure_waiters_lock);
   ml.notify_all();
 }
-
-bool ShenandoahController::try_set_alloc_failure_gc(bool is_humongous) {
-  if (is_humongous) {
-    _humongous_alloc_failure_gc.try_set();
-  }
-  return _alloc_failure_gc.try_set();
-}
-
-bool ShenandoahController::is_alloc_failure_gc() {
-  return _alloc_failure_gc.is_set();
-}
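The key behavioral change here: cancel_gc() now returns whether this caller performed the cancellation, so the separate try_set_alloc_failure_gc flag dance goes away, and waiters block on the cancellation cause itself. A condensed standalone sketch of the resulting protocol, with std:: primitives standing in for HotSpot's shared flags and monitors (names are illustrative):

#include <atomic>
#include <condition_variable>
#include <mutex>

std::atomic<int> cancelled_cause{0};   // 0 plays the role of GCCause::_no_gc
std::mutex waiters_mutex;              // stand-in for _alloc_failure_waiters_lock
std::condition_variable waiters;

// First caller to install a cause "wins"; everyone else sees false.
bool cancel_gc(int cause) {
  int expected = 0;
  return cancelled_cause.compare_exchange_strong(expected, cause);
}

// Allocation-failure path: only the winning thread reports and requests a GC;
// all callers then block until some cycle clears the cause again.
void handle_alloc_failure(int cause) {
  if (cancel_gc(cause)) {
    // log the failure and request a GC (elided)
  }
  std::unique_lock<std::mutex> lock(waiters_mutex);
  waiters.wait(lock, [] { return cancelled_cause.load() == 0; });
}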
@@ -36,27 +36,25 @@
  */
 class ShenandoahController: public ConcurrentGCThread {
 private:
-  ShenandoahSharedFlag _graceful_shutdown;
-
   shenandoah_padding(0);
   volatile size_t _allocs_seen;
   shenandoah_padding(1);
+  // A monotonically increasing GC count.
   volatile size_t _gc_id;
   shenandoah_padding(2);

 protected:
-  ShenandoahSharedFlag _alloc_failure_gc;
-  ShenandoahSharedFlag _humongous_alloc_failure_gc;
-
   // While we could have a single lock for these, it may risk unblocking
   // GC waiters when alloc failure GC cycle finishes. We want instead
   // to make complete explicit cycle for demanding customers.
   Monitor _alloc_failure_waiters_lock;
   Monitor _gc_waiters_lock;

+  // Increments the internal GC count.
+  void update_gc_id();
+
 public:
   ShenandoahController():
     ConcurrentGCThread(),
     _allocs_seen(0),
     _gc_id(0),
     _alloc_failure_waiters_lock(Mutex::safepoint-2, "ShenandoahAllocFailureGC_lock", true),
@@ -68,38 +66,25 @@ public:
   virtual void request_gc(GCCause::Cause cause) = 0;

   // This cancels the collection cycle and has an option to block
-  // until another cycle runs and clears the alloc failure gc flag.
-  void handle_alloc_failure(ShenandoahAllocRequest& req, bool block);
+  // until another cycle completes successfully.
+  void handle_alloc_failure(const ShenandoahAllocRequest& req, bool block);

   // Invoked for allocation failures during evacuation. This cancels
   // the collection cycle without blocking.
   void handle_alloc_failure_evac(size_t words);

-  // Return true if setting the flag which indicates allocation failure succeeds.
-  bool try_set_alloc_failure_gc(bool is_humongous);
-
   // Notify threads waiting for GC to complete.
   void notify_alloc_failure_waiters();

-  // True if allocation failure flag has been set.
-  bool is_alloc_failure_gc();
-
   // This is called for every allocation. The control thread accumulates
   // this value when idle. During the gc cycle, the control resets it
   // and reports it to the pacer.
   void pacing_notify_alloc(size_t words);

   // Zeros out the number of allocations seen since the last GC cycle.
   size_t reset_allocs_seen();

-  // These essentially allows to cancel a collection cycle for the
-  // purpose of shutting down the JVM, without trying to start a degenerated
-  // cycle.
-  void prepare_for_graceful_shutdown();
-  bool in_graceful_shutdown();
-
-  // Returns the internal gc count used by the control thread. Probably
-  // doesn't need to be exposed.
+  // Return the value of a monotonic increasing GC count, maintained by the control thread.
   size_t get_gc_id();
-  void update_gc_id();
 };
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHCONTROLLER_HPP
(File diff suppressed because it is too large.)
@@ -28,10 +28,9 @@
 #include "gc/shared/gcCause.hpp"
 #include "gc/shenandoah/shenandoahController.hpp"
-#include "gc/shenandoah/shenandoahGenerationType.hpp"
 #include "gc/shenandoah/shenandoahGC.hpp"
 #include "gc/shenandoah/shenandoahPadding.hpp"
 #include "gc/shenandoah/shenandoahSharedVariables.hpp"
+#include "runtime/mutexLocker.hpp"

 class ShenandoahOldGeneration;
 class ShenandoahGeneration;
@@ -52,21 +51,43 @@ public:
     stopped
   } GCMode;

+  class ShenandoahGCRequest {
+  public:
+    ShenandoahGCRequest() : generation(nullptr), cause(GCCause::_no_gc) {}
+    ShenandoahGeneration* generation;
+    GCCause::Cause cause;
+  };
+
 private:
+  // This lock is used to coordinate setting the _requested_gc_cause, _requested generation
+  // and _gc_mode. It is important that these be changed together and have a consistent view.
   Monitor _control_lock;
-  Monitor _regulator_lock;

-  ShenandoahSharedFlag _allow_old_preemption;
-  ShenandoahSharedFlag _preemption_requested;
-
+  // Represents a normal (non cancellation) gc request. This can be set by mutators (System.gc,
+  // whitebox gc, etc.) or by the regulator thread when the heuristics want to start a cycle.
   GCCause::Cause _requested_gc_cause;
-  volatile ShenandoahGenerationType _requested_generation;
-  ShenandoahGC::ShenandoahDegenPoint _degen_point;
-  ShenandoahGeneration* _degen_generation;

-  shenandoah_padding(0);
-  volatile GCMode _mode;
-  shenandoah_padding(1);
+  // This is the generation the request should operate on.
+  ShenandoahGeneration* _requested_generation;
+
+  // The mode is read frequently by requesting threads and only ever written by the control thread.
+  // This may be read without taking the _control_lock, but should be read again under the lock
+  // before making any state changes (double-checked locking idiom).
+  volatile GCMode _gc_mode;
+
+  // Only the control thread knows the correct degeneration point. This is used to have the
+  // control thread resume a STW cycle from the point where the concurrent cycle was cancelled.
+  ShenandoahGC::ShenandoahDegenPoint _degen_point;
+
+  // A reference to the heap
+  ShenandoahGenerationalHeap* _heap;
+
+  // This is used to keep track of whether to age objects during the current cycle.
+  uint _age_period;
+
+  // This is true when the old generation cycle is in an interruptible phase (i.e., marking or
+  // preparing for mark).
+  ShenandoahSharedFlag _allow_old_preemption;

 public:
   ShenandoahGenerationalControlThread();
@@ -77,54 +98,68 @@ public:
   void request_gc(GCCause::Cause cause) override;

   // Return true if the request to start a concurrent GC for the given generation succeeded.
-  bool request_concurrent_gc(ShenandoahGenerationType generation);
+  bool request_concurrent_gc(ShenandoahGeneration* generation);

-  GCMode gc_mode() {
-    return _mode;
+  // Returns the current state of the control thread
+  GCMode gc_mode() const {
+    return _gc_mode;
   }
+
 private:
+  // Returns true if the cycle has been cancelled or degenerated.
   bool check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point);

+  // Executes one GC cycle
+  void run_gc_cycle(const ShenandoahGCRequest& request);
+
+  // Returns true if the old generation marking completed (i.e., final mark executed for old generation).
   bool resume_concurrent_old_cycle(ShenandoahOldGeneration* generation, GCCause::Cause cause);
+
+  // Various service methods handle different gc cycle types
   void service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool reset_old_bitmap_specially);
   void service_stw_full_cycle(GCCause::Cause cause);
-  void service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point);
+  void service_stw_degenerated_cycle(const ShenandoahGCRequest& request);
+  void service_concurrent_normal_cycle(const ShenandoahGCRequest& request);
+  void service_concurrent_old_cycle(const ShenandoahGCRequest& request);

   void notify_gc_waiters();

   // Handle GC request.
-  // Blocks until GC is over.
+  // Blocks until at least one global GC cycle is complete.
   void handle_requested_gc(GCCause::Cause cause);

-  bool is_explicit_gc(GCCause::Cause cause) const;
-  bool is_implicit_gc(GCCause::Cause cause) const;
-
   // Returns true if the old generation marking was interrupted to allow a young cycle.
-  bool preempt_old_marking(ShenandoahGenerationType generation);
+  bool preempt_old_marking(ShenandoahGeneration* generation);

-  void process_phase_timings(const ShenandoahGenerationalHeap* heap);
-
-  void service_concurrent_normal_cycle(ShenandoahGenerationalHeap* heap,
-                                       ShenandoahGenerationType generation,
-                                       GCCause::Cause cause);
-
-  void service_concurrent_old_cycle(ShenandoahGenerationalHeap* heap,
-                                    GCCause::Cause &cause);
+  // Flushes cycle timings to global timings and prints the phase timings for the last completed cycle.
+  void process_phase_timings() const;

+  // Set the gc mode and post a notification if it has changed. The overloaded variant should be used
+  // when the _control_lock is already held.
   void set_gc_mode(GCMode new_mode);
+  void set_gc_mode(MonitorLocker& ml, GCMode new_mode);

+  // Return printable name for the given gc mode.
   static const char* gc_mode_name(GCMode mode);

-  void notify_control_thread();
+  // Takes the request lock and updates the requested cause and generation, then notifies the control thread.
+  // The overloaded variant should be used when the _control_lock is already held.
+  void notify_control_thread(GCCause::Cause cause, ShenandoahGeneration* generation);
+  void notify_control_thread(MonitorLocker& ml, GCCause::Cause cause, ShenandoahGeneration* generation);

-  void service_concurrent_cycle(ShenandoahHeap* heap,
-                                ShenandoahGeneration* generation,
-                                GCCause::Cause &cause,
-                                bool do_old_gc_bootstrap);
+  // Notifies the control thread, but does not update the requested cause or generation.
+  // The overloaded variant should be used when the _control_lock is already held.
+  void notify_cancellation(GCCause::Cause cause);
+  void notify_cancellation(MonitorLocker& ml, GCCause::Cause cause);

+  // Configure the heap to age objects and regions if the aging period has elapsed.
   void maybe_set_aging_cycle();

+  // Take the _control_lock and check for a request to run a gc cycle. If a request is found,
+  // the `prepare` methods are used to configure the heap and update heuristics accordingly.
+  void check_for_request(ShenandoahGCRequest& request);
+
+  GCMode prepare_for_allocation_failure_gc(ShenandoahGCRequest &request);
+  GCMode prepare_for_explicit_gc(ShenandoahGCRequest &request) const;
+  GCMode prepare_for_concurrent_gc(const ShenandoahGCRequest &request) const;
 };

 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONALCONTROLTHREAD_HPP
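The comment on _gc_mode above names the double-checked locking idiom: requesting threads may read the mode without the lock, but must re-read it under _control_lock before acting on it. Since the .cpp diff for this file was suppressed, here is a minimal standalone sketch of that idiom under stated assumptions (std::mutex stands in for the Monitor; the function and variable names are illustrative):

#include <mutex>

enum GCMode { none, concurrent_normal, servicing_old, stopped };  // values per the header above

std::mutex control_mutex;            // stand-in for _control_lock
volatile GCMode gc_mode = none;      // stand-in for _gc_mode, written only by the control thread

bool try_preempt_old_cycle() {
  if (gc_mode != servicing_old) {    // fast path: unlocked read, may be stale
    return false;
  }
  std::lock_guard<std::mutex> lock(control_mutex);
  if (gc_mode != servicing_old) {    // re-check under the lock before deciding
    return false;
  }
  // Mode changes happen only while holding the lock, so the decision made
  // here cannot be invalidated until we release it.
  return true;
}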
@@ -583,6 +583,7 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 {
   // Initialize GC mode early, many subsequent initialization procedures depend on it
   initialize_mode();
+  _cancelled_gc.set(GCCause::_no_gc);
 }

 #ifdef _MSC_VER
@@ -993,13 +994,13 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
   // a) We experienced a GC that had good progress, or
   // b) We experienced at least one Full GC (whether or not it had good progress)

-  size_t original_count = shenandoah_policy()->full_gc_count();
-  while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) {
+  const size_t original_count = shenandoah_policy()->full_gc_count();
+  while (result == nullptr && should_retry_allocation(original_count)) {
     control_thread()->handle_alloc_failure(req, true);
     result = allocate_memory_under_lock(req, in_new_region);
   }
   if (result != nullptr) {
-    // If our allocation request has been satisifed after it initially failed, we count this as good gc progress
+    // If our allocation request has been satisfied after it initially failed, we count this as good gc progress
     notify_gc_progress();
   }
   if (log_develop_is_enabled(Debug, gc, alloc)) {
@@ -1050,6 +1051,11 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
   return result;
 }

+inline bool ShenandoahHeap::should_retry_allocation(size_t original_full_gc_count) const {
+  return shenandoah_policy()->full_gc_count() == original_full_gc_count
+      && !shenandoah_policy()->is_at_shutdown();
+}
+
 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
   // If we are dealing with mutator allocation, then we may need to block for safepoint.
   // We cannot block for safepoint for GC allocations, because there is a high chance
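The new helper makes the retry bound explicit: keep retrying a failed allocation until either a Full GC has run since we started, or the policy reports shutdown (the second condition is what keeps mutators from blocking forever during VM exit). Inlining the helper back into the loop above gives the equivalent shape, shown here purely as a restatement:

// Illustrative restatement of the retry bound, with the helper inlined.
HeapWord* result = nullptr;
const size_t original_count = shenandoah_policy()->full_gc_count();
while (result == nullptr
       && shenandoah_policy()->full_gc_count() == original_count  // no Full GC yet
       && !shenandoah_policy()->is_at_shutdown()) {               // and not shutting down
  control_thread()->handle_alloc_failure(req, true);
  result = allocate_memory_under_lock(req, in_new_region);
}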
@@ -2120,9 +2126,9 @@ size_t ShenandoahHeap::tlab_used(Thread* thread) const {
   return _free_set->used();
 }

-bool ShenandoahHeap::try_cancel_gc() {
-  jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
-  return prev == CANCELLABLE;
+bool ShenandoahHeap::try_cancel_gc(GCCause::Cause cause) {
+  jbyte prev = _cancelled_gc.cmpxchg(cause, GCCause::_no_gc);
+  return prev == GCCause::_no_gc;
 }

 void ShenandoahHeap::cancel_concurrent_mark() {
@@ -2136,13 +2142,15 @@ void ShenandoahHeap::cancel_concurrent_mark() {
   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
 }

-void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
-  if (try_cancel_gc()) {
+bool ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
+  if (try_cancel_gc(cause)) {
     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
-    log_info(gc)("%s", msg.buffer());
+    log_info(gc,thread)("%s", msg.buffer());
     Events::log(Thread::current(), "%s", msg.buffer());
     _cancel_requested_time = os::elapsedTime();
+    return true;
   }
+  return false;
 }

 uint ShenandoahHeap::max_workers() {
@@ -2155,18 +2163,10 @@ void ShenandoahHeap::stop() {
   // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
   _shenandoah_policy->record_shutdown();

-  // Step 0a. Stop reporting on gc thread cpu utilization
+  // Step 1. Stop reporting on gc thread cpu utilization
   mmu_tracker()->stop();

-  // Step 1. Notify control thread that we are in shutdown.
-  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
-  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
-  control_thread()->prepare_for_graceful_shutdown();
-
-  // Step 2. Notify GC workers that we are cancelling GC.
-  cancel_gc(GCCause::_shenandoah_stop_vm);
-
-  // Step 3. Wait until GC worker exits normally.
+  // Step 2. Wait until GC worker exits normally (this will cancel any ongoing GC).
   control_thread()->stop();

   // Stop 4. Shutdown uncommit thread.
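With the graceful-shutdown flag gone, the shutdown handshake runs entirely through the cancellation cause: stop() delegates to the control thread, whose stop_service() cancels with _shenandoah_stop_vm, and the run loop exits when it observes that cause. A toy standalone model of that handshake, assuming ConcurrentGCThread's usual stop() protocol (set terminate flag, call stop_service(), join); the std:: machinery is a stand-in for HotSpot's primitives:

#include <atomic>
#include <thread>

std::atomic<int> cancelled_cause{0};   // 0 = _no_gc
constexpr int shenandoah_stop_vm = 1;  // stand-in for GCCause::_shenandoah_stop_vm

void run_service() {                   // control thread main loop
  while (true) {
    if (cancelled_cause.load() == shenandoah_stop_vm) {
      break;                           // graceful exit, replaces in_graceful_shutdown()
    }
    std::this_thread::yield();         // ... normal GC work elided ...
  }
}

void stop_service() {                  // invoked from the stop() handshake
  int expected = 0;
  cancelled_cause.compare_exchange_strong(expected, shenandoah_stop_vm);
}

int main() {
  std::thread control(run_service);
  stop_service();                      // heap->stop() path
  control.join();                      // waits for run_service() to exit
  return 0;
}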
@@ -430,35 +430,38 @@ public:
 private:
   void manage_satb_barrier(bool active);

-  enum CancelState {
-    // Normal state. GC has not been cancelled and is open for cancellation.
-    // Worker threads can suspend for safepoint.
-    CANCELLABLE,
-
-    // GC has been cancelled. Worker threads can not suspend for
-    // safepoint but must finish their work as soon as possible.
-    CANCELLED
-  };
-
+  // Records the time of the first successful cancellation request. This is used to measure
+  // the responsiveness of the heuristic when starting a cycle.
   double _cancel_requested_time;
-  ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;
+
+  // Indicates the reason the current GC has been cancelled (GCCause::_no_gc means the gc is not cancelled).
+  ShenandoahSharedEnumFlag<GCCause::Cause> _cancelled_gc;

   // Returns true if cancel request was successfully communicated.
   // Returns false if some other thread already communicated cancel
   // request. A true return value does not mean GC has been
   // cancelled, only that the process of cancelling GC has begun.
-  bool try_cancel_gc();
+  bool try_cancel_gc(GCCause::Cause cause);

 public:
   // True if gc has been cancelled
   inline bool cancelled_gc() const;

   // Used by workers in the GC cycle to detect cancellation and honor STS requirements
   inline bool check_cancelled_gc_and_yield(bool sts_active = true);

+  // This indicates the reason the last GC cycle was cancelled.
+  inline GCCause::Cause cancelled_cause() const;
+
+  // Clears the cancellation cause and optionally resets the oom handler (cancelling an
+  // old mark does _not_ touch the oom handler).
   inline void clear_cancelled_gc(bool clear_oom_handler = true);

   void cancel_concurrent_mark();
-  void cancel_gc(GCCause::Cause cause);

-public:
+  // Returns true if and only if this call caused a gc to be cancelled.
+  bool cancel_gc(GCCause::Cause cause);

   // Returns true if the soft maximum heap has been changed using management APIs.
   bool check_soft_max_changed();
@@ -690,6 +693,9 @@ private:
   HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
   HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);

+  // We want to retry an unsuccessful attempt at allocation until at least a full gc.
+  bool should_retry_allocation(size_t original_full_gc_count) const;
+
 public:
   HeapWord* allocate_memory(ShenandoahAllocRequest& request);
   HeapWord* mem_allocate(size_t size, bool* what) override;
@@ -252,7 +252,7 @@ inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, narrowOop compare)
 }

 inline bool ShenandoahHeap::cancelled_gc() const {
-  return _cancelled_gc.get() == CANCELLED;
+  return _cancelled_gc.get() != GCCause::_no_gc;
 }

 inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
@@ -264,8 +264,12 @@ inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
   return cancelled_gc();
 }

+inline GCCause::Cause ShenandoahHeap::cancelled_cause() const {
+  return _cancelled_gc.get();
+}
+
 inline void ShenandoahHeap::clear_cancelled_gc(bool clear_oom_handler) {
-  _cancelled_gc.set(CANCELLABLE);
+  _cancelled_gc.set(GCCause::_no_gc);
   if (_cancel_requested_time > 0) {
     log_debug(gc)("GC cancellation took %.3fs", (os::elapsedTime() - _cancel_requested_time));
     _cancel_requested_time = 0;
@@ -495,7 +495,7 @@ const char* ShenandoahOldGeneration::state_name(State state) {
 void ShenandoahOldGeneration::transition_to(State new_state) {
   if (_state != new_state) {
-    log_debug(gc)("Old generation transition from %s to %s", state_name(_state), state_name(new_state));
+    log_debug(gc, thread)("Old generation transition from %s to %s", state_name(_state), state_name(new_state));
     EventMark event("Old was %s, now is %s", state_name(_state), state_name(new_state));
     validate_transition(new_state);
     _state = new_state;
@@ -33,15 +33,14 @@
 #include "logging/log.hpp"

 ShenandoahRegulatorThread::ShenandoahRegulatorThread(ShenandoahGenerationalControlThread* control_thread) :
   ConcurrentGCThread(),
+  _heap(ShenandoahHeap::heap()),
   _control_thread(control_thread),
   _sleep(ShenandoahControlIntervalMin),
   _last_sleep_adjust_time(os::elapsedTime()) {
   shenandoah_assert_generational();
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  _old_heuristics = heap->old_generation()->heuristics();
-  _young_heuristics = heap->young_generation()->heuristics();
-  _global_heuristics = heap->global_generation()->heuristics();
+  _old_heuristics = _heap->old_generation()->heuristics();
+  _young_heuristics = _heap->young_generation()->heuristics();
+  _global_heuristics = _heap->global_generation()->heuristics();

   set_name("Shenandoah Regulator Thread");
   create_and_start();
@@ -62,7 +61,7 @@ void ShenandoahRegulatorThread::regulate_young_and_old_cycles() {
     ShenandoahGenerationalControlThread::GCMode mode = _control_thread->gc_mode();
     if (mode == ShenandoahGenerationalControlThread::none) {
       if (should_start_metaspace_gc()) {
-        if (request_concurrent_gc(GLOBAL)) {
+        if (request_concurrent_gc(_heap->global_generation())) {
           // Some of vmTestbase/metaspace tests depend on following line to count GC cycles
           _global_heuristics->log_trigger("%s", GCCause::to_string(GCCause::_metadata_GC_threshold));
           _global_heuristics->cancel_trigger_request();
@@ -75,10 +74,14 @@ void ShenandoahRegulatorThread::regulate_young_and_old_cycles() {
         log_debug(gc)("Heuristics request for old collection accepted");
         _young_heuristics->cancel_trigger_request();
         _old_heuristics->cancel_trigger_request();
-      } else if (request_concurrent_gc(YOUNG)) {
+      } else if (request_concurrent_gc(_heap->young_generation())) {
         log_debug(gc)("Heuristics request for young collection accepted");
         _young_heuristics->cancel_trigger_request();
       }
+    } else if (_old_heuristics->should_resume_old_cycle() || _old_heuristics->should_start_gc()) {
+      if (request_concurrent_gc(_heap->old_generation())) {
+        log_debug(gc)("Heuristics request to resume old collection accepted");
+      }
     }
   } else if (mode == ShenandoahGenerationalControlThread::servicing_old) {
@@ -132,19 +135,19 @@ void ShenandoahRegulatorThread::regulator_sleep() {
   }
 }

-bool ShenandoahRegulatorThread::start_old_cycle() {
-  return _old_heuristics->should_start_gc() && request_concurrent_gc(OLD);
+bool ShenandoahRegulatorThread::start_old_cycle() const {
+  return _old_heuristics->should_start_gc() && request_concurrent_gc(_heap->old_generation());
 }

-bool ShenandoahRegulatorThread::start_young_cycle() {
-  return _young_heuristics->should_start_gc() && request_concurrent_gc(YOUNG);
+bool ShenandoahRegulatorThread::start_young_cycle() const {
+  return _young_heuristics->should_start_gc() && request_concurrent_gc(_heap->young_generation());
 }

-bool ShenandoahRegulatorThread::start_global_cycle() {
-  return _global_heuristics->should_start_gc() && request_concurrent_gc(GLOBAL);
+bool ShenandoahRegulatorThread::start_global_cycle() const {
+  return _global_heuristics->should_start_gc() && request_concurrent_gc(_heap->global_generation());
 }

-bool ShenandoahRegulatorThread::request_concurrent_gc(ShenandoahGenerationType generation) {
+bool ShenandoahRegulatorThread::request_concurrent_gc(ShenandoahGeneration* generation) const {
   double now = os::elapsedTime();
   bool accepted = _control_thread->request_concurrent_gc(generation);
   if (LogTarget(Debug, gc, thread)::is_enabled() && accepted) {
@@ -26,8 +26,11 @@
 #include "gc/shared/concurrentGCThread.hpp"

+class ShenandoahHeap;
 class ShenandoahHeuristics;
+class ShenandoahGeneration;
 class ShenandoahGenerationalControlThread;
+class ShenandoahOldHeuristics;
/*
|
||||
* The purpose of this class (and thread) is to allow us to continue
|
||||
@ -58,9 +61,10 @@ class ShenandoahRegulatorThread: public ConcurrentGCThread {
|
||||
void regulate_young_and_global_cycles();
|
||||
|
||||
// These return true if a cycle was started.
|
||||
bool start_old_cycle();
|
||||
bool start_young_cycle();
|
||||
bool start_global_cycle();
|
||||
bool start_old_cycle() const;
|
||||
bool start_young_cycle() const;
|
||||
bool start_global_cycle() const;
|
||||
bool resume_old_cycle();
|
||||
|
||||
// The generational mode can only unload classes in a global cycle. The regulator
|
||||
// thread itself will trigger a global cycle if metaspace is out of memory.
|
||||
@@ -70,11 +74,12 @@ class ShenandoahRegulatorThread: public ConcurrentGCThread {
   void regulator_sleep();

   // Provides instrumentation to track how long it takes to acknowledge a request.
-  bool request_concurrent_gc(ShenandoahGenerationType generation);
+  bool request_concurrent_gc(ShenandoahGeneration* generation) const;

+  ShenandoahHeap* _heap;
   ShenandoahGenerationalControlThread* _control_thread;
   ShenandoahHeuristics* _young_heuristics;
-  ShenandoahHeuristics* _old_heuristics;
+  ShenandoahOldHeuristics* _old_heuristics;
   ShenandoahHeuristics* _global_heuristics;

   uint _sleep;
@@ -475,7 +475,7 @@ HeapWord* ShenandoahScanRemembered::addr_for_cluster(size_t cluster_no) {
 void ShenandoahScanRemembered::roots_do(OopIterateClosure* cl) {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   bool old_bitmap_stable = heap->old_generation()->is_mark_complete();
-  log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
+  log_debug(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
   for (size_t i = 0, n = heap->num_regions(); i < n; ++i) {
     ShenandoahHeapRegion* region = heap->get_region(i);
     if (region->is_old() && region->is_active() && !region->is_cset()) {
@@ -653,7 +653,7 @@ ShenandoahScanRememberedTask::ShenandoahScanRememberedTask(ShenandoahObjToScanQu
   WorkerTask("Scan Remembered Set"),
   _queue_set(queue_set), _old_queue_set(old_queue_set), _rp(rp), _work_list(work_list), _is_concurrent(is_concurrent) {
   bool old_bitmap_stable = ShenandoahHeap::heap()->old_generation()->is_mark_complete();
-  log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
+  log_debug(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
 }

 void ShenandoahScanRememberedTask::work(uint worker_id) {