8080226: G1: Replace collector state booleans with explicit state variable(s)

Reviewed-by: iwalulya, ayang
This commit is contained in:
Thomas Schatzl 2026-03-23 09:38:37 +00:00
parent caf37add7a
commit 700a385634
11 changed files with 100 additions and 118 deletions

View File

@ -2481,7 +2481,7 @@ void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
void G1CollectedHeap::gc_prologue(bool full) {
// Update common counters.
increment_total_collections(full /* full gc */);
if (full || collector_state()->in_concurrent_start_gc()) {
if (full || collector_state()->is_in_concurrent_start_gc()) {
increment_old_marking_cycles_started();
}
}
@ -2651,7 +2651,7 @@ void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType
verify_numa_regions("GC End");
_verifier->verify_region_sets_optional();
if (collector_state()->in_concurrent_start_gc()) {
if (collector_state()->is_in_concurrent_start_gc()) {
log_debug(gc, verify)("Marking state");
_verifier->verify_marking_state();
}
@ -2732,7 +2732,7 @@ void G1CollectedHeap::do_collection_pause_at_safepoint(size_t allocation_word_si
// Record whether this pause may need to trigger a concurrent operation. Later,
// when we signal the G1ConcurrentMarkThread, the collector state has already
// been reset for the next pause.
bool should_start_concurrent_mark_operation = collector_state()->in_concurrent_start_gc();
bool should_start_concurrent_mark_operation = collector_state()->is_in_concurrent_start_gc();
// Perform the collection.
G1YoungCollector collector(gc_cause(), allocation_word_size);
@ -2827,7 +2827,7 @@ bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
}
void G1CollectedHeap::make_pending_list_reachable() {
if (collector_state()->in_concurrent_start_gc()) {
if (collector_state()->is_in_concurrent_start_gc()) {
oop pll_head = Universe::reference_pending_list();
if (pll_head != nullptr) {
// Any valid worker id is fine here as we are in the VM thread and single-threaded.
@ -3212,7 +3212,7 @@ void G1CollectedHeap::retire_gc_alloc_region(G1HeapRegion* alloc_region,
_survivor.add_used_bytes(allocated_bytes);
}
bool const during_im = collector_state()->in_concurrent_start_gc();
bool const during_im = collector_state()->is_in_concurrent_start_gc();
if (during_im && allocated_bytes > 0) {
_cm->add_root_region(alloc_region);
}

View File

@ -325,7 +325,7 @@ double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1Survi
guarantee(target_pause_time_ms > 0.0,
"target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
bool in_young_only_phase = _policy->collector_state()->in_young_only_phase();
bool in_young_only_phase = _policy->collector_state()->is_in_young_only_phase();
size_t pending_cards = _policy->analytics()->predict_pending_cards(in_young_only_phase);
log_trace(gc, ergo, cset)("Start choosing CSet. Pending cards: %zu target pause time: %1.2fms",
@ -378,7 +378,7 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
if (!candidates()->is_empty()) {
candidates()->verify();
if (collector_state()->in_mixed_phase()) {
if (collector_state()->is_in_mixed_phase()) {
time_remaining_ms = select_candidates_from_marking(time_remaining_ms);
} else {
log_debug(gc, ergo, cset)("Do not add marking candidates to collection set due to pause type.");

View File

@ -26,24 +26,19 @@
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1GCPauseType.hpp"
#include "runtime/safepoint.hpp"
G1GCPauseType G1CollectorState::young_gc_pause_type(bool concurrent_operation_is_full_mark) const {
assert(!in_full_gc(), "must be");
if (in_concurrent_start_gc()) {
assert(!in_young_gc_before_mixed(), "must be");
return concurrent_operation_is_full_mark ? G1GCPauseType::ConcurrentStartMarkGC :
G1GCPauseType::ConcurrentStartUndoGC;
} else if (in_young_gc_before_mixed()) {
assert(!in_concurrent_start_gc(), "must be");
return G1GCPauseType::LastYoungGC;
} else if (in_mixed_phase()) {
assert(!in_concurrent_start_gc(), "must be");
assert(!in_young_gc_before_mixed(), "must be");
return G1GCPauseType::MixedGC;
} else {
assert(!in_concurrent_start_gc(), "must be");
assert(!in_young_gc_before_mixed(), "must be");
return G1GCPauseType::YoungGC;
G1GCPauseType G1CollectorState::gc_pause_type(bool concurrent_operation_is_full_mark) const {
assert(SafepointSynchronize::is_at_safepoint(), "must be");
switch (_phase) {
case Phase::YoungNormal: return G1GCPauseType::YoungGC;
case Phase::YoungLastYoung: return G1GCPauseType::LastYoungGC;
case Phase::YoungConcurrentStart:
return concurrent_operation_is_full_mark ? G1GCPauseType::ConcurrentStartMarkGC :
G1GCPauseType::ConcurrentStartUndoGC;
case Phase::Mixed: return G1GCPauseType::MixedGC;
case Phase::FullGC: return G1GCPauseType::FullGC;
default: ShouldNotReachHere();
}
}

View File

@ -29,68 +29,61 @@
#include "utilities/globalDefinitions.hpp"
// State of the G1 collection.
//
// The rough phasing is Young-Only, Mixed / Space Reclamation and
// Full GC "phase".
//
// We split the Young-only phase into three parts to cover interesting
// sub-phases and avoid separate tracking.
class G1CollectorState {
// Indicates whether we are in the phase where we do partial gcs that only contain
// the young generation. Not set while _in_full_gc is set.
bool _in_young_only_phase;
enum class Phase {
// Indicates that the next GC in the Young-Only phase will (likely) be a "Normal"
// young GC.
YoungNormal,
// We are in a concurrent start GC during the Young-Only phase. This is only set
// during that GC because we only decide whether we do this type of GC at the start
// of the pause.
YoungConcurrentStart,
// Indicates that we are about to start or in the last young gc in the Young-Only
// phase before the Mixed phase. This GC is required to keep pause time requirements.
YoungLastYoung,
// Doing extra old generation evacuation.
Mixed,
// The Full GC phase (that coincides with the Full GC pause).
FullGC
} _phase;
// Indicates whether we are in the last young gc before the mixed gc phase. This GC
// is required to keep pause time requirements.
bool _in_young_gc_before_mixed;
// If _initiate_conc_mark_if_possible is set at the beginning of a
// pause, it is a suggestion that the pause should start a marking
// cycle by doing the concurrent start work. However, it is possible
// that the concurrent marking thread is still finishing up the
// previous marking cycle (e.g., clearing the marking bitmap).
// If that is the case we cannot start a new cycle and
// we'll have to wait for the concurrent marking thread to finish
// what it is doing. In this case we will postpone the marking cycle
// initiation decision for the next pause. When we eventually decide
// to start a cycle, we will set _in_concurrent_start_gc which
// will stay true until the end of the concurrent start pause doing the
// concurrent start work.
volatile bool _in_concurrent_start_gc;
// At the end of a pause we check the heap occupancy and we decide
// whether we will start a marking cycle during the next pause. If
// we decide that we want to do that, set this parameter. This parameter will
// stay set until the beginning of a subsequent pause (not necessarily
// the next one) when we decide that we will indeed start a marking cycle and
// do the concurrent start phase work.
// _initiate_conc_mark_if_possible indicates that there has been a request to start
// a concurrent cycle but we have not been able to fulfill it because another one
// has been in progress when the request came in.
//
// This flag remembers that there is an unfulfilled request.
volatile bool _initiate_conc_mark_if_possible;
// Set during a full gc pause.
bool _in_full_gc;
public:
G1CollectorState() :
_in_young_only_phase(true),
_in_young_gc_before_mixed(false),
_in_concurrent_start_gc(false),
_initiate_conc_mark_if_possible(false),
_in_full_gc(false) { }
_phase(Phase::YoungNormal),
_initiate_conc_mark_if_possible(false) { }
// Phase setters
void set_in_young_only_phase(bool v) { _in_young_only_phase = v; }
void set_in_normal_young_gc() { _phase = Phase::YoungNormal; }
void set_in_space_reclamation_phase() { _phase = Phase::Mixed; }
void set_in_full_gc() { _phase = Phase::FullGC; }
// Pause setters
void set_in_young_gc_before_mixed(bool v) { _in_young_gc_before_mixed = v; }
void set_in_concurrent_start_gc(bool v) { _in_concurrent_start_gc = v; }
void set_in_full_gc(bool v) { _in_full_gc = v; }
void set_in_young_gc_before_mixed() { _phase = Phase::YoungLastYoung; }
void set_in_concurrent_start_gc() { _phase = Phase::YoungConcurrentStart; _initiate_conc_mark_if_possible = false; }
void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
// Phase getters
bool in_young_only_phase() const { return _in_young_only_phase && !_in_full_gc; }
bool in_mixed_phase() const { return !_in_young_only_phase && !_in_full_gc; }
bool is_in_young_only_phase() const { return _phase == Phase::YoungNormal || _phase == Phase::YoungConcurrentStart || _phase == Phase::YoungLastYoung; }
bool is_in_mixed_phase() const { return _phase == Phase::Mixed; }
// Specific pauses
bool in_young_gc_before_mixed() const { return _in_young_gc_before_mixed; }
bool in_full_gc() const { return _in_full_gc; }
bool in_concurrent_start_gc() const { return _in_concurrent_start_gc; }
bool is_in_young_gc_before_mixed() const { return _phase == Phase::YoungLastYoung; }
bool is_in_full_gc() const { return _phase == Phase::FullGC; }
bool is_in_concurrent_start_gc() const { return _phase == Phase::YoungConcurrentStart; }
bool initiate_conc_mark_if_possible() const { return _initiate_conc_mark_if_possible; }
@ -100,7 +93,7 @@ public:
bool is_in_reset_for_next_cycle() const;
// Calculate GC Pause Type from internal state.
G1GCPauseType young_gc_pause_type(bool concurrent_operation_is_full_mark) const;
G1GCPauseType gc_pause_type(bool concurrent_operation_is_full_mark) const;
};
#endif // SHARE_GC_G1_G1COLLECTORSTATE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -236,7 +236,7 @@ private:
VerifyOption _vo;
bool _failures;
bool is_in_full_gc() const { return G1CollectedHeap::heap()->collector_state()->in_full_gc(); }
bool is_in_full_gc() const { return G1CollectedHeap::heap()->collector_state()->is_in_full_gc(); }
public:
VerifyRegionClosure(VerifyOption vo)
@ -349,7 +349,7 @@ void G1HeapVerifier::verify(VerifyOption vo) {
bool failures = rootsCl.failures() || codeRootsCl.failures();
if (!_g1h->policy()->collector_state()->in_full_gc()) {
if (!_g1h->policy()->collector_state()->is_in_full_gc()) {
// If we're verifying during a full GC then the region sets
// will have been torn down at the start of the GC. Therefore
// verifying the region sets will fail. So we only verify
@ -494,7 +494,7 @@ public:
};
void G1HeapVerifier::verify_marking_state() {
assert(G1CollectedHeap::heap()->collector_state()->in_concurrent_start_gc(), "must be");
assert(G1CollectedHeap::heap()->collector_state()->is_in_concurrent_start_gc(), "must be");
// Verify TAMSes, bitmaps and liveness statistics.
//

View File

@ -177,7 +177,7 @@ uint G1Policy::calculate_desired_eden_length_by_mmu() const {
void G1Policy::update_young_length_bounds() {
assert(!Universe::is_fully_initialized() || SafepointSynchronize::is_at_safepoint(), "must be");
bool for_young_only_phase = collector_state()->in_young_only_phase();
bool for_young_only_phase = collector_state()->is_in_young_only_phase();
update_young_length_bounds(_analytics->predict_pending_cards(for_young_only_phase),
_analytics->predict_card_rs_length(for_young_only_phase),
_analytics->predict_code_root_rs_length(for_young_only_phase));
@ -505,7 +505,7 @@ uint G1Policy::calculate_desired_eden_length_before_mixed(double base_time_ms,
double G1Policy::predict_survivor_regions_evac_time() const {
double survivor_regions_evac_time = predict_young_region_other_time_ms(_g1h->survivor()->length());
for (G1HeapRegion* r : _g1h->survivor()->regions()) {
survivor_regions_evac_time += predict_region_copy_time_ms(r, _g1h->collector_state()->in_young_only_phase());
survivor_regions_evac_time += predict_region_copy_time_ms(r, _g1h->collector_state()->is_in_young_only_phase());
}
return survivor_regions_evac_time;
@ -561,8 +561,7 @@ void G1Policy::revise_young_list_target_length(size_t pending_cards, size_t card
void G1Policy::record_full_collection_start() {
record_pause_start_time();
// Release the future to-space so that it is available for compaction into.
collector_state()->set_in_young_only_phase(false);
collector_state()->set_in_full_gc(true);
collector_state()->set_in_full_gc();
_collection_set->abandon_all_candidates();
}
@ -571,14 +570,10 @@ void G1Policy::record_full_collection_end(size_t allocation_word_size) {
// since last pause.
double end_sec = os::elapsedTime();
collector_state()->set_in_full_gc(false);
// "Nuke" the heuristics that control the young/mixed GC
// transitions and make sure we start with young GCs after the Full GC.
collector_state()->set_in_young_only_phase(true);
collector_state()->set_in_young_gc_before_mixed(false);
collector_state()->set_in_normal_young_gc();
collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", allocation_word_size));
collector_state()->set_in_concurrent_start_gc(false);
_eden_surv_rate_group->start_adding_regions();
// also call this on any additional surv rate groups
@ -697,7 +692,7 @@ void G1Policy::record_young_collection_start() {
void G1Policy::record_concurrent_mark_init_end() {
assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
collector_state()->set_in_concurrent_start_gc(false);
collector_state()->set_in_normal_young_gc();
}
void G1Policy::record_concurrent_mark_remark_end() {
@ -735,7 +730,7 @@ double G1Policy::constant_other_time_ms(double pause_time_ms) const {
}
bool G1Policy::about_to_start_mixed_phase() const {
return collector_state()->is_in_concurrent_cycle() || collector_state()->in_young_gc_before_mixed();
return collector_state()->is_in_concurrent_cycle() || collector_state()->is_in_young_gc_before_mixed();
}
bool G1Policy::need_to_start_conc_mark(const char* source, size_t allocation_word_size) {
@ -748,7 +743,7 @@ bool G1Policy::need_to_start_conc_mark(const char* source, size_t allocation_wor
bool result = false;
if (non_young_occupancy > marking_initiating_used_threshold) {
result = collector_state()->in_young_only_phase();
result = collector_state()->is_in_young_only_phase();
log_debug(gc, ergo, ihop)("%s non-young occupancy: %zuB allocation request: %zuB threshold: %zuB (%1.2f) source: %s",
result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
non_young_occupancy, allocation_word_size * HeapWordSize, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
@ -757,7 +752,7 @@ bool G1Policy::need_to_start_conc_mark(const char* source, size_t allocation_wor
}
bool G1Policy::concurrent_operation_is_full_mark(const char* msg, size_t allocation_word_size) {
return collector_state()->in_concurrent_start_gc() &&
return collector_state()->is_in_concurrent_start_gc() &&
((_g1h->gc_cause() != GCCause::_g1_humongous_allocation) || need_to_start_conc_mark(msg, allocation_word_size));
}
@ -796,7 +791,7 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
double end_time_sec = Ticks::now().seconds();
double pause_time_ms = (end_time_sec - start_time_sec) * 1000.0;
G1GCPauseType this_pause = collector_state()->young_gc_pause_type(concurrent_operation_is_full_mark);
G1GCPauseType this_pause = collector_state()->gc_pause_type(concurrent_operation_is_full_mark);
bool is_young_only_pause = G1GCPauseTypeHelper::is_young_only_pause(this_pause);
if (G1GCPauseTypeHelper::is_concurrent_start_pause(this_pause)) {
@ -947,14 +942,13 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
// This has been the young GC before we start doing mixed GCs. We already
// decided to start mixed GCs much earlier, so there is nothing to do except
// advancing the state.
collector_state()->set_in_young_only_phase(false);
collector_state()->set_in_young_gc_before_mixed(false);
collector_state()->set_in_space_reclamation_phase();
} else if (G1GCPauseTypeHelper::is_mixed_pause(this_pause)) {
// This is a mixed GC. Here we decide whether to continue doing more
// mixed GCs or not.
if (!next_gc_should_be_mixed()) {
log_debug(gc, ergo)("do not continue mixed GCs (candidate old regions not available)");
collector_state()->set_in_young_only_phase(true);
collector_state()->set_in_normal_young_gc();
assert(!candidates()->has_more_marking_candidates(),
"only end mixed if all candidates from marking were processed");
@ -1073,7 +1067,7 @@ void G1Policy::record_young_gc_pause_end(bool evacuation_failed) {
double G1Policy::predict_base_time_ms(size_t pending_cards,
size_t card_rs_length,
size_t code_root_rs_length) const {
bool in_young_only_phase = collector_state()->in_young_only_phase();
bool in_young_only_phase = collector_state()->is_in_young_only_phase();
// Cards from the refinement table and the cards from the young gen remset are
// unique to each other as they are located on the card table.
@ -1097,7 +1091,7 @@ double G1Policy::predict_base_time_ms(size_t pending_cards,
}
double G1Policy::predict_base_time_ms(size_t pending_cards, size_t card_rs_length) const {
bool for_young_only_phase = collector_state()->in_young_only_phase();
bool for_young_only_phase = collector_state()->is_in_young_only_phase();
size_t code_root_rs_length = _analytics->predict_code_root_rs_length(for_young_only_phase);
return predict_base_time_ms(pending_cards, card_rs_length, code_root_rs_length);
}
@ -1137,7 +1131,7 @@ double G1Policy::predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy) co
if (bytes_to_copy != nullptr) {
*bytes_to_copy = expected_bytes;
}
return _analytics->predict_object_copy_time_ms(expected_bytes, collector_state()->in_young_only_phase());
return _analytics->predict_object_copy_time_ms(expected_bytes, collector_state()->is_in_young_only_phase());
}
double G1Policy::predict_region_copy_time_ms(G1HeapRegion* hr, bool for_young_only_phase) const {
@ -1243,8 +1237,7 @@ bool G1Policy::force_concurrent_start_if_outside_cycle(GCCause::Cause gc_cause)
}
void G1Policy::initiate_conc_mark() {
collector_state()->set_in_concurrent_start_gc(true);
collector_state()->set_initiate_conc_mark_if_possible(false);
collector_state()->set_in_concurrent_start_gc();
}
static const char* requester_for_mixed_abort(GCCause::Cause cause) {
@ -1266,7 +1259,7 @@ void G1Policy::decide_on_concurrent_start_pause() {
// will set it here if we have to. However, it should be cleared by
// the end of the pause (it's only set for the duration of a
// concurrent start pause).
assert(!collector_state()->in_concurrent_start_gc(), "pre-condition");
assert(!collector_state()->is_in_concurrent_start_gc(), "pre-condition");
if (collector_state()->initiate_conc_mark_if_possible()) {
// We had noticed on a previous pause that the heap occupancy has
@ -1279,7 +1272,7 @@ void G1Policy::decide_on_concurrent_start_pause() {
if ((cause != GCCause::_wb_breakpoint) &&
ConcurrentGCBreakpoints::is_controlled()) {
log_debug(gc, ergo)("Do not initiate concurrent cycle (whitebox controlled)");
} else if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
} else if (!about_to_start_mixed_phase() && collector_state()->is_in_young_only_phase()) {
// Initiate a new concurrent start if there is no marking or reclamation going on.
initiate_conc_mark();
log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
@ -1288,8 +1281,7 @@ void G1Policy::decide_on_concurrent_start_pause() {
(cause == GCCause::_wb_breakpoint)) {
// Initiate a concurrent start. A concurrent start must be a young only
// GC, so the collector state must be updated to reflect this.
collector_state()->set_in_young_only_phase(true);
collector_state()->set_in_young_gc_before_mixed(false);
collector_state()->set_in_normal_young_gc();
// We might have ended up coming here about to start a mixed phase with a collection set
// active. The following remark might change the "evacuation efficiency" of
@ -1318,10 +1310,10 @@ void G1Policy::decide_on_concurrent_start_pause() {
}
// Result consistency checks.
// We do not allow concurrent start to be piggy-backed on a mixed GC.
assert(!collector_state()->in_concurrent_start_gc() ||
collector_state()->in_young_only_phase(), "sanity");
assert(!collector_state()->is_in_concurrent_start_gc() ||
collector_state()->is_in_young_only_phase(), "sanity");
// We also do not allow mixed GCs during marking/rebuilding.
assert(!collector_state()->is_in_mark_or_rebuild() || collector_state()->in_young_only_phase(), "sanity %d %d", collector_state()->is_in_concurrent_cycle(), collector_state()->in_young_only_phase());
assert(!collector_state()->is_in_mark_or_rebuild() || collector_state()->is_in_young_only_phase(), "sanity %d %d", collector_state()->is_in_concurrent_cycle(), collector_state()->is_in_young_only_phase());
}
void G1Policy::record_concurrent_mark_cleanup_end(bool has_rebuilt_remembered_sets) {
@ -1340,7 +1332,9 @@ void G1Policy::record_concurrent_mark_cleanup_end(bool has_rebuilt_remembered_se
abort_time_to_mixed_tracking();
log_debug(gc, ergo)("request young-only gcs (candidate old regions not available)");
}
collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
if (mixed_gc_pending) {
collector_state()->set_in_young_gc_before_mixed();
}
double end_sec = os::elapsedTime();
double start_sec = cur_pause_start_sec();

View File

@ -115,7 +115,7 @@ class G1Policy: public CHeapObj<mtGC> {
G1ConcurrentStartToMixedTimeTracker _concurrent_start_to_mixed;
bool should_update_surv_rate_group_predictors() {
return collector_state()->in_young_only_phase() && !collector_state()->is_in_mark_or_rebuild();
return collector_state()->is_in_young_only_phase() && !collector_state()->is_in_mark_or_rebuild();
}
double pending_cards_processing_time() const;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,7 +72,7 @@ G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1Colle
G1ParScanThreadState* pss,
bool process_only_dirty_klasses) {
G1EvacuationRootClosures* res = nullptr;
if (g1h->collector_state()->in_concurrent_start_gc()) {
if (g1h->collector_state()->is_in_concurrent_start_gc()) {
if (ClassUnloadingWithConcurrentMark) {
res = new G1ConcurrentStartMarkClosures<false>(g1h, pss);
} else {

View File

@ -105,7 +105,7 @@ public:
// Take snapshot of current pause type at start as it may be modified during gc.
// The strings for all Concurrent Start pauses are the same, so the parameter
// does not matter here.
_pause_type(_collector->collector_state()->young_gc_pause_type(false /* concurrent_operation_is_full_mark */)),
_pause_type(_collector->collector_state()->gc_pause_type(false /* concurrent_operation_is_full_mark */)),
_pause_cause(cause),
// Fake a "no cause" and manually add the correct string in update_young_gc_name()
// to make the string look more natural.
@ -160,9 +160,9 @@ class G1YoungGCVerifierMark : public StackObj {
static G1HeapVerifier::G1VerifyType young_collection_verify_type() {
G1CollectorState* state = G1CollectedHeap::heap()->collector_state();
if (state->in_concurrent_start_gc()) {
if (state->is_in_concurrent_start_gc()) {
return G1HeapVerifier::G1VerifyConcurrentStart;
} else if (state->in_young_only_phase()) {
} else if (state->is_in_young_only_phase()) {
return G1HeapVerifier::G1VerifyYoungNormal;
} else {
return G1HeapVerifier::G1VerifyMixed;
@ -530,7 +530,7 @@ void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info)
// Needs log buffers flushed.
calculate_collection_set(evacuation_info, policy()->max_pause_time_ms());
if (collector_state()->in_concurrent_start_gc()) {
if (collector_state()->is_in_concurrent_start_gc()) {
Ticks start = Ticks::now();
concurrent_mark()->pre_concurrent_start(_gc_cause);
phase_times()->record_prepare_concurrent_task_time_ms((Ticks::now() - start).seconds() * 1000.0);
@ -1037,7 +1037,7 @@ void G1YoungCollector::post_evacuate_cleanup_2(G1ParScanThreadStateSet* per_thre
}
void G1YoungCollector::enqueue_candidates_as_root_regions() {
assert(collector_state()->in_concurrent_start_gc(), "must be");
assert(collector_state()->is_in_concurrent_start_gc(), "must be");
G1CollectionSetCandidates* candidates = collection_set()->candidates();
candidates->iterate_regions([&] (G1HeapRegion* r) {
@ -1077,7 +1077,7 @@ void G1YoungCollector::post_evacuate_collection_set(G1EvacInfo* evacuation_info,
// Regions in the collection set candidates are roots for the marking (they are
// not marked through, considering they are very likely to be reclaimed soon).
// They need to be enqueued explicitly compared to survivor regions.
if (collector_state()->in_concurrent_start_gc()) {
if (collector_state()->is_in_concurrent_start_gc()) {
enqueue_candidates_as_root_regions();
}
@ -1180,7 +1180,7 @@ void G1YoungCollector::collect() {
// Need to report the collection pause now since record_collection_pause_end()
// modifies it to the next state.
jtm.report_pause_type(collector_state()->young_gc_pause_type(_concurrent_operation_is_full_mark));
jtm.report_pause_type(collector_state()->gc_pause_type(_concurrent_operation_is_full_mark));
policy()->record_young_collection_end(_concurrent_operation_is_full_mark, evacuation_alloc_failed(), _allocation_word_size);
}

View File

@ -89,8 +89,8 @@ void G1YoungGCAllocationFailureInjector::arm_if_needed() {
// Now check if evacuation failure injection should be enabled for the current GC.
G1CollectorState* collector_state = g1h->collector_state();
const bool in_young_only_phase = collector_state->in_young_only_phase();
const bool in_concurrent_start_gc = collector_state->in_concurrent_start_gc();
const bool in_young_only_phase = collector_state->is_in_young_only_phase();
const bool in_concurrent_start_gc = collector_state->is_in_concurrent_start_gc();
const bool in_concurrent_cycle = collector_state->is_in_concurrent_cycle();
_inject_allocation_failure_for_current_gc &=

View File

@ -501,7 +501,7 @@ class G1PostEvacuateCollectionSetCleanupTask2::ProcessEvacuationFailedRegionsTas
// Concurrent mark does not mark through regions that we retain (they are root
// regions wrt marking), so we must clear their mark data (tams, bitmap, ...)
// set eagerly or during evacuation failure.
bool clear_mark_data = !g1h->collector_state()->in_concurrent_start_gc() ||
bool clear_mark_data = !g1h->collector_state()->is_in_concurrent_start_gc() ||
g1h->policy()->should_retain_evac_failed_region(r);
if (clear_mark_data) {